repo stringclasses 1k values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 6 values | commit_sha stringclasses 1k values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/CustomS3Async2StorageHandler.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/CustomS3Async2StorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3;
import java.net.URISyntaxException;
import java.net.URL;
import java.time.Duration;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import javax.naming.ConfigurationException;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import org.apache.commons.codec.digest.DigestUtils;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscription;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;
import com.google.gson.JsonObject;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.StorageHandlerName;
import com.pinterest.memq.commons.storage.WriteFailedException;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import com.pinterest.memq.core.utils.MemqUtils;
import com.pinterest.memq.core.utils.MiscUtils;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelOption;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.util.ReferenceCounted;
import reactor.core.publisher.Mono;
import reactor.netty.http.client.HttpClient;
import reactor.netty.http.client.HttpClientResponse;
import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectRequest.Builder;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest;
import software.amazon.awssdk.services.s3.presigner.model.PutObjectPresignRequest;
@StorageHandlerName(name = "s3v2", previousName = "customs3aync2")
public class CustomS3Async2StorageHandler extends AbstractS3StorageHandler {
private static final int HIGH_LATENCY_THRESHOLD = 5;
private static final int NANOSECONDS_TO_SECONDS = 1000_000_000;
private static final int ERROR_CODE = 500;
private static final int SUCCESS_CODE = 200;
private static final String SLASH = "/";
private static final String CONTENT_LENGTH = "Content-Length";
private static final String APPLICATION_OCTET_STREAM = "application/octet-stream";
private static final String CONTENT_MD5 = "Content-MD5";
private static final String CONTENT_TYPE = "Content-Type";
private static final String E_TAG = "ETag";
private static final String SEPARATOR = "_";
private static final int LAST_ATTEMPT_TIMEOUT = 60_000;
static {
java.security.Security.setProperty("networkaddress.cache.ttl", "1");
}
private static final String HOSTNAME = MiscUtils.getHostname();
private Logger logger = Logger.getLogger(CustomS3Async2StorageHandler.class.getName());
private String path;
private String bucket;
private KafkaNotificationSink notificationSink;
private String topic;
@SuppressWarnings("unused")
private boolean dryrun;
private boolean disableNotifications;
private boolean enableHashing;
private boolean enableMD5;
private volatile int maxAttempts;
private volatile int retryTimeoutMillis;
private S3Presigner signer;
private HttpClient secureClient;
private MetricRegistry registry;
private ExecutorService requestExecutor;
private ScheduledExecutorService executionTimer;
private Timer s3PutLatencyTimer;
private Timer s3PutInternalLatencyTimer;
private Timer notificationPublishingTimer;
private Counter s3RetryCounters;
private Counter s3RequestCounter;
private Counter notificationFailureCounter;
private Counter timeoutExceptionCounter;
public CustomS3Async2StorageHandler() {
}
@Override
public void initWriter(Properties outputHandlerConfig,
String topic,
MetricRegistry registry) throws Exception {
this.logger = Logger.getLogger(CustomS3Async2StorageHandler.class.getName() + "-" + topic);
this.topic = topic;
this.registry = registry;
this.dryrun = Boolean.parseBoolean(outputHandlerConfig.getProperty("dryrun", "false"));
this.disableNotifications = Boolean
.parseBoolean(outputHandlerConfig.getProperty("disableNotifications", "false"));
if (!disableNotifications) {
this.notificationSink = new KafkaNotificationSink();
this.notificationSink.init(outputHandlerConfig);
}
this.s3RequestCounter = registry.counter("output.s3.requests");
this.timeoutExceptionCounter = registry.counter("output.timeout.exceptions");
this.notificationFailureCounter = registry.counter("output.notification.fail");
this.notificationPublishingTimer = MiscUtils.oneMinuteWindowTimer(registry,
"output.notification.publish.latency");
this.s3PutLatencyTimer = MiscUtils.oneMinuteWindowTimer(registry, "output.s3.putobjectlatency");
this.s3PutInternalLatencyTimer = MiscUtils.oneMinuteWindowTimer(registry,
"output.s3.internalPutobjectlatency");
this.bucket = outputHandlerConfig.getProperty("bucket");
if (bucket == null) {
throw new ConfigurationException("Missing S3 bucket name");
}
this.enableMD5 = Boolean.parseBoolean(outputHandlerConfig.getProperty("enableMD5", "true"));
if (!enableMD5) {
logger.warning("MD5 hashes for uploads have been disabled");
}
this.enableHashing = Boolean
.parseBoolean(outputHandlerConfig.getProperty("enableHashing", "true"));
if (!enableHashing) {
logger.warning("Hashing has been disabled for object uploads");
}
this.path = outputHandlerConfig.getProperty("path", topic);
this.requestExecutor = Executors.newCachedThreadPool(new DaemonThreadFactory());
this.executionTimer = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory());
this.s3RetryCounters = registry.counter("output.s3.retries");
this.retryTimeoutMillis = Integer
.parseInt(outputHandlerConfig.getProperty("retryTimeoutMillis", "5000"));
this.maxAttempts = Integer.parseInt(outputHandlerConfig.getProperty("retryCount", "2")) + 1;
this.secureClient = HttpClient.create().option(ChannelOption.SO_SNDBUF, 4 * 1024 * 1024)
.option(ChannelOption.SO_LINGER, 0).secure();
signer = S3Presigner.builder()
.credentialsProvider(InstanceProfileCredentialsProvider.builder()
.asyncCredentialUpdateEnabled(true).asyncThreadName("IamCredentialUpdater").build())
.build();
}
@Override
public boolean reconfigure(Properties outputHandlerConfig) {
int newRetryTimeoutMillis = Integer
.parseInt(outputHandlerConfig.getProperty("retryTimeoutMillis", "5000"));
if (newRetryTimeoutMillis != retryTimeoutMillis) {
retryTimeoutMillis = newRetryTimeoutMillis;
}
int newMaxAttempts = Integer.parseInt(outputHandlerConfig.getProperty("retryCount", "2")) + 1;
if (newMaxAttempts != maxAttempts) {
maxAttempts = newMaxAttempts;
}
return true;
}
@Override
public void writeOutput(int objectSize,
int checksum,
final List<Message> messages) throws WriteFailedException {
Context timer = s3PutLatencyTimer.time();
ByteBuf batchHeader = StorageHandler.getBatchHeadersAsByteArray(messages);
final List<ByteBuf> messageBuffers = messageToBufferList(messages);
try {
final int currentMaxAttempts = maxAttempts;
final int currentRetryTimeoutMs = retryTimeoutMillis;
int contentLength = batchHeader.writerIndex() + objectSize;
String contentMD5 = null;
UploadResult result = null;
boolean hasSucceeded = false;
int attempt = 0;
Message firstMessage = messages.get(0);
// map used for cancellation
Map<String, Future<UploadResult>> futureMap = new HashMap<>();
Map<String, CompletableFuture<UploadResult>> taskMap = new HashMap<>();
final Publisher<ByteBuf> bodyPublisher = getBodyPublisher(messageBuffers, batchHeader);
while (attempt < currentMaxAttempts) {
final int timeout = attempt == currentMaxAttempts - 1 ? LAST_ATTEMPT_TIMEOUT
: currentRetryTimeoutMs;
final int k = attempt;
final String key = createKey(firstMessage.getClientRequestId(),
firstMessage.getServerRequestId(), k).toString();
CompletableFuture<UploadResult> task = new CompletableFuture<>();
Callable<UploadResult> uploadAttempt = () -> {
try {
UploadResult ur = attemptUpload(bodyPublisher, objectSize, checksum, contentLength,
contentMD5, key, k, 0);
task.complete(ur);
return ur;
} catch (Exception e) {
task.completeExceptionally(e);
throw e;
}
};
Future<UploadResult> future = requestExecutor.submit(uploadAttempt);
futureMap.put(key, future);
taskMap.put(key, task);
CompletableFuture<UploadResult> resultFuture = anyUploadResultOrTimeout(taskMap.values(),
Duration.ofMillis(timeout));
try {
result = resultFuture.get();
// start tracking response codes from s3
registry.counter("output.s3.responseCode." + result.getResponseCode()).inc();
if (result.getResponseCode() == SUCCESS_CODE) {
hasSucceeded = true;
break;
} else {
// remove the task so that it doesn't short circuit the next iteration
taskMap.remove(result.getKey());
logger.severe("Request failed reason:" + result + " attempt:" + result.getAttempt());
if (result.getResponseCode() >= 500 && result.getResponseCode() < 600) {
// retry 500s without increasing attempts
s3RetryCounters.inc();
// TODO: add circuit breaker for too many S3 failures
continue;
}
}
} catch (ExecutionException ee) {
if (ee.getCause() instanceof TimeoutException) {
timeoutExceptionCounter.inc();
} else {
logger.log(Level.SEVERE, "Request failed", ee);
}
} catch (Exception e) {
logger.log(Level.SEVERE, "Request failed", e);
}
attempt++;
s3RetryCounters.inc();
}
// best effort cancel all outstanding uploads, no matter what the result is
for (Map.Entry<String, Future<UploadResult>> entry : futureMap.entrySet()) {
if (result != null && entry.getKey().equals(result.getKey())) {
continue;
}
entry.getValue().cancel(true);
}
if (result == null) {
throw new WriteFailedException("All upload attempts failed");
} else if (!hasSucceeded) {
throw new WriteFailedException(
"Upload failed due to error out: s3://" + bucket + "/" + result.getKey());
}
if (!disableNotifications) {
Context publishTime = notificationPublishingTimer.time();
JsonObject payload = buildPayload(topic, bucket, objectSize, messages.size(),
batchHeader.capacity(), result.getKey(), result.getAttempt());
if (contentMD5 != null) {
payload.addProperty(CONTENT_MD5, contentMD5);
}
try {
notificationSink.notify(payload, 0);
} catch (Exception e) {
notificationFailureCounter.inc();
throw e;
} finally {
publishTime.stop();
}
}
long latency = timer.stop() / NANOSECONDS_TO_SECONDS;
if (latency > HIGH_LATENCY_THRESHOLD) {
final String s3path = "s3://" + bucket + SLASH + result.getKey();
logger.info("Uploaded " + s3path + " latency(" + latency + ")s, successful on attempt "
+ result.getAttempt() + ", total tasks: " + futureMap.size());
}
} catch (Exception e) {
timer.stop();
throw new WriteFailedException(e);
} finally {
messageBuffers.forEach(ReferenceCounted::release);
batchHeader.release();
}
}
private UploadResult attemptUpload(final Publisher<ByteBuf> bodyPublisher,
int sizeInBytes,
int checksum,
int contentLength,
String contentMD5,
final String key,
final int count,
int timeout) throws URISyntaxException {
Context internalLatency = s3PutInternalLatencyTimer.time();
try {
Builder putRequestBuilder = PutObjectRequest.builder().bucket(bucket).key(key);
if (contentMD5 != null) {
putRequestBuilder.contentMD5(contentMD5);
}
putRequestBuilder.contentLength((long) contentLength);
PresignedPutObjectRequest presignPutObject = signer.presignPutObject(
PutObjectPresignRequest.builder().putObjectRequest(putRequestBuilder.build())
.signatureDuration(Duration.ofSeconds(2000)).build());
URL url = presignPutObject.url();
s3RequestCounter.inc();
Mono<HttpClientResponse> responseFuture = secureClient.headers(headers -> {
headers.set(CONTENT_TYPE, APPLICATION_OCTET_STREAM);
if (contentMD5 != null) {
headers.set(CONTENT_MD5, contentMD5);
}
headers.set(CONTENT_LENGTH, String.valueOf(contentLength));
}).put().uri(url.toURI()).send(bodyPublisher).response();
HttpClientResponse response = responseFuture.block();
HttpResponseStatus status = response.status();
int responseCode = status.code();
HttpHeaders responseHeaders = response.responseHeaders();
if (responseCode != SUCCESS_CODE) {
logger.severe(responseCode + " reason:" + status.reasonPhrase() + "\t" + responseHeaders
+ " index:" + count + " url:" + url);
}
if (contentMD5 != null && responseCode == SUCCESS_CODE) {
try {
String eTagHex = responseHeaders.get(E_TAG);
String etagToBase64 = MemqUtils.etagToBase64(eTagHex.replace("\"", ""));
if (!contentMD5.equals(etagToBase64)) {
logger.severe("Request failed due to etag mismatch url:" + url);
responseCode = ERROR_CODE;
}
} catch (Exception e) {
logger.log(Level.SEVERE, "Unable to parse the returnedetag", e);
}
}
return new UploadResult(key, responseCode, responseHeaders, internalLatency.stop(), count);
} finally {
internalLatency.stop();
}
}
public static class UploadResult {
private final String key;
private final int responseCode;
private final HttpHeaders httpResponseHeaders;
private final long time;
private final int attempt;
public UploadResult(String key,
int responseCode,
HttpHeaders responseHeaders,
long time,
int attempt) {
this.key = key;
this.responseCode = responseCode;
this.httpResponseHeaders = responseHeaders;
this.time = time;
this.attempt = attempt;
}
public int getResponseCode() {
return responseCode;
}
public HttpHeaders getHttpResponseHeaders() {
return httpResponseHeaders;
}
public String getKey() {
return key;
}
public long getTime() {
return time;
}
public int getAttempt() {
return attempt;
}
@Override
public String toString() {
return "UploadResult [key=" + key + ", responseCode=" + responseCode
+ ", httpResponseHeaders=" + httpResponseHeaders + "]";
}
}
private StringBuilder createKey(long firstMessageClientRequestId,
long firstMessageServerRequestId,
int attempt) {
StringBuilder keyBuilder = new StringBuilder();
if (enableHashing) {
String hash = DigestUtils.md2Hex(String.valueOf(firstMessageClientRequestId));
keyBuilder.append(hash, 0, 2);
keyBuilder.append(SLASH);
}
keyBuilder.append(path);
keyBuilder.append(SLASH);
keyBuilder.append(firstMessageClientRequestId);
keyBuilder.append(SEPARATOR);
keyBuilder.append(firstMessageServerRequestId);
keyBuilder.append(SEPARATOR);
keyBuilder.append(System.currentTimeMillis());
keyBuilder.append(SEPARATOR);
keyBuilder.append(attempt);
keyBuilder.append(SEPARATOR);
keyBuilder.append(HOSTNAME);
return keyBuilder;
}
public static List<ByteBuf> messageToBufferList(List<Message> messages) {
return messages.stream().map(m -> m.getBuf().retainedDuplicate()).collect(Collectors.toList());
}
public static CompositeByteBuf messageAndHeaderToCompositeBuffer(final List<ByteBuf> messageByteBufs,
ByteBuf batchHeaders) {
CompositeByteBuf byteBuf = PooledByteBufAllocator.DEFAULT.compositeBuffer();
byteBuf.addComponent(true, batchHeaders.retainedDuplicate());
byteBuf.addComponents(true,
messageByteBufs.stream().map(ByteBuf::retainedDuplicate).collect(Collectors.toList()));
return byteBuf;
}
public static Publisher<ByteBuf> getBodyPublisher(final List<ByteBuf> messageByteBufs,
ByteBuf batchHeaders) {
return s -> s.onSubscribe(new Subscription() {
@Override
public void request(long n) {
CompositeByteBuf byteBuf = messageAndHeaderToCompositeBuffer(messageByteBufs, batchHeaders);
s.onNext(byteBuf);
s.onComplete();
}
@Override
public void cancel() {
}
});
}
public CompletableFuture<UploadResult> anyUploadResultOrTimeout(Collection<CompletableFuture<UploadResult>> tasks,
Duration duration) {
final CompletableFuture<UploadResult> promise = new CompletableFuture<>();
executionTimer.schedule(() -> {
final TimeoutException ex = new TimeoutException(
"Timeout after " + duration.toMillis() + " milliseconds");
return promise.completeExceptionally(ex);
}, duration.toMillis(), TimeUnit.MILLISECONDS);
CompletableFuture<UploadResult> anyUploadResultFuture = CompletableFuture
.anyOf(tasks.toArray(new CompletableFuture[0])).thenApply(o -> (UploadResult) o);
return anyUploadResultFuture.applyToEither(promise, Function.identity());
}
public void closeWriter() {
notificationSink.close();
}
protected KafkaNotificationSink getNotificationSink() {
return notificationSink;
}
@Override
public String getReadUrl() {
return notificationSink.getReadUrl();
}
@Override
public Logger getLogger() {
return logger;
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/reader/client/ReactorNettyRequestClient.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/reader/client/ReactorNettyRequestClient.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3.reader.client;
import static com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler.FORBIDDEN_EXCEPTION;
import static com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler.ISE_EXCEPTION;
import static com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler.NOT_FOUND_EXCEPTION;
import static com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler.OBJECT_FETCH_ERROR_KEY;
import static com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler.UNAVAILABLE_EXCEPTION;
import com.pinterest.memq.commons.storage.s3.S3Exception;
import com.codahale.metrics.MetricRegistry;
import io.netty.handler.ssl.SslClosedEngineException;
import io.netty.handler.timeout.ReadTimeoutException;
import reactor.netty.http.client.HttpClient;
import reactor.util.retry.RetrySpec;
import software.amazon.awssdk.http.SdkHttpFullRequest;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.channel.ChannelOption;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import reactor.core.publisher.Mono;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest;
import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeoutException;
public class ReactorNettyRequestClient implements RequestClient {
public static final String READ_TIMEOUT_MS = "readTimeoutMs";
public static final String RESPONSE_TIMEOUT_MS = "responseTimeoutMs";
public static final String MAX_RETRIES = "maxRetries";
private static final Logger logger = LoggerFactory.getLogger(ReactorNettyRequestClient.class);
private static final long DEFAULT_READ_TIMEOUT_MS = 10000;
private final S3Presigner presigner;
private final MetricRegistry metricRegistry;
private HttpClient client;
private int maxRetries = 4;
private Duration readTimeoutDuration;
private Duration responseTimeoutDuration;
public ReactorNettyRequestClient(MetricRegistry metricRegistry) {
this.metricRegistry = metricRegistry;
this.presigner = S3Presigner.builder().build();
}
@Override
public void initialize(Properties properties) {
if (properties.containsKey(MAX_RETRIES)) {
maxRetries = Integer.parseInt(properties.getProperty(MAX_RETRIES));
}
if (properties.containsKey(READ_TIMEOUT_MS)) {
readTimeoutDuration = Duration.ofMillis(Long.parseLong(properties.getProperty(READ_TIMEOUT_MS)));
} else {
readTimeoutDuration = Duration.ofMillis(DEFAULT_READ_TIMEOUT_MS);
}
if (properties.containsKey(RESPONSE_TIMEOUT_MS)) {
responseTimeoutDuration = Duration.ofMillis(Long.parseLong(properties.getProperty(RESPONSE_TIMEOUT_MS)));
} else {
// default to max number of attempts times read timeout plus a small buffer to avoid competing with read timeouts
responseTimeoutDuration = readTimeoutDuration.multipliedBy(maxRetries + 1).plusMillis(100);
}
this.client = createHttpClient();
}
private HttpClient createHttpClient() {
return HttpClient.create()
.option(ChannelOption.SO_SNDBUF, 4 * 1024 * 1024)
.option(ChannelOption.SO_LINGER, 0)
.responseTimeout(readTimeoutDuration)
.secure();
}
@Override
public InputStream tryObjectGet(GetObjectRequest request) throws IOException {
PresignedGetObjectRequest presignGetObject = presigner.presignGetObject(GetObjectPresignRequest
.builder()
.getObjectRequest(request)
.signatureDuration(Duration.ofMinutes(60)).build());
URL url = presignGetObject.url();
logger.debug("Fetching URL {}", url.toString());
try {
return tryObjectGetInterAsStream(url.toURI(), presignGetObject.signedHeaders());
} catch (URISyntaxException use) {
throw new IOException(use);
}
}
@Override
public ByteBuf tryObjectGetAsBuffer(GetObjectRequest request) throws IOException {
PresignedGetObjectRequest presignGetObject = presigner.presignGetObject(GetObjectPresignRequest
.builder()
.getObjectRequest(request)
.signatureDuration(Duration.ofMinutes(60)).build());
URL url = presignGetObject.url();
logger.debug("Fetching URL{}", url.toString());
try {
return tryObjectGetInternal(url.toURI(), presignGetObject.signedHeaders());
} catch (URISyntaxException use) {
throw new IOException(use);
}
}
protected InputStream tryObjectGetInterAsStream(URI uri,
Map<String, List<String>> headers) throws IOException {
return new ByteBufInputStream(tryObjectGetInternal(uri, headers), true);
}
protected ByteBuf tryObjectGetInternal(URI uri, Map<String, List<String>> headers) throws IOException {
return tryObjectGetInternal(uri, headers, true);
}
protected ByteBuf tryObjectGetInternal(URI uri, Map<String, List<String>> headers, boolean reinitializeClientOnSslClosedEngineException)
throws IOException {
try {
return client
.headers(
h -> headers.forEach(h::add)
)
.get()
.uri(uri)
.responseSingle((t, u) -> {
int code = t.status().code();
switch (code) {
case 200:
case 206:
return u.retain();
case 404:
return Mono.error(NOT_FOUND_EXCEPTION);
case 403:
return Mono.error(FORBIDDEN_EXCEPTION);
case 500:
return Mono.error(ISE_EXCEPTION);
case 503:
return Mono.error(UNAVAILABLE_EXCEPTION);
default:
return Mono.error(new S3Exception(code) {
private static final long serialVersionUID = 1L;
});
}
})
.doOnError((t) -> {
if (t instanceof S3Exception) {
metricRegistry.counter(OBJECT_FETCH_ERROR_KEY + ".s3." + ((S3Exception) t).getErrorCode()).inc();
} else if (t instanceof ReadTimeoutException) {
metricRegistry.counter(OBJECT_FETCH_ERROR_KEY + ".timeout.read").inc();
} else {
metricRegistry.counter(OBJECT_FETCH_ERROR_KEY + ".other").inc();
}
})
.retryWhen(RetrySpec
.max(maxRetries)
.filter((t) -> t instanceof ReadTimeoutException || t instanceof S3Exception.RetriableException)
.doBeforeRetry((rs) -> logger.warn("Retrying (retry: " + (rs.totalRetries() + 1) + "/" + (maxRetries) +") , exception " + rs.failure() + " when fetching from " + uri))
)
.timeout(responseTimeoutDuration)
.doOnError((t) -> {
if (t instanceof TimeoutException) {
metricRegistry.counter(OBJECT_FETCH_ERROR_KEY + ".timeout.response").inc();
}
})
.block();
} catch (RuntimeException e) {
if (e.getCause() instanceof SslClosedEngineException && reinitializeClientOnSslClosedEngineException) {
metricRegistry.counter(OBJECT_FETCH_ERROR_KEY + ".ssl.closed.engine").inc();
this.client = createHttpClient();
return tryObjectGetInternal(uri, headers, false);
}
else if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
} else if (e.getCause() instanceof ReadTimeoutException || e.getCause() instanceof TimeoutException) {
throw new IOException(e.getCause());
} else {
throw new IOException(e);
}
}
}
public InputStream tryObjectGet(SdkHttpFullRequest request) throws IOException {
try {
return tryObjectGetInterAsStream(request.getUri(), request.headers());
} catch (Exception exception) {
logger.error("Error fetching object via get call: " + request.getUri());
throw new IOException(exception);
}
}
public ByteBuf tryObjectGetAsBuffer(SdkHttpFullRequest request) throws IOException {
try {
return tryObjectGetInternal(request.getUri(), request.headers());
} catch (Exception exception) {
logger.error("Error fetching object from buffer: " + request.getUri());
throw new IOException(exception);
}
}
@Override
public void close() throws IOException {
presigner.close();
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/reader/client/ApacheRequestClient.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/reader/client/ApacheRequestClient.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3.reader.client;
import static com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler.FORBIDDEN_EXCEPTION;
import static com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler.ISE_EXCEPTION;
import static com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler.NOT_FOUND_EXCEPTION;
import static com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler.OBJECT_FETCH_ERROR_KEY;
import static com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler.UNAVAILABLE_EXCEPTION;
import com.pinterest.memq.commons.storage.s3.S3Exception;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufOutputStream;
import io.netty.buffer.PooledByteBufAllocator;
import com.codahale.metrics.MetricRegistry;
import org.apache.commons.compress.utils.IOUtils;
import org.apache.commons.lang3.NotImplementedException;
import org.apache.http.HttpResponse;
import org.apache.http.client.ServiceUnavailableRetryStrategy;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.protocol.HttpContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.http.SdkHttpFullRequest;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest;
import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.time.Duration;
import java.util.Properties;
public class ApacheRequestClient implements RequestClient {
private static final int MAX_ATTEMPTS = 5;
private static final Logger logger = LoggerFactory.getLogger(ApacheRequestClient.class);
public static final String DEFAULT_MAX_CONNECTIONS_PER_ROUTE = "defaultMaxConnectionsPerRoute";
public static final String MAX_CONNECTIONS = "maxConnections";
private final S3Presigner presigner;
private final MetricRegistry metricRegistry;
private CloseableHttpClient client;
public ApacheRequestClient(MetricRegistry metricRegistry) {
this.metricRegistry = metricRegistry;
this.presigner = S3Presigner.builder().build();
}
@Override
public void initialize(Properties properties) {
PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
cm.setDefaultMaxPerRoute(Integer.parseInt(properties.getProperty(DEFAULT_MAX_CONNECTIONS_PER_ROUTE, "100")));
cm.setMaxTotal(Integer.parseInt(properties.getProperty(MAX_CONNECTIONS, "500")));
client = HttpClients.custom()
.setServiceUnavailableRetryStrategy(new ObjectGetRetryStrategy())
.setConnectionManager(cm)
.build();
}
@Override
public ByteBuf tryObjectGetAsBuffer(GetObjectRequest request) throws IOException {
ByteBufOutputStream bos = new ByteBufOutputStream(PooledByteBufAllocator.DEFAULT.buffer());
IOUtils.copy(tryObjectGet(request), bos);
return bos.buffer();
}
@Override
public InputStream tryObjectGet(GetObjectRequest request) throws IOException {
PresignedGetObjectRequest presignGetObject = presigner.presignGetObject(GetObjectPresignRequest
.builder()
.getObjectRequest(request)
.signatureDuration(Duration.ofMinutes(60)).build());
URL url = presignGetObject.url();
HttpGet actualRequest = new HttpGet(url.toString());
presignGetObject.signedHeaders().forEach((n, v) -> v.forEach(vv -> actualRequest.addHeader(n, vv)));
CloseableHttpResponse resp = client.execute(actualRequest);
try {
int code = resp.getStatusLine().getStatusCode();
switch (code) {
case 200:
case 206:
return resp.getEntity().getContent();
case 404:
throw NOT_FOUND_EXCEPTION;
case 403:
throw FORBIDDEN_EXCEPTION;
case 500:
throw ISE_EXCEPTION;
case 503:
throw UNAVAILABLE_EXCEPTION;
default:
throw new S3Exception(code) {
private static final long serialVersionUID = 1L;
};
}
} catch (IOException e) {
if (e instanceof S3Exception) {
int code = ((S3Exception) e).getErrorCode();
metricRegistry.counter(OBJECT_FETCH_ERROR_KEY + "." + code).inc();
}
logger.error("Failed to get object " + request.bucket() + "/" + request.key(), e);
resp.close();
throw e;
}
}
@Override
public void close() throws IOException {
try {
presigner.close();
} finally {
client.close();
}
}
/**
 * Retry policy for the pre-signed GET path: retries only transient
 * server-side failures (500/503) up to MAX_ATTEMPTS with a fixed backoff.
 */
private static class ObjectGetRetryStrategy implements ServiceUnavailableRetryStrategy {

  @Override
  public boolean retryRequest(HttpResponse httpResponse, int i, HttpContext httpContext) {
    final int status = httpResponse.getStatusLine().getStatusCode();
    if (i >= MAX_ATTEMPTS) {
      logger.warn("Failed to get response from s3 after " + i + " attempts: [" + status + "] " + httpResponse.getStatusLine().getReasonPhrase());
      return false;
    }
    // Only transient server-side errors are worth retrying.
    return status == 500 || status == 503;
  }

  @Override
  public long getRetryInterval() {
    // Fixed 100ms pause between attempts.
    return 100;
  }
}
// Raw SDK-request reads are not supported by this handler; use the
// GetObjectRequest overload instead.
@Override
public InputStream tryObjectGet(SdkHttpFullRequest request) throws IOException {
  throw new NotImplementedException();
}
// Raw SDK-request reads are not supported by this handler; use the
// GetObjectRequest overload instead.
@Override
public ByteBuf tryObjectGetAsBuffer(SdkHttpFullRequest request) throws IOException {
  throw new NotImplementedException();
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/reader/client/RequestClient.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/reader/client/RequestClient.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3.reader.client;
import software.amazon.awssdk.http.SdkHttpFullRequest;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import io.netty.buffer.ByteBuf;
/**
 * Abstraction over the HTTP client used to fetch batch objects from S3.
 * Implementations are initialized once with handler properties and closed
 * when the handler shuts down.
 */
public interface RequestClient extends Closeable {
  /** Configures the client; called once before any fetch. */
  void initialize(Properties properties);
  /** Fetches the object addressed by the signed SDK request. Caller closes the stream. */
  InputStream tryObjectGet(SdkHttpFullRequest request) throws IOException;
  /** Fetches the object addressed by the signed SDK request into a buffer. Caller releases it. */
  ByteBuf tryObjectGetAsBuffer(SdkHttpFullRequest request) throws IOException;
  /** Fetches the object described by the GetObjectRequest. Caller closes the stream. */
  InputStream tryObjectGet(GetObjectRequest request) throws IOException;
  /** Fetches the object described by the GetObjectRequest into a buffer. Caller releases it. */
  ByteBuf tryObjectGetAsBuffer(GetObjectRequest request) throws IOException;
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/fs/FileSystemStorageHandler.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/fs/FileSystemStorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.fs;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.commons.io.input.BoundedInputStream;
import org.apache.commons.io.input.RandomAccessFileInputStream;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons2.DataNotFoundException;
import com.pinterest.memq.client.commons2.TopicNotFoundException;
import com.pinterest.memq.commons.BatchHeader;
import com.pinterest.memq.commons.protocol.BatchData;
import com.pinterest.memq.commons.storage.ReadBrokerStorageHandler;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.StorageHandlerName;
import com.pinterest.memq.commons.storage.WriteFailedException;
import com.pinterest.memq.commons.storage.s3.CustomS3Async2StorageHandler;
import com.pinterest.memq.commons.storage.s3.KafkaNotificationSink;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import com.pinterest.memq.core.utils.MiscUtils;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.util.ReferenceCounted;
/**
* This StorageHandler is used for local filesystem based PubSub.
*/
@StorageHandlerName(name = "filesystem")
@SuppressWarnings("unused")
public class FileSystemStorageHandler extends ReadBrokerStorageHandler {
public static final String OPTIMIZATION_SENDFILE = "optimization.sendfile";
public static final String NOTIFICATIONS_DISABLE = "disableNotifications";
private static final String ROOT_DIRS = "rootDir";
public static final String STORAGE_DIRS = "storageDirs";
private static final String HOSTNAME = MiscUtils.getHostname();
public static final String PATH = "path";
public static final String TOPIC = "topic";
public static final String HEADER_SIZE = "headerSize";
public static final String NUMBER_OF_MESSAGES_IN_BATCH = "numBatchMessages";
private static final int LAST_ATTEMPT_TIMEOUT = 60_000;
private Logger logger = Logger.getLogger(FileSystemStorageHandler.class.getName());
private String[] storageDirs;
private KafkaNotificationSink notificationSink;
private String topic;
private MetricRegistry registry;
private boolean dryrun;
private boolean disableNotifications;
private Timer notificationPublishingTimer;
private ExecutorService requestExecutor;
private ScheduledExecutorService executionTimer;
private int retryTimeoutMillis;
private int maxAttempts;
private Counter timeoutExceptionCounter;
private List<String> storageDirList;
private boolean useSendFileOptimization;
private Timer persistTimer;
/**
 * Initializes the write path for one topic: per-topic logger, metrics,
 * optional Kafka notification sink, executors used for speculative writes,
 * and the topic directory under every configured storage dir.
 *
 * @param properties handler configuration (dryrun, notifications, dirs, retry)
 * @param topic      topic this writer instance serves
 * @param registry   metrics registry for timers/counters
 */
@Override
public void initWriter(Properties properties,
                       String topic,
                       MetricRegistry registry) throws Exception {
  this.logger = Logger.getLogger(FileSystemStorageHandler.class.getName() + "-" + topic);
  this.topic = topic;
  this.registry = registry;
  this.dryrun = Boolean.parseBoolean(properties.getProperty("dryrun", "false"));
  // Notifications default to disabled for pure-filesystem setups.
  this.disableNotifications = Boolean
      .parseBoolean(properties.getProperty(NOTIFICATIONS_DISABLE, "true"));
  if (!disableNotifications) {
    this.notificationSink = new KafkaNotificationSink();
    this.notificationSink.init(properties);
    this.notificationPublishingTimer = MiscUtils.oneMinuteWindowTimer(registry,
        "output.notification.publish.latency");
  }
  this.persistTimer = MiscUtils.oneMinuteWindowTimer(registry, "storage.persist.latency");
  this.timeoutExceptionCounter = registry.counter("output.timeout.exceptions");
  this.requestExecutor = Executors.newCachedThreadPool(new DaemonThreadFactory());
  this.executionTimer = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory());
  this.useSendFileOptimization = Boolean
      .parseBoolean(properties.getProperty(OPTIMIZATION_SENDFILE, "false"));
  initDataDirs(properties);
  this.retryTimeoutMillis = Integer.parseInt(properties.getProperty("retryTimeoutMillis", "500"));
  // One speculative write attempt per configured storage dir.
  this.maxAttempts = storageDirs.length;
  for (String dir : storageDirs) {
    Files.createDirectories(Paths.get(dir, topic));
  }
  setLocalRead(true);
}
/**
 * Resolves the storage directories from either {@code rootDir} or the legacy
 * {@code storageDirs} property (comma separated). {@code storageDirs} wins
 * when both are present, preserving historical behavior.
 *
 * @throws IllegalArgumentException if neither property is configured
 *         (previously this fell through to an opaque NPE in Arrays.asList)
 */
private void initDataDirs(Properties props) {
  if (props.containsKey(ROOT_DIRS)) {
    this.storageDirs = props.getProperty(ROOT_DIRS).split(",");
  }
  // added for backwards compatibility reasons
  if (props.containsKey(STORAGE_DIRS)) {
    this.storageDirs = props.getProperty(STORAGE_DIRS).split(",");
  }
  if (this.storageDirs == null) {
    throw new IllegalArgumentException("Missing storage directory configuration: set '"
        + ROOT_DIRS + "' or '" + STORAGE_DIRS + "'");
  }
  storageDirList = Arrays.asList(this.storageDirs);
}
// Delegates to the base implementation; no writer-local settings are
// hot-reconfigurable in this handler yet.
@Override
public boolean reconfigure(Properties outputHandlerConfig) {
  return super.reconfigure(outputHandlerConfig);
}
/**
 * Writes one batch to local disk (speculatively across the configured storage
 * dirs) and, unless disabled, publishes a notification describing where the
 * batch landed so consumers can find it.
 *
 * @throws WriteFailedException if every write attempt fails or the
 *                              notification publish fails
 */
@Override
public void writeOutput(int sizeInBytes,
                        int checksum,
                        List<Message> messages) throws WriteFailedException {
  Message firstMessage = messages.get(0);
  String relativeFileName = createFileName(topic, firstMessage.getClientRequestId(),
      firstMessage.getServerRequestId());
  List<ByteBuf> buffers = CustomS3Async2StorageHandler.messageToBufferList(messages);
  ByteBuf batchHeader = StorageHandler.getBatchHeadersAsByteArray(messages);
  CompositeByteBuf content = CustomS3Async2StorageHandler
      .messageAndHeaderToCompositeBuffer(buffers, batchHeader);
  // Capture header sizes BEFORE releasing; the previous code called
  // readableBytes()/capacity() on batchHeader after release() — a
  // use-after-release on a pooled buffer.
  int headerReadableBytes = batchHeader.readableBytes();
  int headerCapacity = batchHeader.capacity();
  Context persistTime = persistTimer.time();
  String writePath = speculativeUpload(sizeInBytes, relativeFileName, content);
  persistTime.stop();
  buffers.forEach(ReferenceCounted::release);
  content.release();
  batchHeader.release();
  if (!disableNotifications) {
    final JsonObject payload = buildNotification(topic, writePath,
        sizeInBytes + headerReadableBytes, messages.size(), headerCapacity);
    Timer.Context publishTime = notificationPublishingTimer.time();
    try {
      notificationSink.notify(payload, 0);
      logger.fine(() -> "Notifying:" + payload);
    } catch (Exception e) {
      throw new WriteFailedException(e);
    } finally {
      publishTime.stop();
    }
  }
}
/**
 * Speculative write: one attempt per storage dir is launched in turn; if the
 * attempts in flight don't finish within the retry timeout, another dir is
 * tried while earlier attempts keep racing. The first path to complete wins
 * and the losers are cancelled.
 *
 * @return the absolute path of the winning write
 * @throws WriteFailedException if no attempt completes successfully
 */
private String speculativeUpload(int sizeInBytes,
                                 String baseFileName,
                                 CompositeByteBuf content) throws WriteFailedException {
  int attempt = 0;
  Map<String, Future<String>> futureMap = new HashMap<>();
  Map<String, CompletableFuture<String>> taskMap = new HashMap<>();
  final int currentMaxAttempts = maxAttempts;
  final int currentRetryTimeoutMs = retryTimeoutMillis;
  String result = null;
  boolean hasSucceeded = false;
  if (maxAttempts == 1) {
    // optimize for single write path and don't use executors
    final int localAttemptId = attempt;
    CompletableFuture<String> task = new CompletableFuture<>();
    String key = new StringBuilder().append(topic).append("/").append(HOSTNAME)
        .append("/").append(baseFileName).append("_").append(localAttemptId).toString();
    try {
      return writeFileTask(sizeInBytes, content, task, localAttemptId, key).call();
    } catch (Exception e) {
      throw new WriteFailedException(e);
    }
  } else {
    while (attempt < maxAttempts) {
      // The last attempt gets a generous timeout; earlier ones fail over fast.
      final int timeout = attempt == currentMaxAttempts - 1 ? LAST_ATTEMPT_TIMEOUT
          : currentRetryTimeoutMs;
      CompletableFuture<String> task = new CompletableFuture<>();
      final int localAttemptId = attempt;
      // add attempt number as a suffix
      final String key = new StringBuilder().append(topic).append("/").append(HOSTNAME)
          .append("/").append(baseFileName).append("_").append(localAttemptId).toString();
      Callable<String> uploadAttempt = writeFileTask(sizeInBytes, content, task, localAttemptId,
          key);
      Future<String> future = requestExecutor.submit(uploadAttempt);
      futureMap.put(key, future);
      taskMap.put(key, task);
      CompletableFuture<String> resultFuture = anyUploadResultOrTimeout(taskMap.values(),
          Duration.ofMillis(timeout));
      try {
        result = resultFuture.get();
        registry.counter("storage.fs.succeeded").inc();
        hasSucceeded = true;
      } catch (ExecutionException ee) {
        if (ee.getCause() instanceof TimeoutException) {
          timeoutExceptionCounter.inc();
        } else {
          logger.log(Level.SEVERE, "Request failed", ee);
        }
      } catch (Exception e) {
        logger.log(Level.SEVERE, "Request failed", e);
      }
      attempt++;
      if (hasSucceeded) {
        // Stop once a write completed; the previous loop kept launching
        // attempts against the remaining dirs even after success.
        break;
      }
    }
    for (Map.Entry<String, Future<String>> entry : futureMap.entrySet()) {
      // The winner's absolute path ends with its relative key; the previous
      // check (key.endsWith(absolutePath)) was inverted and never matched.
      if (result != null && result.endsWith(entry.getKey())) {
        continue;
      }
      entry.getValue().cancel(true);
    }
    if (result == null) {
      throw new WriteFailedException("All upload attempts failed");
    } else if (!hasSucceeded) {
      throw new WriteFailedException("Upload failed due to error out: " + result);
    } else {
      registry.counter("storage.fs.attempt." + attempt).inc();
      return result;
    }
  }
}
/**
 * Builds a callable that writes {@code content} to
 * {@code storageDirs[attemptId]/key}, completing {@code task} with the
 * absolute path on success or exceptionally on failure.
 *
 * @param sizeInBytes advisory payload size (kept for signature compatibility)
 */
public Callable<String> writeFileTask(int sizeInBytes,
                                      CompositeByteBuf content,
                                      CompletableFuture<String> task,
                                      final int attemptId,
                                      final String key) {
  return new Callable<String>() {
    @Override
    public String call() throws Exception {
      try {
        File fileToWrite = new File(storageDirs[attemptId], key);
        fileToWrite.getParentFile().mkdirs();
        writeViaChannel(content, fileToWrite);
        String ur = fileToWrite.getAbsolutePath();
        task.complete(ur);
        return ur;
      } catch (Exception e) {
        task.completeExceptionally(e);
        throw e;
      }
    }

    // Writes the buffer to disk and fsyncs before returning.
    private void writeViaChannel(CompositeByteBuf content,
                                 File fileToWrite) throws FileNotFoundException, IOException {
      // Read through a duplicate so concurrent speculative attempts don't
      // race on the shared buffer's readerIndex.
      ByteBuf slice = content.duplicate();
      // try-with-resources guarantees the file handle is released on error
      // (the previous version leaked it if a write threw).
      try (RandomAccessFile raf = new RandomAccessFile(fileToWrite, "rw");
           FileChannel channel = raf.getChannel()) {
        int readableBytes = slice.readableBytes();
        int bytesWritten = 0;
        while (bytesWritten < readableBytes) {
          // Write at the current file offset; the previous implementation
          // passed position 0 on every iteration, so a short transfer would
          // have overwritten earlier chunks instead of appending.
          bytesWritten += slice.readBytes(channel, bytesWritten, readableBytes - bytesWritten);
        }
        channel.force(true);
      }
    }
  };
}
/**
 * Builds the batch file name as {@code <clientRequestId>_<serverRequestId>}.
 * The topic parameter is unused here (kept for signature compatibility); the
 * topic is encoded in the directory path instead.
 */
public static String createFileName(String topic, long clientRequestId, long serverRequestId) {
  return clientRequestId + "_" + serverRequestId;
}
/**
 * Builds the JSON notification payload consumers use to locate and read a
 * batch: absolute path, total object size, topic, batch-header length and
 * message count.
 */
private JsonObject buildNotification(String topic,
                                     String path,
                                     int objectSize,
                                     int numberOfMessages,
                                     int batchHeaderLength) {
  JsonObject payload = new JsonObject();
  payload.addProperty(PATH, path);
  payload.addProperty(SIZE, objectSize);
  payload.addProperty(TOPIC, topic);
  payload.addProperty(HEADER_SIZE, batchHeaderLength);
  payload.addProperty(NUMBER_OF_MESSAGES_IN_BATCH, numberOfMessages);
  return payload;
}
// Local filesystem batches have no remote URL; the notification's PATH field
// is used for reads instead.
@Override
public String getReadUrl() {
  return null;
}
// Delegates to the base teardown.
// NOTE(review): requestExecutor/executionTimer are not shut down here; they
// use daemon threads, but confirm leaving them running is intentional.
@Override
public void closeWriter() {
  super.closeWriter();
}
/**
 * Initializes the read path: base reader state, the sendfile toggle, and the
 * storage-dir list used to validate incoming read paths.
 */
@Override
public void initReader(Properties properties, MetricRegistry registry) throws Exception {
  super.initReader(properties, registry);
  useSendFileOptimization = Boolean
      .parseBoolean(properties.getProperty(OPTIMIZATION_SENDFILE, "false"));
  initDataDirs(properties);
}
/**
 * Loads a whole batch for a notification. With the sendfile optimization the
 * file itself is handed to the transport; otherwise the file is read into a
 * pooled buffer whose ownership passes to the caller.
 */
@Override
public BatchData fetchBatchStreamForNotificationBuf(JsonObject nextNotificationToProcess) throws IOException,
                                                                                          DataNotFoundException {
  File file = validateAndGetReadPath(nextNotificationToProcess);
  int length = (int) file.length();
  if (useSendFileOptimization) {
    logger.fine(
        () -> "Sendfile Read:" + length + " into buffer for path:" + file.getAbsolutePath());
    return new BatchData(length, file);
  }
  ByteBuf buffer = PooledByteBufAllocator.DEFAULT.buffer(length);
  // Close the stream (previously leaked) and release the buffer on failure.
  try (FileInputStream stream = new FileInputStream(file)) {
    int written = 0;
    // writeBytes may transfer fewer bytes than requested; loop until done.
    while (written < length) {
      int n = buffer.writeBytes(stream, length - written);
      if (n < 0) {
        throw new IOException("Unexpected EOF reading " + file.getAbsolutePath());
      }
      written += n;
    }
    final int writeBytes = written;
    logger.fine(() -> "Read:" + writeBytes + " into buffer for path:" + file.getAbsolutePath());
    return new BatchData(length, buffer);
  } catch (IOException e) {
    buffer.release();
    throw e;
  }
}
/**
 * Opens a stream over the batch referenced by the notification: directly from
 * the local file when local reads are enabled, otherwise via a remote batch
 * read from the broker.
 */
@Override
public InputStream fetchBatchStreamForNotification(JsonObject objectNotification) throws IOException,
                                                                                  DataNotFoundException {
  if (isLocalRead()) {
    File filePath = validateAndGetReadPath(objectNotification);
    return new FileInputStream(filePath);
  } else {
    try {
      long ts = System.currentTimeMillis();
      BatchData batch = readBatch(objectNotification.get(TOPIC).getAsString(),
          objectNotification);
      // release buffer on close
      logger.fine(
          () -> "Received:" + batch.getLength() + " Time:" + (System.currentTimeMillis() - ts));
      return new ByteBufInputStream(batch.getDataAsBuf(), true);
    } catch (TopicNotFoundException e) {
      throw new IOException(e);
    } catch (DataNotFoundException e) {
      // Propagate as-is so callers can distinguish missing data from I/O errors.
      throw e;
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
}
/**
 * Validates the notification's path (to prevent arbitrary file reads from
 * disk) and resolves it to an existing file.
 *
 * @param objectNotification notification containing PATH and TOPIC fields
 * @return the validated, existing file
 * @throws IOException if the path fails validation against the storage dirs
 * @throws DataNotFoundException if the file no longer exists on disk
 */
protected File validateAndGetReadPath(JsonObject objectNotification) throws IOException,
                                                                     DataNotFoundException {
  String filePath = objectNotification.get(PATH).getAsString();
  String topic = objectNotification.get(TOPIC).getAsString();
  if (!isValidReadRequest(topic, filePath)) {
    throw new IOException("Invalid read path:" + filePath);
  }
  File file = new File(filePath);
  if (!file.exists()) {
    throw new DataNotFoundException(filePath + " does not exist");
  }
  return file;
}
/**
 * Serializes the batch header back into a pooled buffer for the transport.
 * Ownership of the returned buffer passes to the caller.
 */
@Override
public BatchData fetchHeaderForBatchBuf(JsonObject nextNotificationToProcess) throws IOException,
                                                                              DataNotFoundException {
  BatchHeader parsedHeader = fetchHeaderForBatch(nextNotificationToProcess);
  ByteBuf headerBuf = parsedHeader.writeHeaderToByteBuf(PooledByteBufAllocator.DEFAULT.buffer());
  int headerLength = headerBuf.readableBytes();
  return new BatchData(headerLength, headerBuf);
}
/**
 * Reads only the batch header. Local reads stream the on-disk file; remote
 * reads fetch the header bytes from the broker.
 */
@Override
public BatchHeader fetchHeaderForBatch(JsonObject objectNotification) throws IOException,
                                                                      DataNotFoundException {
  if (isLocalRead()) {
    try (DataInputStream dis = new DataInputStream(
        fetchBatchStreamForNotification(objectNotification))) {
      return new BatchHeader(dis);
    }
  } else {
    BatchData batch;
    try {
      batch = readBatchHeader(objectNotification.get(TOPIC).getAsString(), objectNotification);
    } catch (Exception e) {
      throw new IOException(e);
    }
    // try-with-resources releases the backing buffer even when header parsing
    // fails; the previous code leaked the buffer on a parse error.
    try (DataInputStream stream = new DataInputStream(
        new ByteBufInputStream(batch.getDataAsBuf(), true))) {
      return new BatchHeader(stream);
    }
  }
}
/**
 * Reads a single message (located by the index entry) into a pooled buffer.
 * The source stream is closed and the buffer released on failure.
 */
@Override
public BatchData fetchMessageAtIndexBuf(JsonObject objectNotification,
                                        BatchHeader.IndexEntry index) throws IOException,
                                                                      DataNotFoundException {
  ByteBuf buffer = PooledByteBufAllocator.DEFAULT.buffer(index.getSize());
  try (DataInputStream stream = fetchMessageAtIndex(objectNotification, index)) {
    int remaining = index.getSize();
    // writeBytes may return short counts; loop until the message is complete
    // (the previous single call could truncate, and leaked the stream).
    while (remaining > 0) {
      int n = buffer.writeBytes(stream, remaining);
      if (n < 0) {
        throw new IOException("Unexpected EOF reading message at index " + index);
      }
      remaining -= n;
    }
    return new BatchData(index.getSize(), buffer);
  } catch (IOException e) {
    buffer.release();
    throw e;
  }
}
/**
 * Opens a bounded stream over one message located by the batch-header index
 * entry: a seek into the local file when local reads are enabled, otherwise a
 * ranged remote read from the broker.
 */
@Override
public DataInputStream fetchMessageAtIndex(JsonObject objectNotification,
                                           BatchHeader.IndexEntry index) throws IOException,
                                                                         DataNotFoundException {
  if (isLocalRead()) {
    File file = validateAndGetReadPath(objectNotification);
    RandomAccessFile raf = new RandomAccessFile(file, "r");
    raf.seek(index.getOffset());
    // 'true' -> closing the returned stream also closes the RandomAccessFile.
    RandomAccessFileInputStream is = new RandomAccessFileInputStream(raf, true);
    return new DataInputStream(new BoundedInputStream(is, index.getSize()));
  } else {
    BatchData batch;
    try {
      logger.fine(
          () -> "Making index message fetch request:" + objectNotification + " index:" + index);
      batch = readBatchAtIndex(objectNotification.get(TOPIC).getAsString(), objectNotification,
          index);
    } catch (Exception e) {
      throw new IOException(e);
    }
    // 'true' -> closing the returned stream releases the backing buffer.
    return new DataInputStream(new ByteBufInputStream(batch.getDataAsBuf(), true));
  }
}
/**
 * A valid read request must resolve (after canonicalization) to a location
 * inside one of the configured storage directories. Both sides are
 * canonicalized and the match requires a path-separator boundary, so sibling
 * directories (e.g. "/data1-evil" vs "/data1") cannot pass a bare
 * startsWith prefix check, as they could previously.
 *
 * @param topic    topic being read (not currently folded into the check)
 * @param filePath requested path
 * @return true if the canonical path is under a configured storage dir
 */
protected boolean isValidReadRequest(String topic, String filePath) throws IOException {
  String filePathCanonical = new File(filePath).getCanonicalPath();
  for (String storageDir : storageDirList) {
    String dirCanonical = new File(storageDir).getCanonicalPath();
    if (filePathCanonical.equals(dirCanonical)
        || filePathCanonical.startsWith(dirCanonical + File.separator)) {
      return true;
    }
  }
  return false;
}
/**
 * Returns a future completing with the first finished upload, or failing with
 * a TimeoutException after {@code duration}.
 */
public CompletableFuture<String> anyUploadResultOrTimeout(Collection<CompletableFuture<String>> tasks,
                                                          Duration duration) {
  final CompletableFuture<String> promise = new CompletableFuture<>();
  final Future<?> timeoutHandle = executionTimer.schedule(() -> {
    final TimeoutException ex = new TimeoutException(
        "Timeout after " + duration.toMillis() + " milliseconds");
    return promise.completeExceptionally(ex);
  }, duration.toMillis(), TimeUnit.MILLISECONDS);
  CompletableFuture<String> anyUploadResultFuture = CompletableFuture
      .anyOf(tasks.toArray(new CompletableFuture[0])).thenApply(o -> (String) o);
  CompletableFuture<String> first = anyUploadResultFuture.applyToEither(promise,
      Function.identity());
  // Cancel the pending timeout once a result (or the timeout itself) lands so
  // stale tasks don't pile up on the single-threaded timer.
  first.whenComplete((r, t) -> timeoutHandle.cancel(false));
  return first;
}
// No reader-local resources beyond what the base class tears down.
@Override
public void closeReader() {
  super.closeReader();
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/ConsumerConfigs.java | memq-client/src/main/java/com/pinterest/memq/client/commons/ConsumerConfigs.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
/**
 * Property keys used to configure a MemQ consumer.
 */
public class ConsumerConfigs extends CommonConfigs {
  // Client identity and consumption mode
  public static final String CLIENT_ID = "clientId";
  public static final String USE_STREAMING_ITERATOR = "useStreamingIterator";
  // Notification source (where batch notifications are consumed from)
  public static final String NOTIFICATION_SOURCE_TYPE_KEY = "notificationSourceType";
  public static final String NOTIFICATION_SOURCE_PROPS_KEY = "notificationSourceProps";
  public static final String NOTIFICATION_SOURCE_PROPS_PREFIX_KEY = "notification.";
  // Prefix for properties passed through to the storage handler
  public static final String STORAGE_PROPS_PREFIX_KEY = "storage.";
  // Local buffering of fetched objects
  public static final String BUFFER_TO_FILE_CONFIG_KEY = "bufferToFile";
  public static final String USE_DIRECT_BUFFER_KEY = "directBuffer";
  public static final String BUFFER_FILES_DIRECTORY_KEY = "bufferFilename";
  // Key/value deserialization
  public static final String KEY_DESERIALIZER_CLASS_KEY = "key.deserializerclass";
  public static final String VALUE_DESERIALIZER_CLASS_KEY = "value.deserializerclass";
  public static final String KEY_DESERIALIZER_CLASS_CONFIGS_KEY = "key.deserializerclass.configs";
  public static final String VALUE_DESERIALIZER_CLASS_CONFIGS_KEY = "value.deserializerclass.configs";
  // Commit and debugging behavior
  public static final String AUTO_COMMIT_PER_POLL_KEY = "autoCommitPerPoll";
  public static final String DRY_RUN_KEY = "dryRun";
  public static final String DIRECT_CONSUMER = "directConsumer";
  public static final String GROUP_ID = "group.id";
  // Internal property carrying the topic name
  public static final String TOPIC_INTERNAL_PROP = "topic";
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/SimpleMessageId.java | memq-client/src/main/java/com/pinterest/memq/client/commons/SimpleMessageId.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import java.nio.ByteBuffer;
import com.pinterest.memq.commons.MessageId;
/**
 * MessageId backed by a single long, stored big-endian in an 8-byte array
 * (ByteBuffer's default byte order).
 */
public class SimpleMessageId extends MessageId {
  public SimpleMessageId(long id) {
    super(ByteBuffer.allocate(8).putLong(id).array());
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/MemqLogMessageIterator.java | memq-client/src/main/java/com/pinterest/memq/client/commons/MemqLogMessageIterator.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;
import java.util.logging.Logger;
import com.codahale.metrics.MetricRegistry;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.audit.Auditor;
import com.pinterest.memq.commons.BatchHeader;
import com.pinterest.memq.commons.CloseableIterator;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.MessageId;
import com.pinterest.memq.core.utils.MemqUtils;
public class MemqLogMessageIterator<K, V> implements CloseableIterator<MemqLogMessage<K, V>> {
private static final Logger logger = Logger
.getLogger(MemqLogMessageIterator.class.getCanonicalName());
public static final String METRICS_PREFIX = "memqConsumer";
public static final String MESSAGES_PROCESSED_COUNTER_KEY = METRICS_PREFIX
+ ".messagesProcessedCounter";
public static final String BYTES_PROCESSED_METER_KEY = METRICS_PREFIX + ".bytesProcessedCounter";
protected DataInputStream uncompressedBatchInputStream;
protected Deserializer<V> valueDeserializer;
protected Deserializer<K> headerDeserializer;
protected int messagesToRead;
protected MetricRegistry metricRegistry;
protected JsonObject currNotificationObj;
protected int currentMessageOffset;
protected DataInputStream stream;
private int notificationPartitionId;
private long notificationPartitionOffset;
private long notificationReadTimestamp;
private int objectSize;
private Auditor auditor;
private String cluster;
private String topic;
private MemqMessageHeader header;
private byte[] messageIdHash;
private int auditedMessageCount;
private BatchHeader batchHeader = null;
private String clientId;
// For subclasses that perform their own initialization.
protected MemqLogMessageIterator() {
}
/**
 * Creates an iterator over the MemqLogMessages contained in one batch object
 * stream referenced by a consumer notification.
 *
 * @param skipHeaderRead if true, assume the BatchHeader has already been
 *                       consumed from the stream before this constructor runs
 */
public MemqLogMessageIterator(String cluster,
                              String clientId,
                              DataInputStream stream,
                              JsonObject currNotificationObj,
                              Deserializer<K> headerDeserializer,
                              Deserializer<V> valueDeserializer,
                              MetricRegistry metricRegistry,
                              boolean skipHeaderRead,
                              Auditor auditor) throws IOException {
  this.cluster = cluster;
  this.clientId = clientId;
  this.stream = stream;
  this.currNotificationObj = currNotificationObj;
  this.auditor = auditor;
  try {
    this.topic = currNotificationObj.get(MemqLogMessage.INTERNAL_FIELD_TOPIC).getAsString();
    this.objectSize = currNotificationObj.get(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE).getAsInt();
    this.notificationPartitionId = currNotificationObj
        .get(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_ID).getAsInt();
    this.notificationPartitionOffset = currNotificationObj
        .get(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET).getAsLong();
    this.notificationReadTimestamp = currNotificationObj
        .get(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_READ_TIMESTAMP).getAsLong();
  } catch (Exception e) {
    // NOTE(review): missing/malformed notification fields are silently
    // ignored, leaving field defaults in place — confirm this best-effort
    // behavior is intended rather than logging or failing fast.
  }
  this.headerDeserializer = headerDeserializer;
  this.valueDeserializer = valueDeserializer;
  this.metricRegistry = metricRegistry;
  if (!skipHeaderRead) {
    batchHeader = new BatchHeader(stream);
  }
  // Eagerly load the first batch so next()/hasNext() have data to work with.
  readHeaderAndLoadBatch();
}
@Override
public boolean hasNext() {
  // Bytes remaining in the underlying stream always mean more messages; once
  // the stream is drained, only messages left in the current batch count.
  final int available;
  try {
    available = stream.available();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  return available > 0 || messagesToRead > 0;
}
@Override
public MemqLogMessage<K, V> next() {
  try {
    if (messagesToRead > 0) {
      // The current batch still has messages.
      return getMemqLogMessage();
    }
    // Current batch exhausted: if the stream has more bytes, load the next
    // batch and recurse (a batch may legitimately contain zero messages).
    if (stream.available() > 0 && readHeaderAndLoadBatch()) {
      return next();
    }
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  throw new RuntimeException(new DataCorruptionException("No next"));
}
/**
 * Reads the next batch's header, loads the (CRC-verified, decompressed) batch
 * body, and returns whether there are batch messages to read.
 *
 * @return true if there are batch messages to read, false otherwise
 */
protected boolean readHeaderAndLoadBatch() {
  try {
    // since we are about to read a brand new batch therefore we should reset the
    // messageIdHash
    messageIdHash = null;
    auditedMessageCount = 0;
    header = readHeader();
    logger.fine(() -> "Message header:" + header);
    byte[] batch = new byte[header.getMessageLength()];
    // readFully: a plain read() may return after a partial read, which would
    // then fail the CRC check on truncated data (or corrupt decompression).
    stream.readFully(batch);
    if (!CommonUtils.crcChecksumMatches(batch, header.getCrc())) {
      // CRC checksum mismatch
      throw new RuntimeException(new DataCorruptionException("CRC checksum mismatch"));
    }
    if (uncompressedBatchInputStream != null) {
      uncompressedBatchInputStream.close();
    }
    // using byte array here rather than ByteBuf since the getMemqLogMessage will iterate through one message at a time,
    // which would be in the range of tens of KBs, unless the message itself has a extremely high compression ratio
    uncompressedBatchInputStream = CommonUtils.getUncompressedInputStream(header.getCompression(),
        new ByteArrayInputStream(batch));
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
  return messagesToRead > 0;
}
// Parses and sanity-checks the next message header, and records how many
// log messages the batch contains.
protected MemqMessageHeader readHeader() throws IOException {
  MemqMessageHeader header = new MemqMessageHeader(stream);
  final int batchLength = header.getMessageLength();
  if (batchLength > objectSize) {
    logger.severe("BatchLength (" + batchLength + ") is larger than objectSize ("
        + objectSize + ")");
    throw new IOException(
        "Invalid batchLength found:" + batchLength + " vs:" + objectSize);
  }
  if (batchLength < 0) {
    logger.severe("BatchLength (" + batchLength + ") is less than zero");
    throw new IOException("Invalid batchLength found:" + batchLength);
  }
  messagesToRead = header.getLogmessageCount();
  return header;
}
/**
 * This method allows skipping to last message of a Batch efficiently. <br>
 * Using this method allows clients to skip decompression of one message at the
 * time and simply scroll to the last message. <br>
 * <br>
 * This method skips over all Messages but the last one and then reads the last
 * message till (N-1)th message leaving the last message to be accessed via the
 * next() method.
 *
 * @throws IOException
 */
public void skipToLastLogMessage() throws IOException {
  currentMessageOffset = 0;
  if (stream == null) {
    throw new IOException("Stream is null can't skip to last");
  }
  // Fast-forward through the raw stream batch by batch; each call replaces
  // uncompressedBatchInputStream with the newest batch's contents.
  while (stream.available() > 0) {
    readHeaderAndLoadBatch();
  }
  // Consume all but the last message of the final batch.
  while (messagesToRead > 1) {
    getMemqLogMessage();
  }
}
/**
 * Reads the uncompressed input stream and returns a MemqLogMessage.
 *
 * Wire layout per message, as read below:
 *   short internalFieldsLength
 *   [if > 0] long writeTimestamp; byte messageIdLength (+ id bytes);
 *            short headerLength (+ serialized headers)
 *   int keyLength (+ key bytes if > 0)
 *   int valueLength (+ value bytes)
 *
 * @return a MemqLogMessage
 * @throws IOException
 */
protected MemqLogMessage<K, V> getMemqLogMessage() throws IOException {
  short logMessageInternalFieldsLength = uncompressedBatchInputStream.readShort();
  long writeTimestamp = 0;
  MessageId messageId = null;
  Map<String, byte[]> headers = null;
  if (logMessageInternalFieldsLength > 0) {
    writeTimestamp = uncompressedBatchInputStream.readLong();
    // messageId
    int messageIdLength = uncompressedBatchInputStream.read();
    if (messageIdLength > 0) {
      byte[] messageIdAry = new byte[messageIdLength];
      uncompressedBatchInputStream.readFully(messageIdAry);
      messageId = new MessageId(messageIdAry);
      // Rolling hash across all message ids in the batch, used for auditing.
      messageIdHash = MemqUtils.calculateMessageIdHash(messageIdHash, messageId.toByteArray());
      auditedMessageCount++;
    }
    // headers
    short headerLength = uncompressedBatchInputStream.readShort();
    if (headerLength > 0) {
      headers = deserializeHeaders(headerLength, uncompressedBatchInputStream);
    }
    // ###################################################
    // do something with internal headers here in future
    // ###################################################
  }
  int keyLength = uncompressedBatchInputStream.readInt();
  byte[] keyBytes = null;
  if (keyLength > 0) {
    keyBytes = new byte[keyLength];
    uncompressedBatchInputStream.readFully(keyBytes);
  }
  int logMessageBytesToRead = uncompressedBatchInputStream.readInt();
  byte[] logMessageBytes = new byte[logMessageBytesToRead];
  uncompressedBatchInputStream.readFully(logMessageBytes);
  messagesToRead--;
  if (messagesToRead == 0) {
    // Last message of the batch: flush the accumulated audit state.
    tryAndSendAuditMessage();
  }
  metricRegistry.counter(MESSAGES_PROCESSED_COUNTER_KEY).inc();
  metricRegistry.meter(BYTES_PROCESSED_METER_KEY).mark(logMessageBytesToRead);
  // Final constructor flag marks whether this is the batch's last message.
  MemqLogMessage<K, V> logMessage = new MemqLogMessage<>(messageId, headers,
      headerDeserializer.deserialize(keyBytes), valueDeserializer.deserialize(logMessageBytes), messagesToRead == 0);
  populateInternalFields(writeTimestamp, logMessage);
  currentMessageOffset++;
  return logMessage;
}
/**
 * Emits one audit record covering every message hashed so far in this batch.
 * Sent only when an auditor is configured, at least one message id was hashed,
 * and the batch header is available (the header supplies producer identity).
 */
private void tryAndSendAuditMessage() throws IOException {
  // auditing disabled, or no message ids were hashed for this batch
  if (auditor == null || messageIdHash == null) {
    return;
  }
  if (header == null) {
    logger.warning("Header is null, we can't send audit");
    return;
  }
  auditor.auditMessage(cluster.getBytes(MemqUtils.CHARSET), topic.getBytes(MemqUtils.CHARSET),
      header.getProducerAddress(), header.getProducerEpoch(), header.getProducerRequestId(),
      messageIdHash, auditedMessageCount, false, clientId);
}
/**
 * Copies per-message and per-notification metadata onto the log message.
 *
 * @param writeTimestamp producer write timestamp parsed from the message
 * @param logMessage     message to populate (mutated in place)
 */
private void populateInternalFields(long writeTimestamp, MemqLogMessage<K, V> logMessage) {
  logMessage.setWriteTimestamp(writeTimestamp);
  logMessage.setMessageOffsetInBatch(currentMessageOffset);
  try {
    logMessage.setNotificationPartitionId(notificationPartitionId);
    logMessage.setNotificationPartitionOffset(notificationPartitionOffset);
    logMessage.setNotificationReadTimestamp(notificationReadTimestamp);
  } catch (Exception e) {
    // Best-effort population: notification metadata may be unavailable for
    // some sources. Log at FINE instead of swallowing silently so failures
    // remain diagnosable without spamming normal operation.
    logger.log(Level.FINE, "Failed to populate notification fields", e);
  }
}
/**
 * Deserializes the headers section of a LogMessage into a map.
 *
 * Wire format per entry: 2B key length, key bytes, 2B value length, value
 * bytes; entries are consumed until {@code headerLength} bytes are exhausted.
 *
 * @param headerLength total byte length of the serialized headers section
 * @param input        stream positioned at the start of the headers section
 * @return map of header key to raw value bytes
 * @throws IOException if the stream fails or ends prematurely
 */
protected Map<String, byte[]> deserializeHeaders(short headerLength,
                                                 DataInputStream input) throws IOException {
  Map<String, byte[]> map = new HashMap<>();
  while (headerLength > 0) {
    short keyLength = input.readShort();
    byte[] key = new byte[keyLength];
    input.readFully(key);
    short valueLength = input.readShort();
    byte[] value = new byte[valueLength];
    input.readFully(value);
    // Decode keys with the project charset instead of the platform default so
    // decoding matches the rest of this class (which uses MemqUtils.CHARSET).
    map.put(new String(key, MemqUtils.CHARSET), value);
    // 4 accounts for the two 2-byte length prefixes consumed above
    headerLength -= 4 + keyLength + valueLength;
  }
  return map;
}
/**
 * Element removal is not supported by this iterator; kept as an intentional
 * no-op (the code chooses not to throw UnsupportedOperationException).
 */
@Override
public void remove() {
  // do nothing
}
/**
 * Applies {@code action} to every remaining message, in iteration order,
 * until the iterator is exhausted.
 */
@Override
public void forEachRemaining(Consumer<? super MemqLogMessage<K, V>> action) {
  for (; hasNext(); ) {
    action.accept(next());
  }
}
/**
 * Releases the raw object stream and the decompressed batch stream. Fields
 * are nulled after closing, so a second call is a no-op.
 *
 * NOTE(review): if {@code stream.close()} throws, the batch stream is left
 * open — confirm whether that leak matters for the decompressor in use.
 *
 * @throws IOException if closing either stream fails
 */
@Override
public void close() throws IOException {
  if (stream != null) {
    stream.close();
    stream = null;
  }
  if (uncompressedBatchInputStream != null) {
    uncompressedBatchInputStream.close();
    uncompressedBatchInputStream = null;
  }
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/CommonUtils.java | memq-client/src/main/java/com/pinterest/memq/client/commons/CommonUtils.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.CRC32;
/**
 * Shared helpers for MemQ clients: CRC validation and decompression wiring.
 */
public class CommonUtils {

  /**
   * Validates that the CRC32 checksum of {@code batch} matches the checksum
   * recorded in the message header.
   *
   * @param batch     raw batch payload
   * @param headerCrc 32-bit checksum from the message header
   * @return true if the checksums match
   */
  public static boolean crcChecksumMatches(byte[] batch, int headerCrc) {
    CRC32 crc = new CRC32();
    crc.update(batch);
    // CRC32.getValue() yields an unsigned 32-bit value in a long; the header
    // stores the int-truncated form, so compare after the same narrowing cast.
    return (int) crc.getValue() == headerCrc;
  }

  /**
   * Given a compression id and compressed InputStream, return an uncompressed
   * DataInputStream.
   *
   * @param compression the compression id (see Compression enum ids)
   * @param original    the compressed InputStream
   * @return an uncompressed DataInputStream
   * @throws IOException                 if the decompressor cannot be created
   * @throws UnknownCompressionException if no codec matches the id
   */
  public static DataInputStream getUncompressedInputStream(byte compression,
                                                           InputStream original) throws IOException,
                                                                                 UnknownCompressionException {
    if (compression == 0) {
      // id 0 (NONE) needs no wrapping
      return new DataInputStream(original);
    }
    for (Compression comp : Compression.values()) {
      if (comp.id == compression) {
        // NOTE(review): despite its name, getCompressStream wraps the input
        // with a *decompressing* stream — see the Compression enum definition.
        return new DataInputStream(comp.getCompressStream(original));
      }
    }
    throw new UnknownCompressionException("Compression id " + compression + " is not supported");
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/ResponseHandler.java | memq-client/src/main/java/com/pinterest/memq/client/commons/ResponseHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import java.util.Map;
import java.util.function.Consumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.pinterest.memq.commons.protocol.ResponsePacket;
/**
 * Dispatches broker responses back to the per-request callbacks registered in
 * the shared request map. Each pending entry is one-shot: it is removed from
 * the map when its response is handled.
 */
public class ResponseHandler {

  private static final Logger logger = LoggerFactory.getLogger(ResponseHandler.class);

  // pending request callbacks, keyed by MemqCommonClient.makeResponseKey
  private Map<String, Consumer<ResponsePacket>> requestMap;

  public ResponseHandler() {
  }

  /**
   * Routes a response to (and removes) its registered callback; logs an error
   * when no callback is pending for the response key.
   *
   * @param responsePacket response received from the broker
   * @throws Exception if the callback itself throws
   */
  public void handle(ResponsePacket responsePacket) throws Exception {
    Consumer<ResponsePacket> consumer = requestMap
        .remove(MemqCommonClient.makeResponseKey(responsePacket));
    if (consumer != null) {
      consumer.accept(responsePacket);
    } else {
      // no handler for response skipping; parameterized logging avoids eager
      // string concatenation when the error level is disabled
      logger.error("No handler for request:{}", responsePacket.getRequestType());
    }
  }

  public void setRequestMap(Map<String, Consumer<ResponsePacket>> requestMap) {
    this.requestMap = requestMap;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/CommonConfigs.java | memq-client/src/main/java/com/pinterest/memq/client/commons/CommonConfigs.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
/**
 * Configuration property keys shared by MemQ producer and consumer clients.
 */
public class CommonConfigs {

  // Logical name of the MemQ cluster this client talks to.
  public static final String CLUSTER = "cluster";
  // Path to a serverset file of broker endpoints; used for AZ-local broker
  // selection — confirm file format against the serverset reader.
  public static final String SERVERSET_FILE = "serverset.file";
  // Bootstrap broker endpoints — presumably comma-separated host:port pairs;
  // TODO confirm expected format.
  public static final String BOOTSTRAP_SERVERS = "bootstrap.servers";
  // Locality (e.g. availability zone) of this client, used to prefer local brokers.
  public static final String CLIENT_LOCALITY = "client.locality";
  // Prefix for all auditor-related keys; stripped before the remainder is
  // handed to the auditor implementation (see AuditorUtils.extractAuditorConfig).
  public static final String AUDITOR_CONFIG_PREFIX = "auditor.";
  public static final String AUDITOR_ENABLED = AUDITOR_CONFIG_PREFIX + "enabled";
  public static final String AUDITOR_CLASS = AUDITOR_CONFIG_PREFIX + "class";
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/DataCorruptionException.java | memq-client/src/main/java/com/pinterest/memq/client/commons/DataCorruptionException.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
/**
 * Thrown when a MemQ batch fails integrity validation (e.g. a CRC mismatch).
 */
public class DataCorruptionException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message description of the corruption detected
   */
  public DataCorruptionException(String message) {
    super(message);
  }

  /**
   * Cause-preserving variant so the originating error is not lost when this
   * exception is rethrown from a lower-level failure.
   *
   * @param message description of the corruption detected
   * @param cause   underlying failure
   */
  public DataCorruptionException(String message, Throwable cause) {
    super(message, cause);
  }
}
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/MemqMessageHeader.java | memq-client/src/main/java/com/pinterest/memq/client/commons/MemqMessageHeader.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.zip.CRC32;
import com.pinterest.memq.client.producer.TaskRequest;
import com.pinterest.memq.client.producer2.Request;
import com.pinterest.memq.core.utils.MemqUtils;
import io.netty.buffer.ByteBuf;
/**
 * On-wire header for a MemQ message (a batch of LogMessages). Producers write
 * it via {@link #writeHeader}; consumers/brokers parse it back through the
 * ByteBuf / ByteBuffer / DataInputStream constructors.
 *
 * Wire layout (see {@link #getHeaderLength()}):
 *   2B headerLength, 2B version, 2B additionalHeaderLength,
 *   [additional section: 1B address length + IPv4 address bytes
 *    + 8B producerEpoch + 8B producerRequestId],
 *   4B body CRC, 1B compression id, 4B LogMessage count, 4B body length.
 */
public class MemqMessageHeader {

  // total header length in bytes; first field on the wire
  private short headerLength;
  // header format version, taken from the producer request
  private short version;
  // byte count of the optional "additional header" section that follows
  private short additionalHeaderLength;
  // CRC32 (truncated to int) of the message body following the header
  private int crc;
  // compression id of the body (matches Compression enum ids)
  private byte compression;
  // number of LogMessages contained in the body
  private int logmessageCount;
  // length in bytes of the body following the header
  private int messageLength;
  // producer-side epoch from the additional section (audit/tracing)
  private long producerEpoch;
  // producer-side request id from the additional section (audit/tracing)
  private long producerRequestId;
  // raw IPv4 address of the producing host, from the additional section
  private byte[] producerAddress;
  // Exactly one of taskRequest/request is set when this header is used for
  // writing (legacy producer vs producer2 paths).
  private TaskRequest taskRequest = null;
  private Request request = null;

  /** Parses a header from a Netty ByteBuf, advancing its reader index. */
  public MemqMessageHeader(ByteBuf byteBuf) {
    headerLength = byteBuf.readShort();
    version = byteBuf.readShort();
    additionalHeaderLength = byteBuf.readShort();
    if (additionalHeaderLength > 0) {
      // first byte of the additional section is the address length
      producerAddress = new byte[byteBuf.readByte()];
      byteBuf.readBytes(producerAddress);
      producerEpoch = byteBuf.readLong();
      producerRequestId = byteBuf.readLong();
    }
    crc = byteBuf.readInt();
    compression = byteBuf.readByte();
    logmessageCount = byteBuf.readInt();
    messageLength = byteBuf.readInt();
  }

  /** Parses a header from a ByteBuffer, advancing its position. */
  public MemqMessageHeader(ByteBuffer buf) {
    headerLength = buf.getShort();
    version = buf.getShort();
    additionalHeaderLength = buf.getShort();
    if (additionalHeaderLength > 0) {
      // copy the whole additional section out first, then parse the copy
      byte[] additionalInfo = new byte[additionalHeaderLength];
      buf.get(additionalInfo);
      ByteBuffer wrap = ByteBuffer.wrap(additionalInfo);
      producerAddress = new byte[wrap.get()];
      wrap.get(producerAddress);
      producerEpoch = wrap.getLong();
      producerRequestId = wrap.getLong();
    }
    crc = buf.getInt();
    compression = buf.get();
    logmessageCount = buf.getInt();
    messageLength = buf.getInt();
  }

  /** Parses a header by consuming bytes from the stream. */
  public MemqMessageHeader(DataInputStream stream) throws IOException {
    headerLength = stream.readShort();
    version = stream.readShort();
    additionalHeaderLength = stream.readShort();
    if (additionalHeaderLength > 0) {
      producerAddress = new byte[stream.readByte()];
      stream.readFully(producerAddress);
      producerEpoch = stream.readLong();
      producerRequestId = stream.readLong();
    }
    crc = stream.readInt();
    compression = stream.readByte();
    logmessageCount = stream.readInt();
    messageLength = stream.readInt();
  }

  /**
   * Write-side constructor for the legacy producer path.
   *
   * @param taskRequest request supplying version/compression/count/epoch/id
   */
  public MemqMessageHeader(TaskRequest taskRequest) {
    this.taskRequest = taskRequest;
  }

  /** Write-side constructor for the producer2 path. */
  public MemqMessageHeader(Request request) {
    this.request = request;
  }

  /**
   * Fixed header size in bytes. The additional section contributes the host
   * address plus 17 bytes (1B address-length prefix + 8B epoch + 8B request id).
   */
  public static short getHeaderLength() {
    return (short) (2 // header length encoding
        + 2 // version of the header
        + 2 // extra header content
        + MemqUtils.HOST_IPV4_ADDRESS.length + 17
        // placeholder for any additional headers info to add
        + 4 // bytes for crc of the message body
        + 1 // compression scheme
        + 4 // for count of logmessages in the body
        + 4 // bytes for length of the message body
    );
  }

  /**
   * Writes this header in-place at index 0 of {@code buffer}, whose body is
   * assumed to already occupy the bytes after {@link #getHeaderLength()}.
   * The CRC is computed over exactly those body bytes.
   */
  public void writeHeader(final ByteBuf buffer) {
    boolean useTaskRequest = taskRequest != null;
    CRC32 crc = new CRC32();
    // duplicate so the caller's reader/writer indices are not disturbed
    ByteBuf wrap = buffer.duplicate();
    wrap.writerIndex(0);
    wrap.writeShort(getHeaderLength()); // 2bytes
    if (useTaskRequest) {
      wrap.writeShort(taskRequest.getVersion()); // 2bytes
    } else {
      wrap.writeShort(request.getVersion()); // 2bytes
    }
    // add extra stuff
    byte[] extraHeaderContent = getExtraHeaderContent();
    wrap.writeShort((short) extraHeaderContent.length);// 2bytes
    wrap.writeBytes(extraHeaderContent);
    // checksum the body: everything after the fixed header
    ByteBuf tmp = buffer.duplicate();
    tmp.readerIndex(getHeaderLength());
    ByteBuf slice = tmp.slice();
    crc.update(slice.nioBuffer());
    int checkSum = (int) crc.getValue();
    // write crc checksum of the body
    wrap.writeInt(checkSum);// 4bytes
    // compression scheme encoding
    if (useTaskRequest) {
      wrap.writeByte(taskRequest.getCompression().id);// 1byte
      wrap.writeInt(taskRequest.getLogmessageCount()); // 4bytes
    } else {
      wrap.writeByte(request.getCompression().id);// 1byte
      wrap.writeInt(request.getMessageCount()); // 4bytes
    }
    // write the length of remaining bytes
    int payloadLength = buffer.readableBytes() - getHeaderLength();
    wrap.writeInt(payloadLength); // 4bytes
  }

  /**
   * Builds the additional-section bytes: 1B address length, host IPv4 address,
   * 8B producer epoch, 8B client request id.
   */
  private byte[] getExtraHeaderContent() {
    boolean useTaskRequest = taskRequest != null;
    // add tracking info
    byte[] hostAddress = MemqUtils.HOST_IPV4_ADDRESS;
    ByteBuffer extrainfo = ByteBuffer.allocate(hostAddress.length + 1 + 8 + 8);
    extrainfo.put((byte) hostAddress.length);
    extrainfo.put(hostAddress);
    if (useTaskRequest) {
      // add producer epoch
      extrainfo.putLong(taskRequest.getEpoch());
      // add client request id
      extrainfo.putLong(taskRequest.getId());
    } else {
      // add producer epoch
      extrainfo.putLong(request.getEpoch());
      // add client request id
      extrainfo.putLong(request.getClientRequestId());
    }
    return extrainfo.array();
  }

  public short getVersion() {
    return version;
  }

  /** Length of the additional section, i.e. bytes a reader may skip over. */
  public short getBytesToSkip() {
    return additionalHeaderLength;
  }

  public int getCrc() {
    return crc;
  }

  public byte getCompression() {
    return compression;
  }

  public int getMessageLength() {
    return messageLength;
  }

  public int getLogmessageCount() {
    return logmessageCount;
  }

  public long getProducerEpoch() {
    return producerEpoch;
  }

  public long getProducerRequestId() {
    return producerRequestId;
  }

  public byte[] getProducerAddress() {
    return producerAddress;
  }

  @Override
  public String toString() {
    return "BatchHeader [headerLength=" + headerLength + ", version=" + version + ", bytesToSkip="
        + additionalHeaderLength + ", additionalInfo=["
        + (producerAddress != null && producerAddress.length == 4
            ? MemqUtils.getStringFromByteAddress(producerAddress)
            : "N/A")
        + "," + producerEpoch + ", " + producerRequestId + "], crc=" + crc + ", compression="
        + compression + ", logmessageCount=" + logmessageCount + ", batchLength=" + messageLength
        + "]";
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/Compression.java | memq-client/src/main/java/com/pinterest/memq/client/commons/Compression.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import com.github.luben.zstd.RecyclingBufferPool;
import com.github.luben.zstd.ZstdInputStreamNoFinalizer;
import com.github.luben.zstd.ZstdOutputStreamNoFinalizer;
public enum Compression {
NONE(0, 0, is -> is, os -> os),
GZIP(1, 512,
GZIPInputStream::new,
outputStream -> new GZIPOutputStream(outputStream, true)
),
ZSTD(2, 0,
is -> new ZstdInputStreamNoFinalizer(is, RecyclingBufferPool.INSTANCE),
os -> new ZstdOutputStreamNoFinalizer(os, RecyclingBufferPool.INSTANCE)
);
public byte id;
public int minBufferSize;
private final CompressionWrapper<InputStream> inputStreamCompressionWrapper;
private final CompressionWrapper<OutputStream> outputStreamCompressionWrapper;
Compression(int id, int minBufferSize,
CompressionWrapper<InputStream> inputStreamCompressionWrapper,
CompressionWrapper<OutputStream> outputStreamCompressionWrapper) {
this.id = (byte) id;
this.minBufferSize = minBufferSize;
this.inputStreamCompressionWrapper = inputStreamCompressionWrapper;
this.outputStreamCompressionWrapper = outputStreamCompressionWrapper;
}
@FunctionalInterface
private interface CompressionWrapper<T> {
T getWrappedInstance(T t) throws IOException;
}
public InputStream getCompressStream(InputStream is) throws IOException {
return inputStreamCompressionWrapper.getWrappedInstance(is);
}
public OutputStream getDecompressStream(OutputStream os) throws IOException {
return outputStreamCompressionWrapper.getWrappedInstance(os);
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/Deserializer.java | memq-client/src/main/java/com/pinterest/memq/client/commons/Deserializer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import java.util.Properties;
/**
 * Pluggable decoder turning raw MemQ payload bytes into a typed value.
 * Implementations may be stateless lambdas or configured via {@link #init}.
 *
 * @param <T> decoded value type
 */
@FunctionalInterface
public interface Deserializer<T> {

  /**
   * Optional configuration hook invoked before first use; the default
   * implementation does nothing.
   *
   * @param props deserializer-specific configuration
   */
  default void init(Properties props) {
  }

  /**
   * Decodes the given bytes into a value of type {@code T}.
   *
   * @param bytes raw payload bytes
   * @return the decoded value
   */
  T deserialize(byte[] bytes);
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/MemqNettyClientSideResponseHandler.java | memq-client/src/main/java/com/pinterest/memq/client/commons/MemqNettyClientSideResponseHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
/**
 * Inbound Netty handler that decodes broker response frames and forwards them
 * to the {@link ResponseHandler} for callback dispatch. Always releases the
 * incoming buffer.
 */
public class MemqNettyClientSideResponseHandler extends ChannelInboundHandlerAdapter {

  private static final Logger logger = LoggerFactory
      .getLogger(MemqNettyClientSideResponseHandler.class);

  private ResponseHandler responseHandler;

  public MemqNettyClientSideResponseHandler(ResponseHandler responseHandler) {
    this.responseHandler = responseHandler;
  }

  @Override
  public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    final ByteBuf payload = (ByteBuf) msg;
    try {
      ResponsePacket response = new ResponsePacket();
      response.readFields(payload, RequestType.PROTOCOL_VERSION);
      logger.debug("Response received {}", response);
      if (response.getProtocolVersion() == RequestType.PROTOCOL_VERSION) {
        responseHandler.handle(response);
      } else {
        // might not be able to handle this request.
        // in future multiple protocol versions can / should be handled here
        logger.debug("Server responded in protocol different than client request:{} vs {}",
            response.getProtocolVersion(), RequestType.PROTOCOL_VERSION);
      }
    } catch (Exception e) {
      logger.error("Failed to handle server responses", e);
      throw e;
    } finally {
      // release the frame buffer regardless of outcome to avoid a leak
      payload.release();
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/UnknownCompressionException.java | memq-client/src/main/java/com/pinterest/memq/client/commons/UnknownCompressionException.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
/**
 * Thrown when a message header carries a compression id that no codec in the
 * Compression enum recognizes.
 */
public class UnknownCompressionException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message description including the unrecognized compression id
   */
  public UnknownCompressionException(String message) {
    super(message);
  }

  /**
   * Cause-preserving variant so the originating error is not lost when this
   * exception is rethrown from a lower-level failure.
   *
   * @param message description including the unrecognized compression id
   * @param cause   underlying failure
   */
  public UnknownCompressionException(String message, Throwable cause) {
    super(message, cause);
  }
}
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/AuditorUtils.java | memq-client/src/main/java/com/pinterest/memq/client/commons/AuditorUtils.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import java.io.IOException;
import java.util.Map.Entry;
import java.util.Properties;
public class AuditorUtils {
public static Properties extractAuditorConfig(Properties properties) throws IOException {
Properties auditProps = new Properties();
for (Entry<Object, Object> entry : properties.entrySet()) {
String key = entry.getKey().toString();
if (key.startsWith(CommonConfigs.AUDITOR_CONFIG_PREFIX)) {
auditProps.put(key.replace(CommonConfigs.AUDITOR_CONFIG_PREFIX, ""), entry.getValue());
}
}
return auditProps;
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/MemqWriteResult.java | memq-client/src/main/java/com/pinterest/memq/client/commons/MemqWriteResult.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
/**
 * Outcome of a single MemQ write request: latencies and payload size, tagged
 * with the originating client request id.
 */
public class MemqWriteResult {

  // id of the client request this result belongs to
  private long clientRequestId;
  private int writeLatency;
  private int ackLatency;
  private int bytesWritten;

  public MemqWriteResult() {
  }

  /**
   * @param clientRequestId id of the originating client request
   * @param writeLatency    time spent writing the payload (units per caller —
   *                        presumably ms; TODO confirm)
   * @param ackLatency      time spent waiting for the broker ack
   * @param bytesWritten    payload size written
   */
  public MemqWriteResult(long clientRequestId, int writeLatency, int ackLatency, int bytesWritten) {
    // Bug fix: clientRequestId was previously never assigned, so
    // getClientRequestId() always returned 0 for results built here.
    this.clientRequestId = clientRequestId;
    this.writeLatency = writeLatency;
    this.ackLatency = ackLatency;
    this.bytesWritten = bytesWritten;
  }

  public int getWriteLatency() {
    return writeLatency;
  }

  public void setWriteLatency(int writeLatency) {
    this.writeLatency = writeLatency;
  }

  public int getAckLatency() {
    return ackLatency;
  }

  public void setAckLatency(int ackLatency) {
    this.ackLatency = ackLatency;
  }

  public int getBytesWritten() {
    return bytesWritten;
  }

  public void setBytesWritten(int bytesWritten) {
    this.bytesWritten = bytesWritten;
  }

  public long getClientRequestId() {
    return clientRequestId;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/MemqCommonClient.java | memq-client/src/main/java/com/pinterest/memq/client/commons/MemqCommonClient.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.TrustManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.pinterest.memq.client.producer.http.DaemonThreadFactory;
import com.pinterest.memq.client.producer.netty.MemqNettyProducer;
import com.pinterest.memq.commons.config.SSLConfig;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.TopicMetadata;
import com.pinterest.memq.commons.protocol.TopicMetadataRequestPacket;
import com.pinterest.memq.commons.protocol.TopicMetadataResponsePacket;
import com.pinterest.memq.core.utils.MemqUtils;
import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.handler.ssl.ClientAuth;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
/**
* Common Client layer to enable connection handling for Producer and Consumer
* without code duplication. This class defines logic for connection,
* reconnection etc. including locality awareness.
*
* This also auto attaches the common handlers regardless of specific handler
* types.
*/
public class MemqCommonClient {
private static final Logger logger = LoggerFactory.getLogger(MemqNettyProducer.class);
private EventLoopGroup group;
private volatile Channel channel;
private CompletableFuture<Channel> channelFuture;
private ChannelFuture connect;
private String locality;
private SSLConfig sslConfig;
private ResponseHandler responseHandler;
private Map<String, Consumer<ResponsePacket>> responseMap = new ConcurrentHashMap<>();
private Set<Broker> brokers;
public MemqCommonClient() {
this.responseHandler = new ResponseHandler();
responseHandler.setRequestMap(responseMap);
}
public MemqCommonClient(InetSocketAddress suppliedServer,
SSLConfig sslConfig) throws Exception {
this();
this.sslConfig = sslConfig;
doConnect(suppliedServer, 10, 2);
waitForConnectOrTimeout();
}
public MemqCommonClient(Set<Broker> brokers,
SSLConfig sslConfig) throws Exception {
this();
this.brokers = brokers;
this.sslConfig = sslConfig;
doConnect(null, 10, 2);
waitForConnectOrTimeout();
}
public MemqCommonClient(Channel channel) {
this();
this.channel = channel;
}
public MemqCommonClient(String cluster,
String serversetFile,
String locality,
SSLConfig sslConfig) throws Exception {
this();
InetSocketAddress suppliedServer = MemqNettyProducer.tryAndGetAZLocalServer(serversetFile,
locality);
this.sslConfig = sslConfig;
doConnect(suppliedServer, 10, 2);
waitForConnectOrTimeout();
}
private void doConnect(final List<InetSocketAddress> suppliedServer,
final int retryTimeSeconds) throws Exception {
Collections.shuffle(suppliedServer);
doConnect(suppliedServer.get(0), retryTimeSeconds, 1);
}
private void doConnect(final InetSocketAddress suppliedServer,
final int retryTimeSeconds,
int maxRetries) throws Exception {
if (maxRetries < 0) {
throw new Exception("Failed to connect, exhausted retries");
}
if (group == null || group.isShutdown()) {
this.group = new NioEventLoopGroup(new DaemonThreadFactory("MemqCommonClientNettyGroup"));
}
InetSocketAddress localServer = getLocalServer(suppliedServer);
Bootstrap clientBootstrap = new Bootstrap();
clientBootstrap.group(group);
clientBootstrap.channel(NioSocketChannel.class);
clientBootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000);
clientBootstrap.handler(new ClientChannelInitializer(sslConfig));
connect = clientBootstrap.connect(localServer);
channelFuture = new CompletableFuture<>();
logger.info("Attempting to connect to " + localServer);
connect.addListener((ChannelFuture f) -> {
if (!f.isSuccess()) {
// schedule reconnect
logger.warn("Failed to connect to " + localServer + " retry in " + retryTimeSeconds
+ "s, reason:" + f.cause().getMessage());
f.channel().eventLoop().schedule(() -> {
try {
doConnect(suppliedServer, retryTimeSeconds, maxRetries - 1);
} catch (Exception e) {
logger.error("Failed to connect during schedule", e);
throw new RuntimeException(e);
}
}, retryTimeSeconds, TimeUnit.SECONDS);
} else {
// set channel variable so request executors can use it
channel = f.channel();
channelFuture.complete(channel);
logger.info("Connected to " + localServer);
// listen for close and reconnect
channel.closeFuture().addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (!isActive()) {
logger.warn("Disconnected from broker will attempt to reconnect now.");
Channel channel = future.channel();
channel.disconnect();
doConnect(suppliedServer, retryTimeSeconds, maxRetries - 1);
} else {
logger.info("Closing connection to broker");
channelFuture = null;
}
}
});
}
});
}
private InetSocketAddress getLocalServer(final InetSocketAddress suppliedServer) throws IOException {
InetSocketAddress localServer;
if (suppliedServer == null) {
List<InetSocketAddress> localServers = getLocalServers(brokers, locality);
Collections.shuffle(localServers);
localServer = localServers.get(0);
} else {
localServer = suppliedServer;
}
return localServer;
}
private void waitForConnectOrTimeout() throws InterruptedException {
for (int i = 0; i < 10; i++) {
if (connect == null) {
Thread.sleep(1000);
} else {
try {
connect.sync().await(5, TimeUnit.SECONDS);
break;
} catch (Exception e) {
continue;
}
}
}
}
public Future<ResponsePacket> sendRequestPacketAndReturnResponseFuture(RequestPacket request,
boolean throwException) {
CompletableFuture<ResponsePacket> future = new CompletableFuture<>();
try {
sendRequestPacket(request, response -> {
if (response.getResponseCode() == ResponseCodes.OK) {
future.complete(response);
} else {
if (throwException) {
future.completeExceptionally(new Exception("Request failed with code:"
+ response.getResponseCode() + " and error:" + response.getErrorMessage()));
} else {
future.complete(response);
}
}
});
} catch (Exception e) {
logger.error("Failed to send request packet", e);
future.completeExceptionally(e);
}
return future;
}
protected void sendRequestPacket(RequestPacket request,
Consumer<ResponsePacket> responseConsumer) throws Exception {
ByteBuf buffer = PooledByteBufAllocator.DEFAULT
.buffer(request.getSize(RequestType.PROTOCOL_VERSION));
request.write(buffer, RequestType.PROTOCOL_VERSION);
if (channel == null) {
channel = channelFuture.get(10, TimeUnit.SECONDS);
}
if (responseConsumer != null) {
responseMap.put(makeResponseKey(request), responseConsumer);
}
channel.writeAndFlush(buffer);
}
public static String makeResponseKey(RequestPacket request) {
return request.getRequestType() + "_" + request.getClientRequestId();
}
public static String makeResponseKey(ResponsePacket request) {
return request.getRequestType() + "_" + request.getClientRequestId();
}
public boolean isActive() {
return channel != null;
}
public boolean isClosed() {
return group.isShutdown();
}
public void closeChannel() throws InterruptedException {
if (channel != null && channel.isOpen()) {
channel.flush();
channel.close().sync();
}
channel = null;
channelFuture = null;
}
public synchronized void close() throws IOException {
try {
if (group != null) {
group.shutdownGracefully().sync().get();
}
closeChannel();
} catch (InterruptedException | ExecutionException e) {
throw new IOException("Interrupted closing request", e);
}
}
public static List<InetSocketAddress> getLocalServers(Set<Broker> brokers, String locality) {
List<Broker> collect = brokers.stream().filter(b -> b.getLocality().equalsIgnoreCase(locality))
.collect(Collectors.toList());
if (collect.isEmpty()) {
collect = new ArrayList<>(brokers);
}
return collect.stream().map(ep -> {
return InetSocketAddress.createUnresolved(ep.getBrokerIP(), ep.getBrokerPort());
}).collect(Collectors.toList());
}
public boolean awaitConnect(int timeout, TimeUnit timeunit) throws InterruptedException {
return connect.sync().await(timeout, timeunit);
}
public static TopicMetadata getTopicMetadata(String cluster,
String serverset,
String topic,
int timeoutMillis) throws Exception {
MemqCommonClient client = new MemqCommonClient(cluster, serverset, "na", null);
client.awaitConnect(5, TimeUnit.SECONDS);
TopicMetadata topicMetadata = getTopicMetadata(client, topic, timeoutMillis);
client.close();
return topicMetadata;
}
public static TopicMetadata getTopicMetadata(String cluster,
Set<Broker> bootstrapServer,
String topic,
int timeoutMillis) throws Exception {
MemqCommonClient client = new MemqCommonClient(bootstrapServer, null);
client.awaitConnect(5, TimeUnit.SECONDS);
TopicMetadata topicMetadata = getTopicMetadata(client, topic, timeoutMillis);
client.close();
return topicMetadata;
}
public static TopicMetadata getTopicMetadata(MemqCommonClient client,
String topic,
int timeoutMillis) throws InterruptedException,
ExecutionException,
TimeoutException {
Future<ResponsePacket> response = client.sendRequestPacketAndReturnResponseFuture(
new RequestPacket(RequestType.PROTOCOL_VERSION, ThreadLocalRandom.current().nextLong(),
RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket(topic)),
true);
ResponsePacket responsePacket = response.get(timeoutMillis, TimeUnit.MILLISECONDS);
TopicMetadataResponsePacket resp = ((TopicMetadataResponsePacket) responsePacket.getPacket());
return resp.getMetadata();
}
public final class ClientChannelInitializer extends ChannelInitializer<SocketChannel> {
private static final int FRAME_LENGTH_ENCODING_SIZE = 4;
private static final int MAX_FRAME_SIZE = 4 * 1024 * 1024;
private SSLConfig sslConfig;
public ClientChannelInitializer(SSLConfig sslConfig) {
this.sslConfig = sslConfig;
}
protected void initChannel(SocketChannel channel) throws Exception {
try {
ChannelPipeline pipeline = channel.pipeline();
if (sslConfig != null) {
KeyManagerFactory kmf = MemqUtils.extractKMFFromSSLConfig(sslConfig);
TrustManagerFactory tmf = MemqUtils.extractTMPFromSSLConfig(sslConfig);
SslContext ctx = SslContextBuilder.forClient().protocols(sslConfig.getProtocols())
.keyManager(kmf).clientAuth(ClientAuth.REQUIRE).trustManager(tmf).build();
pipeline.addLast(ctx.newHandler(channel.alloc()));
}
pipeline.addLast(new LengthFieldBasedFrameDecoder(ByteOrder.BIG_ENDIAN, MAX_FRAME_SIZE, 0,
FRAME_LENGTH_ENCODING_SIZE, 0, 0, false));
pipeline.addLast(new MemqNettyClientSideResponseHandler(responseHandler));
} catch (Exception e) {
e.printStackTrace();
throw e;
}
}
}
public synchronized void reconnect(String topic) throws Exception {
TopicMetadata md = getTopicMetadata(this, topic, 10000);
this.close();
Set<Broker> brokers = md.getWriteBrokers();
List<InetSocketAddress> localServers = getLocalServers(brokers, locality);
doConnect(localServers, 10);
}
public static Set<Broker> getBootstrapBrokers(String bootstrapServers) {
Set<Broker> seedBrokers = Arrays.asList(bootstrapServers.split(",")).stream().map(e -> {
String[] parts = e.split(":");
return new Broker(parts[0], Short.parseShort(parts[1]), "n/a", "n/a", BrokerType.WRITE, new HashSet<>());
}).collect(Collectors.toSet());
return seedBrokers;
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/ProducerConfigs.java | memq-client/src/main/java/com/pinterest/memq/client/commons/ProducerConfigs.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
/**
 * Property keys (and their defaults) understood by the Memq producer, in
 * addition to those inherited from {@code CommonConfigs}.
 */
public class ProducerConfigs extends CommonConfigs {

  // Core producer identity / serialization.
  public static final String TOPIC_NAME = "topic.name";
  public static final String VALUE_SERIALIZER = "value.serializer";
  public static final String KEY_SERIALIZER = "key.serializer";
  public static final String CLIENT_TYPE = "client.type";

  // Request/ack tuning knobs.
  public static final String REQUEST_ACKS_TIMEOUT_MS = "request.acks.timeout.ms";
  public static final String REQUEST_ACKS_CHECKPOLLINTERVAL_MS = "request.acks.checkpollinterval.ms";
  public static final String REQUEST_ACKS_DISABLE = "request.acks.disable";
  public static final String REQUEST_COMPRESSION_TYPE = "request.compression.type";
  public static final String REQUEST_MAX_PAYLOADBYTES = "request.max.payloadbytes";
  public static final String REQUEST_MAX_INFLIGHTREQUESTS = "request.max.inflightrequests";
  public static final String REQUEST_TIMEOUT = "request.timeout";

  // Defaults
  public static final String DEFAULT_REQUEST_ACKS_TIMEOUT_MS = "60000";
  public static final String DEFAULT_ACK_CHECKPOLLINTERVAL_MS = "100";
  public static final String DEFAULT_DISABLE_ACKS = "false";
  public static final String DEFAULT_COMPRESSION_TYPE = Compression.ZSTD.name();
  // 1 MiB maximum payload per request.
  public static final String DEFAULT_MAX_PAYLOADBYTES = String.valueOf(1024 * 1024);
  public static final String DEFAULT_MAX_INFLIGHT_REQUESTS = "30";
  public static final String DEFAULT_LOCALITY = "none";
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/serde/ByteArrayDeserializer.java | memq-client/src/main/java/com/pinterest/memq/client/commons/serde/ByteArrayDeserializer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons.serde;
import com.pinterest.memq.client.commons.Deserializer;
/**
 * Identity deserializer: returns the raw payload bytes unchanged.
 */
public class ByteArrayDeserializer implements Deserializer<byte[]> {

  @Override
  public byte[] deserialize(byte[] bytes) {
    // Pass-through: no copy is made. NOTE(review): assumes the caller hands
    // over ownership of the array — confirm against consumer code.
    return bytes;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/serde/Serializer.java | memq-client/src/main/java/com/pinterest/memq/client/commons/serde/Serializer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons.serde;
/**
 * Converts a value of type {@code T} into its wire (byte array) representation
 * for publishing to Memq. Counterpart of {@code Deserializer}.
 *
 * @param <T> the value type this serializer accepts
 */
@FunctionalInterface
public interface Serializer<T> {

  /**
   * Serializes the given value to a byte array.
   *
   * @param data the value to serialize
   * @return the serialized bytes
   */
  // Note: the redundant 'public' modifier was removed (interface members are
  // implicitly public); @FunctionalInterface documents and enforces the
  // single-abstract-method contract so lambdas can be used.
  byte[] serialize(T data);
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/serde/ByteArraySerializer.java | memq-client/src/main/java/com/pinterest/memq/client/commons/serde/ByteArraySerializer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons.serde;
/**
 * Identity serializer: returns the supplied byte array unchanged.
 */
public class ByteArraySerializer implements Serializer<byte[]> {

  @Override
  public byte[] serialize(byte[] data) {
    // Pass-through: no defensive copy is made, so callers must not mutate the
    // array after handing it to the producer.
    return data;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/serde/StringDeserializer.java | memq-client/src/main/java/com/pinterest/memq/client/commons/serde/StringDeserializer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons.serde;
import java.nio.charset.StandardCharsets;

import com.pinterest.memq.client.commons.Deserializer;
/**
 * Deserializes a message payload into a {@link String} using UTF-8.
 */
public class StringDeserializer implements Deserializer<String> {

  /**
   * Decodes the given bytes as a UTF-8 string.
   *
   * BUGFIX: previously used {@code new String(bytes)}, which decodes with the
   * JVM's platform-default charset and therefore produces host-dependent
   * results for non-ASCII payloads. Decoding is now pinned to UTF-8 so the
   * output does not depend on host configuration.
   *
   * @param bytes raw message payload
   * @return the decoded string
   */
  @Override
  public String deserialize(byte[] bytes) {
    return new String(bytes, StandardCharsets.UTF_8);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/serde/ThriftDeserializer.java | memq-client/src/main/java/com/pinterest/memq/client/commons/serde/ThriftDeserializer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons.serde;
import java.util.Properties;
import org.apache.thrift.TBase;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import com.pinterest.memq.client.commons.Deserializer;
/**
 * Deserializes Thrift-encoded payloads into a caller-supplied {@link TBase}
 * prototype instance provided via {@link #TBASE_OBJECT_CONFIG}.
 */
@SuppressWarnings("rawtypes")
public class ThriftDeserializer implements Deserializer<TBase> {

  public static final String TBASE_OBJECT_CONFIG = "tBaseObject";

  // Prototype instance supplied at init time. NOTE(review): the SAME instance
  // is mutated and returned by every deserialize() call, so this class is not
  // thread-safe and each result must be fully consumed (or copied) before the
  // next call — confirm this matches caller expectations.
  private TBase tbase;

  /**
   * Initializes the deserializer.
   *
   * @param props must contain {@link #TBASE_OBJECT_CONFIG} mapped to a TBase
   *              instance of the expected payload type
   * @throws RuntimeException if the prototype object is missing
   */
  @Override
  public void init(Properties props) {
    if (!props.containsKey(TBASE_OBJECT_CONFIG)) {
      throw new RuntimeException("ThriftDeserializer must have TBASE_OBJECT_CONFIG");
    }
    tbase = (TBase) props.get(TBASE_OBJECT_CONFIG);
  }

  /**
   * Deserializes the bytes into the shared prototype instance and returns it.
   *
   * @throws RuntimeException wrapping any Thrift decoding failure
   */
  @Override
  public TBase deserialize(byte[] bytes) {
    try {
      // A fresh TDeserializer is created per call; TDeserializer itself is
      // cheap, but see the thread-safety note on the tbase field above.
      TDeserializer tDeserializer = new TDeserializer();
      tDeserializer.deserialize(tbase, bytes);
      return tbase;
    } catch (TException e) {
      throw new RuntimeException(e);
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/audit/Auditor.java | memq-client/src/main/java/com/pinterest/memq/client/commons/audit/Auditor.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons.audit;
import java.io.IOException;
import java.util.Properties;
/**
 * Base class for audit sinks. Implementations record per-batch audit events
 * emitted on both the producer and consumer side so that an external analyzer
 * can match the two streams (see KafkaBackedAuditAnalyzer) and detect loss,
 * duplication or corruption.
 */
public abstract class Auditor {

  public Auditor() {
  }

  /** Initializes the auditor from implementation-specific properties. */
  public abstract void init(Properties props) throws Exception;

  /**
   * Records one audit event.
   *
   * @param cluster     cluster name bytes
   * @param topic       topic name bytes
   * @param hostAddress address of the originating host
   * @param epoch       epoch component of the batch identifier
   * @param id          id component of the batch identifier
   * @param hash        digest of the batch contents, used by the analyzer to
   *                    cross-check producer vs consumer events
   * @param count       number of messages in the batch
   * @param isProducer  true when emitted by a producer, false for a consumer
   * @param clientId    identifier of the emitting client
   */
  public abstract void auditMessage(byte[] cluster,
                                    byte[] topic,
                                    byte[] hostAddress,
                                    long epoch,
                                    long id,
                                    byte[] hash,
                                    int count,
                                    boolean isProducer,
                                    String clientId) throws IOException;

  /** Releases any resources held by the auditor. */
  public abstract void close();
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/audit/KafkaBackedAuditor.java | memq-client/src/main/java/com/pinterest/memq/client/commons/audit/KafkaBackedAuditor.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons.audit;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;
/**
 * Auditor implementation that publishes audit events to a Kafka topic.
 * Producers are shared process-wide (static map keyed by serverset/bootstrap
 * string) so many auditor instances reuse one Kafka client per cluster.
 */
public class KafkaBackedAuditor extends Auditor {

  private static final Map<String, KafkaProducer<byte[], byte[]>> producers = new ConcurrentHashMap<>();
  private String auditTopic;
  private KafkaProducer<byte[], byte[]> producer;

  /**
   * Initializes the auditor.
   *
   * @param props must contain "topic" and either "serverset" (file whose first
   *              line is the bootstrap server list) or an explicit
   *              bootstrap.servers entry
   */
  public void init(Properties props) throws IOException {
    this.auditTopic = props.getProperty("topic");
    String serverset = props.getProperty("serverset");
    this.producer = getProducer(serverset, props);
  }

  /**
   * Returns (creating if necessary) the shared producer for the given
   * serverset/bootstrap key, configured for durable, low-latency audit writes
   * (acks=all, small linger, byte-array serde).
   */
  protected static KafkaProducer<byte[], byte[]> getProducer(String serverset, Properties props) throws IOException {
    if (!props.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
      // Resolve bootstrap servers from the first line of the serverset file.
      props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
          Files.readAllLines(new File(serverset).toPath()).get(0));
    } else {
      // Explicit bootstrap servers win; also use them as the sharing key.
      serverset = props.getProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
    }
    if (!props.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
      throw new IOException("Missing bootstrap server");
    }
    props.put(ProducerConfig.ACKS_CONFIG, "-1");
    props.put(ProducerConfig.LINGER_MS_CONFIG, "10");
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, String.valueOf(100 * 1024));
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
        ByteArraySerializer.class.getCanonicalName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
        ByteArraySerializer.class.getCanonicalName());
    return producers.computeIfAbsent(serverset, s -> new KafkaProducer<>(props));
  }

  /**
   * Encodes the audit event and sends it asynchronously to the audit topic.
   * Key = hostAddress + epoch + id (matches the analyzer's tracking key);
   * value = length-prefixed cluster/topic/host fields followed by epoch, id,
   * hash, count and the producer flag.
   */
  public void auditMessage(byte[] cluster,
                           byte[] topic,
                           byte[] hostAddress,
                           long epoch,
                           long id,
                           byte[] hash,
                           int count,
                           boolean isProducer,
                           String clientId) throws IOException {
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    ByteArrayOutputStream keyOs = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(os);
    DataOutputStream keyDos = new DataOutputStream(keyOs);
    dos.writeShort(cluster.length);
    dos.write(cluster);
    dos.writeShort(topic.length);
    dos.write(topic);
    dos.writeShort(hostAddress.length);
    dos.write(hostAddress);
    dos.writeLong(epoch);
    dos.writeLong(id);
    keyDos.write(hostAddress);
    keyDos.writeLong(epoch);
    keyDos.writeLong(id);
    dos.writeShort(hash.length);
    dos.write(hash);
    dos.writeInt(count);
    dos.writeBoolean(isProducer);
    producer.send(new ProducerRecord<>(auditTopic, keyOs.toByteArray(), os.toByteArray()));
  }

  /**
   * Flushes buffered audit records so async sends are not silently dropped on
   * shutdown. BUGFIX: this was previously a no-op, losing up to linger.ms of
   * audit events. The producer itself is intentionally NOT closed because it
   * is shared across auditor instances via the static map.
   */
  public void close() {
    if (producer != null) {
      producer.flush();
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons/audit/KafkaBackedAuditAnalyzer.java | memq-client/src/main/java/com/pinterest/memq/client/commons/audit/KafkaBackedAuditAnalyzer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons.audit;
import java.io.File;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Arrays;
import java.util.Base64;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Scanner;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import io.netty.util.Recycler;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.codahale.metrics.SlidingTimeWindowArrayReservoir;
import com.codahale.metrics.Timer;
import com.pinterest.memq.commons.mon.OpenTSDBClient;
import com.pinterest.memq.commons.mon.OpenTSDBReporter;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
public class KafkaBackedAuditAnalyzer {
public static class Payload {
private static final Recycler<Payload> RECYCLER = new Recycler<Payload>() {
@Override
protected Payload newObject(Handle<Payload> handle) {
return new Payload(handle);
}
};
byte[] hash;
long ts;
byte producer;
int count;
Recycler.Handle<Payload> handle;
private Payload(Recycler.Handle<Payload> handle) {
this.handle = handle;
}
public static Payload newInstance(byte[] hash, long ts, byte producer, int count) {
Payload payload = RECYCLER.get();
payload.hash = hash;
payload.ts = ts;
payload.producer = producer;
payload.count = count;
return payload;
}
public void recycle() {
hash = null;
ts = 0;
producer = 0;
count = 0;
handle.recycle(this);
}
}
private KafkaConsumer<byte[], byte[]> consumer;
private ScheduledExecutorService es;
private String hostName;
private final Map<String, KafkaTopicAuditAnalyzer> topicAnalyzers = new ConcurrentHashMap<>();
public KafkaBackedAuditAnalyzer() {
}
public void init(Properties props) throws Exception {
hostName = InetAddress.getLocalHost().getHostName();
if (hostName.contains(".")) {
hostName = hostName.substring(0, hostName.indexOf("."));
}
Thread th = new Thread(() -> {
Scanner sc = new Scanner(System.in);
while (sc.hasNext()) {
String next = sc.next();
switch (next) {
case "purge":
for (KafkaTopicAuditAnalyzer a : topicAnalyzers.values()) {
a.purge();
}
System.out.println("Purged");
break;
}
}
System.out.println("Completed");
sc.close();
});
th.setDaemon(true);
th.start();
consumer = new KafkaConsumer<>(props);
consumer.subscribe(Arrays.asList(props.getProperty("audittopic")));
es = Executors.newScheduledThreadPool(1, new DaemonThreadFactory());
es.schedule(() -> {
int c = 0;
for (KafkaTopicAuditAnalyzer a : topicAnalyzers.values()) {
c += a.truncateInitialDelta();
}
System.out.println("Purging initial delta records:" + c);
}, 1, TimeUnit.MINUTES);
es.scheduleAtFixedRate(() -> {
long delta = 0;
long noAcksCount = 0;
long trackingSize = 0;
for (KafkaTopicAuditAnalyzer a : topicAnalyzers.values()) {
delta += a.getDelta();
noAcksCount += a.getNoAckCount();
trackingSize += a.getTrackingSize();
}
System.out.println(
new Date() + " audit events>300s MissingAcks:" + delta
+ " NoAcksFor300s:" + noAcksCount + " AuditMapSize:" + trackingSize);
}, 0, 60, TimeUnit.SECONDS);
}
public void run() {
while (true) {
ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(2000));
for (ConsumerRecord<byte[], byte[]> record : records) {
ByteBuffer wrap = ByteBuffer.wrap(record.value());
byte[] cluster = new byte[wrap.getShort()];
wrap.get(cluster);
byte[] topic = new byte[wrap.getShort()];
wrap.get(topic);
long recordTimestamp = record.timestamp();
String clusterStr = new String(cluster);
String topicStr = new String(topic);
String key = String.format("%s-%s", clusterStr, topicStr);
KafkaTopicAuditAnalyzer ta = topicAnalyzers.get(key);
if (ta == null) {
try {
ta = new KafkaTopicAuditAnalyzer(clusterStr, topicStr);
topicAnalyzers.put(key, ta);
} catch (Exception e) {
System.out.println("Failed to init topic audit analyzer for " + key);
}
}
if (ta != null) {
ta.audit(wrap, recordTimestamp);
}
}
consumer.commitAsync();
}
}
public static void main(String[] args) throws Exception {
KafkaBackedAuditAnalyzer analyzer = new KafkaBackedAuditAnalyzer();
String auditTopicServerset = args[0];
String auditTopic = args[1];
String groupId = args[2];
Properties props = new Properties();
props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServersFromServerset(auditTopicServerset));
props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
ByteArrayDeserializer.class.getCanonicalName());
props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
ByteArrayDeserializer.class.getCanonicalName());
props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
props.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "10000");
props.setProperty("audittopic", auditTopic);
analyzer.init(props);
analyzer.run();
}
/**
 * Per (cluster, topic) audit analyzer that pairs producer-side and consumer-side audit
 * records by key and reports loss/latency metrics to OpenTSDB.
 *
 * Each audit record carries a key (host address + epochId), a payload hash, a message
 * count, and a producer flag. The first record for a key is parked in
 * {@code auditTrackerMap}; when the matching record from the other side arrives, the
 * hashes are compared and the delta counters are updated.
 */
private class KafkaTopicAuditAnalyzer {
  private String cluster;
  private String topic;
  // Keyed by Base64(host address + epochId); holds whichever side (producer/consumer)
  // was seen first, until the matching record from the other side arrives.
  private Map<String, Payload> auditTrackerMap = new ConcurrentHashMap<>();
  // Total message count reported by producer-side records.
  private AtomicLong counter = new AtomicLong();
  // Total message count confirmed by consumer-side (ack) records.
  private AtomicLong ackedCounter = new AtomicLong();
  private Counter logmessageCounter;
  private Counter duplicateAckCounter;
  private Counter hashMismatchCounter;
  private Timer ackLatency;

  public KafkaTopicAuditAnalyzer(String cluster, String topic) throws Exception {
    this.cluster = cluster;
    this.topic = topic;
    init();
  }

  /**
   * Registers the gauges/counters/timer for this (cluster, topic) pair and starts a
   * once-per-minute OpenTSDB reporter pointed at localhost:18126.
   */
  public void init() throws Exception {
    MetricRegistry registry = new MetricRegistry();
    // delta = produced - acked; a persistent positive delta indicates data loss
    registry.register("delta", (Gauge<Long>) () -> counter.get() - ackedCounter.get());
    registry.register("trackersize", (Gauge<Long>) () -> (long) auditTrackerMap.size());
    // 1-minute sliding window so the latency timer only reflects recent acks
    ackLatency = registry.timer("latency", () -> new Timer(new SlidingTimeWindowArrayReservoir(1, TimeUnit.MINUTES)));
    logmessageCounter = registry.counter("logmessages");
    duplicateAckCounter = registry.counter("duplicate.acks");
    hashMismatchCounter = registry.counter("hash.mismatch");
    Map<String, Object> tags = new HashMap<>();
    tags.put("cluster", cluster);
    tags.put("topic", topic);
    ScheduledReporter
        reporter =
        OpenTSDBReporter.createReporterWithTags("memqaudit", registry, cluster,
            (String name, Metric m) -> true, TimeUnit.SECONDS, TimeUnit.SECONDS,
            new OpenTSDBClient("localhost", 18126), hostName, tags);
    reporter.start(1, TimeUnit.MINUTES);
  }

  /**
   * Processes one audit record read from {@code message}. Wire layout (in read order):
   * key length (short), key bytes (host address + epochId), hash length (short),
   * hash bytes, message count (int), producer flag (byte; 1 = producer side).
   *
   * If the key has not been seen yet the record is parked; counts are only added when
   * the producer record arrives first so consumer-first arrivals (e.g. after a singer
   * restart) don't corrupt the counter. When the matching record arrives, hashes are
   * compared, latency is recorded, and the tracker entry is removed and recycled.
   */
  public void audit(ByteBuffer message, long recordTimestamp) {
    // key = host address + epochId; the extra 16 bytes are the fixed-size suffix
    // beyond the length-prefixed portion
    byte[] key = new byte[message.getShort() + 16];
    message.get(key);
    byte[] hash = new byte[message.getShort()];
    message.get(hash);
    String keyString = Base64.getEncoder().encodeToString(key);
    int count = message.getInt();
    byte producer = message.get();
    Payload output = auditTrackerMap.get(keyString);
    if (output == null) {
      auditTrackerMap.put(keyString, Payload.newInstance(hash, recordTimestamp, producer, count));
      if (producer == 1) {
        // only increment counter if producer message comes first
        // if consumer message comes first, we want to ignore the count so singer restarts won't corrupt the counter
        counter.addAndGet(count);
      }
    } else {
      byte[] array = output.hash;
      if (producer == output.producer) {
        // same side seen twice for the same key: a duplicate, not a match
        duplicateAckCounter.inc();
        return;
      }
      if (!Arrays.equals(hash, array)) {
        hashMismatchCounter.inc();
      } else {
        // abs() because either side may arrive first
        long latency = Math.abs(recordTimestamp - output.ts);
        ackLatency.update(latency, TimeUnit.MILLISECONDS);
        if (producer == 1) {
          // producer message comes after consumer message
          // need to bump both counters so data loss cancels out
          counter.addAndGet(count);
        }
        ackedCounter.addAndGet(count);
        logmessageCounter.inc(count);
      }
      auditTrackerMap.remove(keyString).recycle();
    }
  }

  /**
   * Drops tracker entries older than the threshold (e.g. unmatched records left over
   * from before this analyzer started) and returns how many were removed.
   *
   * NOTE(review): the threshold here is 300 ms while getNoAckCount() uses 300_000 ms
   * (5 minutes) — confirm the missing _000 is intentional and not a typo.
   */
  public int truncateInitialDelta() {
    Iterator<Entry<String, Payload>> iterator = auditTrackerMap.entrySet().iterator();
    int c = 0;
    while (iterator.hasNext()) {
      Entry<String, Payload> next = iterator.next();
      if (System.currentTimeMillis() - next.getValue().ts > 300) {
        iterator.remove();
        c++;
      }
    }
    return c;
  }

  /** Discards all parked (unmatched) audit records. */
  public void purge() {
    auditTrackerMap.clear();
  }

  /** @return produced count minus acked count; non-zero indicates in-flight or lost data */
  public long getDelta() {
    return counter.get() - ackedCounter.get();
  }

  /** @return number of records currently parked awaiting their matching record */
  public long getTrackingSize() {
    return auditTrackerMap.size();
  }

  /** @return number of parked records older than 5 minutes, i.e. likely unacked/lost */
  public long getNoAckCount() {
    return auditTrackerMap.values().stream().filter(v -> System.currentTimeMillis() - v.ts > 300_000).count();
  }
}
/**
 * Reads a serverset file and joins its lines into a single comma-separated
 * bootstrap-server string.
 *
 * @param serversetFile path of the serverset file, one server entry per line
 * @return comma-joined file contents
 * @throws Exception if the file cannot be read
 */
private static String getBootstrapServersFromServerset(String serversetFile) throws Exception {
  List<String> lines = Files.readAllLines(new File(serversetFile).toPath());
  return lines.stream().collect(Collectors.joining(","));
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/TopicNotFoundException.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/TopicNotFoundException.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2;
/**
 * Signals that a requested MemQ topic does not exist (e.g. the broker answered a
 * metadata request with a NOT_FOUND response code).
 */
public class TopicNotFoundException extends Exception {

  private static final long serialVersionUID = 1L;

  /** Creates an exception with neither message nor cause. */
  public TopicNotFoundException() {
  }

  /** @param message detail describing which topic was not found */
  public TopicNotFoundException(String message) {
    super(message);
  }

  /** @param cause underlying failure that surfaced as a missing topic */
  public TopicNotFoundException(Throwable cause) {
    super(cause);
  }

  /**
   * @param message detail describing which topic was not found
   * @param cause   underlying failure that surfaced as a missing topic
   */
  public TopicNotFoundException(String message, Throwable cause) {
    super(message, cause);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/ClosedException.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/ClosedException.java | package com.pinterest.memq.client.commons2;
/**
 * Thrown when an operation is attempted on a component that has already been closed.
 * Carries no message or cause — callers only need the fact that the target was closed.
 * NOTE(review): presumably raised by the client/network layer; confirm at call sites.
 */
public class ClosedException extends Exception {
  private static final long serialVersionUID = 1L;
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/MemqPooledByteBufAllocator.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/MemqPooledByteBufAllocator.java | package com.pinterest.memq.client.commons2;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A wrapper around Netty's PooledByteBufAllocator that provides a retry mechanism for memory allocation.
*
* The behavior is as follows:
* 1. If an OutOfMemoryError occurs during buffer allocation, it will retry allocation up to a specified maximum block time.
* 2. The retry interval starts at a base value and doubles with each retry, up to a maximum block time.
* 3. If the allocation fails after all retries, a MemoryAllocationException is thrown.
*
* If maxBlockMs is set to 0, it will try once and throw an exception immediately if allocation fails.
*/
/**
 * A wrapper around Netty's PooledByteBufAllocator that provides a retry mechanism for memory allocation.
 *
 * The behavior is as follows:
 * 1. If an OutOfMemoryError occurs during buffer allocation, it will retry allocation up to a specified maximum block time.
 * 2. The retry interval starts at a base value and grows with each retry (see backoff()).
 * 3. If the allocation fails after all retries, a MemoryAllocationException is thrown.
 *
 * If maxBlockMs is set to 0 (the default), it will try once and throw an exception immediately if allocation fails.
 */
public class MemqPooledByteBufAllocator {

  private static final Logger logger = LoggerFactory.getLogger(MemqPooledByteBufAllocator.class);
  // smallest sleep between retries
  private static final long BASE_RETRY_INTERVAL_MS = 5;
  // 0 => fail fast, no retries
  private static final int DEFAULT_MAX_BLOCK_MS = 0;

  /**
   * Allocates a buffer with unbounded max capacity and no blocking retries.
   *
   * @param initialCapacity initial buffer capacity in bytes
   * @throws MemoryAllocationException if the pool cannot satisfy the allocation
   */
  public static ByteBuf buffer(int initialCapacity) throws MemoryAllocationException {
    return buffer(initialCapacity, Integer.MAX_VALUE);
  }

  /**
   * Allocates a buffer with no blocking retries (maxBlockMs = 0).
   *
   * @param initialCapacity initial buffer capacity in bytes
   * @param maxCapacity     maximum capacity the buffer may grow to
   * @throws MemoryAllocationException if the pool cannot satisfy the allocation
   */
  public static ByteBuf buffer(int initialCapacity, int maxCapacity) throws MemoryAllocationException {
    return buffer(initialCapacity, maxCapacity, DEFAULT_MAX_BLOCK_MS);
  }

  /**
   * Allocates a buffer, retrying on OutOfMemoryError for up to maxBlockMs milliseconds.
   *
   * @param initialCapacity initial buffer capacity in bytes
   * @param maxCapacity     maximum capacity the buffer may grow to
   * @param maxBlockMs      total retry budget in ms; {@code <= 0} means try once and fail fast
   * @throws MemoryAllocationException if allocation does not succeed within the budget,
   *         or if the thread is interrupted while waiting to retry
   */
  public static ByteBuf buffer(int initialCapacity, int maxCapacity, int maxBlockMs) throws MemoryAllocationException {
    long retryIntervalMs = Math.max(BASE_RETRY_INTERVAL_MS, maxBlockMs / 100);
    long startTimeMs = System.currentTimeMillis();
    int tries = 0;
    // tries == 0 guarantees at least one attempt; maxBlockMs <= 0 keeps the loop open
    // but the catch below breaks out immediately in that case (fail-fast semantics)
    while (tries == 0 || System.currentTimeMillis() - startTimeMs < maxBlockMs || maxBlockMs <= 0) {
      try {
        return PooledByteBufAllocator.DEFAULT.buffer(initialCapacity, maxCapacity);
      } catch (OutOfMemoryError oom) {
        if (maxBlockMs <= 0) {
          break; // no retry budget configured: surface the failure immediately
        }
        logger.trace("Not enough memory to allocate buffer with initialCapacity=" + initialCapacity + ", maxCapacity=" + maxCapacity + ", retrying in " + retryIntervalMs + "ms");
        retryIntervalMs = backoff(maxBlockMs, retryIntervalMs);
        tries++;
      }
    }
    throw new MemoryAllocationException("Failed to allocate buffer with initialCapacity=" + initialCapacity + ", maxCapacity=" + maxCapacity + " within " + maxBlockMs + "ms");
  }

  /**
   * Sleeps for the current retry interval and returns the interval to use next time.
   *
   * @throws MemoryAllocationException if interrupted while sleeping; the thread's
   *         interrupt status is restored before throwing
   */
  private static long backoff(int maxBlockMs, long retryIntervalMs) throws MemoryAllocationException {
    try {
      Thread.sleep(retryIntervalMs);
      // NOTE(review): Math.max puts a FLOOR of maxBlockMs/10 under the doubled interval,
      // whereas the class javadoc describes doubling capped by the max block time —
      // confirm whether Math.min was intended here before changing the timing behavior.
      retryIntervalMs = Math.max(retryIntervalMs * 2, maxBlockMs / 10);
    } catch (InterruptedException e) {
      // Fix: restore the interrupt flag (callers/pools rely on it) and preserve the
      // cause instead of swallowing both.
      Thread.currentThread().interrupt();
      throw new MemoryAllocationException("Interrupted while waiting to allocate buffer", e);
    }
    return retryIntervalMs;
  }

  /** @return bytes of direct memory currently used by the default pooled allocator */
  public static long usedDirectMemory() {
    return PooledByteBufAllocator.DEFAULT.metric().usedDirectMemory();
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/MemoryAllocationException.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/MemoryAllocationException.java | package com.pinterest.memq.client.commons2;
import java.io.IOException;
/**
 * Signals that a pooled buffer allocation could not be satisfied (or was interrupted
 * while waiting for memory to become available).
 */
public class MemoryAllocationException extends Exception {

  /** @param message detail about the failed allocation */
  public MemoryAllocationException(String message) {
    super(message);
  }

  /**
   * @param message detail about the failed allocation
   * @param e       underlying cause (e.g. an InterruptedException)
   */
  public MemoryAllocationException(String message, Throwable e) {
    super(message, e);
  }

  /** @param e underlying cause */
  public MemoryAllocationException(Throwable e) {
    super(e);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/TransportPacketIdentifier.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/TransportPacketIdentifier.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2;
import com.pinterest.memq.commons.protocol.TransportPacket;
import io.netty.buffer.ByteBuf;
import java.io.IOException;
/**
 * Identity-only view of a {@link TransportPacket}: copies just the three fields that
 * identify a request (protocol version, client request id, request type) so packets can
 * be used as map/set keys. The serialization hooks are deliberate no-ops.
 */
public class TransportPacketIdentifier extends TransportPacket {

  /** Snapshots the identifying fields of the given packet. */
  public TransportPacketIdentifier(TransportPacket packet) {
    protocolVersion = packet.getProtocolVersion();
    clientRequestId = packet.getClientRequestId();
    requestType = packet.getRequestType();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || o.getClass() != getClass()) {
      return false;
    }
    TransportPacketIdentifier other = (TransportPacketIdentifier) o;
    return protocolVersion == other.protocolVersion
        && clientRequestId == other.clientRequestId
        && requestType == other.requestType;
  }

  @Override
  public int hashCode() {
    // 31-based mix of the three identity fields; Long.hashCode(v) is the same
    // bit-fold as (int) (v ^ (v >>> 32))
    int h = protocolVersion;
    h = 31 * h + Long.hashCode(clientRequestId);
    h = 31 * h + requestType.hashCode();
    return h;
  }

  @Override
  public String toString() {
    return "PacketIdentifier{" +
        "protocolVersion=" + protocolVersion +
        ", clientId=" + clientRequestId +
        ", requestType=" + requestType +
        '}';
  }

  // Identifier-only packet: nothing to deserialize, serialize, or size.

  @Override
  public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
  }

  @Override
  public void write(ByteBuf buf, short protocolVersion) {
  }

  @Override
  public int getSize(short protocolVersion) {
    return 0;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/DataNotFoundException.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/DataNotFoundException.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2;
/**
 * Signals that requested data (e.g. a stored object/notification) could not be located.
 * Mirrors the full constructor surface of {@link Exception}.
 */
public class DataNotFoundException extends Exception {

  private static final long serialVersionUID = 1L;

  /** Creates an exception with neither message nor cause. */
  public DataNotFoundException() {
    super();
  }

  /** @param message detail describing what data was missing */
  public DataNotFoundException(String message) {
    super(message);
  }

  /** @param cause underlying failure that surfaced as missing data */
  public DataNotFoundException(Throwable cause) {
    super(cause);
  }

  /**
   * @param message detail describing what data was missing
   * @param cause   underlying failure that surfaced as missing data
   */
  public DataNotFoundException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * @param enableSuppression  whether suppression is enabled
   * @param writableStackTrace whether the stack trace should be writable
   */
  public DataNotFoundException(String message,
                               Throwable cause,
                               boolean enableSuppression,
                               boolean writableStackTrace) {
    super(message, cause, enableSuppression, writableStackTrace);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/MemqCommonClient.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/MemqCommonClient.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2;
import static com.pinterest.memq.client.commons2.Endpoint.DEFAULT_LOCALITY;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons2.network.NetworkClient;
import com.pinterest.memq.commons.config.SSLConfig;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.TopicMetadata;
import com.pinterest.memq.commons.protocol.TopicMetadataRequestPacket;
import com.pinterest.memq.commons.protocol.TopicMetadataResponsePacket;
/**
 * Shared client for talking to MemQ brokers: discovers endpoints, prefers endpoints in
 * the configured locality, round-robins requests across a bounded set of "write"
 * endpoints, and deprioritizes/evicts endpoints that fail repeatedly.
 */
public class MemqCommonClient implements Closeable {

  private static final Logger logger = LoggerFactory.getLogger(MemqCommonClient.class);
  private static final int MAX_SEND_RETRIES = 3;
  public static final String CONFIG_NUM_WRITE_ENDPOINTS = "numWriteEndpoints"; // number of endpoints for writes

  private final NetworkClient networkClient;
  private long connectTimeout = 500;
  private int numWriteEndpoints = 1;
  private String locality = DEFAULT_LOCALITY;
  // volatile + copy-on-write: these lists are replaced wholesale, never mutated in place
  private volatile List<Endpoint> localityEndpoints;
  private volatile List<Endpoint> writeEndpoints;
  // consecutive-failure counts per endpoint; reset on success
  private Map<Endpoint, Integer> failureCounts;
  private final AtomicInteger writeRotateIdx = new AtomicInteger(0);
  private final AtomicInteger localityRotateIdx = new AtomicInteger(0);

  protected MemqCommonClient(SSLConfig sslConfig, Properties networkProperties) {
    if (networkProperties != null) {
      if (networkProperties.containsKey(NetworkClient.CONFIG_CONNECT_TIMEOUT_MS)) {
        this.connectTimeout = Long
            .parseLong(networkProperties.getProperty(NetworkClient.CONFIG_CONNECT_TIMEOUT_MS));
      }
      if (networkProperties.containsKey(CONFIG_NUM_WRITE_ENDPOINTS)) {
        this.numWriteEndpoints = Math.max(1, Integer.parseInt(networkProperties.getProperty(CONFIG_NUM_WRITE_ENDPOINTS)));
      }
    }
    writeEndpoints = Collections.emptyList();
    failureCounts = new ConcurrentHashMap<>();
    networkClient = new NetworkClient(networkProperties, sslConfig);
  }

  public MemqCommonClient(String locality, SSLConfig sslConfig, Properties networkProperties) {
    this(sslConfig, networkProperties);
    this.locality = locality;
  }

  public void initialize(List<Endpoint> endpoints) throws Exception {
    resetEndpoints(endpoints);
  }

  public void resetEndpoints(List<Endpoint> endpoints) throws Exception {
    this.localityEndpoints = Collections.unmodifiableList(getLocalityEndpoints(endpoints));
    validateEndpoints();
  }

  /** @throws Exception if there are no endpoints to send to */
  private void validateEndpoints() throws Exception {
    if (localityEndpoints.isEmpty()) {
      throw new Exception("No endpoints available");
    }
  }

  /**
   * Send a request packet and return a future for the response.
   *
   * The choice of endpoint to try is the first endpoint in the list returned by getEndpointsToTry().
   * The order of the list is based on the logic in getEndpointsToTry().
   *
   * If the request succeeds and there are less than numWriteEndpoints in the set of write endpoints, the endpoint is added to the set,
   * and the next endpoint is chosen by rotating the locality endpoints.
   *
   * If the request succeeds and there are already numWriteEndpoints in the set of write endpoints, the endpoint must already be in the set for it to be chosen,
   * so nothing is added to the set. The next endpoint is chosen by rotating the write endpoints, which will give us another endpoint in the set of write endpoints.
   *
   * If the request fails before reaching the retry limit, the dead endpoint is removed from the set of write endpoints in rotation,
   * and the next working endpoint not already in the rotation is added to the set. The next retry will try the next endpoint in the list.
   *
   * If an endpoint fails 2 times in a row, it is removed from both the set of write endpoints and the set of locality endpoints so it is not considered for future requests.
   *
   * If the request fails after reaching the retry limit, the exception is propagated without further refreshing.
   *
   * For example:
   * <pre>
   * {@code
   * numWriteEndpoints = 3
   * localityEndpoints = [A, B, C, D, E, F]
   * writeEndpoints = []
   *
   * getEndpointsToTry() returns rotate(localityEndpoints) -->[A, B, C, D, E, F]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint A --> succeed --> writeEndpoints = [A]
   * getEndpointsToTry() returns rotate(localityEndpoints) --> [B, C, D, E, F, A]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint B --> succeed --> writeEndpoints = [A, B]
   * getEndpointsToTry() returns rotate(localityEndpoints) --> [C, D, E, F, A, B]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint C --> succeed --> writeEndpoints = [A, B, C]
   *
   * ------- writeEndpoints is full -------
   *
   * getEndpointsToTry() returns rotate(writeEndpoints) U localityEndpoints --> [B, C, A, D, E, F]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint B --> succeed --> writeEndpoints = [B, C, A]
   * getEndpointsToTry() returns rotate(writeEndpoints) U localityEndpoints --> [C, A, B, D, E, F]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint C --> succeed --> writeEndpoints = [C, A, B]
   * getEndpointsToTry() returns rotate(writeEndpoints) U localityEndpoints --> [A, B, C, D, E, F]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint A --> succeed --> writeEndpoints = [A, B, C]
   *
   * ...
   *
   * ------- now endpoint A is dead -------
   *
   * getEndpointsToTry() returns rotate(writeEndpoints) U localityEndpoints --> [A, B, C, D, E, F]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint A --> fail --> deprioritizeDeadEndpoint(A) --> retry --> try endpoint B --> succeed --> writeEndpoints = [B, C]
   *
   * ------- writeEndpoints is not full -------
   *
   * getEndpointsToTry() returns rotate(localityEndpoints) --> [B, C, D, E, F, A]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint B --> succeed --> B is already in writeEndpoints, so writeEndpoints = [B, C]
   * getEndpointsToTry() returns rotate(localityEndpoints) --> [C, D, E, F, A, B]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint C --> succeed --> writeEndpoints = [C, B]
   * getEndpointsToTry() returns rotate(localityEndpoints) --> [D, E, F, A, B, C]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint D --> succeed --> writeEndpoints = [C, B, D]
   *
   * ------- writeEndpoints is full -------
   *
   * getEndpointsToTry() returns rotate(writeEndpoints) U localityEndpoints --> [B, C, D, E, F, A]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint B --> succeed --> writeEndpoints = [B, C, D]
   * getEndpointsToTry() returns rotate(writeEndpoints) U localityEndpoints --> [C, D, B, E, F, A]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint C --> succeed --> writeEndpoints = [C, D, B]
   * getEndpointsToTry() returns rotate(writeEndpoints) U localityEndpoints --> [D, B, C, E, F, A]
   * sendRequestPacketAndReturnResponseFuture() --> try endpoint D --> succeed --> writeEndpoints = [D, B, C]
   *
   * ...
   * }
   * </pre>
   *
   * @param request
   * @param topic
   * @param timeoutMillis
   * @return the CompletableFuture for the response
   * @throws InterruptedException
   * @throws TimeoutException
   * @throws ExecutionException
   */
  public CompletableFuture<ResponsePacket> sendRequestPacketAndReturnResponseFuture(RequestPacket request,
                                                                                    String topic,
                                                                                    long timeoutMillis) throws InterruptedException,
                                                                                                        TimeoutException,
                                                                                                        ExecutionException {
    if (localityEndpoints == null || writeEndpoints == null || localityEndpoints.isEmpty()) {
      throw new IllegalStateException("Client not initialized yet");
    }
    CompletableFuture<ResponsePacket> future = null;
    List<Endpoint> endpointsToTry = getEndpointsToTry();
    long elapsed = 0;
    long start = System.currentTimeMillis();
    int retryCount = Math.min(MAX_SEND_RETRIES, endpointsToTry.size());
    for (int retry = 0; retry < retryCount; retry++) {
      if (elapsed > timeoutMillis) {
        throw new TimeoutException("Failed to send after " + timeoutMillis + " ms");
      }
      Endpoint endpoint = endpointsToTry.get(retry);
      try {
        future = networkClient.send(endpoint.getAddress(), request,
            Duration.ofMillis(timeoutMillis - elapsed));
        maybeRegisterWriteEndpoint(endpoint, topic);
        break;
      } catch (ExecutionException e) {
        if (e.getCause() instanceof ConnectException) {
          if (retry == retryCount - 1) {
            logger.error("Failed to send request packet for topic=" + topic, e);
            throw e;
          } else {
            logger.warn("Retrying send request after failure for topic=" + topic, e);
            try {
              deprioritizeDeadEndpoint(endpoint, topic); // this endpoint is down even after retries in NetworkClient, remove it from the write endpoints and take another one from locality endpoints
            } catch (Exception ex) {
              logger.error("Failed to refresh write endpoints", ex);
              throw e;
            }
          }
        } else {
          throw e;
        }
      } finally {
        elapsed = System.currentTimeMillis() - start;
      }
    }
    if (future == null) {
      future = new CompletableFuture<>();
      future.completeExceptionally(new Exception("No suitable endpoints"));
    }
    return future;
  }

  public List<Endpoint> currentWriteEndpoints() {
    return writeEndpoints;
  }

  /**
   * Fetches topic metadata from a broker; resets the write-endpoint rotation since
   * metadata may redirect traffic to a different broker set.
   *
   * @throws TopicNotFoundException if the broker answers with NOT_FOUND
   */
  public TopicMetadata getTopicMetadata(String topic,
                                        long timeoutMillis) throws TopicNotFoundException,
                                                            ExecutionException,
                                                            InterruptedException, TimeoutException {
    Future<ResponsePacket> response = sendRequestPacketAndReturnResponseFuture(
        new RequestPacket(RequestType.PROTOCOL_VERSION, ThreadLocalRandom.current().nextLong(),
            RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket(topic)),
        topic,
        timeoutMillis);
    ResponsePacket responsePacket = response.get(timeoutMillis, TimeUnit.MILLISECONDS);
    if (responsePacket.getResponseCode() == ResponseCodes.NOT_FOUND) {
      throw new TopicNotFoundException("Topic " + topic + " not found");
    }
    TopicMetadataResponsePacket resp = ((TopicMetadataResponsePacket) responsePacket.getPacket());
    writeEndpoints = Collections.emptyList();
    return resp.getMetadata();
  }

  public TopicMetadata getTopicMetadata(String topic) throws TopicNotFoundException,
                                                      ExecutionException, InterruptedException,
                                                      TimeoutException {
    return getTopicMetadata(topic, connectTimeout);
  }

  /** Re-resolves brokers for the topic and rebuilds the locality endpoint list. */
  public synchronized void reconnect(String topic, boolean isConsumer) throws Exception {
    logger.warn("Reconnecting topic " + topic);
    TopicMetadata md = getTopicMetadata(topic, connectTimeout);
    networkClient.reset();
    Set<Broker> brokers = null;
    if (isConsumer) {
      brokers = md.getReadBrokers();
    } else {
      brokers = md.getWriteBrokers();
    }
    localityEndpoints = Collections.unmodifiableList(
        getLocalityEndpoints(brokers.stream().map(Endpoint::fromBroker).collect(Collectors.toList())));
    validateEndpoints();
  }

  /** @return a shuffled copy of the given endpoint list (input is not mutated) */
  protected List<Endpoint> randomizedEndpoints(List<Endpoint> servers) {
    List<Endpoint> shuffle = new ArrayList<>(servers);
    Collections.shuffle(shuffle);
    return shuffle;
  }

  /**
   * Get the endpoints to try for a given request, ordered by priority in the following way:<br>
   * 1. N rotated write endpoints, where N = numWriteEndpoints (config) and the write endpoints are rotated by 1 after each call<br>
   * 2. Remaining locality endpoints, the order of which was already shuffled during initialization<br>
   *
   * A given request will attempt to be sent to the first endpoint in the list, and if that fails, the next endpoint in the list will be tried, and so on.<br>
   *
   * <pre>
   * {@code
   * Example:
   * numEndpoints = 3
   * localityEndpoints = [A, B, C, D, E, F]
   * writeEndpoints = [A, B, C]
   *
   * Example:
   * getEndpointsToTry() returns [A, B, C, D, E, F]
   * getEndpointsToTry() returns [C, A, B, D, E, F]
   * getEndpointsToTry() returns [B, C, A, D, E, F]
   * getEndpointsToTry() returns [A, B, C, D, E, F]
   * ...
   * }
   * </pre>
   *
   * This ensures that the write endpoints are used in a round-robin manner.
   *
   * If there are less than numWriteEndpoints in the set of writeEndpoints, the localityEndpoints are rotated by 1 and returned. The client will add working endpoints
   * to the set of writeEndpoints until the size of writeEndpoints reaches numWriteEndpoints. An example is provided in javadoc for sendRequestPacketAndReturnResponseFuture().
   *
   * @return the endpoints to try
   */
  protected List<Endpoint> getEndpointsToTry() {
    // Snapshot current lists to avoid races and in-place mutation
    List<Endpoint> writes = this.writeEndpoints;
    List<Endpoint> locals = this.localityEndpoints;
    List<Endpoint> endpointsToTry = new ArrayList<>(writes.size() + locals.size());
    if (writes.size() == numWriteEndpoints) {
      // If the set of writeEndpoints is full, rotate the writeEndpoints by 1 and add the localityEndpoints that are not in the set of writeEndpoints to the end of the list
      int start = Math.floorMod(writeRotateIdx.getAndIncrement(), Math.max(1, writes.size()));
      for (int i = 0; i < writes.size(); i++) {
        endpointsToTry.add(writes.get((start + i) % writes.size()));
      }
      for (Endpoint e : locals) {
        if (!writes.contains(e)) {
          endpointsToTry.add(e);
        }
      }
    } else {
      // If the set of writeEndpoints is not full, rotate the localityEndpoints by 1
      int start = Math.floorMod(localityRotateIdx.getAndIncrement(), Math.max(1, locals.size()));
      for (int i = 0; i < locals.size(); i++) {
        endpointsToTry.add(locals.get((start + i) % locals.size()));
      }
    }
    return endpointsToTry;
  }

  /**
   * The provided endpoint had just succeeded, so register the endpoint as a write endpoint if it is not already in the set of write endpoints and if the set of write endpoints is not full.
   *
   * The endpoint's failure count is reset to 0 since it had just succeeded.
   *
   * If the set of write endpoints is full, nothing is done.
   *
   * @param endpoint
   * @param topic
   */
  protected void maybeRegisterWriteEndpoint(Endpoint endpoint, String topic) {
    failureCounts.remove(endpoint);
    List<Endpoint> currentWrites = this.writeEndpoints;
    if (currentWrites.size() < numWriteEndpoints && !currentWrites.contains(endpoint)) {
      logger.info("Registering write endpoint: " + endpoint + " for topic: " + topic);
      List<Endpoint> newWrites = new ArrayList<>(currentWrites);
      newWrites.add(endpoint);
      this.writeEndpoints = Collections.unmodifiableList(newWrites);
    }
    List<Endpoint> currentLocals = this.localityEndpoints;
    if (!currentLocals.contains(endpoint)) {
      logger.info("Registering locality endpoint: " + endpoint + " for topic: " + topic);
      List<Endpoint> newLocals = new ArrayList<>(currentLocals);
      newLocals.add(endpoint);
      this.localityEndpoints = Collections.unmodifiableList(newLocals);
    }
  }

  /**
   * Deprioritize the dead endpoint by removing it from the set of write endpoints and moving it to the end of locality endpoints.
   *
   * If the endpoint has failed 2 times in a row, it is removed from both the set of write endpoints and the set of locality endpoints so it is not considered for future requests.
   *
   * If the endpoint has failed 1 time but succeeds again in a future request, its failure count is reset to 0 in maybeRegisterWriteEndpoint().
   *
   * An example is provided in javadoc for sendRequestPacketAndReturnResponseFuture().
   *
   * @param deadEndpoint
   * @param topic
   * @throws Exception
   */
  protected void deprioritizeDeadEndpoint(Endpoint deadEndpoint, String topic) throws Exception {
    // Fix: use compute()'s return value instead of a separate get(); this is a single
    // atomic update and avoids a null-unboxing NPE if another thread resets the count
    // (via maybeRegisterWriteEndpoint) between the two calls.
    int failures = failureCounts.compute(deadEndpoint, (k, v) -> v == null ? 1 : v + 1);
    List<Endpoint> newLocals = new ArrayList<>(this.localityEndpoints);
    newLocals.remove(deadEndpoint);
    List<Endpoint> newWrites = new ArrayList<>(this.writeEndpoints);
    newWrites.remove(deadEndpoint);
    if (failures >= 2) {
      logger.warn("Dead endpoint " + deadEndpoint + " has failed 2 times, removing from future consideration");
      // Do not re-add to locals
    } else {
      logger.warn("Dead endpoint " + deadEndpoint + " has failed " + failures + " times, deprioritizing it");
      newLocals.add(deadEndpoint); // move to end
    }
    this.localityEndpoints = Collections.unmodifiableList(newLocals);
    this.writeEndpoints = Collections.unmodifiableList(newWrites);
    validateEndpoints();
  }

  /**
   * Filters the server list down to this client's locality; falls back to all servers if
   * none match. The returned list is always a shuffled copy.
   */
  protected List<Endpoint> getLocalityEndpoints(List<Endpoint> servers) {
    List<Endpoint> collect = servers.stream().filter(b -> locality.equals(b.getLocality()))
        .collect(Collectors.toList());
    if (collect.isEmpty()) {
      // Fix: copy before shuffling. Previously the caller's own list was shuffled in
      // place here, mutating input the caller may still depend on (Collections.shuffle
      // permutes the given list directly).
      collect = new ArrayList<>(servers);
    }
    Collections.shuffle(collect);
    logger.info("Locality endpoints: " + collect);
    return collect;
  }

  @Override
  public void close() throws IOException {
    if (!networkClient.isClosed()) {
      networkClient.close();
    }
  }

  public static List<Endpoint> generateEndpointsFromBrokers(Set<Broker> brokers) {
    return brokers.stream().map(Endpoint::fromBroker).collect(Collectors.toList());
  }

  /** Parses a serverset file of one JSON object per line with "ip" and "az" fields. */
  public static List<Endpoint> parseServersetFile(String serversetFile) throws IOException {
    Gson gson = new Gson();
    List<String> lines = Files.readAllLines(new File(serversetFile).toPath());
    return lines.stream().map(l -> gson.fromJson(l, JsonObject.class)).filter(g -> g.size() > 0)
        .map(g -> new Endpoint(InetSocketAddress.createUnresolved(g.get("ip").getAsString(), 9092),
            g.get("az").getAsString()))
        .collect(Collectors.toList());
  }

  /** Parses "host1:port1,host2:port2,..." into unresolved endpoints. */
  public static List<Endpoint> getEndpointsFromBootstrapServerString(String bootstrapServers) {
    return Arrays.stream(bootstrapServers.split(",")).map(e -> {
      String[] parts = e.split(":");
      // Fix: ports are 0-65535; Short.parseShort rejected any port above 32767
      return new Endpoint(InetSocketAddress.createUnresolved(parts[0], Integer.parseInt(parts[1])));
    }).collect(Collectors.toList());
  }

  public boolean isClosed() {
    return networkClient.isClosed();
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/Endpoint.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/Endpoint.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2;
import com.pinterest.memq.commons.protocol.Broker;
import java.net.InetSocketAddress;
/**
 * A broker endpoint: an unresolved socket address plus the locality (e.g. availability
 * zone) it lives in. Equality and hashing cover both fields.
 *
 * NOTE: instances are mutable (setters below) — callers that use Endpoint as a map key
 * must not mutate it afterwards.
 */
public class Endpoint {

  public static final String DEFAULT_LOCALITY = "n/a";

  private InetSocketAddress address;
  private String locality = DEFAULT_LOCALITY;

  /** Creates an endpoint with an explicit locality. */
  public Endpoint(InetSocketAddress address, String locality) {
    this.address = address;
    this.locality = locality;
  }

  /** Creates an endpoint with the default ("n/a") locality. */
  public Endpoint(InetSocketAddress address) {
    this.address = address;
  }

  /** Builds an endpoint from a broker's IP, port, and locality (address left unresolved). */
  public static Endpoint fromBroker(Broker broker) {
    return new Endpoint(InetSocketAddress.createUnresolved(broker.getBrokerIP(),
        broker.getBrokerPort()), broker.getLocality());
  }

  public InetSocketAddress getAddress() {
    return address;
  }

  public void setAddress(InetSocketAddress address) {
    this.address = address;
  }

  public String getLocality() {
    return locality;
  }

  public void setLocality(String locality) {
    this.locality = locality;
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof Endpoint)) {
      return false;
    }
    Endpoint other = (Endpoint) obj;
    return address.equals(other.address) && locality.equals(other.locality);
  }

  @Override
  public int hashCode() {
    // simple sum, consistent with equals (both fields participate)
    return address.hashCode() + locality.hashCode();
  }

  @Override
  public String toString() {
    return String.format("Endpoint[address=%s, locality=%s]", address, locality);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/retry/UniformRetryStrategy.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/retry/UniformRetryStrategy.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.retry;
import java.time.Duration;
/**
 * Retry strategy that waits a fixed, uniform interval between attempts and never
 * gives up (it never returns null from {@link #calculateNextRetryInterval(int)}).
 */
public class UniformRetryStrategy extends RetryStrategy {
  // fixed delay applied before every retry attempt
  private int retryIntervalMs = 1000;

  public UniformRetryStrategy() {
  }

  public void setRetryIntervalMs(int retryIntervalMs) {
    this.retryIntervalMs = retryIntervalMs;
  }

  @Override
  public Duration calculateNextRetryInterval(int attempts) {
    // the attempt count is deliberately ignored: the interval is uniform,
    // and exhaustion is never signaled
    Duration fixedInterval = Duration.ofMillis(retryIntervalMs);
    return fixedInterval;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/retry/RetryStrategy.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/retry/RetryStrategy.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.retry;
import java.time.Duration;
/**
 * Base contract for retry back-off policies used by the client networking layer.
 * Implementations decide both how long to wait before the next attempt and when to
 * stop retrying altogether.
 */
public abstract class RetryStrategy {
  /**
   * returns an interval till the next attempt
   * @param attempts number of attempts that are already done
   * @return null if no retries are left, the next interval otherwise
   */
  public abstract Duration calculateNextRetryInterval(int attempts);
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/retry/FullJitterRetryStrategy.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/retry/FullJitterRetryStrategy.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.retry;
import java.time.Duration;
import java.util.concurrent.ThreadLocalRandom;
/**
 * "Full jitter" exponential back-off: the next interval is drawn uniformly from
 * [1, min(2^attempts * base, max)] milliseconds. Randomizing the whole window
 * spreads out retry storms from many clients.
 */
public class FullJitterRetryStrategy extends RetryStrategy {
  private long baseRetryIntervalMs = 200;
  private long maxRetryIntervalMs = 10000;
  // effectively unbounded by default; the -1 keeps attempts + 1 from overflowing
  private int maxAttempts = Integer.MAX_VALUE - 1;

  public FullJitterRetryStrategy() {
  }

  public void setBaseRetryIntervalMs(long baseRetryIntervalMs) {
    this.baseRetryIntervalMs = baseRetryIntervalMs;
  }

  public void setMaxRetryIntervalMs(long maxRetryIntervalMs) {
    this.maxRetryIntervalMs = maxRetryIntervalMs;
  }

  public void setMaxAttempts(int maxAttempts) {
    this.maxAttempts = maxAttempts;
  }

  @Override
  public Duration calculateNextRetryInterval(int attempts) {
    if (attempts >= maxAttempts) {
      return null; // retries exhausted
    }
    // 2^attempts * base, capped at maxRetryIntervalMs. Math.round saturates at
    // Long.MAX_VALUE for huge exponents, so the min() keeps the bound finite.
    long upper = Math.min((Math.round(Math.pow(2, attempts) * baseRetryIntervalMs)), maxRetryIntervalMs);
    // Guard against a non-positive bound (e.g. baseRetryIntervalMs configured as 0),
    // which would make nextLong(1, upper + 1) throw IllegalArgumentException.
    if (upper < 1) {
      upper = 1;
    }
    return Duration.ofMillis(ThreadLocalRandom.current().nextLong(1, upper + 1));
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/retry/ExponentialBackoffRetryStrategy.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/retry/ExponentialBackoffRetryStrategy.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.retry;
import java.time.Duration;
/**
 * Deterministic exponential back-off: waits base * 2^attempts milliseconds between
 * attempts and stops once {@code maxRetries} attempts have been made.
 */
public class ExponentialBackoffRetryStrategy
    extends RetryStrategy {
  private int maxRetries = 2;
  private long baseRetryIntervalMs = 500;

  public ExponentialBackoffRetryStrategy() {
  }

  public ExponentialBackoffRetryStrategy(int maxRetries, long baseRetryIntervalMs) {
    this.maxRetries = maxRetries;
    this.baseRetryIntervalMs = baseRetryIntervalMs;
  }

  public void setMaxRetries(int maxRetries) {
    this.maxRetries = maxRetries;
  }

  public void setBaseRetryIntervalMs(long baseRetryIntervalMs) {
    this.baseRetryIntervalMs = baseRetryIntervalMs;
  }

  @Override
  public Duration calculateNextRetryInterval(int attempts) {
    // still have retry budget left: delay doubles with each completed attempt
    if (attempts < maxRetries) {
      double multiplier = Math.pow(2, attempts);
      return Duration.ofMillis(Math.round(baseRetryIntervalMs * multiplier));
    }
    // budget exhausted
    return null;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/network/NetworkClient.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/network/NetworkClient.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.network;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.time.Duration;
import java.util.Properties;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import com.pinterest.memq.client.commons2.MemqPooledByteBufAllocator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.pinterest.memq.client.commons2.TransportPacketIdentifier;
import com.pinterest.memq.client.commons2.network.netty.ClientChannelInitializer;
import com.pinterest.memq.client.commons2.retry.ExponentialBackoffRetryStrategy;
import com.pinterest.memq.client.commons2.retry.RetryStrategy;
import com.pinterest.memq.client.producer.http.DaemonThreadFactory;
import com.pinterest.memq.commons.config.SSLConfig;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.ReferenceCountUtil;
// No thread-safety guarantees
/**
 * Netty-based request/response client. Maintains a pool of channels keyed by broker
 * address, writes {@link RequestPacket}s, and completes per-request futures when the
 * matching {@link ResponsePacket} arrives (or a timeout/connection failure occurs).
 */
public class NetworkClient implements Closeable {

  private static final Logger logger = LoggerFactory.getLogger(NetworkClient.class);

  public static final String CONFIG_INITIAL_RETRY_INTERVAL_MS = "initialRetryIntervalMs";
  public static final String CONFIG_MAX_RETRY_COUNT = "maxRetryCount";
  public static final String CONFIG_IDLE_TIMEOUT_MS = "idleTimeoutMs";
  public static final String CONFIG_CONNECT_TIMEOUT_MS = "connectTimeoutMs";
  public static final String CONFIG_IRRESPONSIVE_TIMEOUT_MS = "irresponsiveTimeoutMs";

  // connect retry policy; configurable via CONFIG_INITIAL_RETRY_INTERVAL_MS / CONFIG_MAX_RETRY_COUNT
  private ExponentialBackoffRetryStrategy retryStrategy = new ExponentialBackoffRetryStrategy();
  private int idleTimeoutMs = 60000;          // channel all-idle timeout handed to the pipeline
  private int connectTimeoutMs = 500;         // netty CONNECT_TIMEOUT_MILLIS
  private int irresponsiveTimeoutMs = 60000;  // default per-request response timeout

  private final ScheduledExecutorService scheduler;
  private final ResponseHandler responseHandler;
  private final Bootstrap bootstrap;
  private final EventLoopGroup eventLoopGroup;
  private final AtomicBoolean closed = new AtomicBoolean(false);

  // maintain a pool of connections keyed by endpoint
  private final Map<InetSocketAddress, ChannelFuture> channelPool = new ConcurrentHashMap<>();
  // canonical lock object per address used to serialize connects; see getPoolLock()
  private final Map<InetSocketAddress, Object> connectLocks = new ConcurrentHashMap<>();
  // kept for testing visibility to return the last acquired connection
  private volatile ChannelFuture lastConnectFuture;

  public NetworkClient() {
    this(null, null);
  }

  public NetworkClient(Properties properties) {
    this(properties, null);
  }

  /**
   * @param properties optional overrides for retry/timeout settings (CONFIG_* keys)
   * @param sslConfig  optional TLS configuration; null disables TLS
   */
  public NetworkClient(Properties properties, SSLConfig sslConfig) {
    if (properties != null) {
      if (properties.containsKey(CONFIG_INITIAL_RETRY_INTERVAL_MS)) {
        retryStrategy.setBaseRetryIntervalMs(Integer.parseInt(properties.get(CONFIG_INITIAL_RETRY_INTERVAL_MS).toString()));
      }
      if (properties.containsKey(CONFIG_MAX_RETRY_COUNT)) {
        retryStrategy.setMaxRetries(Integer.parseInt(properties.get(CONFIG_MAX_RETRY_COUNT).toString()));
      }
      if (properties.containsKey(CONFIG_CONNECT_TIMEOUT_MS)) {
        connectTimeoutMs = Integer.parseInt(properties.get(CONFIG_CONNECT_TIMEOUT_MS).toString());
      }
      if (properties.containsKey(CONFIG_IDLE_TIMEOUT_MS)) {
        idleTimeoutMs = Integer.parseInt(properties.get(CONFIG_IDLE_TIMEOUT_MS).toString());
      }
      if (properties.containsKey(CONFIG_IRRESPONSIVE_TIMEOUT_MS)) {
        irresponsiveTimeoutMs = Integer.parseInt(properties.get(CONFIG_IRRESPONSIVE_TIMEOUT_MS).toString());
      }
    }
    this.responseHandler = new ResponseHandler();
    bootstrap = new Bootstrap();
    // prefer the native epoll transport when the platform supports it
    if (Epoll.isAvailable()) {
      eventLoopGroup = new EpollEventLoopGroup(1, new DaemonThreadFactory("MemqCommonClientNettyGroup"));
      bootstrap.channel(EpollSocketChannel.class);
    } else {
      eventLoopGroup = new NioEventLoopGroup(1, new DaemonThreadFactory("MemqCommonClientNettyGroup"));
      bootstrap.channel(NioSocketChannel.class);
    }
    bootstrap.group(eventLoopGroup);
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs);
    bootstrap.handler(new ClientChannelInitializer(responseHandler, sslConfig, idleTimeoutMs));
    ScheduledThreadPoolExecutor tmpScheduler = new ScheduledThreadPoolExecutor(1);
    // cancelled timeout tasks are removed eagerly so the work queue does not grow
    tmpScheduler.setRemoveOnCancelPolicy(true);
    this.scheduler = tmpScheduler;
  }

  /** Sends with the default irresponsive timeout. */
  public CompletableFuture<ResponsePacket> send(InetSocketAddress socketAddress, RequestPacket request) throws Exception {
    return send(socketAddress, request, Duration.ofMillis(irresponsiveTimeoutMs));
  }

  /**
   * Sends a request to the given address and returns a future that completes with the
   * matching response, or exceptionally on connect failure, write failure, timeout, or
   * channel close.
   *
   * @throws IllegalStateException if the client has been closed
   */
  public CompletableFuture<ResponsePacket> send(InetSocketAddress socketAddress, RequestPacket request, Duration timeout)
      throws ExecutionException, InterruptedException {
    final long startMs = System.currentTimeMillis();
    if (closed.get()) {
      throw new IllegalStateException("Cannot send since client is closed");
    }
    CompletableFuture<ResponsePacket> returnFuture = new CompletableFuture<>();
    final TransportPacketIdentifier identifier = new TransportPacketIdentifier(request);
    // no need to remove listeners since they are removed by Netty after fired
    acquireChannel(socketAddress).addListener((ChannelFutureListener) channelFuture -> {
      if (channelFuture.isSuccess()) {
        long elapsedMs = System.currentTimeMillis() - startMs;
        // register this request with the channel so only this channel's inflight requests are affected on close
        responseHandler.registerRequest(channelFuture.channel(), identifier, returnFuture);
        try {
          // fail the future if no response arrives within the remaining budget; a
          // negative remaining delay simply fires the timeout immediately
          final ScheduledFuture<?> scheduledCleanup = scheduler.schedule(() -> {
            responseHandler.cancelRequest(identifier, new TimeoutException("Failed to receive response after " + timeout.toMillis() + " ms"));
          }, timeout.toMillis() - elapsedMs, TimeUnit.MILLISECONDS);
          // cancel the timeout task as soon as the request completes either way
          returnFuture.handleAsync((responsePacket, throwable) -> {
            if (!scheduledCleanup.isDone()) {
              scheduledCleanup.cancel(true);
            }
            return null;
          });
        } catch (RejectedExecutionException ree) {
          // scheduler rejects during shutdown; only worth logging if we are not closing
          if (!isClosed()) {
            logger.error("Failed to schedule clean up task: ", ree);
          }
        }
        ByteBuf buffer = request.getPreAllocOutBuf(); // try the pre-alloc buffer first
        try {
          if (buffer == null) {
            // alloc a new buffer
            buffer = MemqPooledByteBufAllocator.buffer(request.getSize(RequestType.PROTOCOL_VERSION)); // this will not block
            request.write(buffer, RequestType.PROTOCOL_VERSION);
          }
          channelFuture.channel().writeAndFlush(buffer);
        } catch (Exception e) {
          logger.warn("Failed to write request " + request.getClientRequestId(), e);
          ReferenceCountUtil.release(buffer);
          responseHandler.cancelRequest(identifier, e);
        }
      } else {
        responseHandler.cancelRequest(identifier, channelFuture.cause());
      }
    });
    return returnFuture;
  }

  /**
   * Acquire channel for the given address by checking if it exists in the channelPool.
   * If not, create it and register it in the channelPool for future use.
   *
   * @param socketAddress
   * @return the ChannelFuture for this address
   * @throws ExecutionException
   * @throws InterruptedException
   */
  protected ChannelFuture acquireChannel(InetSocketAddress socketAddress) throws ExecutionException, InterruptedException {
    ChannelFuture existing = channelPool.get(socketAddress);
    if (existing == null || !existing.channel().isActive()) {
      // double-checked under the per-address lock so only one connect happens per address
      synchronized (getPoolLock(socketAddress)) {
        existing = channelPool.get(socketAddress);
        if (existing == null || !existing.channel().isActive()) {
          CompletableFuture<ChannelFuture> connectReadyFuture = new CompletableFuture<>();
          doConnect(socketAddress, connectReadyFuture, 0);
          // blocks until the connect (including retries) resolves
          ChannelFuture newFuture = connectReadyFuture.get();
          channelPool.put(socketAddress, newFuture);
          lastConnectFuture = newFuture;
          return newFuture;
        }
      }
    }
    lastConnectFuture = existing;
    return existing;
  }

  private Object getPoolLock(InetSocketAddress socketAddress) {
    // Fix: the previous implementation synchronized on the caller-supplied address
    // instance, but equal InetSocketAddress values are usually distinct objects, so
    // that lock did not serialize concurrent connects to the same logical address
    // (risking duplicate channels, with the overwritten one leaking un-closed).
    // Use one canonical lock object per address instead. Entries are never evicted;
    // growth is bounded by the number of distinct addresses this client talks to.
    return connectLocks.computeIfAbsent(socketAddress, k -> new Object());
  }

  /** Initiates an async connect; RetryListener re-invokes this on failure. */
  private void doConnect(InetSocketAddress socketAddress, CompletableFuture<ChannelFuture> connectReadyFuture, int attempts) {
    logger.info("Connecting to " + socketAddress + ", attempt " + (attempts + 1));
    // no need to remove listeners since they are removed by Netty after fired
    bootstrap.connect(socketAddress).addListener(new RetryListener(socketAddress, connectReadyFuture, attempts, retryStrategy));
  }

  /** Closes all pooled channels, rejects inflight requests, and shuts down executors. */
  @Override
  public void close() throws IOException {
    logger.debug("Closing network client");
    closed.set(true);
    // close all channels
    for (ChannelFuture cf : channelPool.values()) {
      try {
        if (cf != null && cf.channel() != null) {
          cf.channel().close().await();
        }
      } catch (InterruptedException ie) {
        // preserve the interrupt for callers further up the stack
        Thread.currentThread().interrupt();
      }
    }
    channelPool.clear();
    lastConnectFuture = null;
    responseHandler.close();
    scheduler.shutdown();
    eventLoopGroup.shutdownGracefully();
  }

  public boolean isClosed() {
    return closed.get();
  }

  // blocking
  /** Closes and forgets all pooled channels without shutting the client down. */
  public void reset() throws IOException, InterruptedException {
    logger.info("Resetting network client");
    for (ChannelFuture cf : channelPool.values()) {
      if (cf != null && cf.channel() != null) {
        cf.channel().close().await();
      }
    }
    channelPool.clear();
    lastConnectFuture = null;
  }

  @VisibleForTesting
  protected Map<InetSocketAddress, ChannelFuture> getChannelPool() {
    return channelPool;
  }

  /**
   * Connect-completion listener that retries failed connects per the configured
   * {@link RetryStrategy}, completing {@code connectReadyFuture} on terminal
   * success or failure.
   */
  private final class RetryListener implements ChannelFutureListener {
    private final CompletableFuture<ChannelFuture> connectReadyFuture;
    private final InetSocketAddress socketAddress;
    private final int attempts;
    private final RetryStrategy retryStrategy;

    public RetryListener(
        InetSocketAddress socketAddress,
        CompletableFuture<ChannelFuture> connectReadyFuture,
        int attempts,
        RetryStrategy retryStrategy
    ) {
      this.connectReadyFuture = connectReadyFuture;
      this.socketAddress = socketAddress;
      this.attempts = attempts;
      this.retryStrategy = retryStrategy;
    }

    @Override
    public void operationComplete(ChannelFuture future) throws Exception {
      if (!future.isSuccess()) {
        Duration nextRetryInterval = retryStrategy.calculateNextRetryInterval(attempts);
        if (nextRetryInterval == null) {
          // retries exhausted: surface the last connect failure
          connectReadyFuture.completeExceptionally(future.cause());
        } else {
          logger.warn("Failed to connect to " + socketAddress + ", retry in " + nextRetryInterval.toMillis()
              + " ms, reason:" + future.cause());
          scheduler.schedule(
              () -> doConnect(
                  socketAddress,
                  connectReadyFuture,
                  attempts + 1
              ),
              nextRetryInterval.toMillis(),
              TimeUnit.MILLISECONDS);
          // release the half-open channel from the failed attempt
          future.channel().close().await();
        }
      } else {
        connectReadyFuture.complete(future);
      }
    }
  }

  public int getInflightRequestCount() {
    return responseHandler.getInflightRequestCount();
  }

  @VisibleForTesting
  protected ChannelFuture getConnectFuture() {
    return lastConnectFuture;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/network/ResponseHandler.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/network/ResponseHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.network;
import com.pinterest.memq.client.commons2.TransportPacketIdentifier;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
import io.netty.channel.Channel;
import io.netty.channel.ChannelId;
/**
 * Tracks in-flight request futures and completes them when their responses arrive.
 * Also maps each request to its originating channel so a single channel's failure can
 * reject only that channel's in-flight requests.
 */
public class ResponseHandler implements Closeable {
  private static final Logger logger = LoggerFactory.getLogger(ResponseHandler.class);
  // The inflight map is held behind an AtomicReference so cleanAndRejectInflightRequests
  // can swap in a fresh map atomically and fail everything in the old one.
  private final AtomicReference<Map<TransportPacketIdentifier, CompletableFuture<ResponsePacket>>>
      inflightRequestMapRef = new AtomicReference<>(new ConcurrentHashMap<>());
  // Track which requests belong to which channel so we can reject only that channel's inflight on close
  private final Map<ChannelId, Set<TransportPacketIdentifier>> channelToRequests = new ConcurrentHashMap<>();
  private final Map<TransportPacketIdentifier, ChannelId> requestToChannel = new ConcurrentHashMap<>();

  /**
   * Completes the future registered under this response's identifier; logs an error if
   * no in-flight request matches (e.g. it already timed out and was removed).
   */
  public void handle(ResponsePacket responsePacket) throws Exception {
    CompletableFuture<ResponsePacket> future = removeRequest(new TransportPacketIdentifier(responsePacket));
    if (future != null) {
      future.complete(responsePacket);
    } else {
      // no handler for response skipping
      logger.error("No handler for request:" + responsePacket.getRequestType());
    }
  }

  // Replaces the inflight map wholesale. NOTE(review): existing entries in the old map
  // are dropped without being completed — callers presumably use this only in tests/reset.
  public void setInflightRequestMap(
      Map<TransportPacketIdentifier, CompletableFuture<ResponsePacket>> inflightRequestMap) {
    inflightRequestMapRef.set(inflightRequestMap);
  }

  /** Registers a request future without channel bookkeeping (no per-channel rejection). */
  public void addRequest(TransportPacketIdentifier identifier,
                         CompletableFuture<ResponsePacket> future) {
    inflightRequestMapRef.get().put(identifier, future);
  }

  /** Registers a request future and records which channel it was written to. */
  public void registerRequest(Channel channel,
                              TransportPacketIdentifier identifier,
                              CompletableFuture<ResponsePacket> future) {
    inflightRequestMapRef.get().put(identifier, future);
    ChannelId channelId = channel.id();
    channelToRequests.computeIfAbsent(channelId, k -> ConcurrentHashMap.newKeySet()).add(identifier);
    requestToChannel.put(identifier, channelId);
  }

  /**
   * Removes the request from the inflight map and from the per-channel bookkeeping.
   *
   * @return the request's future, or null if it was not (or no longer) in flight
   */
  public CompletableFuture<ResponsePacket> removeRequest(TransportPacketIdentifier identifier) {
    CompletableFuture<ResponsePacket> future = inflightRequestMapRef.get().remove(identifier);
    ChannelId channelId = requestToChannel.remove(identifier);
    if (channelId != null) {
      Set<TransportPacketIdentifier> set = channelToRequests.get(channelId);
      if (set != null) {
        set.remove(identifier);
        // drop the channel entry once its last request is gone
        if (set.isEmpty()) {
          channelToRequests.remove(channelId);
        }
      }
    }
    return future;
  }

  /**
   * Removes the request and fails its future with {@code reason}.
   *
   * @return true if a matching in-flight request was found and rejected
   */
  public boolean cancelRequest(TransportPacketIdentifier identifier, Throwable reason) {
    CompletableFuture<ResponsePacket> future = removeRequest(identifier);
    if (future == null) {
      return false;
    }
    rejectRequestFuture(future, reason);
    return true;
  }

  private void rejectRequestFuture(CompletableFuture<ResponsePacket> future, Throwable reason) {
    future.completeExceptionally(reason);
  }

  public int getInflightRequestCount() {
    return inflightRequestMapRef.get().size();
  }

  /** Atomically swaps in a fresh inflight map and fails every request in the old one. */
  public void cleanAndRejectInflightRequests(Throwable reason) {
    logger.debug("Replacing and cleaning inflight requests due to " + reason);
    Map<TransportPacketIdentifier, CompletableFuture<ResponsePacket>> oldMap = inflightRequestMapRef.getAndSet(new ConcurrentHashMap<>());
    // reject futures with the provided reason and remove them from the map
    for (Map.Entry<TransportPacketIdentifier, CompletableFuture<ResponsePacket>> entry : oldMap
        .entrySet()) {
      rejectRequestFuture(entry.getValue(), reason);
    }
    channelToRequests.clear();
    requestToChannel.clear();
  }

  /** Fails only the requests that were registered against the given channel. */
  public void cleanAndRejectInflightRequestsForChannel(Channel channel, Throwable reason) {
    ChannelId channelId = channel.id();
    Set<TransportPacketIdentifier> identifiers = channelToRequests.remove(channelId);
    if (identifiers == null || identifiers.isEmpty()) {
      return;
    }
    for (TransportPacketIdentifier identifier : identifiers) {
      CompletableFuture<ResponsePacket> future = inflightRequestMapRef.get().remove(identifier);
      requestToChannel.remove(identifier);
      if (future != null) {
        rejectRequestFuture(future, reason);
      }
    }
  }

  /** Rejects everything still in flight with a ClientClosedException. */
  public void close() {
    cleanAndRejectInflightRequests(new ClientClosedException());
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/network/ClientClosedException.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/network/ClientClosedException.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.network;
/**
 * Thrown (as a future completion cause) when a request cannot complete because the
 * {@code NetworkClient} that issued it has been closed.
 */
public class ClientClosedException extends Exception {
  private static final long serialVersionUID = 1L;

  public ClientClosedException() {
  }

  public ClientClosedException(String message) {
    super(message);
  }

  public ClientClosedException(String message, Throwable cause) {
    super(message, cause);
  }

  public ClientClosedException(Throwable cause) {
    super(cause);
  }

  public ClientClosedException(String message, Throwable cause, boolean enableSuppression,
                               boolean writableStackTrace) {
    super(message, cause, enableSuppression, writableStackTrace);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/network/ClosedConnectionException.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/network/ClosedConnectionException.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.network;
import java.io.IOException;
/**
 * IOException variant used to fail in-flight requests when the underlying connection
 * closes before a response is received.
 */
public class ClosedConnectionException extends IOException {
  private static final long serialVersionUID = 1L;

  public ClosedConnectionException() {
  }

  public ClosedConnectionException(String message) {
    super(message);
  }

  public ClosedConnectionException(String message, Throwable cause) {
    super(message, cause);
  }

  public ClosedConnectionException(Throwable cause) {
    super(cause);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/network/netty/ClientChannelInitializer.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/network/netty/ClientChannelInitializer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.network.netty;
import java.nio.ByteOrder;
import java.util.concurrent.TimeUnit;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.TrustManagerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.pinterest.memq.client.commons2.network.ResponseHandler;
import com.pinterest.memq.commons.config.SSLConfig;
import com.pinterest.memq.core.utils.MemqUtils;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.handler.ssl.ClientAuth;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.timeout.IdleStateHandler;
/**
 * Configures the client-side channel pipeline: optional test wiretap, idle detection,
 * connection lifecycle handling, optional mutual TLS, length-prefixed frame decoding,
 * and finally the response dispatcher. Handler order is significant.
 */
public final class ClientChannelInitializer extends ChannelInitializer<SocketChannel> {

  private static final int FRAME_LENGTH_ENCODING_SIZE = 4;
  private static final int MAX_FRAME_SIZE = 4 * 1024 * 1024;

  // optional test-only tap inserted at the head of every new pipeline
  private static ChannelHandler wiretapper = null;

  private final ResponseHandler handler;
  private final SSLConfig sslConfig;
  private final long idleTimeoutMs;

  public ClientChannelInitializer(ResponseHandler handler,
                                  SSLConfig sslConfig, long idleTimeoutMs) {
    this.handler = handler;
    this.sslConfig = sslConfig;
    this.idleTimeoutMs = idleTimeoutMs;
  }

  protected void initChannel(SocketChannel channel) throws Exception {
    try {
      ChannelPipeline pipeline = channel.pipeline();
      if (wiretapper != null) {
        pipeline.addLast(wiretapper);
      }
      // all-idle timer first, then the lifecycle handler that reacts to its events
      pipeline.addLast(new IdleStateHandler(0, 0, idleTimeoutMs, TimeUnit.MILLISECONDS));
      pipeline.addLast(new ConnectionLifecycleHandler(handler));
      if (sslConfig != null) {
        pipeline.addLast(newSslHandler(channel));
      }
      // big-endian 4-byte length prefix; the length field is retained in the frame
      pipeline.addLast(new LengthFieldBasedFrameDecoder(ByteOrder.BIG_ENDIAN, MAX_FRAME_SIZE, 0,
          FRAME_LENGTH_ENCODING_SIZE, 0, 0, false));
      pipeline.addLast(new MemqNettyClientSideResponseHandler(handler));
    } catch (Exception e) {
      e.printStackTrace();
      throw e;
    }
  }

  // Builds a mutual-TLS (client auth required) handler from the configured SSLConfig.
  private ChannelHandler newSslHandler(SocketChannel channel) throws Exception {
    KeyManagerFactory kmf = MemqUtils.extractKMFFromSSLConfig(sslConfig);
    TrustManagerFactory tmf = MemqUtils.extractTMPFromSSLConfig(sslConfig);
    SslContext ctx = SslContextBuilder.forClient().protocols(sslConfig.getProtocols())
        .keyManager(kmf).clientAuth(ClientAuth.REQUIRE).trustManager(tmf).build();
    return ctx.newHandler(channel.alloc());
  }

  @VisibleForTesting
  public static void setWiretapper(ChannelHandler wiretapper) {
    ClientChannelInitializer.wiretapper = wiretapper;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/network/netty/MemqNettyClientSideResponseHandler.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/network/netty/MemqNettyClientSideResponseHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.network.netty;
import com.pinterest.memq.client.commons2.network.ResponseHandler;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Inbound netty handler that decodes a length-delimited frame into a {@link ResponsePacket}
 * and forwards it to the client's {@link ResponseHandler}.
 *
 * <p>Frames arriving with a protocol version other than {@link RequestType#PROTOCOL_VERSION}
 * are logged and dropped rather than dispatched. The incoming {@link ByteBuf} is always
 * released, even when decoding or handling throws.
 */
public class MemqNettyClientSideResponseHandler extends ChannelInboundHandlerAdapter {

  private static final Logger logger =
      LoggerFactory.getLogger(MemqNettyClientSideResponseHandler.class);

  // final: the handler is fixed for the lifetime of this pipeline stage
  private final ResponseHandler responseHandler;

  public MemqNettyClientSideResponseHandler(ResponseHandler responseHandler) {
    this.responseHandler = responseHandler;
  }

  @Override
  public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    ByteBuf buf = (ByteBuf) msg;
    try {
      ResponsePacket responsePacket = new ResponsePacket();
      responsePacket.readFields(buf, RequestType.PROTOCOL_VERSION);
      // parameterized logging avoids building the message string when DEBUG is disabled
      logger.debug("Response received {}", responsePacket);
      if (responsePacket.getProtocolVersion() != RequestType.PROTOCOL_VERSION) {
        // might not be able to handle this request.
        // in future multiple protocol versions can / should be handled here
        logger.debug("Server responded in protocol different than client request: {} vs {}",
            responsePacket.getProtocolVersion(), RequestType.PROTOCOL_VERSION);
      } else {
        responseHandler.handle(responsePacket);
      }
    } catch (Exception e) {
      logger.error("Failed to handle server responses: ", e);
      throw e;
    } finally {
      // release exactly once; LengthFieldBasedFrameDecoder hands us a retained slice
      buf.release();
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/commons2/network/netty/ConnectionLifecycleHandler.java | memq-client/src/main/java/com/pinterest/memq/client/commons2/network/netty/ConnectionLifecycleHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.network.netty;
import com.pinterest.memq.client.commons2.network.ClosedConnectionException;
import com.pinterest.memq.client.commons2.network.ResponseHandler;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.SocketAddress;
/**
 * Duplex handler that tracks connection lifecycle: logs connect/active/inactive transitions,
 * rejects all inflight requests when the channel closes or errors, and closes connections
 * that have been fully idle (no reads and no writes).
 */
final class ConnectionLifecycleHandler extends ChannelDuplexHandler {

  private final Logger logger = LoggerFactory.getLogger(ConnectionLifecycleHandler.class);
  private final ResponseHandler handler;

  public ConnectionLifecycleHandler(ResponseHandler responseHandler) {
    this.handler = responseHandler;
  }

  @Override
  public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress,
                      SocketAddress localAddress, ChannelPromise promise) throws Exception {
    logger.info("Connecting to {}", remoteAddress);
    super.connect(ctx, remoteAddress, localAddress, promise);
  }

  @Override
  public void channelActive(ChannelHandlerContext ctx) throws Exception {
    logger.info("[{}] Connected to {}", ctx.channel().id(), ctx.channel().remoteAddress());
    super.channelActive(ctx);
  }

  @Override
  public void channelInactive(ChannelHandlerContext ctx) throws Exception {
    logger.info("[{}] Closing connection to server: {}", ctx.channel().id(),
        ctx.channel().remoteAddress());
    // fail every request still waiting on this channel so callers don't hang
    handler.cleanAndRejectInflightRequestsForChannel(
        ctx.channel(),
        new ClosedConnectionException("Connection " + ctx.channel().remoteAddress() + " closed"));
    super.channelInactive(ctx);
  }

  @Override
  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
    logger.error("[" + ctx.channel().id() + "] Exception caught in inbound pipeline: ", cause);
    // constant-on-the-left comparison: Throwable.getMessage() may be null (bug fix — the
    // original called cause.getMessage().equals(...) and could NPE inside the handler)
    if (cause instanceof IOException && "Connection reset by peer".equals(cause.getMessage())) {
      handler.cleanAndRejectInflightRequestsForChannel(
          ctx.channel(),
          new ClosedConnectionException("Connection " + ctx.channel().remoteAddress() + " closed by server"));
    } else {
      handler.cleanAndRejectInflightRequestsForChannel(ctx.channel(), cause);
    }
    ctx.close();
  }

  // idle event handling
  @Override
  public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
    if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == IdleState.ALL_IDLE) {
      logger.warn("Disconnecting to {} due to idle activity", ctx.channel().remoteAddress());
      ctx.close();
    } else {
      // propagate events we do not consume so downstream handlers still see them
      // (the original silently dropped every non-ALL_IDLE user event)
      super.userEventTriggered(ctx, evt);
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/producer2/RawRecord.java | memq-client/src/main/java/com/pinterest/memq/client/producer2/RawRecord.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer2;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import com.pinterest.memq.commons.MessageId;
import io.netty.util.Recycler;
/**
 * Pooled holder for one producer record's serialized components (message id, headers, key,
 * value, write timestamp). Instances come from a netty {@link Recycler}; callers must invoke
 * {@link #recycle()} exactly once after the record has been written out.
 */
public class RawRecord {

  // Total number of RawRecord objects ever allocated by the recycler (allocation count,
  // not current pool size); exposed for tests/diagnostics.
  public static final AtomicLong counter = new AtomicLong(0);

  private static final Recycler<RawRecord> RECYCLER = new Recycler<RawRecord>() {
    @Override
    protected RawRecord newObject(Handle<RawRecord> handle) {
      counter.incrementAndGet();
      return new RawRecord(handle);
    }
  };

  private byte[] messageIdBytes;
  private byte[] headerBytes;
  private byte[] keyBytes;
  private byte[] valueBytes;
  private long writeTimestamp;
  private final Recycler.Handle<RawRecord> handle;

  private RawRecord(Recycler.Handle<RawRecord> handle) {
    this.handle = handle;
  }

  /**
   * Obtains a pooled RawRecord and populates it with the serialized record components.
   *
   * @param messageId optional message id (may be null)
   * @param headers   optional user headers (may be null)
   * @param keyBytes  optional key (may be null)
   * @param valueBytes required value payload
   * @param writeTimestamp producer-side write timestamp (millis, per callers' usage)
   * @throws IOException if valueBytes is null or header serialization fails
   */
  public static RawRecord newInstance(MessageId messageId, Map<String, byte[]> headers, byte[] keyBytes,
                                      byte[] valueBytes, long writeTimestamp)
      throws IOException {
    // Validate and serialize BEFORE fetching from the recycler: the original code fetched
    // the pooled instance first, so a null value (or a header serialization failure)
    // stranded a RawRecord that was never recycle()d — a slow pool leak.
    if (valueBytes == null) {
      throw new IOException("value can not be null");
    }
    byte[] serializedHeaders = serializeHeadersToByteArray(headers);
    RawRecord record = RECYCLER.get();
    record.messageIdBytes = messageId != null ? messageId.toByteArray() : null;
    record.headerBytes = serializedHeaders;
    record.keyBytes = keyBytes;
    record.valueBytes = valueBytes;
    record.writeTimestamp = writeTimestamp;
    return record;
  }

  /** Clears all references and returns this instance to the pool. Call exactly once. */
  public void recycle() {
    this.messageIdBytes = null;
    this.headerBytes = null;
    this.keyBytes = null;
    this.valueBytes = null;
    this.writeTimestamp = 0;
    handle.recycle(this);
  }

  public byte[] getMessageIdBytes() {
    return messageIdBytes;
  }

  public byte[] getHeaderBytes() {
    return headerBytes;
  }

  public byte[] getKeyBytes() {
    return keyBytes;
  }

  public byte[] getValueBytes() {
    return valueBytes;
  }

  public long getWriteTimestamp() {
    return writeTimestamp;
  }

  /**
   * Serializes headers as repeated [keyLen(2B)][key][valueLen(2B)][value] entries, or
   * returns null when headers is null.
   *
   * NOTE(review): keys are encoded with the platform default charset (getBytes()); if
   * consumers decode with a fixed charset this should be pinned to UTF-8 — confirm the
   * reader side before changing, as it alters wire bytes on non-UTF-8 JVMs.
   */
  protected static byte[] serializeHeadersToByteArray(Map<String, byte[]> headers) throws IOException {
    if (headers != null) {
      ByteArrayOutputStream out = new ByteArrayOutputStream(11);
      DataOutputStream str = new DataOutputStream(out);
      for (Map.Entry<String, byte[]> entry : headers.entrySet()) {
        byte[] k = entry.getKey().getBytes();
        byte[] v = entry.getValue();
        // writeShort truncates lengths above 64KB — presumably headers are small; verify callers
        str.writeShort(k.length);
        str.write(k);
        str.writeShort(v.length);
        str.write(v);
      }
      str.close();
      return out.toByteArray();
    } else {
      return null;
    }
  }

  /** Returns the exact byte count {@link #writeToOutputStream(OutputStream)} will emit. */
  public int calculateEncodedLogMessageLength() {
    /* LogMessage layout
     * |--Additional Header Length (2B)----|-------------Timestamp (8B)---------------| //
     * Additional Header Fields
     * |--MessageId Length (1B)--|-----------------------MessageId (Var.)-------------| //
     * |--Header Length (2B)-----|-----------------------Headers (Var.)---------------| //
     * |------------------------------Key Length (4B)---------------------------------| // Key
     * Fields
     * |------------------------------Key (Var.)--------------------------------------| //
     * |------------------------------Value Length (4B)-------------------------------| // Value
     * Fields
     * |------------------------------Value (Var.)------------------------------------| //
     *
     * Fixed Overhead: 2 + 8 + 1 + 2 + 4 + 4 = 21 B
     */
    return
        + 2 + getAdditionalHeaderLength() // request additional headers length + additional fields
        + 4 + (keyBytes != null ? keyBytes.length : 0) // key length + key
        + 4 + valueBytes.length // value length + value
    ;
  }

  /**
   * Encodes this record onto the stream in the LogMessage layout documented in
   * {@link #calculateEncodedLogMessageLength()}. The header fields are buffered and
   * flushed before the (potentially large) value bytes are written directly.
   */
  public void writeToOutputStream(OutputStream outputStream) throws IOException {
    // buffered OS sized for the fixed header fields + key length prefix
    DataOutputStream dataOutputStream = new DataOutputStream(new BufferedOutputStream(outputStream, 2 + getAdditionalHeaderLength() + 4));
    dataOutputStream.writeShort((short) getAdditionalHeaderLength());
    // write timestamp
    dataOutputStream.writeLong(writeTimestamp);
    // message id length is a single byte — assumes encoded ids are <= 255 bytes; TODO confirm
    if (messageIdBytes != null) {
      dataOutputStream.write((byte) messageIdBytes.length);
      dataOutputStream.write(messageIdBytes);
    } else {
      dataOutputStream.write((byte) 0);
    }
    // encode and write user defined headers
    if (headerBytes != null) {
      dataOutputStream.writeShort(headerBytes.length);
      dataOutputStream.write(headerBytes);
    } else {
      dataOutputStream.writeShort(0);
    }
    if (keyBytes != null) {
      // mark keys present
      dataOutputStream.writeInt(keyBytes.length);
      dataOutputStream.write(keyBytes);
    } else {
      dataOutputStream.writeInt(0);
    }
    dataOutputStream.writeInt(valueBytes.length);
    // flush the buffered header bytes, then stream the value through the raw stream
    dataOutputStream.flush();
    outputStream.write(valueBytes);
    outputStream.flush();
  }

  // length of the variable "additional header" section: timestamp + messageId + user headers
  private int getAdditionalHeaderLength() {
    return + 8 // timestamp (long)
        + 1 + (messageIdBytes != null ? messageIdBytes.length : 0) // messageId length + messageId
        + 2 + (headerBytes != null ? headerBytes.length : 0) // message header length + message header
    ;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/producer2/RequestManager.java | memq-client/src/main/java/com/pinterest/memq/client/producer2/RequestManager.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer2;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons2.MemoryAllocationException;
import com.pinterest.memq.client.commons2.MemqCommonClient;
import com.pinterest.memq.client.commons2.retry.RetryStrategy;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
* This class manages Request creation upon write requests.
*/
public class RequestManager implements Closeable {
  // runs the per-request linger (time-based dispatch) tasks
  private final ScheduledExecutorService scheduler;
  // single thread that serializes request dispatches for this topic
  private final ExecutorService dispatcher;
  private final MemqCommonClient client;
  private final String topic;
  private final MemqProducer<?, ?> producer;
  private final long sendRequestTimeout;
  // max payload per request; also the memory charged against inflightMemoryPermits per request
  private final int maxPayloadBytes;
  private final int lingerMs;
  private final long maxInflightRequestsMemoryBytes;
  private final int maxInflightRequests;
  private final Semaphore requestCountPermits; // to limit the number of inflight requests
  // TODO: perhaps the inflightMemoryPermits should be a static variable shared in the JVM
  private final Semaphore inflightMemoryPermits; // to limit the memory used by inflight requests
  private final Compression compression;
  private final boolean disableAcks;
  private final RetryStrategy retryStrategy;
  // monotonically increasing id handed to each new Request
  private final AtomicInteger clientIdGenerator = new AtomicInteger(0);
  private final MetricRegistry metricRegistry;
  private final int maxBlockMs;
  // the request currently open for writes; only swapped inside the synchronized block
  // of getAvailableRequest(), read without the lock via volatile
  private volatile Request currentRequest;
  private Counter requestCounter;

  /**
   * Creates a RequestManager for a single topic. Both semaphores are sized here:
   * request count from maxInflightRequests and memory from maxInflightRequestsMemoryBytes.
   */
  public RequestManager(MemqCommonClient client,
                        String topic,
                        MemqProducer<?, ?> producer,
                        long sendRequestTimeout,
                        RetryStrategy retryStrategy,
                        int maxPayloadBytes,
                        int lingerMs,
                        int maxBlockMs,
                        int maxInflightRequestsMemoryBytes,
                        int maxInflightRequests,
                        Compression compression,
                        boolean disableAcks,
                        MetricRegistry metricRegistry) {
    ScheduledThreadPoolExecutor tmpScheduler = new ScheduledThreadPoolExecutor(1);
    // cancelled linger tasks are removed from the queue immediately instead of lingering
    tmpScheduler.setRemoveOnCancelPolicy(true);
    this.scheduler = tmpScheduler;
    this.dispatcher = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat("request-dispatch-" + topic).build());
    this.client = client;
    this.topic = topic;
    this.producer = producer;
    this.sendRequestTimeout = sendRequestTimeout;
    this.maxPayloadBytes = maxPayloadBytes;
    this.lingerMs = lingerMs;
    this.maxBlockMs = maxBlockMs;
    this.maxInflightRequestsMemoryBytes = maxInflightRequestsMemoryBytes;
    this.inflightMemoryPermits = new Semaphore(maxInflightRequestsMemoryBytes);
    this.maxInflightRequests = maxInflightRequests;
    this.requestCountPermits = new Semaphore(maxInflightRequests);
    this.compression = compression;
    this.disableAcks = disableAcks;
    this.retryStrategy = retryStrategy;
    this.metricRegistry = metricRegistry;
    initializeMetrics();
  }

  @VisibleForTesting
  protected int getRequestCountAvailablePermits() {
    return requestCountPermits.availablePermits();
  }

  @VisibleForTesting
  protected int getInflightMemoryAvailablePermits() {
    return inflightMemoryPermits.availablePermits();
  }

  // registers the counter plus two gauges derived from the semaphores' available permits
  private void initializeMetrics() {
    requestCounter = metricRegistry.counter("requests.created");
    metricRegistry.gauge("requests.inflight", () -> () -> maxInflightRequests - requestCountPermits.availablePermits());
    metricRegistry.gauge("requests.memory.inflight", () -> () -> maxInflightRequestsMemoryBytes - inflightMemoryPermits.availablePermits());
  }

  /**
   * Writes a record into the currently open request. If the current request was sealed
   * between lookup and write (Request.write returns null), a fresh request is obtained
   * and the write retried. Returns the request's shared result future.
   *
   * NOTE(review): getAvailableRequest() either throws or returns a non-null request,
   * so the trailing {@code return null} appears unreachable — confirm before relying on it.
   */
  public Future<MemqWriteResult> write(RawRecord record) throws IOException, MemoryAllocationException {
    if (client.isClosed()) {
      throw new IOException("Cannot write to topic " + topic + " when client is closed");
    }
    Request request = getAvailableRequest();
    while (request != null) {
      Future<MemqWriteResult> ret = request.write(record); // completes exceptionally with MemoryAllocationException if it happens in NetworkClient
      if(ret != null) {
        return ret;
      } else {
        request = getAvailableRequest();
      }
    }
    return null;
  }

  /**
   * Get an available request for the topic. If the current request is not available, it will try to create a new one.
   *
   * When a new request needs to be created, it will try to acquire the request count semaphore and the inflight memory semaphore.
   * The request count semaphore limits the number of inflight requests, while the inflight memory semaphore limits the total memory used by inflight requests.
   *
   * If it successfully acquires both semaphores, it will create a new Request object and return it.
   * Request creation will allocate a buffer of size maxPayloadBytes, which is the maximum payload size for the request.
   *
   * If it fails to acquire either of them, it will throw an IOException upon exhausting the request count semaphore,
   * or MemoryAllocationException upon exhausting the inflight memory semaphore.
   *
   * This method is synchronized to ensure that only one thread can create a new request at a time.
   *
   * @return an available Request object
   * @throws IOException
   * @throws MemoryAllocationException
   */
  public Request getAvailableRequest() throws IOException, MemoryAllocationException {
    // double-checked locking on the volatile currentRequest
    if (currentRequest == null || !currentRequest.isAvailable()) {
      synchronized (this) {
        if (currentRequest == null || !currentRequest.isAvailable()) {
          boolean countPermitAcquired = false;
          boolean memoryPermitAcquired = false;
          try {
            // count permit: non-blocking; memory permit: blocks up to maxBlockMs
            countPermitAcquired = requestCountPermits.tryAcquire(0, TimeUnit.MILLISECONDS);
            memoryPermitAcquired = inflightMemoryPermits.tryAcquire(maxPayloadBytes, maxBlockMs, TimeUnit.MILLISECONDS);
          } catch (InterruptedException ie) {
            maybeReleaseMemoryAndCountPermit(memoryPermitAcquired, countPermitAcquired);
            throw new IOException("Failed to acquire request locks for topic " + topic + " :", ie);
          }
          if (!countPermitAcquired) {
            // release whichever permit was acquired so nothing leaks
            maybeReleaseMemoryAndCountPermit(memoryPermitAcquired, countPermitAcquired);
            throw new IOException(
                String.format(
                    "Could not acquire request count semaphore. " +
                        "Current count: %s, Max count: %s for topic: %s",
                    maxInflightRequests - requestCountPermits.availablePermits(), maxInflightRequests, topic
                )
            );
          }
          if (!memoryPermitAcquired) {
            maybeReleaseMemoryAndCountPermit(memoryPermitAcquired, countPermitAcquired);
            throw new MemoryAllocationException(
                String.format(
                    "Could not acquire inflight request memory semaphore in %sms. " +
                        "Current memory: %s bytes, Max memory: %s bytes for topic: %s",
                    maxBlockMs,
                    maxInflightRequestsMemoryBytes - inflightMemoryPermits.availablePermits(),
                    maxInflightRequestsMemoryBytes, topic
                )
            );
          }
          try {
            if (client.isClosed()) {
              throw new IOException("Cannot write to topic " + topic + " when client is closed");
            }
            // Request releases both permits via its result future when it completes
            currentRequest = new Request(
                dispatcher,
                scheduler,
                client,
                this,
                requestCountPermits,
                inflightMemoryPermits,
                topic,
                clientIdGenerator.getAndIncrement(),
                maxPayloadBytes,
                lingerMs,
                maxBlockMs,
                sendRequestTimeout,
                retryStrategy,
                disableAcks,
                compression,
                metricRegistry);
            requestCounter.inc();
          } catch (MemoryAllocationException ibme) {
            // specifically re-throw MemoryAllocationException to let upstream handle it
            requestCountPermits.release();
            inflightMemoryPermits.release(maxPayloadBytes);
            throw new MemoryAllocationException("Failed to allocate buffer memory for topic " + topic + ": ", ibme);
          } catch (Throwable t) {
            // construction failed: the Request will never release the permits, do it here
            requestCountPermits.release();
            inflightMemoryPermits.release(maxPayloadBytes);
            throw t;
          }
        }
        return currentRequest;
      }
    }
    return currentRequest;
  }

  // releases only the permits that were actually acquired (used on partial-failure paths)
  private void maybeReleaseMemoryAndCountPermit(boolean memoryPermitAcquired, boolean countPermitAcquired) {
    if (countPermitAcquired) {
      requestCountPermits.release();
    }
    if (memoryPermitAcquired) {
      inflightMemoryPermits.release(maxPayloadBytes);
    }
  }

  /** Seals the currently open request (if any) so it dispatches without waiting for linger. */
  public void flush() {
    if (currentRequest != null) {
      currentRequest.flush();
    }
  }

  public MemqProducer<?, ?> getProducer() {
    return producer;
  }

  /**
   * Flushes the open request and shuts down both executors. Uses shutdown() (not
   * shutdownNow()) so already-submitted dispatch tasks are allowed to finish.
   */
  @Override
  public void close() throws IOException {
    flush();
    scheduler.shutdown();
    dispatcher.shutdown();
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/producer2/Request.java | memq-client/src/main/java/com/pinterest/memq/client/producer2/Request.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer2;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqMessageHeader;
import com.pinterest.memq.client.commons.audit.Auditor;
import com.pinterest.memq.client.commons2.MemoryAllocationException;
import com.pinterest.memq.client.commons2.MemqCommonClient;
import com.pinterest.memq.client.commons2.MemqPooledByteBufAllocator;
import com.pinterest.memq.client.commons2.network.ClosedConnectionException;
import com.pinterest.memq.client.commons2.retry.RetryStrategy;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.WriteRequestPacket;
import com.pinterest.memq.core.utils.MemqUtils;
import com.pinterest.memq.core.utils.MiscUtils;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufOutputStream;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.OutputStream;
import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.zip.CRC32;
/**
* Request class is responsible for managing the lifecycle of a request.
*
* At a high level, it handles the following:
* 1. ByteBuf allocation - it allocates a large ByteBuf of size maxPayloadSize that is sliced into 3 parts,
* each with their own read/write indices:
* - RequestPacket header
* - WriteRequestPacket header
* - Payload ByteBuf
* 2. Writing new messages to the request payload ByteBuf
* 3. Sealing the request - this is done when the request is ready to be dispatched either via size or time threshold
* 4. Dispatching the request - this is done by submitting a Dispatch task to the dispatcher executor
* 5. Handling the response - this is done by the Dispatch task, which handles the response and resolves the result future
* 6. Retrying the request - if the request fails due to a closed connection, it will retry the request based on the retry strategy
* 7. Releasing resources - it releases the ByteBuf and request count + inflight memory permits when the request is done
*/
public class Request {
private static final Logger logger = LoggerFactory.getLogger(Request.class);
private final ExecutorService dispatcher;
private final ScheduledExecutorService scheduler;
private final MemqCommonClient client;
private final RequestManager requestManager;
private final String topic;
private final int clientRequestId;
private final int maxRequestSize;
private final int lingerMs;
private final long sendRequestTimeoutMs;
private final RetryStrategy retryStrategy;
private final boolean disableAcks;
private final Compression compression;
private final AtomicBoolean available = new AtomicBoolean(true);
private final AtomicInteger activeWrites = new AtomicInteger(0);
private final CompletableFuture<MemqWriteResult> resultFuture = new CompletableFuture<>();
private final MetricRegistry metricRegistry;
private volatile Future<?> timeDispatchTask;
private volatile long startTime = System.currentTimeMillis();
private volatile boolean dispatching = false;
private final ByteBuf largeByteBuf;
private final ByteBuf requestPacketHeaderByteBuf;
private final ByteBuf writeRequestPacketHeaderByteBuf;
private final ByteBuf payloadByteBuf;
private OutputStream outputStream;
private byte[] messageIdHash;
private int messageCount;
private MemqMessageHeader header = new MemqMessageHeader(this);
private Counter sentBytesCounter;
private Counter ackedBytesCounter;
private Timer sendTimer;
private Timer requestWriteTimer;
private Timer dispatchTimer;
private Counter successCounter;
private Timer responseTimer;
private Timer ackTimer;
private RequestPacket requestPacket;
  /**
   * Allocates the request's backing buffer and carves it into three retained slices:
   * the RequestPacket header, the WriteRequestPacket header, and the payload region.
   * The slices are later stitched back together in createWriteRequestPacket().
   *
   * On buffer-allocation or stream-initialization failure the permits passed in are
   * released via the caller's catch blocks; this constructor only releases the buffer
   * itself (see the IOException catch below).
   *
   * @throws MemoryAllocationException if the pooled buffer cannot be allocated within maxBlockMs
   * @throws IOException if the payload output stream cannot be initialized
   */
  public Request(ExecutorService dispatcher,
                 ScheduledExecutorService scheduler,
                 MemqCommonClient client,
                 RequestManager requestManager,
                 Semaphore requestCountPermits,
                 Semaphore inflightMemoryPermits,
                 String topic,
                 int clientRequestId,
                 int maxPayloadSize,
                 int lingerMs,
                 int maxBlockMs,
                 long sendRequestTimeoutMs,
                 RetryStrategy retryStrategy,
                 boolean disableAcks,
                 Compression compression,
                 MetricRegistry metricRegistry) throws IOException, MemoryAllocationException {
    this.dispatcher = dispatcher;
    this.scheduler = scheduler;
    this.client = client;
    this.requestManager = requestManager;
    this.topic = topic;
    this.clientRequestId = clientRequestId;
    this.maxRequestSize = maxPayloadSize;
    this.lingerMs = lingerMs;
    this.sendRequestTimeoutMs = sendRequestTimeoutMs;
    this.retryStrategy = retryStrategy;
    this.disableAcks = disableAcks;
    this.compression = compression;
    this.metricRegistry = metricRegistry;
    // capacity may exceed maxRequestSize when the codec requires a minimum buffer
    int bufferCapacity = getByteBufCapacity(maxRequestSize, compression);
    largeByteBuf = MemqPooledByteBufAllocator.buffer(bufferCapacity, bufferCapacity, maxBlockMs);
    // retained slices share largeByteBuf's memory but keep independent read/write indices
    requestPacketHeaderByteBuf = largeByteBuf.retainedSlice(0, RequestPacket.getHeaderSize());
    writeRequestPacketHeaderByteBuf = largeByteBuf.retainedSlice(RequestPacket.getHeaderSize(), WriteRequestPacket.getHeaderSize(RequestType.PROTOCOL_VERSION, topic));
    payloadByteBuf = largeByteBuf.retainedSlice(RequestPacket.getHeaderSize() + WriteRequestPacket.getHeaderSize(RequestType.PROTOCOL_VERSION, topic), bufferCapacity - requestPacketHeaderByteBuf.readableBytes() - writeRequestPacketHeaderByteBuf.readableBytes());
    // start each slice empty so writes begin at its own offset 0
    requestPacketHeaderByteBuf.resetWriterIndex();
    writeRequestPacketHeaderByteBuf.resetWriterIndex();
    payloadByteBuf.resetWriterIndex();
    try {
      initializeOutputStream();
    } catch (IOException ioe) {
      // release bytebuf if exception happened to avoid bytebuf leaks
      largeByteBuf.release();
      throw ioe;
    }
    initializeMetrics();
    scheduleTimeBasedDispatch();
    // release request lock once the request is done
    resultFuture.handle((r, t) -> {
      requestCountPermits.release();
      inflightMemoryPermits.release(maxRequestSize);
      return null;
    });
  }
  // Looks up (or creates) the shared counters/timers for this request's lifecycle metrics.
  // MetricRegistry de-duplicates by name, so repeated Request construction is cheap.
  private void initializeMetrics() {
    sentBytesCounter = metricRegistry.counter("requests.sent.bytes");
    ackedBytesCounter = metricRegistry.counter("requests.acked.bytes");
    responseTimer = MiscUtils.oneMinuteWindowTimer(metricRegistry, "requests.response.time");
    ackTimer = MiscUtils.oneMinuteWindowTimer(metricRegistry, "requests.acked.time");
    successCounter = metricRegistry.counter("requests.success.count");
    requestWriteTimer = metricRegistry.timer("requests.write.time");
    sendTimer = MiscUtils.oneMinuteWindowTimer(metricRegistry, "requests.send.time");
    dispatchTimer = MiscUtils.oneMinuteWindowTimer(metricRegistry, "requests.dispatch.time");
  }
private void initializeOutputStream() throws IOException {
OutputStream stream = new ByteBufOutputStream(payloadByteBuf);
int headerLength = MemqMessageHeader.getHeaderLength();
stream.write(new byte[headerLength]);
if (compression != null) {
outputStream = compression.getDecompressStream(stream);
} else {
outputStream = Compression.NONE.getDecompressStream(stream);
}
}
private int getByteBufCapacity(int maxRequestSize, Compression compression) {
return Math.max(maxRequestSize, compression.minBufferSize);
}
  /**
   * Schedules the linger task that force-dispatches this request after lingerMs.
   * When lingerMs == 0 no task is scheduled (timeDispatchTask stays null) because
   * write() seals immediately in that mode. Any previously scheduled task is
   * cancelled before a new one is installed.
   */
  protected void scheduleTimeBasedDispatch() {
    if (lingerMs == 0) {
      return;
    }
    if (timeDispatchTask != null) {
      timeDispatchTask.cancel(true);
    }
    timeDispatchTask = scheduler.schedule(() -> {
      // cancel(true) interrupts; bail out quietly if we were cancelled mid-flight
      if (!Thread.interrupted()) {
        if (System.currentTimeMillis() - startTime >= lingerMs) {
          // if seal() returns true, the payload was sealed due to time threshold, so we should try to dispatch
          // if it was false, it means that a write has been initiated and sealed the payload, so the dispatching is on that write
          synchronized (this) {
            if (seal() && isReadyToUpload()) {
              tryDispatch();
            }
          }
        }
      }
    }, lingerMs, TimeUnit.MILLISECONDS);
  }
  /**
   * Attempts to append the record to this request's payload buffer.
   *
   * @return the shared result future for this request, or null when the record did not
   *         fit (or the request was already sealed) — the caller must retry on a fresh
   *         request. The record is only recycled when it was actually written.
   */
  public Future<MemqWriteResult> write(RawRecord record) throws IOException {
    int payloadSize = record.calculateEncodedLogMessageLength();
    // track this write so isReadyToUpload() can tell when all writers have drained
    activeWrites.getAndIncrement();
    try {
      if (!isAvailable()) {
        return null;
      }
      // synchronized to ensure bytebuf doesn't get out-of-order writes
      synchronized (payloadByteBuf) {
        if (payloadSize > payloadByteBuf.writableBytes()) {
          // record doesn't fit: seal so no further writes land here, caller retries elsewhere
          seal();
          return null;
        }
        try (Timer.Context ctx = requestWriteTimer.time()) {
          writeMemqLogMessage(record);
        } finally {
          record.recycle();
        }
      }
      if (lingerMs == 0) {
        // no linger: every write seals immediately; dispatch happens in the finally block
        seal();
      }
      return resultFuture;
    } finally {
      activeWrites.decrementAndGet();
      // In general, the last write needs to seal the request (close the door) and dispatch, unless the linger threshold was breached
      // 1. if the request is still available, it means that the dispatch criteria hasn't been met, so we don't dispatch
      // 2. if the request is not available (to write), but the batch is not ready to upload,
      //    it means that there is still an active write happening
      // 3. if the request is not available (to write) and there are no active writes after this current write,
      //    we can try to dispatch. tryDispatch might be invoked by the time dispatch task concurrently, and only one will
      //    proceed
      if (!isAvailable() && isReadyToUpload()) {
        tryDispatch();
      }
    }
  }
  /** True when no write() calls are currently in flight on this request, i.e. it is safe to dispatch. */
  protected boolean isReadyToUpload() {
    return activeWrites.get() == 0;
  }
  /**
   * Dispatches at most once: double-checked locking on the volatile `dispatching` flag
   * guarantees a single winner between the linger task and the last concurrent write.
   */
  protected void tryDispatch() {
    if (!dispatching) {
      synchronized (this) {
        if (!dispatching) {
          dispatching = true;
          dispatch();
        }
      }
    }
  }
// Finalizes the payload and hands it to the dispatcher executor: closes the
// (compression) output stream to flush remaining bytes, writes the batch header
// into the buffer, then either completes immediately (empty payload) or builds the
// wire packet and submits an async Dispatch task, cancelling the linger timer.
// The local reference to payloadByteBuf is released in the finally block; the
// packet built above holds its own retained duplicate of the payload.
public void dispatch() {
try {
outputStream.close();
} catch (IOException e) {
logger.warn("Failed to close output stream: ", e);
}
try {
header.writeHeader(payloadByteBuf);
int payloadSizeBytes = payloadByteBuf.readableBytes();
if (payloadSizeBytes == 0) { // don't upload 0 byte payloads
resultFuture.complete(new MemqWriteResult(clientRequestId, 0, 0, 0));
// NOTE(review): timeDispatchTask is not cancelled on this path — confirm intentional
return;
}
requestPacket = createWriteRequestPacket(payloadByteBuf.asReadOnly().retainedDuplicate());
dispatcher.submit(new Dispatch(payloadSizeBytes));
timeDispatchTask.cancel(true);
} finally {
payloadByteBuf.release();
}
}
/**
 * Given the payload ByteBuf, this method returns a RequestPacket that can be sent to the broker
 * via the NetworkClient.
 *
 * The RequestPacket's payload is constructed by stitching together the 3 sliced ByteBufs which were derived from
 * a large ByteBuf during Request creation. This single CompositeByteBuf is the overall payload of the RequestPacket which
 * will be sent to the broker.
 *
 * @param payload the payloadByteBuf
 * @return RequestPacket that can be sent to the broker
 */
public RequestPacket createWriteRequestPacket(ByteBuf payload) {
// CRC32 over the payload contents; the broker re-validates this checksum.
CRC32 crc32 = new CRC32();
crc32.update(payload.duplicate().nioBuffer());
int checksum = (int) crc32.getValue();
WriteRequestPacket writeRequestPacket = new WriteRequestPacket(disableAcks,
topic.getBytes(), true, checksum, payload.duplicate());
// Serialize the two protocol headers into the pre-allocated per-request buffers.
writeRequestPacket.writeHeader(writeRequestPacketHeaderByteBuf, RequestType.PROTOCOL_VERSION);
RequestPacket requestPacket = new RequestPacket(RequestType.PROTOCOL_VERSION, clientRequestId, RequestType.WRITE,
writeRequestPacket);
requestPacket.writeHeader(requestPacketHeaderByteBuf, RequestType.PROTOCOL_VERSION);
CompositeByteBuf finalCompositeByteBuf = PooledByteBufAllocator.DEFAULT.compositeBuffer();
finalCompositeByteBuf.addComponent(true, requestPacketHeaderByteBuf);
finalCompositeByteBuf.addComponent(true, writeRequestPacketHeaderByteBuf);
// NOTE(review): this adds the field payloadByteBuf rather than the 'payload'
// parameter (a retained read-only duplicate of it). Contents are presumably
// identical but reference-count ownership differs — confirm intentional.
finalCompositeByteBuf.addComponent(true, payloadByteBuf);
requestPacket.setPreAllocOutBuf(finalCompositeByteBuf);
return requestPacket;
}
// True while this request is still open for new writes (not yet sealed).
public boolean isAvailable() {
return available.get();
}
/**
 * Atomically closes this request to further writes.
 *
 * @return true if this call performed the seal (the request was available
 *         beforehand); false if some other caller had already sealed it
 */
public boolean seal() {
boolean wasAvailable = available.getAndSet(false);
return wasAvailable;
}
/**
 * Force-seals the request; when this call won the seal and no writes remain
 * in flight, it also initiates the dispatch.
 */
public void flush() {
boolean sealedByThisCall = this.seal();
if (sealedByThisCall && isReadyToUpload()) { // the flush is the initiator of the dispatch
this.tryDispatch();
}
}
// Serializes the record into the request's output stream and tracks it for
// auditing: folds its message id into the running hash and bumps the batch count.
public void writeMemqLogMessage(RawRecord record) throws IOException {
record.writeToOutputStream(outputStream);
// record the messageId
addMessageId(record.getMessageIdBytes());
messageCount++;
}
// Folds the given message id into the rolling audit hash; records without an id
// (null bytes) are skipped.
protected void addMessageId(byte[] messageIdBytes) {
if (messageIdBytes != null) {
messageIdHash = MemqUtils.calculateMessageIdHash(messageIdHash, messageIdBytes);
}
}
// Request format version; the literal 1_0_0 encodes semantic version 1.0.0 as the
// integer 100.
public short getVersion() {
return 1_0_0;
}
// Compression codec applied to this request's payload.
public Compression getCompression() {
return compression;
}
// Number of messages written into this request so far.
public int getMessageCount() {
return messageCount;
}
// Epoch used for audit records: the owning producer's epoch when one is attached,
// otherwise the current wall-clock time as a fallback.
public long getEpoch() {
return requestManager.getProducer() != null ? requestManager.getProducer().getEpoch() : System.currentTimeMillis();
}
// Identifier correlating this client request with broker responses and audit logs.
public int getClientRequestId() {
return clientRequestId;
}
// Best-effort release of a request packet's buffers. An IOException here is only
// logged (not propagated) because release is invoked on failure/cleanup paths.
private void release(RequestPacket packetToRelease) {
try {
packetToRelease.release();
} catch (IOException releaseError) {
logger.error("Failed to release request packet", releaseError);
}
}
/**
 * Runnable that ships this request's packet to the broker over the network client
 * and processes the asynchronous response. Handles closed-connection retries with
 * backoff, broker redirects, and resolves the request's result future exactly once
 * (with either a MemqWriteResult or an exception).
 */
protected class Dispatch implements Runnable {
private final int payloadSizeBytes;
// retry attempts consumed so far (incremented on closed-connection retries)
private final int attempts;
// redirect hops followed so far (bounded in handleResponse)
private final int redirects;
// remaining time budget for this dispatch attempt; run() rejects negative budgets
private final long dispatchTimeoutMs;
private final long dispatchTimestamp = System.currentTimeMillis();
private long writeTimestamp;
private int writeLatency;
public Dispatch(int payloadSizeBytes) {
this.payloadSizeBytes = payloadSizeBytes;
this.attempts = 0;
this.redirects = 0;
this.dispatchTimeoutMs = sendRequestTimeoutMs;
}
protected Dispatch(int payloadSizeBytes, int attempts, int redirects,
long requestDeadline) {
this.payloadSizeBytes = payloadSizeBytes;
this.attempts = attempts;
this.redirects = redirects;
// budget shrinks as wall-clock time passes toward the absolute deadline
this.dispatchTimeoutMs = requestDeadline - dispatchTimestamp;
}
@Override
public void run() {
if (dispatchTimeoutMs < 0) {
release(requestPacket);
resolve(new TimeoutException("Request timed out before retry: " + attempts));
return;
}
sentBytesCounter.inc(payloadSizeBytes);
Timer.Context dispatchTime = dispatchTimer.time();
try {
writeTimestamp = System.currentTimeMillis();
Timer.Context sendTime = sendTimer.time();
Timer.Context responseTime = responseTimer.time();
Timer.Context ackTime = ackTimer.time();
// retain for the network send; released in the completion callback below
requestPacket.getPreAllocOutBuf().retain();
CompletableFuture<ResponsePacket> response = client.sendRequestPacketAndReturnResponseFuture(requestPacket, topic, dispatchTimeoutMs);
sendTime.stop();
writeLatency = (int) (System.currentTimeMillis() - writeTimestamp);
response
.whenCompleteAsync((responsePacket, throwable) -> {
try {
if (throwable != null) {
handleException(throwable);
} else {
handleResponse(responsePacket, responseTime, ackTime);
}
} finally {
// always drop this attempt's refcounts, success or failure
try {
requestPacket.release();
if (responsePacket != null) {
responsePacket.release();
}
} catch (IOException e) {
logger.warn("Failed to release packets", e);
}
}
}, dispatcher);
} catch (Exception e) {
try {
requestPacket.release();
} catch (IOException ioe) {
logger.warn("Failed to release packets", ioe);
}
logger.error("Failed to send request " + clientRequestId, e);
resolve(e);
} finally {
dispatchTime.stop();
}
}
// Decides whether a failed send is retryable. Only ClosedConnectionException is
// retried (with the configured backoff strategy); other exceptions are unwrapped
// from ExecutionException layers and resolved terminally.
protected void handleException(Throwable throwable) {
if (throwable instanceof ClosedConnectionException) {
Duration nextRetryIntervalDuration = retryStrategy.calculateNextRetryInterval(attempts); // if the next interval is invalid, fail the result future
if (nextRetryIntervalDuration == null || dispatchTimeoutMs <= nextRetryIntervalDuration.toMillis()) {
resolve(new TimeoutException("Request timed out after " + sendRequestTimeoutMs + " ms and " + attempts + " retries : " + throwable.getMessage()));
} else {
logger.warn(throwable.getMessage() + ", retrying request " + clientRequestId + " after " + nextRetryIntervalDuration.toMillis() + " ms");
requestPacket.retry(); // retain the bytebuf since the finally clause in this Dispatch will release the local refCnt
try {
scheduler.schedule(() -> {
try {
// deadline carried forward: creation time + remaining budget
dispatcher.submit(new Dispatch(payloadSizeBytes, attempts + 1, redirects, dispatchTimeoutMs + dispatchTimestamp));
} catch (Exception e) {
release(requestPacket);
resolve(e);
}
}, nextRetryIntervalDuration.toMillis(), TimeUnit.MILLISECONDS);
} catch (Exception e) {
release(requestPacket);
resolve(e);
}
}
} else if (throwable instanceof Exception) {
Exception resultException = (Exception) throwable;
// peel nested ExecutionException wrappers to surface the root cause
while (resultException instanceof ExecutionException && resultException.getCause() instanceof Exception) {
resultException = (Exception) resultException.getCause();
}
resolve(resultException);
} else {
logger.error("Failed to send request " + clientRequestId, throwable);
resolve(throwable);
}
}
// Maps broker response codes to terminal results, or to a redirect retry for
// REDIRECT (at most 2 redirect hops).
protected void handleResponse(ResponsePacket responsePacket, Timer.Context responseTime, Timer.Context ackTime) {
short responseCode = responsePacket.getResponseCode();
responseTime.stop();
switch (responseCode) {
case ResponseCodes.OK:
ackedBytesCounter.inc(payloadSizeBytes);
sendAuditMessageIfAuditEnabled();
int ackLatency = (int) (System.currentTimeMillis() - writeTimestamp);
ackTime.stop();
logger.debug("Request acked in:" + ackLatency + " " + clientRequestId);
resolve(new MemqWriteResult(clientRequestId, writeLatency, ackLatency, payloadSizeBytes));
break;
case ResponseCodes.REDIRECT:
if (redirects > 1) {
resolve(new Exception("Write request failed after multiple attempts"));
return;
}
try {
client.reconnect(topic, false);
} catch (Exception e) {
resolve(e);
return;
}
try {
requestPacket.retry();
dispatcher.submit(
new Dispatch(
payloadSizeBytes,
attempts,
redirects + 1,
dispatchTimeoutMs + dispatchTimestamp
)
);
} catch (Exception e) {
logger.error("Error: ", e);
}
break;
case ResponseCodes.BAD_REQUEST:
resolve(new Exception("Bad request, id: " + clientRequestId));
break;
case ResponseCodes.NOT_FOUND:
resolve(new Exception("Topic not found: " + topic));
break;
case ResponseCodes.INTERNAL_SERVER_ERROR:
resolve(new Exception("Unknown server error: " + clientRequestId));
break;
case ResponseCodes.REQUEST_FAILED:
resolve(new Exception("Request failed: " + clientRequestId));
break;
case ResponseCodes.SERVICE_UNAVAILABLE:
resolve(new Exception("Server out of capacity: " + topic));
break;
default:
resolve(new Exception("Unknown response code: " + responseCode));
break;
}
}
// Emits an audit record for the acked batch when an auditor is configured.
private void sendAuditMessageIfAuditEnabled() {
Auditor auditor = requestManager.getProducer().getAuditor();
if (auditor != null) {
MemqProducer<?, ?> producer = requestManager.getProducer();
try {
auditor.auditMessage(producer.getCluster().getBytes(MemqUtils.CHARSET),
topic.getBytes(MemqUtils.CHARSET), MemqUtils.HOST_IPV4_ADDRESS,
getEpoch(), clientRequestId, messageIdHash, messageCount, true, "producer");
} catch (IOException e) {
logger.error("Failed to log audit record for topic:" + topic, e);
}
}
}
// Terminal success: completes the batch future and releases the backing buffer.
private void resolve(MemqWriteResult writeResult) {
resultFuture.complete(writeResult);
successCounter.inc();
largeByteBuf.release();
}
// Terminal failure: fails the batch future and releases the backing buffer.
private void resolve(Throwable e) {
resultFuture.completeExceptionally(e);
largeByteBuf.release();
}
/**
 * @return the absolute deadline (epoch millis) of this dispatch attempt.
 */
public long getDeadline() {
// BUGFIX: previously returned dispatchTimeoutMs + dispatchTimeoutMs (twice the
// remaining budget). The deadline is creation time plus remaining budget, matching
// how retry/redirect Dispatch instances are constructed above.
return dispatchTimeoutMs + dispatchTimestamp;
}
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/producer2/MemqProducer.java | memq-client/src/main/java/com/pinterest/memq/client/producer2/MemqProducer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer2;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqMessageHeader;
import com.pinterest.memq.client.commons.audit.Auditor;
import com.pinterest.memq.client.commons.audit.KafkaBackedAuditor;
import com.pinterest.memq.client.commons.serde.Serializer;
import com.pinterest.memq.client.commons2.Endpoint;
import com.pinterest.memq.client.commons2.MemoryAllocationException;
import com.pinterest.memq.client.commons2.MemqCommonClient;
import com.pinterest.memq.client.commons2.retry.FullJitterRetryStrategy;
import com.pinterest.memq.client.commons2.retry.RetryStrategy;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.commons.MessageId;
import com.pinterest.memq.commons.config.SSLConfig;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.TopicMetadata;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.NoopMetricRegistry;
import com.codahale.metrics.Timer;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.Future;
/**
 * MemQ producer (v2): serializes keys/values, batches records into requests via a
 * RequestManager, and ships them to brokers through a MemqCommonClient. Instances
 * can be memoized per "cluster/topic" via {@link Builder#memoize()}. Not
 * constructed directly — use {@link Builder#build()}.
 */
public class MemqProducer<K, V> implements Closeable {
// memoization cache keyed by "cluster/topic"; access synchronized on the map
protected static final Map<String, MemqProducer<?, ?>> clientMap = new HashMap<>();
private static final Logger logger = LoggerFactory.getLogger(MemqProducer.class);
private final String cluster;
private final int maxPayloadBytes;
private final Serializer<K> keySerializer;
private final Serializer<V> valueSerializer;
// creation time; exposed for audit records
private final long epoch = System.currentTimeMillis();
private final MemqCommonClient client;
private final RequestManager requestManager;
private final Auditor auditor;
private final MetricRegistry metricRegistry;
private Counter writeTooLargeMessagesCounter;
private Histogram writeMessageSizeHistogram;
private Counter writeMessageCounter;
private Timer writeTimer;
/**
 * Wires the network client, request manager, optional reflective auditor, metrics,
 * and the initial topic connection. On any failure the partially-constructed
 * producer is closed before the exception is rethrown, so no resources leak.
 */
protected MemqProducer(String cluster,
String topic,
String locality,
List<Endpoint> bootstrapEndpoints,
Serializer<K> keySerializer,
Serializer<V> valueSerializer,
Properties auditProperties,
Properties networkProperties,
SSLConfig sslConfig,
long sendRequestTimeout,
RetryStrategy retryStrategy,
int maxPayloadBytes,
int lingerMs,
int maxBlockMs,
int maxInflightRequestsMemoryBytes,
int maxInflightRequests,
Compression compression,
boolean disableAcks,
MetricRegistry metricRegistry,
MemqCommonClient memqCommonClient) throws Exception {
this.cluster = cluster;
this.maxPayloadBytes = maxPayloadBytes;
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
this.metricRegistry = metricRegistry;
try {
if (memqCommonClient != null) {
// injected client (tests)
this.client = memqCommonClient;
} else {
this.client = new MemqCommonClient(locality, sslConfig, networkProperties);
}
this.requestManager =
new RequestManager(client, topic, this, sendRequestTimeout, retryStrategy, maxPayloadBytes, lingerMs, maxBlockMs, maxInflightRequestsMemoryBytes, maxInflightRequests, compression, disableAcks, metricRegistry);
if (auditProperties != null) {
String
auditorClass =
auditProperties.getProperty("class", KafkaBackedAuditor.class.getCanonicalName());
this.auditor = Class.forName(auditorClass).asSubclass(Auditor.class).newInstance();
this.auditor.init(auditProperties);
} else {
this.auditor = null;
}
initializeMetrics();
initializeTopicConnection(bootstrapEndpoints, topic);
} catch (Exception e) {
// avoid leaking sockets/threads from a partially-constructed producer
close();
throw e;
}
}
private void initializeMetrics() {
writeTooLargeMessagesCounter = metricRegistry.counter("producer.write.too_large_messages");
writeMessageCounter = metricRegistry.counter("producer.write.message");
writeMessageSizeHistogram = metricRegistry.histogram("producer.write.message.size");
writeTimer = metricRegistry.timer("producer.write.time");
}
// Bootstraps from the given endpoints, fetches topic metadata, then pins the
// client's endpoints to the topic's serving write brokers.
private void initializeTopicConnection(List<Endpoint> bootstrapEndpoints, String topic) throws Exception {
client.initialize(bootstrapEndpoints);
TopicMetadata topicMetadata = client.getTopicMetadata(topic);
Set<Broker> brokers = topicMetadata.getWriteBrokers();
logger.debug("Fetched topic metadata, now reconnecting to one of the serving brokers:" + brokers);
client.resetEndpoints(MemqCommonClient.generateEndpointsFromBrokers(brokers));
}
/**
 * @param key
 * @param value
 * @return
 * @throws IOException
 */
public Future<MemqWriteResult> write(K key,
V value) throws IOException, MemoryAllocationException {
return write(null, null, key, value, System.currentTimeMillis());
}
public Future<MemqWriteResult> write(K key,
V value,
long writeTimestamp) throws IOException, MemoryAllocationException {
return write(null, null, key, value, writeTimestamp);
}
public Future<MemqWriteResult> write(MessageId messageId,
K key,
V value,
long writeTimestamp) throws IOException, MemoryAllocationException {
return write(messageId, null, key, value, writeTimestamp);
}
public Future<MemqWriteResult> write(MessageId messageId,
K key,
V value) throws IOException, MemoryAllocationException {
return write(messageId, null, key, value, System.currentTimeMillis());
}
public Future<MemqWriteResult> write(Map<String, byte[]> headers,
K key,
V value) throws IOException, MemoryAllocationException {
return write(null, headers, key, value, System.currentTimeMillis());
}
// returns null if the record is dropped (encoded size exceeds the max payload
// budget after reserving the message header)
public Future<MemqWriteResult> write(MessageId messageId,
Map<String, byte[]> headers,
K key,
V value,
long writeTimestamp) throws IOException, MemoryAllocationException {
byte[] keyBytes = keySerializer.serialize(key);
byte[] valueBytes = valueSerializer.serialize(value);
RawRecord record = RawRecord.newInstance(messageId, headers, keyBytes, valueBytes, writeTimestamp);
int encodedMessageLength = record.calculateEncodedLogMessageLength();
if (encodedMessageLength > maxPayloadBytes - MemqMessageHeader.getHeaderLength()) {
writeTooLargeMessagesCounter.inc();
return null;
}
try(Timer.Context ctx = writeTimer.time()) {
writeMessageCounter.inc();
writeMessageSizeHistogram.update(encodedMessageLength);
return requestManager.write(record);
}
}
@VisibleForTesting
protected List<Endpoint> getWriteEndpoints() {
return client.currentWriteEndpoints();
}
public MetricRegistry getMetricRegistry() {
return metricRegistry;
}
// Seals and dispatches any in-progress request.
public void flush() {
requestManager.flush();
}
@Override
public void close() throws IOException {
if (requestManager != null) {
requestManager.close();
}
if (client != null) {
client.close();
}
if (auditor != null) {
auditor.close();
}
}
@VisibleForTesting
protected int getRequestCountAvailablePermits() {
return requestManager.getRequestCountAvailablePermits();
}
@VisibleForTesting
protected int getInflightMemoryAvailablePermits() {
return requestManager.getInflightMemoryAvailablePermits();
}
public String getCluster() {
return cluster;
}
public long getEpoch() {
return epoch;
}
public Auditor getAuditor() {
return auditor;
}
/**
 * Fluent builder for {@link MemqProducer}. Either a serverset file or a
 * bootstrap-server string must be supplied; see {@link #validate()}.
 */
public static class Builder<K, V> {
private String cluster;
private String topic;
private String locality = Endpoint.DEFAULT_LOCALITY;
private String serversetFile;
private String bootstrapServers;
private Serializer<K> keySerializer;
private Serializer<V> valueSerializer;
private Properties auditProperties = null;
private Properties networkProperties = null;
private SSLConfig sslConfig = null;
private long sendRequestTimeout = 5000;
private RetryStrategy retryStrategy = null;
private int maxPayloadBytes = 1024 * 1024; // 1 MB
private int lingerMs = 10;
private int maxInflightRequests = 30;
private Compression compression = Compression.ZSTD;
private boolean disableAcks = false;
private MetricRegistry metricRegistry = null;
private MemqCommonClient client = null;
private boolean memoize = false;
private int maxBlockMs;
private int maxInflightRequestsMemoryBytes = 32 * 1024 * 1024; // 32 MB
public Builder() {
}
// Copy constructor: duplicates every setting of the given builder.
public Builder(Builder<K, V> builder) {
cluster = builder.cluster;
topic = builder.topic;
locality = builder.locality;
serversetFile = builder.serversetFile;
bootstrapServers = builder.bootstrapServers;
keySerializer = builder.keySerializer;
valueSerializer = builder.valueSerializer;
auditProperties = builder.auditProperties;
networkProperties = builder.networkProperties;
sslConfig = builder.sslConfig;
sendRequestTimeout = builder.sendRequestTimeout;
retryStrategy = builder.retryStrategy;
maxPayloadBytes = builder.maxPayloadBytes;
lingerMs = builder.lingerMs;
maxBlockMs = builder.maxBlockMs;
maxInflightRequestsMemoryBytes = builder.maxInflightRequestsMemoryBytes;
// BUGFIX: previously copied builder.maxPayloadBytes here, silently
// misconfiguring the copied builder's in-flight request limit.
maxInflightRequests = builder.maxInflightRequests;
compression = builder.compression;
disableAcks = builder.disableAcks;
metricRegistry = builder.metricRegistry;
client = builder.client;
memoize = builder.memoize;
}
public Builder<K, V> cluster(String cluster) {
this.cluster = cluster;
return this;
}
public Builder<K, V> topic(String topic) {
this.topic = topic;
return this;
}
public Builder<K, V> locality(String locality) {
this.locality = locality;
return this;
}
public Builder<K, V> keySerializer(Serializer<K> keySerializer) {
this.keySerializer = keySerializer;
return this;
}
public Builder<K, V> valueSerializer(Serializer<V> valueSerializer) {
this.valueSerializer = valueSerializer;
return this;
}
public Builder<K, V> serversetFile(String serversetFile) {
this.serversetFile = serversetFile;
return this;
}
public Builder<K, V> bootstrapServers(String bootstrapServers) {
this.bootstrapServers = bootstrapServers;
return this;
}
public Builder<K, V> auditProperties(Properties auditProperties) {
this.auditProperties = auditProperties;
return this;
}
public Builder<K, V> networkProperties(Properties networkProperties) {
this.networkProperties = networkProperties;
return this;
}
public Builder<K, V> sslConfig(SSLConfig sslConfig) {
this.sslConfig = sslConfig;
return this;
}
public Builder<K, V> sendRequestTimeout(long sendRequestTimeout) {
this.sendRequestTimeout = sendRequestTimeout;
return this;
}
public Builder<K, V> retryStrategy(RetryStrategy retryStrategy) {
this.retryStrategy = retryStrategy;
return this;
}
public Builder<K, V> maxPayloadBytes(int maxPayloadBytes) {
this.maxPayloadBytes = maxPayloadBytes;
return this;
}
public Builder<K, V> lingerMs(int lingerMs) {
this.lingerMs = lingerMs;
return this;
}
public Builder<K, V> maxBlockMs(int maxBlockMs) {
this.maxBlockMs = maxBlockMs;
return this;
}
public Builder<K, V> maxInflightRequestsMemoryBytes(int maxInflightRequestsMemoryBytes) {
this.maxInflightRequestsMemoryBytes = maxInflightRequestsMemoryBytes;
return this;
}
public Builder<K, V> maxInflightRequests(int maxInflightRequests) {
this.maxInflightRequests = maxInflightRequests;
return this;
}
public Builder<K, V> compression(Compression compression) {
this.compression = compression;
return this;
}
public Builder<K, V> disableAcks(boolean disableAcks) {
this.disableAcks = disableAcks;
return this;
}
public Builder<K, V> metricRegistry(MetricRegistry metricRegistry) {
this.metricRegistry = metricRegistry;
return this;
}
@VisibleForTesting
protected Builder<K, V> injectClient(MemqCommonClient client) {
this.client = client;
return this;
}
// Builds a producer; with memoize(), returns the cached live producer for the
// cluster/topic pair, creating and caching a new one if absent or closed.
@SuppressWarnings("unchecked")
public MemqProducer<K, V> build() throws Exception {
if (memoize) {
MemqProducer<K, V> ret;
synchronized (clientMap) {
String key = cluster + "/" + topic;
ret = (MemqProducer<K, V>) clientMap.get(key);
if (ret == null || ret.client.isClosed()) {
ret = create();
clientMap.put(key, ret);
}
}
return ret;
}
return create();
}
private MemqProducer<K, V> create() throws Exception {
validate();
List<Endpoint> bootstrapEndpoints = generateEndpoints();
if (retryStrategy == null) {
retryStrategy = new FullJitterRetryStrategy();
}
if (metricRegistry == null) {
metricRegistry = new NoopMetricRegistry();
}
return new MemqProducer<>(
cluster,
topic,
locality,
bootstrapEndpoints,
keySerializer,
valueSerializer,
auditProperties,
networkProperties,
sslConfig,
sendRequestTimeout,
retryStrategy,
maxPayloadBytes,
lingerMs,
maxBlockMs,
maxInflightRequestsMemoryBytes,
maxInflightRequests,
compression,
disableAcks,
metricRegistry,
client);
}
// Fails fast on missing required settings before any network work happens.
private void validate() throws Exception {
if (cluster == null) {
throw new Exception("cluster is null");
}
if (topic == null) {
throw new Exception("topic is null");
}
if (serversetFile == null && bootstrapServers == null) {
throw new Exception("serversetFile and bootstrapServers cannot be both null");
}
if (keySerializer == null) {
throw new Exception("keySerializer is null");
}
if (valueSerializer == null) {
throw new Exception("valueSerializer is null");
}
}
// Serverset file takes precedence over the bootstrap-server string.
private List<Endpoint> generateEndpoints() throws Exception {
List<Endpoint> endpoints;
if (serversetFile != null) {
return MemqCommonClient.parseServersetFile(serversetFile);
} else {
endpoints = MemqCommonClient.getEndpointsFromBootstrapServerString(bootstrapServers);
}
return endpoints;
}
public Builder<K, V> memoize() {
this.memoize = true;
return this;
}
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/producer/MemqProducer.java | memq-client/src/main/java/com/pinterest/memq/client/producer/MemqProducer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer;
import static com.pinterest.memq.client.commons.CommonConfigs.AUDITOR_ENABLED;
import static com.pinterest.memq.client.commons.CommonConfigs.BOOTSTRAP_SERVERS;
import static com.pinterest.memq.client.commons.CommonConfigs.CLUSTER;
import static com.pinterest.memq.client.commons.CommonConfigs.SERVERSET_FILE;
import static com.pinterest.memq.client.commons.ProducerConfigs.CLIENT_LOCALITY;
import static com.pinterest.memq.client.commons.ProducerConfigs.DEFAULT_ACK_CHECKPOLLINTERVAL_MS;
import static com.pinterest.memq.client.commons.ProducerConfigs.DEFAULT_COMPRESSION_TYPE;
import static com.pinterest.memq.client.commons.ProducerConfigs.DEFAULT_DISABLE_ACKS;
import static com.pinterest.memq.client.commons.ProducerConfigs.DEFAULT_LOCALITY;
import static com.pinterest.memq.client.commons.ProducerConfigs.DEFAULT_MAX_INFLIGHT_REQUESTS;
import static com.pinterest.memq.client.commons.ProducerConfigs.DEFAULT_MAX_PAYLOADBYTES;
import static com.pinterest.memq.client.commons.ProducerConfigs.DEFAULT_REQUEST_ACKS_TIMEOUT_MS;
import static com.pinterest.memq.client.commons.ProducerConfigs.KEY_SERIALIZER;
import static com.pinterest.memq.client.commons.ProducerConfigs.REQUEST_ACKS_CHECKPOLLINTERVAL_MS;
import static com.pinterest.memq.client.commons.ProducerConfigs.REQUEST_ACKS_DISABLE;
import static com.pinterest.memq.client.commons.ProducerConfigs.REQUEST_ACKS_TIMEOUT_MS;
import static com.pinterest.memq.client.commons.ProducerConfigs.REQUEST_COMPRESSION_TYPE;
import static com.pinterest.memq.client.commons.ProducerConfigs.REQUEST_MAX_INFLIGHTREQUESTS;
import static com.pinterest.memq.client.commons.ProducerConfigs.REQUEST_MAX_PAYLOADBYTES;
import static com.pinterest.memq.client.commons.ProducerConfigs.REQUEST_TIMEOUT;
import static com.pinterest.memq.client.commons.ProducerConfigs.TOPIC_NAME;
import static com.pinterest.memq.client.commons.ProducerConfigs.VALUE_SERIALIZER;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.time.Duration;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.pinterest.memq.client.commons.AuditorUtils;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqCommonClient;
import com.pinterest.memq.client.commons.audit.Auditor;
import com.pinterest.memq.client.commons.audit.KafkaBackedAuditor;
import com.pinterest.memq.client.commons.serde.Serializer;
import com.pinterest.memq.client.producer.netty.MemqNettyProducer;
import com.pinterest.memq.commons.MessageId;
import com.pinterest.memq.commons.config.SSLConfig;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.TopicMetadata;
public abstract class MemqProducer<K, V> {
private static final Logger LOG = LoggerFactory.getLogger(MemqProducer.class);
// bytes reserved at the front of each payload for the length/header prefix
public static final int PAYLOADHEADER_BYTES = 8;
// cache of producers keyed by "serversetFile/topic"; populated by getInstance
@SuppressWarnings("rawtypes")
private static Map<String, MemqProducer> clientMap = new ConcurrentHashMap<>();
/**
 * Convenience overload of {@link #getInstance(Properties, boolean)} that uses the
 * shared client cache.
 */
public static synchronized <K, V> MemqProducer<K, V> getInstance(Properties properties) throws Exception {
MemqProducer<K, V> cachedOrNewClient = getInstance(properties, false);
return cachedOrNewClient;
}
@SuppressWarnings("unchecked")
public static synchronized <K, V> MemqProducer<K, V> getInstance(Properties properties,
boolean disableCache) throws Exception {
String serversetFile = properties.getProperty(SERVERSET_FILE);
String topicName = properties.getProperty(TOPIC_NAME);
String cluster = properties.getProperty(CLUSTER);
int maxInflightRequest = Integer.parseInt(
properties.getProperty(REQUEST_MAX_INFLIGHTREQUESTS, DEFAULT_MAX_INFLIGHT_REQUESTS));
int maxPayLoadBytes = Integer
.parseInt(properties.getProperty(REQUEST_MAX_PAYLOADBYTES, DEFAULT_MAX_PAYLOADBYTES));
Compression compression = Compression
.valueOf(properties.getProperty(REQUEST_COMPRESSION_TYPE, DEFAULT_COMPRESSION_TYPE));
boolean disableAcks = Boolean
.parseBoolean(properties.getProperty(REQUEST_ACKS_DISABLE, DEFAULT_DISABLE_ACKS));
int ackCheckPollInterval = Integer.parseInt(properties
.getProperty(REQUEST_ACKS_CHECKPOLLINTERVAL_MS, DEFAULT_ACK_CHECKPOLLINTERVAL_MS));
int requestTimeout = Integer
.parseInt(properties.getProperty(REQUEST_TIMEOUT, DEFAULT_REQUEST_ACKS_TIMEOUT_MS));
int requestAckTimeout = Integer
.parseInt(properties.getProperty(REQUEST_ACKS_TIMEOUT_MS, DEFAULT_REQUEST_ACKS_TIMEOUT_MS));
String locality = properties.getProperty(CLIENT_LOCALITY, DEFAULT_LOCALITY);
String bootstrapServers = properties.getProperty(BOOTSTRAP_SERVERS);
Serializer<K> keySerializer;
Serializer<V> valueSerializer;
try {
keySerializer = Class.forName(properties.getProperty(KEY_SERIALIZER))
.asSubclass(Serializer.class).newInstance();
valueSerializer = Class.forName(properties.getProperty(VALUE_SERIALIZER))
.asSubclass(Serializer.class).newInstance();
} catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
throw new Exception("Failed to initialize serializer", e);
}
Properties auditConfig = null;
boolean auditEnabled = Boolean.parseBoolean(properties.getProperty(AUDITOR_ENABLED, "false"));
if (auditEnabled) {
auditConfig = AuditorUtils.extractAuditorConfig(properties);
}
SSLConfig sslConfigs = null;
String clientId = serversetFile + "/" + topicName;
MemqProducer<K, V> memqClient = clientMap.get(clientId);
if (memqClient == null || memqClient.isClosed()) {
if (serversetFile!=null) {
memqClient = new MemqNettyProducer<>(cluster, serversetFile, topicName, maxInflightRequest,
maxPayLoadBytes, compression, disableAcks, ackCheckPollInterval, requestTimeout, locality,
requestAckTimeout, auditConfig, sslConfigs);
}else if (bootstrapServers !=null){
Set<Broker> bootstrapBrokers = MemqCommonClient.getBootstrapBrokers(bootstrapServers);
memqClient = new MemqNettyProducer<>(cluster, bootstrapBrokers, topicName, maxInflightRequest,
maxPayLoadBytes, compression, disableAcks, ackCheckPollInterval, requestTimeout, locality,
requestAckTimeout, auditConfig, sslConfigs);
}
memqClient.keySerializer = keySerializer;
memqClient.valueSerializer = valueSerializer;
if (!disableCache) {
clientMap.put(clientId, memqClient);
}
}
return memqClient;
}
protected abstract boolean isClosed();
// producer creation time; used as the epoch for audit records
protected long epoch = System.currentTimeMillis();
protected Serializer<K> keySerializer;
protected Serializer<V> valueSerializer;
// in-flight requests keyed by request id
protected Map<Long, TaskRequest> requestMap;
protected int maxPayLoadBytes;
// monotonically increasing id of the request currently being filled
protected AtomicLong currentRequestId;
protected Future<MemqWriteResult> currentRequest;
protected TaskRequest currentRequestTask;
protected int maxInflightRequests;
// bounds the number of concurrently in-flight requests
protected Semaphore maxRequestLock;
protected Compression compression;
protected int ackCheckPollIntervalMs;
protected boolean disableAcks;
protected String topicName;
protected String locality;
protected int requestAckTimeout;
protected Auditor auditor;
protected String cluster;
protected SSLConfig sslConfig;
/**
 * Base constructor: wires common producer configuration and, when auditorConfig is
 * non-null, reflectively instantiates and initializes the Auditor named by its
 * "class" property (default: KafkaBackedAuditor).
 *
 * @throws Exception if the auditor class cannot be loaded or initialized
 */
public MemqProducer(String cluster,
String topicName,
int maxInflightRequest,
int maxPayLoadBytes,
Compression compression,
boolean disableAcks,
int ackCheckPollIntervalMs,
String locality,
int requestAckTimeout,
Properties auditorConfig,
SSLConfig sslConfig) throws Exception {
this.cluster = cluster;
this.locality = locality;
this.requestAckTimeout = requestAckTimeout;
this.sslConfig = sslConfig;
this.requestMap = new ConcurrentHashMap<>();
this.maxInflightRequests = maxInflightRequest;
this.topicName = topicName;
this.compression = compression;
this.disableAcks = disableAcks;
this.ackCheckPollIntervalMs = ackCheckPollIntervalMs;
this.currentRequestId = new AtomicLong(0L);
this.maxRequestLock = new Semaphore(maxInflightRequests);
this.maxPayLoadBytes = maxPayLoadBytes;
if (auditorConfig != null) {
String auditorClass = auditorConfig.getProperty("class",
KafkaBackedAuditor.class.getCanonicalName());
this.auditor = Class.forName(auditorClass).asSubclass(Auditor.class).newInstance();
this.auditor.init(auditorConfig);
}
}
/** Writes key/value with the current wall clock as the write timestamp. */
public synchronized Future<MemqWriteResult> writeToTopic(K key, V value) throws IOException {
  return writeToTopic(null, null, key, value, System.currentTimeMillis());
}

/** Writes key/value tagged with a MessageId (for audit dedupe) and explicit timestamp. */
public synchronized Future<MemqWriteResult> writeToTopic(MessageId messageId,
                                                         K key,
                                                         V value,
                                                         long writeTimestamp) throws IOException {
  return writeToTopic(messageId, null, key, value, writeTimestamp);
}

/** Writes key/value with an explicit write timestamp and no MessageId or headers. */
public synchronized Future<MemqWriteResult> writeToTopic(K key,
                                                         V value,
                                                         long writeTimestamp) throws IOException {
  return writeToTopic(null, null, key, value, writeTimestamp);
}

/** Writes key/value tagged with a MessageId, using the current time as timestamp. */
public synchronized Future<MemqWriteResult> writeToTopic(MessageId messageId,
                                                         K key,
                                                         V value) throws IOException {
  return writeToTopic(messageId, null, key, value, System.currentTimeMillis());
}

/** Writes key/value with user headers, using the current time as timestamp. */
public synchronized Future<MemqWriteResult> writeToTopic(Map<String, byte[]> headers,
                                                         K key,
                                                         V value) throws IOException {
  return writeToTopic(null, headers, key, value, System.currentTimeMillis());
}
/**
 * Serializes and appends one message to the current batch request, rolling over to a
 * fresh request when the current buffer cannot hold the message.
 *
 * @return a Future tracking the batch this message was written into, or null when the
 *         message exceeds the maximum message size and is dropped (legacy contract:
 *         oversized messages are signaled by a null return, not an exception)
 * @throws IOException if serialization or buffer writing fails
 */
public synchronized Future<MemqWriteResult> writeToTopic(MessageId messageId,
                                                         Map<String, byte[]> headers,
                                                         K key,
                                                         V value,
                                                         long writeTimestamp) throws IOException {
  byte[] keyBytes = keySerializer.serialize(key);
  byte[] valueBytes = valueSerializer.serialize(value);
  byte[] headerBytes = serializeHeadersToByteArray(headers);
  int totalPayloadLength = calculateTotalPayloadLength(messageId, keyBytes, valueBytes,
      headerBytes);
  if (totalPayloadLength > getMaxMessageSize()) {
    // message can never fit in a request buffer; drop it but leave a trace so the
    // data loss is diagnosable (previously this was a fully silent drop)
    LOG.warn("Dropping oversized message for topic:" + topicName + " size:"
        + totalPayloadLength + " max:" + getMaxMessageSize());
    return null;
  }
  TaskRequest request = warmAndGetRequestEntry(getCurrentRequestId().get());
  if (getCurrentRequestTask().remaining() <= totalPayloadLength) {
    // current buffer is too full for this message: seal it and start a fresh request
    finalizeRequest();
    request = warmAndGetRequestEntry(getCurrentRequestId().get());
  }
  // note compression estimation isn't used here so we may be leaving unused bytes
  writeMemqLogMessage(messageId, headerBytes, keyBytes, valueBytes, request, writeTimestamp);
  return getCurrentRequest();
}
/**
 * Computes the serialized footprint of one message: value + optional key + optional
 * messageId + optional headers, plus an 11-byte fixed overhead
 * (8 bytes write timestamp + 1 byte messageId length + 2 bytes header length).
 */
private int calculateTotalPayloadLength(MessageId messageId,
                                        byte[] keyBytes,
                                        byte[] valueBytes,
                                        byte[] headerBytes) {
  int total = 11 + valueBytes.length;
  if (keyBytes != null) {
    total += keyBytes.length;
  }
  if (messageId != null) {
    total += messageId.toByteArray().length;
  }
  if (headerBytes != null) {
    total += headerBytes.length;
  }
  return total;
}
/**
 * Serializes one log message into the request's output stream using the Memq message
 * wire format:
 *
 *   [2B extra-fields length]
 *   [extra fields: 8B write timestamp, 1B messageId length (+ messageId bytes),
 *                  2B header length (+ header bytes)]
 *   [4B key length (+ key bytes, 0 when absent)]
 *   [4B value length (+ value bytes)]
 *
 * Also folds the messageId into the request's running id hash (for auditing) and
 * increments the request's message count. Field order is part of the on-wire
 * contract — do not reorder writes.
 */
public static void writeMemqLogMessage(MessageId messageId,
                                       byte[] headerBytes,
                                       byte[] keyBytes,
                                       byte[] valueBytes,
                                       TaskRequest request,
                                       long writeTimestamp) throws IOException {
  OutputStream os = request.getOutputStream();
  // #######################################
  // write additional fields here in future
  // #######################################
  // 8 bytes for write ts
  // 1 byte for messageId length
  // 2 bytes for header length
  ByteArrayOutputStream out = new ByteArrayOutputStream(11);
  DataOutputStream str = new DataOutputStream(out);
  // write timestamp
  str.writeLong(writeTimestamp);
  // message id: single length byte (0 means absent) followed by the id bytes
  if (messageId != null) {
    byte[] ary = messageId.toByteArray();
    str.write((byte) ary.length);
    str.write(ary);
  } else {
    str.write((byte) 0);
  }
  // encode and write user defined headers (pre-serialized; 0 length means absent)
  if (headerBytes != null) {
    str.writeShort(headerBytes.length);
    str.write(headerBytes);
  } else {
    str.writeShort(0);
  }
  // #######################################
  // write additional fields here in future
  // #######################################
  str.close();
  byte[] byteArray = out.toByteArray();
  // 2-byte length prefix lets readers skip unknown future extra fields
  os.write(ByteBuffer.allocate(2).putShort((short) byteArray.length).array());
  os.write(byteArray);
  ByteBuffer keyLength = ByteBuffer.allocate(4);
  if (keyBytes != null) {
    // mark keys present
    keyLength.putInt(keyBytes.length);
    os.write(keyLength.array());
    os.write(keyBytes);
  } else {
    keyLength.putInt(0);
    os.write(keyLength.array());
  }
  os.write(ByteBuffer.allocate(4).putInt(valueBytes.length).array());
  os.write(valueBytes);
  // record the messageId in the running audit hash
  request.addMessageId(messageId);
  os.flush();
  request.incrementLogMessageCount();
}
/**
 * Encodes user supplied headers into a length-prefixed binary layout: for each entry,
 * a 2-byte key length, the key bytes, a 2-byte value length, then the value bytes.
 * Keys are encoded as UTF-8; previously the platform default charset was used, which
 * made the on-wire bytes machine-dependent for non-ASCII keys.
 *
 * @return the encoded headers, or null when headers is null
 */
public static byte[] serializeHeadersToByteArray(Map<String, byte[]> headers) throws IOException {
  if (headers == null) {
    return null;
  }
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  DataOutputStream str = new DataOutputStream(out);
  for (Entry<String, byte[]> entry : headers.entrySet()) {
    // fully-qualified to avoid touching this file's import block
    byte[] k = entry.getKey().getBytes(java.nio.charset.StandardCharsets.UTF_8);
    byte[] v = entry.getValue();
    str.writeShort(k.length);
    str.write(k);
    str.writeShort(v.length);
    str.write(v);
  }
  str.close();
  return out.toByteArray();
}
/**
 * Returns the TaskRequest for the given id, creating and dispatching a new one if
 * needed; may block until an in-flight slot frees up.
 */
protected abstract TaskRequest warmAndGetRequestEntry(Long requestId) throws IOException;

/**
 * Seals the current request so its dispatch thread can send it, then advances the
 * request id so the next write starts a fresh buffer.
 *
 * @return the sealed request, or null if no request has been started yet
 */
public synchronized TaskRequest finalizeRequest() throws IOException {
  if (currentRequestTask == null) {
    LOG.warn("Attempt to finalize a request when request is not initialized");
    return null;
  }
  LOG.debug("Buffer filled:" + (double) currentRequestTask.size() / 1024 / 1024);
  currentRequestTask.markReady();
  currentRequestId.incrementAndGet();
  return currentRequestTask;
}
/** @return live view of in-flight requests keyed by client request id */
public Map<Long, TaskRequest> getRequestMap() {
  return requestMap;
}

/** Releases transport resources; implementations must be safe to call once. */
public abstract void close() throws IOException;

/** Transport flavors supported by concrete producer implementations. */
public enum ClientType {
  HTTP,
  TCP
}

public TaskRequest getCurrentRequestTask() {
  return currentRequestTask;
}

public Future<MemqWriteResult> getCurrentRequest() {
  return currentRequest;
}

public AtomicLong getCurrentRequestId() {
  return currentRequestId;
}

public Serializer<K> getKeySerializer() {
  return keySerializer;
}

public void setKeySerializer(Serializer<K> keySerializer) {
  this.keySerializer = keySerializer;
}

public Serializer<V> getValueSerializer() {
  return valueSerializer;
}

public void setValueSerializer(Serializer<V> valueSerializer) {
  this.valueSerializer = valueSerializer;
}

/** Max single-message size: payload cap minus headroom reserved for compression. */
protected int getMaxMessageSize() {
  return maxPayLoadBytes - TaskRequest.COMPRESSION_WINDOW;
}

public Auditor getAuditor() {
  return auditor;
}

public String getCluster() {
  return cluster;
}

// No-op hooks below; subclasses override where the transport supports them.
public void reinitialize() throws IOException {
}

public void cancelAll() {
}

public void awaitConnect(int i, TimeUnit seconds) throws InterruptedException {
}

public TopicMetadata getTopicMetadata(String topic, Duration timeout) throws Exception {
  // NOT implemented in abstract class
  return null;
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/producer/MemqWriteResult.java | memq-client/src/main/java/com/pinterest/memq/client/producer/MemqWriteResult.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer;
/**
 * Result of a single batched write: the client-side request id, the latency (ms) until
 * the payload was written to the network, the latency (ms) until the broker ack arrived,
 * and the number of payload bytes written.
 */
public class MemqWriteResult {

  private long clientRequestId;
  private int writeLatency;
  private int ackLatency;
  private int bytesWritten;

  public MemqWriteResult() {
  }

  public MemqWriteResult(long clientRequestId, int writeLatency, int ackLatency, int bytesWritten) {
    this.clientRequestId = clientRequestId;
    this.writeLatency = writeLatency;
    this.ackLatency = ackLatency;
    this.bytesWritten = bytesWritten;
  }

  public int getWriteLatency() {
    return writeLatency;
  }

  public void setWriteLatency(int writeLatency) {
    this.writeLatency = writeLatency;
  }

  public int getAckLatency() {
    return ackLatency;
  }

  public void setAckLatency(int ackLatency) {
    this.ackLatency = ackLatency;
  }

  public int getBytesWritten() {
    return bytesWritten;
  }

  public void setBytesWritten(int bytesWritten) {
    this.bytesWritten = bytesWritten;
  }

  public long getClientRequestId() {
    return clientRequestId;
  }

  // toString added so write results are readable in logs and test failures
  @Override
  public String toString() {
    return "MemqWriteResult [clientRequestId=" + clientRequestId + ", writeLatency="
        + writeLatency + ", ackLatency=" + ackLatency + ", bytesWritten=" + bytesWritten + "]";
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/producer/TaskRequest.java | memq-client/src/main/java/com/pinterest/memq/client/producer/TaskRequest.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;
import java.util.zip.GZIPOutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.luben.zstd.ZstdOutputStream;
import com.google.common.annotations.VisibleForTesting;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqMessageHeader;
import com.pinterest.memq.commons.MessageId;
import com.pinterest.memq.core.utils.MemqUtils;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufOutputStream;
import io.netty.buffer.PooledByteBufAllocator;
/**
 * One batch request: accumulates serialized messages into a pooled Netty buffer behind
 * an optionally-compressed stream, and, once marked ready, dispatches the batch
 * (subclass-defined) and produces a MemqWriteResult.
 *
 * NOTE(review): writes to the output stream are not internally synchronized; callers
 * appear to serialize access externally (MemqProducer's write methods are
 * synchronized) — confirm before reusing in another context.
 */
public abstract class TaskRequest implements Callable<MemqWriteResult> {

  // Headroom reserved so compressed/framed output still fits the payload cap.
  public static final int COMPRESSION_WINDOW = 512;
  private static final Logger LOG = LoggerFactory.getLogger(TaskRequest.class);
  // Poll interval (ms) used by call() while waiting for markReady().
  private static final int READY_CHECK_FREQUENCY = 100;
  protected Compression compression;
  // Number of log messages appended to this request so far.
  protected int logmessageCount;
  protected MemqMessageHeader header = new MemqMessageHeader(this);
  protected long clientRequestId;
  protected long serverRequestId;
  // Stream messages are written through; may wrap the buffer with compression.
  protected OutputStream payloadOutputStream;
  protected boolean disableAcks;
  private int maxPayLoadBytes;
  private String topicName;
  // Shared semaphore bounding in-flight requests; released when this request ends.
  private Semaphore maxRequestLock;
  // Flipped by markReady(); call() spins on this before dispatching.
  private volatile boolean ready = false;
  protected Map<Long, TaskRequest> requestMap;
  protected int ackCheckPollIntervalMs;
  protected int requestAckTimeout;
  // Rolling hash over all messageIds in this request, used for audit records.
  protected byte[] messageIdHash;
  // Pooled buffer backing the payload; must be released by the dispatch path.
  protected ByteBuf buffer;

  public TaskRequest(String topicName,
                     long currentRequestId,
                     Compression compression,
                     Semaphore maxRequestLock,
                     boolean disableAcks,
                     int maxPayLoadBytes,
                     int ackCheckPollIntervalMs,
                     Map<Long, TaskRequest> requestMap,
                     int requestAckTimeout) throws IOException {
    this.topicName = topicName;
    this.clientRequestId = currentRequestId;
    this.compression = compression;
    this.disableAcks = disableAcks;
    this.maxPayLoadBytes = maxPayLoadBytes;
    this.maxRequestLock = maxRequestLock;
    this.ackCheckPollIntervalMs = ackCheckPollIntervalMs;
    this.requestMap = requestMap;
    this.requestAckTimeout = requestAckTimeout;
    // buffer is sized below the cap to leave room for compression overhead
    this.buffer = PooledByteBufAllocator.DEFAULT.buffer(maxPayLoadBytes - COMPRESSION_WINDOW);
    ByteBufOutputStream buf = new ByteBufOutputStream(buffer);
    this.payloadOutputStream = prepareOutputStream(buf);
  }

  public String getTopicName() {
    return topicName;
  }

  public int getMaxPayLoadBytes() {
    return maxPayLoadBytes;
  }

  public OutputStream getOutputStream() {
    return payloadOutputStream;
  }

  /** Snapshot of the buffered payload bytes (header included once written). */
  @VisibleForTesting
  public byte[] getPayloadAsByteArrays() {
    ByteBuf duplicate = buffer.duplicate();
    duplicate.resetReaderIndex();
    byte[] bytes = new byte[duplicate.readableBytes()];
    duplicate.readBytes(bytes);
    return bytes;
  }

  /**
   * Remaining capacity relative to the configured payload cap.
   * NOTE(review): computed against maxPayLoadBytes even though the buffer was
   * allocated with COMPRESSION_WINDOW less — presumably the headroom accounts for
   * compression gains; confirm.
   */
  public int remaining() {
    return maxPayLoadBytes - buffer.readableBytes();
  }

  /**
   * Seals the request: flushes/closes the (possibly compressing) stream, writes the
   * Memq header into the reserved prefix, and unblocks call(). Order matters — the
   * header fields depend on the final payload size.
   */
  public void markReady() throws IOException {
    payloadOutputStream.close();
    header.writeHeader(buffer);
    this.ready = true;
  }

  /**
   * Waits until the request is sealed, dispatches it, and unwraps nested
   * ExecutionExceptions so callers see the root cause. Always removes this request
   * from the in-flight map and releases the concurrency permit.
   */
  @Override
  public MemqWriteResult call() throws Exception {
    while (!ready) {
      // wait while this request is ready to be processed
      Thread.sleep(READY_CHECK_FREQUENCY);
    }
    try {
      return runRequest();
    } catch (Exception e) {
      LOG.error("Request failed clientRequestId(" + clientRequestId + ") serverRequestId("
          + serverRequestId + ")", e);
      // handle nested ExecutionExceptions
      Exception ee = e;
      if (ee instanceof ExecutionException) {
        while (ee.getCause() instanceof Exception) {
          ee = (Exception) ee.getCause();
        }
      }
      throw ee;
    } finally {
      requestMap.remove(clientRequestId);
      maxRequestLock.release();
    }
  }

  /** Transport-specific dispatch of the sealed payload. */
  protected abstract MemqWriteResult runRequest() throws Exception;

  public int size() {
    return buffer.readableBytes();
  }

  public long getId() {
    return clientRequestId;
  }

  /**
   * Reserves the header prefix in the stream and wraps it with the configured
   * compression codec (GZIP, ZSTD, or none).
   */
  public OutputStream prepareOutputStream(ByteBufOutputStream stream) throws IOException {
    OutputStream byteBuffer;
    int headerLength = MemqMessageHeader.getHeaderLength();
    // placeholder bytes; overwritten by header.writeHeader() in markReady()
    stream.write(new byte[headerLength]);
    if (compression == Compression.GZIP) {
      byteBuffer = new GZIPOutputStream(stream, COMPRESSION_WINDOW, true);
    } else if (compression == Compression.ZSTD) {
      byteBuffer = new ZstdOutputStream(stream);
    } else {
      byteBuffer = stream;
    }
    return byteBuffer;
  }

  public boolean isReady() {
    return ready;
  }

  /** Folds the messageId into the rolling audit hash; null ids are ignored. */
  public void addMessageId(MessageId messageId) {
    if (messageId == null) {
      return;
    }
    byte[] byteArray = messageId.toByteArray();
    messageIdHash = MemqUtils.calculateMessageIdHash(messageIdHash, byteArray);
  }

  protected void incrementLogMessageCount() {
    logmessageCount++;
  }

  public MemqMessageHeader getHeader() {
    return header;
  }

  public Compression getCompression() {
    return compression;
  }

  protected void setCompression(Compression compression) {
    this.compression = compression;
  }

  public byte[] getMessageIdHash() {
    return messageIdHash;
  }

  // Wire-format version stamp (underscored literal, value 100).
  @VisibleForTesting
  public short getVersion() {
    return 1_0_0;
  }

  @VisibleForTesting
  public ByteBuf getBuffer() {
    return buffer;
  }

  public int getLogmessageCount() {
    return logmessageCount;
  }

  /** Producer epoch associated with this request (used in audit records). */
  public abstract long getEpoch();
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/producer/netty/MemqNettyProducer.java | memq-client/src/main/java/com/pinterest/memq/client/producer/netty/MemqNettyProducer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer.netty;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadLocalRandom;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons2.Endpoint;
import com.pinterest.memq.client.commons2.MemqCommonClient;
import com.pinterest.memq.client.producer.MemqProducer;
import com.pinterest.memq.client.producer.TaskRequest;
import com.pinterest.memq.client.producer.http.DaemonThreadFactory;
import com.pinterest.memq.commons.config.SSLConfig;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.TopicMetadata;
/**
 * Netty/TCP producer: bootstraps via a serverset file, a broker set, or a single
 * supplied endpoint, resolves the topic's write brokers from metadata, and dispatches
 * batched TaskRequests on a fixed daemon-thread pool bounded by maxInflightRequests.
 */
public class MemqNettyProducer<H, T> extends MemqProducer<H, T> {

  private static final Logger logger = LoggerFactory.getLogger(MemqNettyProducer.class);
  // Pool running MemqNettyRequest dispatch tasks; sized to the in-flight cap.
  private ExecutorService es;
  private boolean debug;
  private MemqCommonClient memqCommonClient;

  /**
   * Bootstraps from a serverset file: fetches topic metadata through a temporary
   * client, then connects the long-lived client to the topic's write brokers.
   */
  public MemqNettyProducer(String cluster,
                           String serversetFile,
                           String topicName,
                           int maxInflightRequest,
                           int maxPayLoadBytes,
                           Compression compression,
                           boolean disableAcks,
                           int ackCheckPollIntervalMs,
                           int requestTimeout,
                           String locality,
                           int requestAckTimeout,
                           Properties auditorConfig,
                           SSLConfig sslConfig) throws Exception {
    super(cluster, topicName, maxInflightRequest, maxPayLoadBytes, compression, disableAcks,
        ackCheckPollIntervalMs, locality, requestAckTimeout, auditorConfig, sslConfig);
    logger
        .warn("Creating MemqNetty with url:" + serversetFile + " maxPayLoadBytes:" + maxPayLoadBytes
            + " maxInflightRequests:" + maxInflightRequest + " compression:" + compression);
    this.es = Executors.newFixedThreadPool(maxInflightRequests,
        new DaemonThreadFactory("MemqRequestPool-" + topicName));
    // update topic metadata
    try (MemqCommonClient metadataClient = new MemqCommonClient(locality, sslConfig, null)) {
      metadataClient.initialize(MemqCommonClient.parseServersetFile(serversetFile));
      TopicMetadata topicMetadata = metadataClient.getTopicMetadata(topicName, 10000);
      Set<Broker> brokers = topicMetadata.getWriteBrokers();
      logger.info("Fetched topic metadata, now reconnecting to one of serving brokers:" + brokers);
      this.memqCommonClient = new MemqCommonClient(locality, sslConfig, null);
      memqCommonClient.initialize(MemqCommonClient.generateEndpointsFromBrokers(brokers));
    }
  }

  /** Bootstraps from an explicit broker set instead of a serverset file. */
  public MemqNettyProducer(String cluster,
                           Set<Broker> bootstrapServers,
                           String topicName,
                           int maxInflightRequest,
                           int maxPayLoadBytes,
                           Compression compression,
                           boolean disableAcks,
                           int ackCheckPollIntervalMs,
                           int requestTimeout,
                           String locality,
                           int requestAckTimeout,
                           Properties auditorConfig,
                           SSLConfig sslConfig) throws Exception {
    super(cluster, topicName, maxInflightRequest, maxPayLoadBytes, compression, disableAcks,
        ackCheckPollIntervalMs, locality, requestAckTimeout, auditorConfig, sslConfig);
    logger
        .warn("Creating MemqNetty with servers:" + bootstrapServers + " maxPayLoadBytes:" + maxPayLoadBytes
            + " maxInflightRequests:" + maxInflightRequest + " compression:" + compression);
    this.es = Executors.newFixedThreadPool(maxInflightRequests,
        new DaemonThreadFactory("MemqRequestPool-" + topicName));
    // update topic metadata
    try (MemqCommonClient metadataClient = new MemqCommonClient(locality, sslConfig, null)) {
      metadataClient.initialize(MemqCommonClient.generateEndpointsFromBrokers(bootstrapServers));
      TopicMetadata topicMetadata = metadataClient.getTopicMetadata(topicName, 10000);
      Set<Broker> brokers = topicMetadata.getWriteBrokers();
      logger.info("Fetched topic metadata, now reconnecting to one of serving brokers:" + brokers);
      this.memqCommonClient = new MemqCommonClient(locality, sslConfig, null);
      memqCommonClient.initialize(MemqCommonClient.generateEndpointsFromBrokers(brokers));
    }
  }

  /** Picks one AZ-local server at random from the serverset. */
  public static InetSocketAddress tryAndGetAZLocalServer(String serversetFile,
                                                         String locality) throws IOException {
    List<InetSocketAddress> azLocalEndPoints = getLocalServers(serversetFile, locality);
    int randomServer = ThreadLocalRandom.current().nextInt(azLocalEndPoints.size());
    return azLocalEndPoints.get(randomServer);
  }

  /**
   * Filters the serverset to AZ-local entries, falling back to all servers when the
   * locality has no members.
   *
   * @throws IOException when the serverset file yields no servers at all
   */
  public static List<InetSocketAddress> getLocalServers(String serversetFile,
                                                        String locality) throws IOException {
    List<JsonObject> servers = parseServerSetFile(serversetFile);
    if (servers.isEmpty()) {
      throw new IOException("No servers available from serverset:" + serversetFile);
    }
    List<JsonObject> azLocalEndPoints = servers.stream()
        .filter(v -> v.get("az").getAsString().equalsIgnoreCase(locality))
        .collect(Collectors.toList());
    if (azLocalEndPoints.isEmpty()) {
      logger.warn("Not using AZ awareness due to missing local memq servers for:" + serversetFile
          + " local az:" + locality);
      azLocalEndPoints = servers;
    }
    // NOTE(review): broker port is hard-coded to 9092 here; serverset entries only
    // contribute the ip — confirm this matches all deployments
    return azLocalEndPoints.stream()
        .map(ep -> InetSocketAddress.createUnresolved(ep.get("ip").getAsString(), 9092))
        .collect(Collectors.toList());
  }

  /** Parses one JSON object per line, skipping empty objects. */
  public static List<JsonObject> parseServerSetFile(String serversetFile) throws IOException {
    Gson gson = new Gson();
    List<String> lines = Files.readAllLines(new File(serversetFile).toPath());
    return lines.stream().map(line -> gson.fromJson(line, JsonObject.class))
        .filter(g -> g.entrySet().size() > 0).collect(Collectors.toList());
  }

  /** Debug constructor: connects directly to a single supplied server. */
  public MemqNettyProducer(String cluster,
                           InetSocketAddress suppliedServer,
                           String topicName,
                           int maxInflightRequest,
                           int maxPayLoadBytes,
                           Compression compression,
                           boolean disableAcks,
                           int ackCheckPollIntervalMs,
                           int requestTimeout,
                           String locality,
                           int requestAckTimeout,
                           Properties auditorConfig,
                           SSLConfig sslConfig) throws Exception {
    super(cluster, topicName, maxInflightRequest, maxPayLoadBytes, compression, disableAcks,
        ackCheckPollIntervalMs, locality, requestAckTimeout, auditorConfig, sslConfig);
    logger.warn("DEBUG Creating MemqNettyProducer with server" + suppliedServer
        + " maxPayLoadBytes:" + maxPayLoadBytes + " maxInflightRequests:" + maxInflightRequest
        + " compression:" + compression);
    this.es = Executors.newFixedThreadPool(maxInflightRequests,
        new DaemonThreadFactory("MemqRequestPool-" + topicName));
    this.memqCommonClient = new MemqCommonClient(locality, sslConfig, null);
    memqCommonClient.initialize(Collections.singletonList(new Endpoint(suppliedServer)));
  }

  /**
   * Returns the in-flight request for requestId, creating, registering and submitting
   * a new one when absent; blocks on the in-flight semaphore when at capacity.
   */
  @Override
  public synchronized TaskRequest warmAndGetRequestEntry(Long requestId) throws IOException {
    TaskRequest request = requestMap.get(requestId);
    if (request == null) {
      logger.debug("Making request waiting for semaphore:" + requestId);
      try {
        maxRequestLock.acquire();
      } catch (InterruptedException e) {
        // restore the interrupt flag so callers up the stack still observe the
        // interruption (previously it was silently swallowed by the wrap)
        Thread.currentThread().interrupt();
        throw new IOException("Failed to acquire request lock", e);
      }
      currentRequestTask = new MemqNettyRequest(topicName, requestId, compression, maxRequestLock,
          disableAcks, maxPayLoadBytes, ackCheckPollIntervalMs, requestMap, this, requestAckTimeout,
          debug);
      request = currentRequestTask;
      requestMap.put(requestId, currentRequestTask);
      currentRequest = es.submit(currentRequestTask);
    }
    return request;
  }

  public MemqCommonClient getMemqCommonClient() {
    return memqCommonClient;
  }

  public void setDebug() {
    this.debug = true;
  }

  @Override
  public boolean isClosed() {
    return memqCommonClient.isClosed();
  }

  @VisibleForTesting
  protected ExecutorService getEs() {
    return es;
  }

  /** Closes the network client, the dispatch pool, and the auditor (if any). */
  @Override
  public void close() throws IOException {
    try {
      memqCommonClient.close();
      if (es != null && !es.isShutdown()) {
        es.shutdownNow();
      }
      if (auditor != null) {
        auditor.close();
      }
    } catch (Exception e) {
      throw new IOException("Interrupted closing request", e);
    }
  }

  public long getEpoch() {
    return epoch;
  }

  @Override
  public TopicMetadata getTopicMetadata(String topic, Duration timeout) throws Exception {
    return memqCommonClient.getTopicMetadata(topic, timeout.toMillis());
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/producer/netty/MemqNettyRequest.java | memq-client/src/main/java/com/pinterest/memq/client/producer/netty/MemqNettyRequest.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer.netty;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.zip.CRC32;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons2.MemqCommonClient;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.client.producer.TaskRequest;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.WriteRequestPacket;
import com.pinterest.memq.core.utils.MemqUtils;
import io.netty.buffer.ByteBuf;
@SuppressWarnings("rawtypes")
/**
 * TCP dispatch of one sealed batch: wraps the payload buffer in a WriteRequestPacket
 * (with CRC32 checksum), sends it via MemqCommonClient, awaits the broker ack, and
 * retries once on REDIRECT after reconnecting to the topic's current brokers.
 */
public class MemqNettyRequest extends TaskRequest {

  // Test hook: when non-zero (and debug), substitutes the payload checksum.
  private static int overrideDebugChecksum = 0;
  // Process-wide counters of payload bytes dispatched / acked (test observability).
  private static AtomicLong byteCounter = new AtomicLong();
  private static AtomicLong ackedByteCounter = new AtomicLong();
  private int messagePayloadSize;
  private Logger logger;
  private MemqNettyProducer producer;
  private AtomicInteger requestStatus = new AtomicInteger(-1);
  private int requestAckTimeout;
  private boolean debug;

  public MemqNettyRequest(String topicName,
                          long currentRequestId,
                          Compression compression,
                          Semaphore maxRequestLock,
                          boolean disableAcks,
                          int maxPayLoadBytes,
                          int ackCheckPollIntervalMs,
                          Map<Long, TaskRequest> requestMap,
                          MemqNettyProducer producer,
                          int requestAckTimeout,
                          boolean debug) throws IOException {
    super(topicName, currentRequestId, compression, maxRequestLock, disableAcks, maxPayLoadBytes,
        ackCheckPollIntervalMs, requestMap, requestAckTimeout);
    this.producer = producer;
    this.requestAckTimeout = requestAckTimeout;
    this.debug = debug;
    this.logger = LoggerFactory.getLogger(MemqNettyRequest.class);
  }

  /**
   * Builds the WRITE packet: CRC32 over the payload, a retained duplicate of the
   * buffer (the packet owns its own reference), and drops the now-unused output
   * stream reference.
   */
  public RequestPacket getWriteRequestPacket(ByteBuf payload) throws IOException {
    CRC32 crc32 = new CRC32();
    crc32.update(payload.duplicate().nioBuffer());
    int checksum = (int) crc32.getValue();
    if (debug && overrideDebugChecksum != 0) {
      checksum = overrideDebugChecksum;
    }
    messagePayloadSize = payload.readableBytes();
    byteCounter.accumulateAndGet(messagePayloadSize, (v1, v2) -> v1 + v2);
    WriteRequestPacket writeRequestPacket = new WriteRequestPacket(disableAcks,
        getTopicName().getBytes(), true, checksum, payload.retainedDuplicate());
    this.payloadOutputStream = null;
    logger.debug("Prepared request:" + clientRequestId);
    return new RequestPacket(RequestType.PROTOCOL_VERSION, getId(), RequestType.WRITE,
        writeRequestPacket);
  }

  /**
   * Packages the buffer into a packet, releases this task's buffer reference (the
   * packet holds a retained duplicate), and dispatches.
   */
  @Override
  protected MemqWriteResult runRequest() throws Exception {
    MemqCommonClient commonClient = producer.getMemqCommonClient();
    long writeTs = System.currentTimeMillis();
    RequestPacket requestPacket = getWriteRequestPacket(buffer);
    buffer.release();
    return dispatchRequestAndReturnResponse(commonClient, writeTs, requestPacket, 0);
  }

  /**
   * Sends the packet and maps response codes to results/exceptions. On REDIRECT,
   * reconnects and retries recursively (at most 2 attempts); the packet is NOT
   * released in that case because the recursive call reuses and later releases it.
   */
  private MemqWriteResult dispatchRequestAndReturnResponse(MemqCommonClient networkClient,
                                                           long writeTs,
                                                           RequestPacket requestPacket,
                                                           int attempt) throws InterruptedException,
                                                                        ExecutionException,
                                                                        TimeoutException,
                                                                        Exception {
    Future<ResponsePacket> responseFuture = networkClient
        .sendRequestPacketAndReturnResponseFuture(requestPacket, getTopicName(), requestAckTimeout);
    // not sure if the dispatch latencies matter
    // TODO add check for how long it takes between dispatch trigger and sync??
    logger.info("Dispatched:" + clientRequestId);
    int writeLatency = (int) (System.currentTimeMillis() - writeTs);
    boolean shouldRelease = true;
    try {
      // if (!disableAcks) {
      if (debug) {
        logger.warn("Waiting for ack:" + clientRequestId);
      }
      ResponsePacket responsePacket = responseFuture.get(requestAckTimeout, TimeUnit.MILLISECONDS);
      short responseCode = responsePacket.getResponseCode();
      switch (responseCode) {
      case ResponseCodes.OK:
        sendAuditMessageIfAuditEnabled();
        ackedByteCounter.accumulateAndGet(messagePayloadSize, (v1, v2) -> v1 + v2);
        int ackLatency = (int) (System.currentTimeMillis() - writeTs);
        logger.debug("Request acked in:" + ackLatency + " " + clientRequestId);
        return new MemqWriteResult(clientRequestId, writeLatency, ackLatency, messagePayloadSize);
      case ResponseCodes.REDIRECT:
        if (attempt > 1) {
          throw new Exception("Write request failed after multiple attempts");
        }
        networkClient.reconnect(getTopicName(), false);
        // defer release: the retry below still needs this packet's payload
        shouldRelease = false;
        return dispatchRequestAndReturnResponse(networkClient, writeTs, requestPacket, ++attempt);
      case ResponseCodes.BAD_REQUEST:
        throw new Exception("Bad request, id:" + clientRequestId);
      case ResponseCodes.NOT_FOUND:
        throw new Exception("Topic not found:" + getTopicName());
      case ResponseCodes.INTERNAL_SERVER_ERROR:
        throw new Exception("Unknown server error:" + clientRequestId);
      case ResponseCodes.REQUEST_FAILED:
        throw new Exception("Request failed:" + clientRequestId);
      case ResponseCodes.SERVICE_UNAVAILABLE:
        throw new Exception("Server out of capacity:" + getTopicName());
      default:
        throw new Exception("Unknown response code:" + responseCode);
      }
      // } else {
      // sendAuditMessageIfAuditEnabled();
      // return new MemqWriteResult(clientRequestId, writeLatency, -1, byteArrays.length);
      // }
    } finally {
      requestMap.remove(clientRequestId);
      if (shouldRelease) {
        requestPacket.release();
      }
    }
  }

  /** Emits a producer-side audit record for this batch when auditing is configured. */
  private void sendAuditMessageIfAuditEnabled() {
    if (producer.getAuditor() != null) {
      try {
        producer.getAuditor().auditMessage(producer.getCluster().getBytes(MemqUtils.CHARSET),
            getTopicName().getBytes(MemqUtils.CHARSET), MemqUtils.HOST_IPV4_ADDRESS,
            producer.getEpoch(), clientRequestId, messageIdHash, logmessageCount, true, "producer");
      } catch (IOException e) {
        logger.error("Failed to log audit record for topic:" + getTopicName(), e);
      }
    }
  }

  @VisibleForTesting
  public void setDebugEnabled() {
    debug = true;
  }

  public void setRequestStatus(int status) {
    requestStatus.set(status);
  }

  public int getRequestStatus() {
    return requestStatus.get();
  }

  @Override
  public long getEpoch() {
    return producer != null ? producer.getEpoch() : System.currentTimeMillis();
  }

  @VisibleForTesting
  public static long getByteCounter() {
    return byteCounter.get();
  }

  @VisibleForTesting
  public static long getAckedByteCounter() {
    return ackedByteCounter.get();
  }

  @VisibleForTesting
  public static void reset() {
    byteCounter.set(0);
    ackedByteCounter.set(0);
  }

  @VisibleForTesting
  public static void setOverrideDebugChecksum(int overrideDebugChecksum) {
    MemqNettyRequest.overrideDebugChecksum = overrideDebugChecksum;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/producer/http/DaemonThreadFactory.java | memq-client/src/main/java/com/pinterest/memq/client/producer/http/DaemonThreadFactory.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer.http;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * ThreadFactory producing daemon threads named "{basename}-{n}" with a
 * monotonically increasing suffix, so pool threads never block JVM shutdown.
 */
public final class DaemonThreadFactory implements ThreadFactory {

  // Prefix applied to every thread name produced by this factory.
  private final String basename;
  // Unique, increasing suffix per created thread.
  private final AtomicInteger counter = new AtomicInteger();

  public DaemonThreadFactory(String basename) {
    this.basename = basename;
  }

  @Override
  public Thread newThread(Runnable r) {
    Thread thread = new Thread(r);
    thread.setName(basename + "-" + counter.getAndIncrement());
    thread.setDaemon(true);
    return thread;
  }
}
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/consumer/NoTopicsSubscribedException.java | memq-client/src/main/java/com/pinterest/memq/client/consumer/NoTopicsSubscribedException.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer;
/**
 * Thrown when a consumer operation requires a subscribed topic but no topic has
 * been subscribed yet.
 */
public class NoTopicsSubscribedException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param message description of the operation that required a subscription
   */
  public NoTopicsSubscribedException(String message) {
    super(message);
  }

  /**
   * Backward-compatible addition: allows chaining an underlying cause.
   *
   * @param message description of the operation that required a subscription
   * @param cause   the underlying failure, retrievable via {@link #getCause()}
   */
  public NoTopicsSubscribedException(String message, Throwable cause) {
    super(message, cause);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/consumer/Metrics.java | memq-client/src/main/java/com/pinterest/memq/client/consumer/Metrics.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.pinterest.memq.commons.mon.OpenTSDBClient;
import com.pinterest.memq.commons.mon.OpenTSDBReporter;
/**
 * Process-wide metrics hub for MemQ consumers. Lazily creates one
 * {@link MetricRegistry} per logical name and, on a one-minute schedule,
 * attaches an OpenTSDB reporter (publishing to the local agent at
 * localhost:18126) to any registry that does not yet have one.
 *
 * Thread-safe: backed by {@link ConcurrentHashMap}s; singleton creation is
 * synchronized.
 */
public class Metrics implements Runnable {

  // Fully qualified to avoid adding imports to this file's import block.
  private static final java.util.logging.Logger LOGGER = java.util.logging.Logger
      .getLogger(Metrics.class.getCanonicalName());

  private static Metrics INSTANCE;
  private final Map<String, MetricRegistry> registryMap;
  private final Map<String, ScheduledReporter> reporterMap;
  private final ScheduledExecutorService es;

  private Metrics() {
    registryMap = new ConcurrentHashMap<>();
    reporterMap = new ConcurrentHashMap<>();
    es = Executors.newScheduledThreadPool(1);
    // Run immediately, then re-check every minute so registries created later
    // also get a reporter attached.
    es.scheduleAtFixedRate(this, 0, 60, TimeUnit.SECONDS);
  }

  /**
   * Best-effort short hostname: the local hostname truncated at the first '.',
   * falling back to the HOSTNAME environment variable when resolution fails.
   */
  public static String getHostname() {
    String hostName;
    try {
      hostName = InetAddress.getLocalHost().getHostName();
      int firstDotPos = hostName.indexOf('.');
      if (firstDotPos > 0) {
        hostName = hostName.substring(0, firstDotPos);
      }
    } catch (Exception e) {
      // fall back to env var.
      hostName = System.getenv("HOSTNAME");
    }
    return hostName;
  }

  /**
   * @return the lazily created process-wide singleton
   */
  public synchronized static Metrics getInstance() throws Exception {
    if (INSTANCE == null) {
      INSTANCE = new Metrics();
    }
    return INSTANCE;
  }

  /**
   * Scheduled pass: start a reporter for every registry that lacks one.
   * Failures are logged and retried on the next pass (previously they were
   * printed to stderr and silently dropped).
   */
  @Override
  public void run() {
    for (Entry<String, MetricRegistry> entry : registryMap.entrySet()) {
      if (reporterMap.containsKey(entry.getKey())) {
        continue;
      }
      try {
        ScheduledReporter reporter = OpenTSDBReporter.createReporter("consumer", entry.getValue(),
            entry.getKey(), (String name, Metric metric) -> true, TimeUnit.SECONDS,
            TimeUnit.SECONDS, new OpenTSDBClient("localhost", 18126), getHostname());
        reporter.start(60, TimeUnit.SECONDS);
        reporterMap.put(entry.getKey(), reporter);
      } catch (UnknownHostException e) {
        LOGGER.log(java.util.logging.Level.SEVERE,
            "Failed to create OpenTSDB reporter for registry " + entry.getKey(), e);
      }
    }
  }

  /**
   * Returns the registry for the given name, atomically creating it on first
   * use (previously a non-atomic get/put).
   */
  public synchronized MetricRegistry getOrCreate(String name) {
    return registryMap.computeIfAbsent(name, k -> new MetricRegistry());
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/consumer/OffsetCommitCallback.java | memq-client/src/main/java/com/pinterest/memq/client/consumer/OffsetCommitCallback.java | package com.pinterest.memq.client.consumer;
import java.util.Map;
/**
 * Callback invoked when an asynchronous offset commit completes. Single
 * abstract method, so instances may be supplied as lambdas.
 */
@FunctionalInterface
public interface OffsetCommitCallback {

  /**
   * @param offsets   map of partition id to committed offset (callers in this
   *                  package pass a non-null, possibly empty map)
   * @param exception {@code null} on success, otherwise the commit failure
   */
  void onCompletion(Map<Integer, Long> offsets, Exception exception);
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/consumer/NotificationSource.java | memq-client/src/main/java/com/pinterest/memq/client/consumer/NotificationSource.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer;
import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.TimeoutException;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import com.google.gson.JsonObject;
/**
 * Abstraction over the stream of "new object" notifications consumed by
 * {@link MemqConsumer}. Implementations (e.g. the Kafka-backed source) expose
 * poll/seek/commit semantics over partitioned notification data, keyed by
 * integer partition ids rather than implementation-specific partition types.
 */
interface NotificationSource {
/**
 * Polls the source for new object notifications, appending parsed notifications
 * to the supplied queue.
 *
 * @return the number of records processed in this poll
 */
int lookForNewObjects(Duration timeout, Queue<JsonObject> notificationQueue);
/** Manually assigns the given notification partitions to this source. */
void assign(Collection<Integer> asList);
/** Seeks each partition (key) to the given offset (value). */
void seek(Map<Integer, Long> notificationOffset);
/** @return the current read position of the given partition */
long position(int partition);
/** @return the committed offset of the given partition, or -1 if none */
long committed(int partition);
/** Synchronously commits the given partition-to-offset map. */
public void commit(Map<Integer, Long> offsetMap);
/** Synchronously commits the current positions. */
void commit();
/** Asynchronously commits the current positions. */
void commitAsync();
/** Asynchronously commits the current positions, invoking the callback on completion. */
void commitAsync(OffsetCommitCallback callback);
/** Asynchronously commits the given offsets, invoking the callback on completion. */
void commitAsync(Map<Integer, Long> offsets, OffsetCommitCallback callback);
/** @return the underlying implementation-specific client object */
Object getRawObject();
/** Removes all subscriptions/assignments. */
void unsubscribe();
/** Releases all resources held by this source. */
void close();
/** @return the name of the underlying notification topic */
String getNotificationTopicName();
/** @return partition metadata for the notification topic */
List<PartitionInfo> getPartitions();
/** Associates this source with its owning consumer. */
void setParentConsumer(MemqConsumer<?, ?> memqConsumer);
/** Maps partition-to-timestamp lookups to partition-to-offset results. */
Map<Integer, Long> offsetsForTimestamps(Map<Integer, Long> partitionTimestamps);
/** @return earliest available offset per requested partition */
Map<Integer, Long> getEarliestOffsets(Collection<Integer> partitions);
/** @return latest (end) offset per requested partition */
Map<Integer, Long> getLatestOffsets(Collection<Integer> partitions);
/** Blocks until a partition assignment is available and returns it. */
Set<TopicPartition> waitForAssignment();
/** Wakes up a blocked poll on this source. */
void wakeup();
/**
 * Fetches the notification record at each requested (partition, offset) pair.
 *
 * @throws TimeoutException if not all notifications are found within the timeout
 */
Map<Integer, JsonObject> getNotificationsAtOffsets(Duration timeout,
Map<Integer, Long> partitionOffsets) throws TimeoutException;
/** @return the current partition assignment */
Set<TopicPartition> getAssignments();
/** Seeks the given partitions to their end offsets. */
void seekToEnd(Collection<Integer> partitions);
/** Seeks the given partitions to their beginning offsets. */
void seekToBeginning(Collection<Integer> partitions);
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/consumer/KafkaNotificationSource.java | memq-client/src/main/java/com/pinterest/memq/client/consumer/KafkaNotificationSource.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer;
import java.io.File;
import java.nio.file.Files;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import com.google.common.collect.ImmutableList;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.producer.http.DaemonThreadFactory;
import com.pinterest.memq.commons.MemqLogMessage;
public class KafkaNotificationSource implements NotificationSource {
private static final Logger logger = Logger
.getLogger(KafkaNotificationSource.class.getCanonicalName());
public static final String NOTIFICATION_SERVERSET = "notificationServerset";
public static final String NOTIFICATION_TOPIC_NAME_KEY = "notificationTopic";
private static final Gson gson = new Gson();
private final Consumer<String, String> kc;
private String notificationTopicName;
private Properties props;
private MemqConsumer<?, ?> parentConsumer;
public KafkaNotificationSource(Properties props) throws Exception {
this.notificationTopicName = props.getProperty(NOTIFICATION_TOPIC_NAME_KEY);
if (props.containsKey(NOTIFICATION_SERVERSET)) {
String serverset = props.getProperty(NOTIFICATION_SERVERSET);
List<String> servers = Files.readAllLines(new File(serverset).toPath());
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers.get(0));
}
validateConsumerProps(props);
props.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
org.apache.kafka.common.serialization.StringDeserializer.class.getName());
props.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
org.apache.kafka.common.serialization.StringDeserializer.class.getName());
props.putIfAbsent(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
props.putIfAbsent(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "100");
props.putIfAbsent(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
props.remove(NOTIFICATION_TOPIC_NAME_KEY);
props.remove(NOTIFICATION_SERVERSET);
this.props = props;
kc = new KafkaConsumer<>(props);
kc.subscribe(Collections.singleton(notificationTopicName));
logger.info("Initialized notification source with:" + props + " and subscribed to:"
+ notificationTopicName);
}
public String getNotificationTopicName() {
return notificationTopicName;
}
protected KafkaNotificationSource(Consumer<String, String> kc) {
this.kc = kc;
}
private static void validateConsumerProps(Properties props) throws Exception {
if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
throw new Exception("bootstrapServers config missing:" + props);
}
if (!props.containsKey((NOTIFICATION_TOPIC_NAME_KEY))) {
throw new Exception("notification topic name missing:" + props);
}
if (!props.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
throw new Exception("consumer groupId config missing:" + props);
}
}
public List<PartitionInfo> getPartitions() {
return kc.partitionsFor(notificationTopicName);
}
public long[] getOffsetsForAllPartitions(boolean isBeginning) {
List<PartitionInfo> partitionInfo = kc.partitionsFor(notificationTopicName);
List<TopicPartition> partitions = partitionInfo.stream()
.map(p -> new TopicPartition(notificationTopicName, p.partition()))
.sorted(Comparator.comparingInt(TopicPartition::partition)).collect(Collectors.toList());
long[] offsets = new long[partitions.size()];
Arrays.fill(offsets, -1);
// scan partition until we find the first offset for the given topic;
kc.unsubscribe();
Map<TopicPartition, Long> offsetMap;
if (isBeginning) {
offsetMap = kc.beginningOffsets(partitions);
} else {
offsetMap = kc.endOffsets(partitions);
}
for (Entry<TopicPartition, Long> entry : offsetMap.entrySet()) {
offsets[entry.getKey().partition()] = entry.getValue();
}
return offsets;
}
// @Override
public int lookForNewObjects(Duration timeout, Queue<JsonObject> notificationQueue) {
if (parentConsumer.getTopicName() == null || parentConsumer.getTopicName().isEmpty()) {
throw new RuntimeException("No topics subscribed");
}
logger.fine("Looking for new objects");
int c = 0;
ConsumerRecords<String, String> records = kc.poll(timeout);
for (ConsumerRecord<String, String> record : records) {
try {
JsonObject notificationObject = parseAndGetNotificationObject(record);
if (notificationObject != null) {
notificationQueue.add(notificationObject);
}
c++;
} catch (Exception e) {
e.printStackTrace();
logger.log(Level.SEVERE, "Unable to process notification topic record", e);
}
}
if (!notificationQueue.isEmpty()) {
logger.fine(() -> "Size of queue: " + notificationQueue.size());
}
return c;
}
public List<JsonObject> getNotificationObjectsForAllPartitionsTillCurrent() throws InterruptedException {
List<JsonObject> list = Collections.synchronizedList(new ArrayList<>());
ExecutorService es = Executors.newFixedThreadPool(4, new DaemonThreadFactory("objectfetcher"));
for (PartitionInfo partitionInfo : getPartitions()) {
final int partitionId = partitionInfo.partition();
es.submit(() -> {
try {
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
TopicPartition tp = new TopicPartition(notificationTopicName, partitionId);
ImmutableList<TopicPartition> set = ImmutableList.of(tp);
consumer.assign(set);
Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(set);
Long beginningOffset = beginningOffsets.get(tp);
consumer.seek(tp, beginningOffset);
Map<TopicPartition, Long> endOffsets = consumer.endOffsets(set);
Long endOffset = endOffsets.get(tp);
long lastOffset = 0L;
while (lastOffset < endOffset) {
for (ConsumerRecord<String, String> consumerRecord : consumer
.poll(Duration.ofSeconds(10))) {
lastOffset = consumerRecord.offset();
JsonObject obj = parseAndGetNotificationObject(consumerRecord);
if (obj != null) {
list.add(obj);
}
}
}
consumer.close();
} catch (Exception e) {
e.printStackTrace();
}
});
}
es.shutdown();
es.awaitTermination(100, TimeUnit.SECONDS);
return list;
}
protected JsonObject parseAndGetNotificationObject(ConsumerRecord<String, String> record) {
JsonObject json = gson.fromJson(record.value(), JsonObject.class);
json.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_ID, record.partition());
json.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET,
record.offset());
json.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_READ_TIMESTAMP,
System.currentTimeMillis());
return json;
}
public void commit(Map<Integer, Long> offsetMap) {
Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>();
for (Entry<Integer, Long> entry : offsetMap.entrySet()) {
TopicPartition tp = new TopicPartition(notificationTopicName, entry.getKey());
OffsetAndMetadata om = new OffsetAndMetadata(entry.getValue());
toCommit.put(tp, om);
}
kc.commitSync(toCommit);
}
// @Override
public void commit() {
kc.commitSync();
}
public void commitAsync() {
kc.commitAsync();
}
public void commitAsync(OffsetCommitCallback callback) {
kc.commitAsync((offsets, exception) -> {
Map<Integer, Long> transformedOffsetMap = new HashMap<>();
if (offsets != null) {
for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
transformedOffsetMap.put(entry.getKey().partition(), entry.getValue().offset());
}
}
callback.onCompletion(transformedOffsetMap, exception);
});
}
public void commitAsync(Map<Integer, Long> offsetMap, OffsetCommitCallback callback) {
Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>();
for (Entry<Integer, Long> entry : offsetMap.entrySet()) {
TopicPartition tp = new TopicPartition(notificationTopicName, entry.getKey());
OffsetAndMetadata om = new OffsetAndMetadata(entry.getValue());
toCommit.put(tp, om);
}
kc.commitAsync(toCommit, (offsets, exception) -> {
Map<Integer, Long> transformedOffsetMap = new HashMap<>();
if (offsets != null) {
for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
transformedOffsetMap.put(entry.getKey().partition(), entry.getValue().offset());
}
}
callback.onCompletion(transformedOffsetMap, exception);
});
}
/**
* Retrieve the committed offsets of a partition
* @param partition the partition to retrieve the offsets
* @return the committed offset, or -1 if there is no offset committed.
*/
public long committed(int partition) {
OffsetAndMetadata offsetAndMetadata = kc.committed(new TopicPartition(notificationTopicName, partition));
return offsetAndMetadata == null ? -1L : offsetAndMetadata.offset();
}
public long position(int partition) {
return kc.position(new TopicPartition(notificationTopicName, partition));
}
// @Override
public void seek(Map<Integer, Long> notificationOffset) {
for (Entry<Integer, Long> entry : notificationOffset.entrySet()) {
long offset = entry.getValue();
kc.seek(new TopicPartition(notificationTopicName, entry.getKey()), offset);
}
}
public void seekToBeginning(Collection<Integer> partitions) {
kc.seekToBeginning(partitions.stream().map(p -> new TopicPartition(notificationTopicName, p)).collect(Collectors.toSet()));
}
public void seekToEnd(Collection<Integer> partitions) {
kc.seekToEnd(partitions.stream().map(p -> new TopicPartition(notificationTopicName, p)).collect(Collectors.toSet()));
}
public Map<Integer, Long> offsetsForTimestamps(Map<Integer, Long> partitionTimestamps) {
Map<TopicPartition, Long> topicPartitionTimestamps = partitionTimestamps
.entrySet()
.stream()
.collect(
Collectors.toMap(
e -> new TopicPartition(notificationTopicName, e.getKey()),
Entry::getValue
)
);
return kc.offsetsForTimes(topicPartitionTimestamps).entrySet().stream().collect(Collectors.toMap(e -> e.getKey().partition(), e -> e.getValue().offset()));
}
// @Override
public void assign(Collection<Integer> partitions) {
kc.unsubscribe();
List<TopicPartition> collect = partitions.stream()
.map(partition -> new TopicPartition(notificationTopicName, partition))
.collect(Collectors.toList());
kc.assign(collect);
}
public Map<Integer, Long> getEarliestOffsets(Collection<Integer> partitions) {
List<TopicPartition> collect = partitions.stream()
.map(partition -> new TopicPartition(notificationTopicName, partition))
.collect(Collectors.toList());
Map<TopicPartition, Long> beginningOffsets = kc.beginningOffsets(collect);
Map<Integer, Long> result = new HashMap<>();
for (Entry<TopicPartition, Long> entry : beginningOffsets.entrySet()) {
result.put(entry.getKey().partition(), entry.getValue());
}
logger.info("Earliest offsets:" + result);
return result;
}
public Map<Integer, Long> getLatestOffsets(Collection<Integer> partitions) {
List<TopicPartition> collect = partitions.stream()
.map(partition -> new TopicPartition(notificationTopicName, partition))
.collect(Collectors.toList());
Map<TopicPartition, Long> beginningOffsets = kc.endOffsets(collect);
Map<Integer, Long> result = new HashMap<>();
for (Entry<TopicPartition, Long> entry : beginningOffsets.entrySet()) {
result.put(entry.getKey().partition(), entry.getValue());
}
logger.info("Latest offsets:" + result);
return result;
}
public Map<Integer, JsonObject> getNotificationsAtOffsets(Duration timeout,
Map<Integer, Long> partitionOffsets) throws TimeoutException {
Set<TopicPartition> tpSet = new HashSet<>();
Map<Integer, JsonObject> notificationMap = new HashMap<>();
for (Map.Entry<Integer, Long> entry : partitionOffsets.entrySet()) {
TopicPartition tp = new TopicPartition(notificationTopicName, entry.getKey());
tpSet.add(tp);
}
long deadline = timeout.toMillis();
kc.unsubscribe();
while (!tpSet.isEmpty() && deadline >= 0) {
long start = System.currentTimeMillis();
kc.assign(tpSet);
for (TopicPartition tp : tpSet) {
kc.seek(tp, partitionOffsets.get(tp.partition()));
}
ConsumerRecords<String, String> records = kc.poll(timeout);
for (Iterator<TopicPartition> itr = tpSet.iterator(); itr.hasNext();) {
TopicPartition tp = itr.next();
for (ConsumerRecord<String, String> record : records.records(tp)) {
if (record.offset() == partitionOffsets.get(tp.partition())) {
itr.remove();
notificationMap.put(tp.partition(), parseAndGetNotificationObject(record));
}
}
}
deadline -= (System.currentTimeMillis() - start);
}
if (deadline < 0 && !tpSet.isEmpty()) {
throw new TimeoutException(
"Failed to retrieve all notifications within " + timeout.toMillis() + " seconds");
}
return notificationMap;
}
// @Override
public Object getRawObject() {
return kc;
}
// @Override
public void unsubscribe() {
kc.unsubscribe();
}
// @Override
public void close() {
kc.close();
}
public void setParentConsumer(MemqConsumer<?, ?> parentConsumer) {
this.parentConsumer = parentConsumer;
}
public Set<TopicPartition> getAssignments() {
return kc.assignment();
}
public void wakeup() {
kc.wakeup();
}
public Set<TopicPartition> waitForAssignment() {
Set<TopicPartition> assignment = kc.assignment();
while (assignment.isEmpty()) {
kc.poll(Duration.ofMillis(500));
try {
Thread.sleep(100);
} catch (InterruptedException e) {
logger.warning("Thread interrupted");
break;
}
assignment = kc.assignment();
}
return assignment;
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/consumer/MemqConsumer.java | memq-client/src/main/java/com/pinterest/memq/client/consumer/MemqConsumer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer;
import static com.pinterest.memq.client.commons.CommonConfigs.CLUSTER;
import static com.pinterest.memq.client.commons.ConsumerConfigs.BUFFER_FILES_DIRECTORY_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.BUFFER_TO_FILE_CONFIG_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.CLIENT_ID;
import static com.pinterest.memq.client.commons.ConsumerConfigs.DIRECT_CONSUMER;
import static com.pinterest.memq.client.commons.ConsumerConfigs.DRY_RUN_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.KEY_DESERIALIZER_CLASS_CONFIGS_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.KEY_DESERIALIZER_CLASS_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.NOTIFICATION_SOURCE_PROPS_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.NOTIFICATION_SOURCE_PROPS_PREFIX_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.STORAGE_PROPS_PREFIX_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.USE_DIRECT_BUFFER_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.VALUE_DESERIALIZER_CLASS_CONFIGS_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.VALUE_DESERIALIZER_CLASS_KEY;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.NoSuchElementException;
import java.util.Properties;
import java.util.Queue;
import java.util.Set;
import java.util.SortedMap;
import java.util.UUID;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.mutable.MutableInt;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.google.common.collect.ImmutableMap;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.AuditorUtils;
import com.pinterest.memq.client.commons.ConsumerConfigs;
import com.pinterest.memq.client.commons.Deserializer;
import com.pinterest.memq.client.commons.MemqLogMessageIterator;
import com.pinterest.memq.client.commons.audit.Auditor;
import com.pinterest.memq.client.commons.audit.KafkaBackedAuditor;
import com.pinterest.memq.client.commons.serde.ByteArrayDeserializer;
import com.pinterest.memq.client.commons2.ClosedException;
import com.pinterest.memq.client.commons2.DataNotFoundException;
import com.pinterest.memq.client.commons2.Endpoint;
import com.pinterest.memq.client.commons2.MemqCommonClient;
import com.pinterest.memq.commons.BatchHeader;
import com.pinterest.memq.commons.BatchHeader.IndexEntry;
import com.pinterest.memq.commons.CloseableIterator;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.protocol.TopicMetadata;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.StorageHandlerTable;
import com.pinterest.memq.core.utils.MiscUtils;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.ByteBufOutputStream;
import io.netty.buffer.PooledByteBufAllocator;
import software.amazon.awssdk.core.exception.SdkClientException;
/**
* Consumer class for MemQ data. This consumer follows conventions to other
* PubSub consumers like Kafka and Pulsar.
*
* The consumer has 2 mode direct and indirect. Direct consumer allows custom
* notification configurations allowing users to override what data is being
* read and how reads are configured. Indirect consumer performs metadata query
* on the broker to get topic metadata before initializing storage
* configurations, this allows cluster to provide updated metadata rather than
* static configuration.
*/
public final class MemqConsumer<K, V> implements Closeable {
private static final Logger logger = Logger.getLogger(MemqConsumer.class.getCanonicalName());
public static final String METRICS_PREFIX = "memq.consumer";
private final MemqLogMessage<K, V> EMPTY_MESSAGE = new MemqLogMessage<>(null, null, null, null, true);
public MemqLogMessageIterator<K, V> EMPTY_ITERATOR = new MemqLogMessageIterator<K, V>() {
@Override
public boolean hasNext() {
return false;
}
@Override
public MemqLogMessage<K, V> next() {
return null;
}
@Override
public void close() {
}
};
private Deserializer<K> keyDeserializer;
private Deserializer<V> valueDeserializer;
private Properties properties;
private ConcurrentLinkedQueue<JsonObject> notificationQueue;
private String topicName;
private NotificationSource notificationSource;
private boolean isBufferToFile;
private boolean isDirectBuffer;
private String bufferFilesDirectory;
private boolean dryRun;
private volatile boolean closed = false;
// metrics
private MetricRegistry metricRegistry;
private File bufferFile;
private String cluster;
private Auditor auditor;
private Counter iteratorExceptionCounter;
private Counter loadBatchExceptionCounter;
private boolean directConsumer;
private MemqCommonClient client;
private Properties notificationSourceProps;
private Properties additionalStorageProps;
private int metadataTimeout = 10000;
private String groupId;
// Storage handler used to read batch objects referenced by notifications;
// resolved from topic metadata (broker mode) or configured directly.
private StorageHandler storageHandler;

/**
 * Creates a MemqConsumer from the supplied configuration properties.
 *
 * Initializes key/value deserializers, optional auditing and metrics, then —
 * depending on "direct consumer" mode — either instantiates a storage handler
 * plus notification source directly, or connects a MemqCommonClient to the
 * configured bootstrap servers for broker-based operation.
 *
 * @param props consumer configuration (see {@code ConsumerConfigs})
 * @throws Exception if deserializers/auditor/storage handler cannot be
 *                   instantiated, or the consumer group id is missing in
 *                   broker-based mode
 */
@SuppressWarnings("unchecked")
public MemqConsumer(Properties props) throws Exception {
  this.properties = props;
  this.cluster = props.getProperty(CLUSTER);
  this.metricRegistry = new MetricRegistry();
  this.isBufferToFile = Boolean
      .parseBoolean(props.getProperty(BUFFER_TO_FILE_CONFIG_KEY, "false"));
  this.isDirectBuffer = Boolean.parseBoolean(props.getProperty(USE_DIRECT_BUFFER_KEY, "false"));
  String clientId = props.getProperty(CLIENT_ID, UUID.randomUUID().toString());
  checkAndConfigureTmpFileBuffering(props, clientId);
  this.notificationQueue = new ConcurrentLinkedQueue<>();
  this.dryRun = Boolean.parseBoolean(props.getProperty(DRY_RUN_KEY, "false"));
  this.directConsumer = Boolean.parseBoolean(props.getProperty(DIRECT_CONSUMER, "true"));
  // Instantiate and initialize the value deserializer (defaults to byte[]).
  // NOTE: the previous try/catch blocks around these sections only rethrew
  // the caught exceptions, so they were removed; exceptions propagate as
  // before.
  String valueDeserializerClassName = props.getProperty(VALUE_DESERIALIZER_CLASS_KEY,
      ByteArrayDeserializer.class.getName());
  this.valueDeserializer = (Deserializer<V>) Class.forName(valueDeserializerClassName)
      .newInstance();
  this.valueDeserializer.init((Properties) props.get(VALUE_DESERIALIZER_CLASS_CONFIGS_KEY));
  // Instantiate and initialize the key deserializer (defaults to byte[]).
  String keyDeserializerClassName = props.getProperty(KEY_DESERIALIZER_CLASS_KEY,
      ByteArrayDeserializer.class.getName());
  this.keyDeserializer = (Deserializer<K>) Class.forName(keyDeserializerClassName)
      .newInstance();
  this.keyDeserializer.init((Properties) props.get(KEY_DESERIALIZER_CLASS_CONFIGS_KEY));
  // initialize auditor (optional; Kafka-backed implementation by default)
  if (Boolean.parseBoolean(props.getProperty(ConsumerConfigs.AUDITOR_ENABLED, "false"))) {
    this.auditor = Class.forName(props.getProperty(ConsumerConfigs.AUDITOR_CLASS,
        KafkaBackedAuditor.class.getCanonicalName())).asSubclass(Auditor.class).newInstance();
    Properties auditorConfig = AuditorUtils.extractAuditorConfig(props);
    this.auditor.init(auditorConfig);
  }
  initializeMetrics();
  // fetch consumer metadata
  if (directConsumer) {
    if (!dryRun) {
      String storageType = props.getProperty("storageHandler", "customs3aync2");
      storageHandler = StorageHandlerTable.getClass(storageType).newInstance();
      storageHandler.initReader(props, metricRegistry);
      initializeNotificationSource();
    }
  } else {
    this.groupId = props.getProperty(ConsumerConfigs.GROUP_ID);
    if (groupId == null) {
      throw new Exception("Missing consumer group id");
    }
    // Collect storage / notification-source overrides from prefixed keys.
    additionalStorageProps = new Properties();
    additionalStorageProps.setProperty(ConsumerConfigs.BOOTSTRAP_SERVERS,
        props.getProperty(ConsumerConfigs.BOOTSTRAP_SERVERS));
    notificationSourceProps = (Properties) properties.get(NOTIFICATION_SOURCE_PROPS_KEY);
    if (notificationSourceProps == null) {
      notificationSourceProps = new Properties();
    }
    for (Entry<Object, Object> prop : properties.entrySet()) {
      String key = prop.getKey().toString();
      if (key.startsWith(NOTIFICATION_SOURCE_PROPS_PREFIX_KEY)) {
        notificationSourceProps.put(key.replace(NOTIFICATION_SOURCE_PROPS_PREFIX_KEY, ""),
            prop.getValue());
      } else if (key.startsWith(STORAGE_PROPS_PREFIX_KEY)) {
        additionalStorageProps.put(key.replace(STORAGE_PROPS_PREFIX_KEY, ""), prop.getValue());
      }
    }
    String bootstrapServers = props.getProperty(ConsumerConfigs.BOOTSTRAP_SERVERS);
    List<Endpoint> endpoints = MemqCommonClient
        .getEndpointsFromBootstrapServerString(bootstrapServers);
    client = new MemqCommonClient("", null, null);
    client.initialize(endpoints);
  }
}
/**
 * Creates a consumer like {@link #MemqConsumer(Properties)} but overrides the
 * storage handler with the supplied instance (useful for tests or custom IO).
 *
 * Note: the delegating constructor may already have built and initialized a
 * handler; this assignment replaces that reference afterwards.
 *
 * @param props          consumer configuration
 * @param storageHandler pre-built storage handler to use for reads
 * @throws Exception if the delegating constructor fails
 */
public MemqConsumer(Properties props, StorageHandler storageHandler) throws Exception {
  this(props);
  this.storageHandler = storageHandler;
}
/**
 * Registers consumer metrics: iterator/loading exception counters and, when
 * pooled direct buffers are in use, Netty memory-usage gauges.
 */
protected void initializeMetrics() {
  if (metricRegistry == null) {
    return;
  }
  iteratorExceptionCounter = metricRegistry.counter("iterator.exception");
  loadBatchExceptionCounter = metricRegistry.counter("loading.exception");
  if (!isDirectBuffer) {
    return;
  }
  final PooledByteBufAllocator allocator = PooledByteBufAllocator.DEFAULT;
  metricRegistry.gauge("netty.direct.memory.used",
      () -> () -> allocator.metric().usedDirectMemory());
  metricRegistry.gauge("netty.heap.memory.used",
      () -> () -> allocator.metric().usedHeapMemory());
}
/**
 * When file buffering is enabled, prepares a per-client temp directory and a
 * per-thread buffer file (deleted on JVM exit) used to spool fetched objects.
 *
 * @param props    consumer configuration (reads the buffer directory key)
 * @param clientId unique client id used to isolate this consumer's directory
 */
private void checkAndConfigureTmpFileBuffering(Properties props, String clientId) {
  if (!isBufferToFile) {
    return;
  }
  bufferFilesDirectory = props.getProperty(BUFFER_FILES_DIRECTORY_KEY, "/tmp");
  bufferFilesDirectory += "/" + clientId;
  File directory = new File(bufferFilesDirectory);
  // Surface directory-creation failures early instead of failing later with
  // a cryptic FileNotFoundException when the buffer file is first opened.
  // (Previously the mkdirs() return value was silently ignored.)
  if (!directory.mkdirs() && !directory.isDirectory()) {
    logger.warning("Unable to create buffer files directory: " + bufferFilesDirectory);
  }
  bufferFile = new File(
      bufferFilesDirectory + "/objectBufferFile_" + Thread.currentThread().getId());
  bufferFile.deleteOnExit();
}
// for testing
/**
 * Test-only constructor: uses an externally supplied notification queue and
 * deserializers and always runs in dry-run mode (no commits, no storage
 * handler, no notification source).
 *
 * @param notificationQueue queue of batch notifications to consume from
 * @param keyDeserializer   deserializer for message keys
 * @param valueDeserializer deserializer for message values
 * @throws Exception declared for parity with the primary constructor
 */
public MemqConsumer(ConcurrentLinkedQueue<JsonObject> notificationQueue,
                    Deserializer<K> keyDeserializer,
                    Deserializer<V> valueDeserializer) throws Exception {
  this.notificationQueue = notificationQueue;
  this.dryRun = true;
  this.keyDeserializer = keyDeserializer;
  this.valueDeserializer = valueDeserializer;
  this.metricRegistry = new MetricRegistry();
}
/**
 * Opens a raw InputStream over the batch object referenced by the given
 * notification, delegating to the configured storage handler.
 *
 * (The previous javadoc — "Set fields based on nextNotificationToProcess" —
 * did not match this method's behavior and has been corrected.)
 *
 * @param nextNotificationToProcess a JsonObject of the next notification in the
 *                                  queue from compaction topic
 * @return raw stream over the batch object's bytes
 * @throws IOException           on read failure from the storage backend
 * @throws DataNotFoundException if the referenced object no longer exists
 * @throws ClosedException       if this consumer has already been closed
 */
protected InputStream fetchBatchStreamForNotification(JsonObject nextNotificationToProcess)
    throws IOException, DataNotFoundException, ClosedException {
  if (closed) {
    throw new ClosedException();
  }
  return storageHandler.fetchBatchStreamForNotification(nextNotificationToProcess);
}
/**
 * Fetch a Memq data object and returns a DataInputStream of the object.
 *
 * The raw storage stream is fully drained into one of three buffers, chosen
 * by configuration: a temp file, a pooled direct ByteBuf, or a heap byte
 * array. The raw stream is always closed (best-effort) before returning.
 *
 * @param nextNotificationToProcess notification describing the object
 * @return a DataInputStream of the Memq data object
 * @throws IOException           on fetch/copy failure
 * @throws DataNotFoundException if the object is missing in storage
 * @throws ClosedException       if the consumer is closed
 */
public DataInputStream fetchObjectToInputStream(JsonObject nextNotificationToProcess)
    throws IOException, DataNotFoundException, ClosedException {
  if (closed) {
    throw new ClosedException();
  }
  InputStream stream = fetchBatchStreamForNotification(nextNotificationToProcess);
  InputStream newStream;
  try {
    if (isBufferToFile) {
      // FIX: close the file output stream; it was previously leaked.
      try (FileOutputStream fos = new FileOutputStream(bufferFile)) {
        IOUtils.copy(stream, fos);
      }
      newStream = new FileInputStream(bufferFile);
    } else if (isDirectBuffer) {
      ByteBuf byteBuf = PooledByteBufAllocator.DEFAULT
          .directBuffer(storageHandler.getBatchSizeFromNotification(nextNotificationToProcess));
      try {
        ByteBufOutputStream out = new ByteBufOutputStream(byteBuf);
        IOUtils.copy(stream, out);
        // releaseOnClose=true: the returned stream takes ownership of byteBuf
        newStream = new ByteBufInputStream(byteBuf, true);
        out.close();
      } catch (IOException | RuntimeException e) {
        // FIX: release the pooled buffer when the copy fails, otherwise the
        // direct buffer leaks (nothing else holds a reference to it).
        byteBuf.release();
        throw e;
      }
    } else {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      IOUtils.copy(stream, out);
      newStream = new ByteArrayInputStream(out.toByteArray());
      out.close();
    }
    return new DataInputStream(newStream);
  } finally {
    try {
      // attempt to close stream if not already closed to avoid memory leaks
      stream.close();
    } catch (Exception e) {
    }
  }
}
/**
 * Subscribe to topicNames.
 *
 * MemqConsumer currently supports exactly one topic, so the collection must
 * contain a single name.
 *
 * @param topicNames the Memq topicNames to subscribe to (singleton collection)
 * @throws IllegalArgumentException      if the collection is empty (previously
 *                                       this surfaced as a bare
 *                                       NoSuchElementException from the iterator)
 * @throws UnsupportedOperationException if more than one topic is supplied
 * @throws Exception                     if the underlying subscription fails
 */
public void subscribe(Collection<String> topicNames) throws Exception {
  if (topicNames.isEmpty()) {
    throw new IllegalArgumentException("Must provide exactly one topic to subscribe to");
  }
  if (topicNames.size() > 1) {
    // only support single topic subscription for now
    throw new UnsupportedOperationException(
        "MemqConsumer only supports subscription to a single topic (singleton collection)");
  }
  subscribe(topicNames.iterator().next());
}
/**
 * Subscribes this consumer to a single topic. Re-subscribing to the same
 * topic is allowed; switching to a different topic is rejected.
 *
 * NOTE(review): an empty topicName skips the conflict check below and still
 * overwrites the current subscription — confirm whether that is intended.
 *
 * @param topicName topic to subscribe to
 * @throws UnsupportedOperationException if already subscribed to a different topic
 * @throws Exception if notification source initialization fails (broker mode)
 */
public void subscribe(String topicName) throws Exception {
  if (!topicName.isEmpty() && this.topicName != null && !this.topicName.equals(topicName)) {
    // cannot subscribe to another topic
    throw new UnsupportedOperationException(
        "MemqConsumer doesn't currently support multi-topic subscription");
  }
  this.topicName = topicName;
  if (!directConsumer) {
    initializeNotificationSource(topicName);
  }
}
/**
 * Returns the partition ids exposed by the notification source, in ascending
 * order.
 *
 * @return sorted, mutable list of partition numbers
 */
public List<Integer> getPartition() {
  return notificationSource.getPartitions().stream()
      .map(PartitionInfo::partition)
      .sorted()
      .collect(Collectors.toCollection(ArrayList::new));
}
/**
 * Fetches topic metadata (storage handler name and configuration) from the
 * broker via the common client. Only usable in broker-based (non-direct)
 * mode where {@code client} is initialized.
 *
 * @param topic         topic name
 * @param timeoutMillis request timeout in milliseconds
 * @return the topic's metadata
 * @throws Exception on request failure or timeout
 */
public TopicMetadata getTopicMetadata(String topic, int timeoutMillis) throws Exception {
  return client.getTopicMetadata(topic, timeoutMillis);
}
/**
 * Unsubscribe from currently subscribed topics
 */
public void unsubscribe() {
  topicName = null;
}
/**
 * Initialize notification source
 *
 * Builds a KafkaNotificationSource from the properties stored under
 * NOTIFICATION_SOURCE_PROPS_KEY and registers this consumer as its parent.
 * Used by the direct-consumer path in the constructor.
 *
 * @throws Exception if the source cannot be constructed (logged and rethrown)
 */
private void initializeNotificationSource() throws Exception {
  try {
    Properties notificationSourceProperties = (Properties) properties
        .get(NOTIFICATION_SOURCE_PROPS_KEY);
    this.notificationSource = new KafkaNotificationSource(notificationSourceProperties);
    notificationSource.setParentConsumer(this);
  } catch (Exception e) {
    logger.log(Level.SEVERE, "Failed to initialize notification source", e);
    throw e;
  }
}
/**
 * (Re)initializes the notification source for a topic.
 *
 * In broker-based (non-direct) mode: closes any previous source, fetches the
 * topic's storage configuration from the broker, layers the caller-supplied
 * storage overrides on top, instantiates the topic's storage handler and
 * merges everything (plus the consumer group id) into the notification
 * source properties. In both modes a KafkaNotificationSource is then created
 * lazily if one does not already exist.
 *
 * @param topic topic whose metadata drives the storage handler setup
 * @throws Exception on metadata fetch or handler instantiation failure
 */
private void initializeNotificationSource(String topic) throws Exception {
  if (!directConsumer) {
    // drop the previous source so the new topic's configuration takes effect
    if (notificationSource != null) {
      notificationSource.close();
      notificationSource = null;
    }
    TopicMetadata topicMetadata = client.getTopicMetadata(topic, metadataTimeout);
    Properties storageProperties = topicMetadata.getStorageHandlerConfig();
    // caller-supplied overrides take precedence over broker-provided config
    storageProperties.putAll(additionalStorageProps);
    storageProperties.setProperty(ConsumerConfigs.TOPIC_INTERNAL_PROP, topic);
    notificationSourceProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    notificationSourceProps.putAll(storageProperties);
    storageHandler = StorageHandlerTable.getClass(topicMetadata.getStorageHandlerName())
        .getDeclaredConstructor().newInstance();
    storageHandler.initReader(storageProperties, metricRegistry);
  }
  if (notificationSource == null) {
    notificationSource = new KafkaNotificationSource(notificationSourceProps);
    notificationSource.setParentConsumer(this);
  }
}
/** Synchronously commits the source's current offsets (no-op in dry-run mode). */
public void commitOffset() {
  if (!dryRun) {
    notificationSource.commit();
  }
}
/** Synchronously commits the given partition-to-offset map (no-op in dry-run mode). */
public void commitOffset(Map<Integer, Long> offsetMap) {
  if (!dryRun) {
    notificationSource.commit(offsetMap);
  }
}
/** Asynchronously commits the source's current offsets (no-op in dry-run mode). */
public void commitOffsetAsync() {
  if (!dryRun) {
    notificationSource.commitAsync();
  }
}
/** Asynchronously commits current offsets, invoking the callback on completion. */
public void commitOffsetAsync(OffsetCommitCallback callback) {
  if (!dryRun) {
    notificationSource.commitAsync(callback);
  }
}
/** Asynchronously commits the given offsets, invoking the callback on completion. */
public void commitOffsetAsync(Map<Integer, Long> offsetMap, OffsetCommitCallback callback) {
  if (!dryRun) {
    notificationSource.commitAsync(offsetMap, callback);
  }
}
/**
 * Resolves notification offsets for the given partition-to-timestamp map,
 * delegating to the notification source.
 */
public Map<Integer, Long> offsetsOfTimestamps(Map<Integer, Long> partitionTimestamps) {
  return notificationSource.offsetsForTimestamps(partitionTimestamps);
}
/** Returns the earliest available notification offset for each given partition. */
public Map<Integer, Long> getEarliestOffsets(Collection<Integer> partitions) {
  return notificationSource.getEarliestOffsets(partitions);
}
/** Returns the latest notification offset for each given partition. */
public Map<Integer, Long> getLatestOffsets(Collection<Integer> partitions) {
  return notificationSource.getLatestOffsets(partitions);
}
/** Returns the current consumption position for the given notification partition. */
public long position(int partition) {
  return notificationSource.position(partition);
}
/** Returns the last committed offset for the given notification partition. */
public long committed(int partition) {
  return notificationSource.committed(partition);
}
/**
 * Close memq consumer.
 *
 * Marks the consumer closed, discards pending notifications and releases the
 * notification source, the storage handler reader and the network client.
 * Each resource is closed best-effort so that a failure in one close does not
 * leak the others (previously a throwing close skipped the remaining ones);
 * the first failure is rethrown after all closes have been attempted.
 *
 * @throws IOException if any underlying close fails
 */
@Override
public void close() throws IOException {
  closed = true;
  notificationQueue.clear();
  IOException firstFailure = null;
  if (notificationSource != null) {
    try {
      notificationSource.close();
    } catch (Exception e) {
      firstFailure = new IOException("Failed to close notification source", e);
    }
  }
  if (storageHandler != null) {
    try {
      storageHandler.closeReader();
    } catch (Exception e) {
      if (firstFailure == null) {
        firstFailure = new IOException("Failed to close storage handler reader", e);
      }
    }
  }
  if (client != null) {
    try {
      client.close();
    } catch (Exception e) {
      if (firstFailure == null) {
        firstFailure = new IOException("Failed to close client", e);
      }
    }
  }
  if (firstFailure != null) {
    throw firstFailure;
  }
}
/**
 * Seeks the notification source to the given partition-to-offset positions.
 * Pending (already-fetched) notifications are discarded first so stale
 * batches are not served after the seek.
 */
public void seek(Map<Integer, Long> map) {
  notificationQueue.clear();
  getNotificationSource().seek(map);
}
/** Seeks the given partitions to their earliest offsets, discarding pending notifications. */
public void seekToBeginning(Collection<Integer> partitions) {
  notificationQueue.clear();
  getNotificationSource().seekToBeginning(partitions);
}
/** Seeks the given partitions to their latest offsets, discarding pending notifications. */
public void seekToEnd(Collection<Integer> partitions) {
  notificationQueue.clear();
  getNotificationSource().seekToEnd(partitions);
}
/**
 * Fetches only the batch header (including the message index) for the batch
 * referenced by the given notification, without downloading the payload.
 *
 * @param nextNotificationToProcess notification describing the batch object
 * @return parsed batch header
 * @throws IOException           on storage read failure
 * @throws DataNotFoundException if the batch object is missing
 */
protected BatchHeader fetchHeaderForBatch(JsonObject nextNotificationToProcess) throws IOException,
    DataNotFoundException {
  return storageHandler.fetchHeaderForBatch(nextNotificationToProcess);
}
/**
 * Returns the set of partition ids currently assigned to this consumer by
 * the notification source.
 */
public Set<Integer> assignment() {
  return getNotificationSource().getAssignments().stream()
      .map(TopicPartition::partition)
      .collect(Collectors.toSet());
}
/**
 * Fetches a single log message addressed by (partition, batchOffset,
 * messageIndex, logMessageIndex): clears pending notifications, assigns and
 * seeks the notification source to the batch offset, waits up to
 * {@code timeout} for the batch notification, then extracts the message.
 *
 * @param timeout         maximum time to wait for the batch notification
 * @param partition       notification partition to read from
 * @param batchOffset     offset of the batch notification
 * @param messageIndex    message index within the batch (-1 = last message)
 * @param logMessageIndex log message index within the message (-1 = last)
 * @return the addressed log message, or null if no notification arrived in
 *         time or the message index does not exist
 * @throws IOException           on storage read failure
 * @throws DataNotFoundException if the batch object is missing
 */
public MemqLogMessage<K, V> getLogMessageAtCurrentOffset(Duration timeout,
                                                         int partition,
                                                         long batchOffset,
                                                         int messageIndex,
                                                         int logMessageIndex) throws IOException,
                                                                               DataNotFoundException {
  // first index
  notificationQueue.clear();
  getNotificationSource().assign(Arrays.asList(partition));
  getNotificationSource().seek(ImmutableMap.of(partition, batchOffset));
  if (notificationQueue.isEmpty()
      && notificationSource.lookForNewObjects(timeout, notificationQueue) <= 0
      && notificationQueue.isEmpty()) {
    // throw new IOException("No new object");
    return null;
  }
  // second index
  JsonObject objectNotification = notificationQueue.poll();
  return getMessageFromNotification(messageIndex, logMessageIndex, objectNotification);
}
/**
 * Extracts one log message from the batch referenced by a notification.
 *
 * Reads the batch header to resolve the byte range of the target message
 * (messageIndex -1 selects the last message), streams just that range from
 * storage and iterates log messages up to logMessageIndex (-1 selects the
 * last one by iterating to exhaustion).
 *
 * @param messageIndex       message index within the batch, or -1 for last
 * @param logMessageIndex    log message index within the message, or -1 for last
 * @param objectNotification notification describing the batch object
 * @return the addressed log message, or null if the message index is absent
 * @throws IOException           on storage read failure
 * @throws DataNotFoundException if the batch object is missing
 */
private MemqLogMessage<K, V> getMessageFromNotification(int messageIndex,
                                                        int logMessageIndex,
                                                        JsonObject objectNotification) throws IOException,
                                                                                       DataNotFoundException {
  BatchHeader header = fetchHeaderForBatch(objectNotification);
  SortedMap<Integer, IndexEntry> idx = header.getMessageIndex();
  if (messageIndex == -1) {
    messageIndex = idx.lastKey();
  }
  IndexEntry indexEntry = idx.get(messageIndex);
  if (indexEntry == null) {
    // TODO throw no such message
    return null;
  }
  DataInputStream stream = fetchMessageAtIndex(objectNotification, indexEntry);
  // third index
  try {
    if (logMessageIndex == -1) {
      // MAX_VALUE makes the loop below run to iterator exhaustion, leaving
      // msg holding the last log message
      logMessageIndex = Integer.MAX_VALUE;
    }
    try (MemqLogMessageIterator<K, V> iterator = new MemqLogMessageIterator<>(cluster, groupId,
        stream, objectNotification, getKeyDeserializer(), getValueDeserializer(),
        getMetricRegistry(), true, auditor)) {
      int i = 0;
      MemqLogMessage<K, V> msg = null;
      // TODO make this code more robust
      while (i <= logMessageIndex && iterator.hasNext()) {
        msg = iterator.next();
        i++;
      }
      return msg;
    }
  } finally {
    stream.close();
  }
}
/**
 * Fetches, in parallel, one log message per requested position described by
 * the parallel arrays (partition, batchOffset, messageIndex,
 * logMessageIndex). Each fetch is attempted up to 3 times; a position whose
 * fetch ultimately fails contributes a null entry in the result list.
 *
 * @param timeout           maximum time to wait for the batch notifications
 * @param partitions        partition per requested message
 * @param batchOffsets      batch offset per requested message
 * @param messageIndexes    message index within the batch (-1 = last)
 * @param logMessageIndexes log message index within the message (-1 = last)
 * @param downloadPool      executor used for the parallel downloads
 * @return one message (or null on failure) per requested position, in order
 * @throws IllegalArgumentException if the argument arrays differ in length
 * @throws TimeoutException         if notifications cannot be fetched in time
 */
public List<MemqLogMessage<K, V>> getLogMessagesAtOffsets(Duration timeout,
                                                          int[] partitions,
                                                          long[] batchOffsets,
                                                          int[] messageIndexes,
                                                          int[] logMessageIndexes,
                                                          ExecutorService downloadPool) throws IllegalArgumentException,
                                                                                        TimeoutException {
  if (partitions.length != batchOffsets.length || batchOffsets.length != messageIndexes.length
      || messageIndexes.length != logMessageIndexes.length) {
    throw new IllegalArgumentException("Mismatching argument array lengths");
  }
  Map<Integer, Long> partitionOffsets = new HashMap<>();
  for (int i = 0; i < partitions.length; i++) {
    int partition = partitions[i];
    long batchOffset = batchOffsets[i];
    partitionOffsets.put(partition, batchOffset);
  }
  Map<Integer, JsonObject> notifications = getNotificationSource()
      .getNotificationsAtOffsets(timeout, partitionOffsets);
  List<Future<MemqLogMessage<K, V>>> futures = new ArrayList<>();
  for (int i = 0; i < partitions.length; i++) {
    final int p = i;
    futures.add(downloadPool.submit(() -> {
      JsonObject notification = notifications.get(partitions[p]);
      int retries = 3;
      while (true) {
        try {
          return getMessageFromNotification(messageIndexes[p], logMessageIndexes[p],
              notification);
        } catch (Exception e) {
          if (--retries == 0) {
            throw new ExecutionException(e);
          }
        }
      }
    }));
  }
  List<MemqLogMessage<K, V>> messages = new ArrayList<>();
  for (int i = 0; i < futures.size(); i++) {
    try {
      messages.add(futures.get(i).get());
    } catch (Exception e) {
      // FIX: log the actual partition id (partitions[i]); the loop index i
      // was previously logged as the partition, which was misleading.
      logger.log(Level.SEVERE,
          "Failed to retrieve message of partition " + partitions[i] + " at offset "
              + batchOffsets[i],
          e);
      messages.add(null);
    }
  }
  return messages;
}
/**
 * Streams a single message's bytes from storage using its index entry
 * (the per-message range recorded in the batch header).
 *
 * @param objectNotification notification describing the batch object
 * @param index              index entry locating the message within the batch
 * @return stream over the message bytes
 * @throws IOException           on storage read failure
 * @throws DataNotFoundException if the batch object is missing
 */
protected DataInputStream fetchMessageAtIndex(JsonObject objectNotification,
                                              IndexEntry index) throws IOException,
                                                                DataNotFoundException {
  return storageHandler.fetchMessageAtIndex(objectNotification, index);
}
/**
 * Convenience overload of {@link #poll(Duration, MutableInt)} that discards
 * the polled-notification count.
 */
public CloseableIterator<MemqLogMessage<K, V>> poll(Duration timeout) throws NoTopicsSubscribedException,
                                                                      IOException {
  return poll(timeout, new MutableInt(0));
}
/** Interrupts a blocking poll on the notification source. */
public void wakeup() {
  getNotificationSource().wakeup();
}
/**
 * Poll the notification source for new notifications and return an iterator of
 * the polled data objects.
 *
 * If notifications are already queued they are served without polling. The
 * queue is snapshotted into the returned iterator and then cleared, so each
 * notification is handed out exactly once.
 *
 * @param timeout     timeout for notification source poll
 * @param numMessages out-parameter: incremented by the number of newly polled
 *                    notifications, or set to the pending queue size when no
 *                    poll was needed (note the add-vs-set asymmetry between
 *                    the two branches)
 * @return a MemqLogMessageIterator
 * @throws NoTopicsSubscribedException if subscribe() has not been called
 * @throws IOException on iterator construction failure
 */
public CloseableIterator<MemqLogMessage<K, V>> poll(Duration timeout,
                                                    MutableInt numMessages) throws NoTopicsSubscribedException,
                                                                            IOException {
  if (topicName == null) {
    throw new NoTopicsSubscribedException("Currently not subscribed to any topic");
  }
  if (notificationQueue.isEmpty()) {
    int count = notificationSource.lookForNewObjects(timeout, notificationQueue);
    numMessages.add(count);
    if (notificationQueue.isEmpty()) {
      logger.fine("Empty iterator due to no new objects");
      return MiscUtils.emptyCloseableIterator();
    }
  } else {
    numMessages.setValue(notificationQueue.size());
  }
  NotificationBatchIterator tmp = new NotificationBatchIterator(notificationQueue);
  notificationQueue.clear();
  return tmp;
}
/**
 * Blocks until the notification source has partition assignments and returns
 * the assigned partition ids.
 *
 * @return partition ids assigned to this consumer
 * @throws NoTopicsSubscribedException if no notification source exists yet
 */
public Set<Integer> waitForAssignment() throws NoTopicsSubscribedException {
  NotificationSource source = getNotificationSource();
  if (source == null) {
    throw new NoTopicsSubscribedException(
        "Notification source is null, must subscribe before waiting for assignments");
  }
  return source.waitForAssignment().stream()
      .map(TopicPartition::partition)
      .collect(Collectors.toSet());
}
/** @return deserializer used for message keys */
public Deserializer<K> getKeyDeserializer() {
  return keyDeserializer;
}
/** @return deserializer used for message values */
public Deserializer<V> getValueDeserializer() {
  return valueDeserializer;
}
/** @return the raw configuration properties this consumer was built with */
public Properties getProperties() {
  return properties;
}
/** @return the queue of pending (not yet iterated) batch notifications */
public ConcurrentLinkedQueue<JsonObject> getNotificationQueue() {
  return notificationQueue;
}
/** @return the notification source, or null before subscription/initialization */
public NotificationSource getNotificationSource() {
  return notificationSource;
}
/** Replaces the notification source (primarily for tests). */
public void setNotificationSource(KafkaNotificationSource notificationSource) {
  this.notificationSource = notificationSource;
}
/** @return registry holding this consumer's metrics */
public MetricRegistry getMetricRegistry() {
  return metricRegistry;
}
/** @return the currently subscribed topic, or null if unsubscribed */
public String getTopicName() {
  return topicName;
}
/** Replaces the value deserializer (primarily for tests). */
public void setValueDeserializer(Deserializer<V> valueDeserializer) {
  this.valueDeserializer = valueDeserializer;
}
/**
 * Manually assigns the given notification partitions to this consumer.
 * Requires a prior subscribe(); in broker-based mode this also
 * (re)initializes the notification source for the subscribed topic.
 *
 * @param asList partitions to assign
 * @throws Exception if no topic is subscribed or initialization fails
 */
public void assign(Collection<Integer> asList) throws Exception {
  if (topicName == null) {
    throw new Exception("Must subscribe to topic before calling assign");
  }
  if (!directConsumer) {
    initializeNotificationSource(topicName);
  }
  notificationSource.assign(asList);
}
/** @return the storage handler used to read batch objects */
public StorageHandler getStorageHandler() {
  return storageHandler;
}
/**
 * Iterator over all log messages contained in a snapshot of notification
 * batch objects. Each notification is downloaded lazily via
 * {@link #fetchObjectToInputStream} and its messages are exposed through a
 * nested MemqLogMessageIterator; when one batch is exhausted, the next
 * queued notification is loaded.
 */
public final class NotificationBatchIterator implements CloseableIterator<MemqLogMessage<K, V>> {
  // number of download attempts before a batch load failure is surfaced
  private static final int MAX_RETRIES_FOR_BATCH_LOAD_FAILURE = 2;
  // snapshot of the notifications to iterate, consumed front-to-back
  private Deque<JsonObject> queue;
  // notification whose batch is currently being iterated (for logging)
  private JsonObject currNotificationObj;
  // iterator over the current batch's log messages; null until first load
  private MemqLogMessageIterator<K, V> itr;
  /**
   * Snapshots the given notification queue; the original queue is not
   * consumed by this iterator.
   */
  public NotificationBatchIterator(Queue<JsonObject> notificationQueue) throws IOException {
    this.queue = new LinkedList<>(notificationQueue);
  }
  /**
   * Loads next notification and returns whether there are bytes to read in the
   * next object
   *
   * @return true if there are object bytes to read, false otherwise
   * @throws IOException
   * @throws DataNotFoundException
   */
  protected MemqLogMessageIterator<K, V> loadNewNotification(JsonObject nextNotification) throws IOException, DataNotFoundException, ClosedException {
    currNotificationObj = nextNotification;
    // release resources held by the previous batch's iterator, if any
    if (itr != null) {
      closeAndResetIterator();
    }
    DataInputStream objectStream = fetchObjectToInputStream(currNotificationObj);
    // parse batch header
    itr = new MemqLogMessageIterator<>(cluster, groupId, objectStream, currNotificationObj,
        keyDeserializer, valueDeserializer, metricRegistry, false, auditor);
    return itr;
  }
  @Override
  public boolean hasNext() {
    // more batches queued, or the current batch still has messages
    return !queue.isEmpty() || (itr != null && itr.hasNext());
  }
  @Override
  public MemqLogMessage<K, V> next() {
    if (itr == null) {
      JsonObject poll = queue.poll();
      if (poll == null) {
        // if there are no messages in the poll due to DataNotFound exception the return
        // empty message
        return EMPTY_MESSAGE;
      }
      // retry the batch download a bounded number of times; IO/client errors
      // are retried, missing data is skipped, a closed consumer aborts
      int retryCount = MAX_RETRIES_FOR_BATCH_LOAD_FAILURE;
      while (retryCount > 0) {
        try {
          loadNewNotification(poll);
          break;
        } catch (IOException | SdkClientException e) {
          if (retryCount == 1) {
            // final attempt failed; surface the error to the caller
            throw new RuntimeException(e);
          } else {
            loadBatchExceptionCounter.inc();
            logger.log(Level.SEVERE, "Loading batch for object:" + poll + " failed retrying", e);
          }
        } catch (DataNotFoundException e) {
          e.printStackTrace();
          // if data not found then skip this notification, set to empty iterator
          itr = EMPTY_ITERATOR;
          break;
        } catch (ClosedException ce) {
          logger.log(Level.SEVERE, "Consumer has been closed", ce);
          throw new NoSuchElementException("Consumer has been closed");
        }
        retryCount--;
      }
    }
    try {
      if (!itr.hasNext()) {
        // current batch exhausted: reset and recurse to load the next one
        closeAndResetIterator();
        return next();
      } else {
        return itr.next();
      }
    } catch (Exception e) {
      // a corrupt/failed batch is skipped rather than poisoning the whole
      // iteration; the counter tracks how often this happens
      logger.log(Level.SEVERE, "Iterator failed for:" + currNotificationObj + " skipping to next",
          e);
      if (iteratorExceptionCounter != null) {
        iteratorExceptionCounter.inc();
      }
      closeAndResetIterator();
      return next();
    }
  }
  // best-effort close of the current batch iterator; never throws
  private void closeAndResetIterator() {
    try {
      if (itr != null) {
        itr.close();
      }
    } catch (Exception e1) {
      logger.log(Level.WARNING, "Iterator failed to best-effort close: ", e1);
    }
    itr = null;
  }
  /**
   * Jumps to the last queued notification and positions its iterator at the
   * final log message.
   *
   * NOTE(review): queue.getLast() throws NoSuchElementException when the
   * queue is empty — confirm callers guarantee a non-empty queue.
   */
  public void skipToLastLogMessage() throws IOException, DataNotFoundException, ClosedException {
    JsonObject last = queue.getLast();
    MemqLogMessageIterator<K, V> itr = loadNewNotification(last);
    itr.skipToLastLogMessage();
  }
  @Override
  public void close() throws IOException {
    closeAndResetIterator();
  }
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/consumer/tools/ConsoleConsumer.java | memq-client/src/main/java/com/pinterest/memq/client/consumer/tools/ConsoleConsumer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer.tools;
import java.io.File;
import java.io.FileInputStream;
import java.time.Duration;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import com.pinterest.memq.client.commons.ConsumerConfigs;
import com.pinterest.memq.client.commons.serde.StringDeserializer;
import com.pinterest.memq.client.consumer.MemqConsumer;
import com.pinterest.memq.commons.MemqLogMessage;
/**
 * Command-line tool that tails a Memq topic and prints every received log
 * message to stdout until the process receives a shutdown signal.
 *
 * Required arguments: -config &lt;properties file&gt; -topic &lt;Memq topic name&gt;
 */
public class ConsoleConsumer {
  public static void main(String[] args) throws Exception {
    String topicName = null;
    String configFile = null;
    Options options = new Options();
    options
        .addOption(Option.builder("config").argName("config").desc("Configuration properties file")
            .numberOfArgs(1).required().type(String.class).build());
    options.addOption(Option.builder("topic").argName("topic").desc("Memq Topic name")
        .numberOfArgs(1).required().type(String.class).build());
    CommandLineParser parser = new DefaultParser();
    try {
      // parse the command line arguments
      CommandLine line = parser.parse(options, args);
      configFile = line.getOptionValue("config");
      topicName = line.getOptionValue("topic");
    } catch (ParseException exp) {
      // oops, something went wrong
      System.err.println("Parsing failed. Reason: " + exp.getMessage());
      System.exit(-1);
    }
    // flipped by the shutdown hook to break out of the poll loop cleanly
    final AtomicBoolean loopControl = new AtomicBoolean(true);
    Runtime.getRuntime().addShutdownHook(new Thread() {
      @Override
      public void run() {
        loopControl.set(false);
      }
    });
    Properties properties = new Properties();
    // FIX: close the config stream once loaded (previously leaked)
    try (FileInputStream fis = new FileInputStream(new File(configFile))) {
      properties.load(fis);
    }
    properties.setProperty(ConsumerConfigs.KEY_DESERIALIZER_CLASS_KEY,
        StringDeserializer.class.getCanonicalName());
    properties.setProperty(ConsumerConfigs.VALUE_DESERIALIZER_CLASS_KEY,
        StringDeserializer.class.getCanonicalName());
    MemqConsumer<String, String> consumer = new MemqConsumer<>(properties);
    consumer.subscribe(Arrays.asList(topicName));
    while (loopControl.get()) {
      Iterator<MemqLogMessage<String, String>> records = consumer.poll(Duration.ofSeconds(10));
      while (records.hasNext() && loopControl.get()) {
        System.out.println(records.next());
      }
    }
    System.out.println("Closing memq consumer");
    consumer.close();
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/consumer/utils/CheckNotificationTopic.java | memq-client/src/main/java/com/pinterest/memq/client/consumer/utils/CheckNotificationTopic.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer.utils;
import java.io.FileInputStream;
import java.util.Properties;
import com.pinterest.memq.client.consumer.KafkaNotificationSource;
/**
 * Command-line utility that prints the offset of every partition of a Kafka
 * notification topic and how long the lookup took.
 */
public class CheckNotificationTopic {
  /**
   * @param args args[0] = path to a properties file for KafkaNotificationSource
   */
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // FIX: close the config stream once loaded (previously leaked)
    try (FileInputStream fis = new FileInputStream(args[0])) {
      props.load(fis);
    }
    KafkaNotificationSource source = new KafkaNotificationSource(props);
    source.unsubscribe();
    long ts = System.currentTimeMillis();
    long[] offsetsForAllPartitions = source.getOffsetsForAllPartitions(true);
    for (int i = 0; i < offsetsForAllPartitions.length; i++) {
      long l = offsetsForAllPartitions[i];
      System.out.println("Partition:" + i + " Offset:" + l);
    }
    ts = System.currentTimeMillis() - ts;
    System.out.println("Ts:" + ts);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/consumer/utils/properties/MemqConsumerBuilder.java | memq-client/src/main/java/com/pinterest/memq/client/consumer/utils/properties/MemqConsumerBuilder.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer.utils.properties;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import com.pinterest.memq.client.commons.ConsumerConfigs;
import com.pinterest.memq.client.consumer.KafkaNotificationSource;
import com.pinterest.memq.client.consumer.MemqConsumer;
/**
 * Fluent builder for {@link MemqConsumer} instances backed by a Kafka
 * notification source.
 *
 * @param <H> header (key) type produced by the consumer
 * @param <T> value type produced by the consumer
 */
public class MemqConsumerBuilder<H, T> {
  protected Properties properties;
  private Properties notificationSourceProperties;
  public MemqConsumerBuilder() {
    this.properties = new Properties();
    this.notificationSourceProperties = new Properties();
  }
  /** Sets the Kafka consumer group id used by the notification source. */
  public MemqConsumerBuilder<H, T> setConsumerGroupId(String consumerGroupId) {
    notificationSourceProperties.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
    return this;
  }
  /** Sets the fully-qualified class name of the header (key) deserializer. */
  public MemqConsumerBuilder<H, T> setHeaderDeserializerClassname(String headerDeserializerClassname) {
    properties.put(ConsumerConfigs.KEY_DESERIALIZER_CLASS_KEY, headerDeserializerClassname);
    return this;
  }
  /** Sets configuration passed to the header (key) deserializer's init(). */
  public MemqConsumerBuilder<H, T> setHeaderDeserializerClassConfigs(Properties headerDeserializerClassConfigs) {
    properties.put(ConsumerConfigs.KEY_DESERIALIZER_CLASS_CONFIGS_KEY,
        headerDeserializerClassConfigs);
    return this;
  }
  /** Sets the fully-qualified class name of the value deserializer. */
  public MemqConsumerBuilder<H, T> setValueDeserializerClassname(String valueDeserializerClassname) {
    properties.put(ConsumerConfigs.VALUE_DESERIALIZER_CLASS_KEY, valueDeserializerClassname);
    return this;
  }
  /** Sets configuration passed to the value deserializer's init(). */
  public MemqConsumerBuilder<H, T> setValueDeserializerClassConfigs(Properties valueDeserializerClassConfigs) {
    properties.put(ConsumerConfigs.VALUE_DESERIALIZER_CLASS_CONFIGS_KEY,
        valueDeserializerClassConfigs);
    return this;
  }
  /** Enables spooling fetched batch objects to a temp file instead of memory. */
  public MemqConsumerBuilder<H, T> bufferObjectFetchToFile() {
    // BUG FIX: store the flag as a String. MemqConsumer reads these flags via
    // Properties.getProperty(), which returns null for non-String values, so
    // a Boolean value here was silently ignored.
    properties.setProperty(ConsumerConfigs.BUFFER_TO_FILE_CONFIG_KEY, "true");
    return this;
  }
  /** Sets the directory used for fetched-object buffer files. */
  public MemqConsumerBuilder<H, T> setBufferFilesDirectory(String directory) {
    properties.put(ConsumerConfigs.BUFFER_FILES_DIRECTORY_KEY, directory);
    return this;
  }
  /** Enables dry-run mode (no offset commits). */
  public MemqConsumerBuilder<H, T> dryRun() {
    // BUG FIX: String value so MemqConsumer's getProperty() can see it.
    properties.setProperty(ConsumerConfigs.DRY_RUN_KEY, "true");
    return this;
  }
  /** Enables the streaming iterator implementation. */
  public MemqConsumerBuilder<H, T> useStreamingIterator() {
    // BUG FIX: String value so MemqConsumer's getProperty() can see it.
    properties.setProperty(ConsumerConfigs.USE_STREAMING_ITERATOR, "true");
    return this;
  }
  /** @return the consumer properties assembled so far */
  public Properties getProperties() {
    return properties;
  }
  // Fails fast if mandatory deserializer configuration is missing.
  private void validateProperties() throws PropertiesInitializationException {
    if (!properties.containsKey(ConsumerConfigs.KEY_DESERIALIZER_CLASS_KEY)) {
      throw new PropertiesInitializationException(
          "Missing header deserializer config for MemqConsumer");
    }
    if (!properties.containsKey(ConsumerConfigs.VALUE_DESERIALIZER_CLASS_KEY)) {
      throw new PropertiesInitializationException(
          "Missing value deserializer config for MemqConsumer");
    }
  }
  // Applies default Kafka notification-source settings (latest offsets,
  // string key/value deserializers, small poll batches).
  private void loadDefaultNotificationSourceProperties() {
    notificationSourceProperties.put(ConsumerConfigs.NOTIFICATION_SOURCE_TYPE_KEY, "kafka");
    notificationSourceProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
    notificationSourceProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
        StringDeserializer.class.getName());
    notificationSourceProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
        StringDeserializer.class.getName());
    notificationSourceProperties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 10);
  }
  /**
   * Builds the consumer: applies notification-source defaults, validates the
   * assembled properties and constructs the MemqConsumer.
   *
   * @return a configured MemqConsumer
   * @throws PropertiesInitializationException if mandatory config is missing
   * @throws Exception                         if consumer construction fails
   */
  public MemqConsumer<H, T> getMemqS3Consumer() throws Exception {
    loadDefaultNotificationSourceProperties();
    properties.put(ConsumerConfigs.NOTIFICATION_SOURCE_PROPS_KEY, notificationSourceProperties);
    validateProperties();
    return new MemqConsumer<>(properties);
  }
  /** Sets the Kafka bootstrap servers for the notification source. */
  public MemqConsumerBuilder<H, T> setNotificationBootstrapServer(String bootstrapServer) {
    notificationSourceProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
    return this;
  }
  /** Sets the Kafka topic that carries batch notifications. */
  public MemqConsumerBuilder<H, T> setNotificationTopic(String notificationTopic) {
    notificationSourceProperties.put(KafkaNotificationSource.NOTIFICATION_TOPIC_NAME_KEY,
        notificationTopic);
    return this;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/client/consumer/utils/properties/PropertiesInitializationException.java | memq-client/src/main/java/com/pinterest/memq/client/consumer/utils/properties/PropertiesInitializationException.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer.utils.properties;
/**
 * Thrown when MemqConsumer builder properties are missing required entries or
 * cannot be initialized.
 */
public class PropertiesInitializationException extends Exception {
  private static final long serialVersionUID = 1L;
  /**
   * @param message human-readable description of the missing/invalid property
   */
  public PropertiesInitializationException(String message) {
    super(message);
  }
  /**
   * Cause-preserving constructor (added so wrapping callers do not lose the
   * underlying failure; backward compatible with the single-arg form).
   *
   * @param message human-readable description of the failure
   * @param cause   underlying exception
   */
  public PropertiesInitializationException(String message, Throwable cause) {
    super(message, cause);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/TestMemqManager.java | memq/src/test/java/com/pinterest/memq/core/TestMemqManager.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.nio.file.Paths;
import java.util.HashMap;
import org.junit.Test;
import com.pinterest.memq.commons.protocol.TopicAssignment;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.core.config.MemqConfig;
public class TestMemqManager {
  /**
   * Verifies that topic assignments written to the topic cache file are
   * restored by a freshly constructed MemqManager.
   */
  @Test
  public void testTopicCache() throws Exception {
    new File("target/testmgrcache").delete();
    MemqConfig configuration = new MemqConfig();
    configuration.setTopicCacheFile("target/testmgrcache");
    MemqManager mgr = new MemqManager(null, configuration, new HashMap<>());
    mgr.init();
    TopicConfig topicConfig = new TopicConfig(0, 1024, 100, "test", 10, 10, 2);
    TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 10);
    topicAssignment.setStorageHandlerName("delayeddevnull");
    mgr.createTopicProcessor(topicAssignment);
    mgr.updateTopicCache();
    // a new manager instance must restore processors from the cache file
    mgr = new MemqManager(null, configuration, new HashMap<>());
    mgr.init();
    assertEquals(1, mgr.getProcessorMap().size());
    assertEquals(1, mgr.getTopicAssignment().size());
  }
  /**
   * Verifies that updateTopic applies a changed batch size to an existing
   * topic assignment.
   */
  @Test
  public void testTopicConfig() throws Exception {
    MemqConfig config = new MemqConfig();
    File tmpFile = File.createTempFile("test", "", Paths.get("/tmp").toFile());
    tmpFile.deleteOnExit();
    config.setTopicCacheFile(tmpFile.toString());
    // BUG FIX: pass the configured `config` (with the temp cache file) to the
    // manager; previously a fresh MemqConfig was passed, silently discarding
    // the cache-file setup above.
    MemqManager mgr = new MemqManager(null, config, new HashMap<>());
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    TopicAssignment topicAssignment = new TopicAssignment(topicConfig, -1);
    mgr.createTopicProcessor(topicAssignment);
    long size = mgr.getTopicAssignment().iterator().next().getBatchSizeBytes();
    topicAssignment = new TopicAssignment(new TopicConfig("test", "delayeddevnull"), -1);
    topicAssignment.setBatchSizeBytes(size + 100);
    mgr.updateTopic(topicAssignment);
    assertEquals(size + 100, mgr.getTopicAssignment().iterator().next().getBatchSizeBytes());
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/integration/CustomS3Async2OutputHandlerIntegration.java | memq/src/test/java/com/pinterest/memq/core/integration/CustomS3Async2OutputHandlerIntegration.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.integration;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.UUID;
import java.util.function.BiFunction;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import com.codahale.metrics.MetricRegistry;
import com.google.common.io.Files;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.ConsumerConfigs;
import com.pinterest.memq.client.commons.TestUtils;
import com.pinterest.memq.client.commons.serde.ByteArrayDeserializer;
import com.pinterest.memq.client.consumer.MemqConsumer;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.storage.s3.CustomS3Async2StorageHandler;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.utils.CoreUtils;
import com.salesforce.kafka.test.junit4.SharedKafkaTestResource;
import com.salesforce.kafka.test.listeners.PlainListener;
import io.netty.util.internal.ThreadLocalRandom;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.S3Object;
public class CustomS3Async2OutputHandlerIntegration {

  // S3 bucket for integration runs; supplied via -Dintegration.test.bucket
  // (null when the property is absent, in which case S3 calls will fail)
  public static final String INTEGRATION_TEST_BUCKET = System.getProperty("integration.test.bucket");
  // key prefix under which all test objects are written
  public static final String BASEPATH = "testing/integration/memq";

  // single-broker Kafka cluster used as the notification transport for the
  // storage handler; fixed on port 9092
  @ClassRule
  public static final SharedKafkaTestResource sharedKafkaTestResource = new SharedKafkaTestResource()
      .withBrokers(1).registerListener(new PlainListener().onPorts(9092));

  /**
   * Deletes any objects left under the test prefix by previous runs so the
   * assertions below only observe data written by this run.
   */
  @BeforeClass
  public static void beforeClass() {
    S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build();
    List<S3Object> listObjects = s3.listObjectsV2(ListObjectsV2Request.builder()
        .bucket(INTEGRATION_TEST_BUCKET).prefix(BASEPATH).build()).contents();
    for (S3Object s3ObjectSummary : listObjects) {
      s3.deleteObject(
          DeleteObjectRequest.builder().bucket(INTEGRATION_TEST_BUCKET).key(s3ObjectSummary.key()).build());
      System.out.println("Deleting old test data s3://" + INTEGRATION_TEST_BUCKET + "/"
          + s3ObjectSummary.key());
    }
    s3.close();
  }

  /**
   * End-to-end check of the async S3 storage handler: writes one batch of
   * messages to S3 with Kafka notifications enabled, then consumes via
   * MemqConsumer (driven by the notification topic) and verifies the message
   * count matches what was published.
   */
  @Test
  public void testSimpleS3Uploads() throws Exception {
    String notificationTopic = "testnotifications";
    sharedKafkaTestResource.getKafkaTestUtils().createTopic(notificationTopic, 1, (short) 1);
    CustomS3Async2StorageHandler handler = new CustomS3Async2StorageHandler();
    Properties outputHandlerConfig = new Properties();
    outputHandlerConfig.put("bucket", INTEGRATION_TEST_BUCKET);
    outputHandlerConfig.put("path", BASEPATH + "/test-simpleuploads");
    outputHandlerConfig.put("disableNotifications", "false");
    outputHandlerConfig.put("enableHashing", "false");
    outputHandlerConfig.put("retryTimeoutMillis", "30000");
    // serverset file pointing the notification sink at the embedded Kafka
    Files.write("localhost:9092".getBytes(), new File("target/tests3outputhandlerserverset"));
    outputHandlerConfig.put("notificationServerset", "target/tests3outputhandlerserverset");
    outputHandlerConfig.put("notificationTopic", notificationTopic);
    String topic = "test";
    handler.initWriter(outputHandlerConfig, topic, new MetricRegistry());
    List<Message> messages = new ArrayList<Message>();
    int totalMessages = publishMessages(messages);
    // checksum and size are computed over the batch exactly as the broker
    // would before handing it to the storage handler
    int checksum = CoreUtils.batchChecksum(messages);
    int objectSize = CoreUtils.batchSizeInBytes(messages);
    handler.writeOutput(objectSize, checksum, messages);
    handler.closeWriter();
    // build a consumer that reads object notifications from Kafka and then
    // fetches the payloads back
    Properties config = new Properties();
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
        sharedKafkaTestResource.getKafkaConnectString());
    config.setProperty(ConsumerConfigs.KEY_DESERIALIZER_CLASS_KEY,
        ByteArrayDeserializer.class.getCanonicalName());
    config.setProperty(ConsumerConfigs.VALUE_DESERIALIZER_CLASS_KEY,
        ByteArrayDeserializer.class.getCanonicalName());
    props.setProperty(ConsumerConfigs.GROUP_ID, "testSimpleS3Uploads1");
    props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.setProperty("notificationTopic", notificationTopic);
    config.put(ConsumerConfigs.NOTIFICATION_SOURCE_PROPS_KEY, props);
    config.setProperty(ConsumerConfigs.CLUSTER, topic);
    config.setProperty(ConsumerConfigs.CLIENT_ID, topic);
    MemqConsumer<byte[], byte[]> consumer = new MemqConsumer<>(config);
    consumer.subscribe(topic);
    Iterator<MemqLogMessage<byte[], byte[]>> iterator = consumer.poll(Duration.ofSeconds(10));
    int counts = 0;
    while (iterator.hasNext()) {
      iterator.next();
      counts++;
    }
    // every logical message published must round-trip through S3
    assertEquals(totalMessages, counts);
    consumer.close();
  }

  /**
   * Fills {@code messages} with 100-149 batched Message objects, each
   * containing 100 serialized log messages, and returns the total logical
   * message count.
   */
  private int publishMessages(List<Message> messages) throws IOException {
    BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> (base + k).getBytes();
    long baseRequestId = ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE);
    int totalMessages = 0;
    // 02/16/2021: increased number of test messages to 100+ to test an edge case that causes corrupted message for
    // batches with more than 64 messages (potentially caused by reactive-stream client)
    for (int i = 0; i < 100 + ThreadLocalRandom.current().nextInt(50); i++) {
      Message m = new Message(1024 * 100, true);
      m.setClientRequestId(baseRequestId + i);
      m.setServerRequestId(baseRequestId + 1000 + i);
      byte[] message = TestUtils.createMessage(UUID.randomUUID().toString(), getLogMessageBytes,
          100, true, Compression.NONE, null, false);
      m.getBuf().writeBytes(message);
      messages.add(m);
      totalMessages += 100; // 100 logical messages per batched Message
    }
    return totalMessages;
  }

  /**
   * Value holder pairing a fetched payload with the S3 notification metadata
   * it came from. NOTE(review): appears unused within this file; presumably
   * kept for other integration tests — confirm before removing.
   */
  public static class ConsumerResponse {
    private byte[] byteArray;       // raw object payload
    private JsonObject notification; // notification message as received
    private String bucket;          // S3 bucket of the object
    private String key;             // S3 key of the object
    private String eTag;            // S3 ETag of the object

    public ConsumerResponse(byte[] byteArray,
                            JsonObject notification,
                            String bucket,
                            String key,
                            String eTag) {
      this.byteArray = byteArray;
      this.notification = notification;
      this.bucket = bucket;
      this.key = key;
      this.eTag = eTag;
    }

    public byte[] getByteArray() {
      return byteArray;
    }

    public JsonObject getNotification() {
      return notification;
    }

    public String getBucket() {
      return bucket;
    }

    public String getKey() {
      return key;
    }

    public String geteTag() {
      return eTag;
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/integration/TestMemqReadBrokers.java | memq/src/test/java/com/pinterest/memq/core/integration/TestMemqReadBrokers.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.integration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.time.Duration;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.ConsumerConfigs;
import com.pinterest.memq.client.commons.serde.ByteArraySerializer;
import com.pinterest.memq.client.consumer.MemqConsumer;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.client.producer.netty.MemqNettyProducer;
import com.pinterest.memq.client.producer.netty.MemqNettyRequest;
import com.pinterest.memq.commons.CloseableIterator;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.storage.fs.FileSystemStorageHandler;
import com.pinterest.memq.commons.storage.s3.KafkaNotificationSink;
import com.pinterest.memq.core.MemqManager;
import com.pinterest.memq.core.clustering.MemqGovernor;
import com.pinterest.memq.core.config.EnvironmentProvider;
import com.pinterest.memq.core.config.MemqConfig;
import com.pinterest.memq.core.config.NettyServerConfig;
import com.pinterest.memq.core.rpc.MemqNettyServer;
import com.pinterest.memq.core.rpc.TestAuditor;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import com.pinterest.memq.core.utils.MiscUtils;
import com.salesforce.kafka.test.junit4.SharedKafkaTestResource;
import com.salesforce.kafka.test.listeners.PlainListener;
public class TestMemqReadBrokers {

  // single-broker Kafka on port 9092 used for notification topics
  @ClassRule
  public static final SharedKafkaTestResource sharedKafkaTestResource = new SharedKafkaTestResource()
      .withBrokers(1).registerListener(new PlainListener().onPorts(9092));

  // per-test scratch directory used as the filesystem storage handler's root
  @Rule
  public TemporaryFolder folder = new TemporaryFolder();

  /**
   * End-to-end produce/consume through a READ_WRITE broker backed by the
   * filesystem storage handler. Produces 1M messages with GZIP (unaudited),
   * then 1M with no compression (audited), and finally consumes everything
   * back through the broker, expecting 2M messages total.
   */
  @Test
  public void testProduceConsumeWithReadBrokers() throws Exception {
    String notificationTopic = "notify_topic_1";
    TestMemqClientServerIntegration.createTopic(notificationTopic, "localhost:9092",
        "audit_topic_1");
    // serverset file pointing the Kafka notification sink at the embedded broker
    Files.write(new File("target/test_broker_read_producer_consumer").toPath(),
        "localhost:9092".getBytes());
    long startMs = System.currentTimeMillis();
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) 23434);
    configuration.setNettyServerConfig(nettyServerConfig);
    configuration.setBrokerType(BrokerType.READ_WRITE);
    TopicConfig topicConfig = new TopicConfig("test", "filesystem");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties outputHandlerConfig = new Properties();
    outputHandlerConfig.setProperty(FileSystemStorageHandler.STORAGE_DIRS,
        folder.newFolder().getAbsolutePath());
    outputHandlerConfig.setProperty(FileSystemStorageHandler.NOTIFICATIONS_DISABLE, "false");
    outputHandlerConfig.setProperty(KafkaNotificationSink.NOTIFICATION_SERVERSET,
        "target/test_broker_read_producer_consumer");
    // force reads to go through the broker rather than the local filesystem
    outputHandlerConfig.setProperty("read.local.enabled", "false");
    outputHandlerConfig.setProperty(KafkaNotificationSink.NOTIFICATION_TOPIC, notificationTopic);
    topicConfig.setStorageHandlerConfig(outputHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    // run tests
    int timeout = 60_000;
    // first pass: GZIP compression, auditing disabled
    MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", nettyServerConfig.getPort()), "test", 10, 1000000,
        Compression.GZIP, false, 10, timeout, "local", timeout, auditConfigs, null);
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    byte[] value = UUID.randomUUID().toString().getBytes();
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    Timer timer = MiscUtils.oneMinuteWindowTimer(new MetricRegistry(), "requests.write.time");
    for (int i = 0; i < 1_000_000; i++) {
      Timer.Context ctx = timer.time();
      Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
      ctx.stop();
      futures.add(writeToTopic);
    }
    System.out.println("Sent all writes: " + (System.currentTimeMillis() - startMs));
    producer.finalizeRequest();
    for (Future<MemqWriteResult> future : futures) {
      future.get(30, TimeUnit.SECONDS);
    }
    System.out.println("Completed all writes: " + (System.currentTimeMillis() - startMs));
    Thread.sleep(500);
    producer.close();
    // server should have written 55 batches
    assertEquals(MemqNettyRequest.getAckedByteCounter(), MemqNettyRequest.getByteCounter());
    TestAuditor.reset();
    // second pass: no compression, auditing enabled — every write should be audited
    producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", nettyServerConfig.getPort()), topicConfig.getTopic(), 10,
        1000000, Compression.NONE, true, 10, timeout, "local", timeout, auditConfigs, null);
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    value = UUID.randomUUID().toString().getBytes();
    futures = new LinkedHashSet<>();
    timer = MiscUtils.oneMinuteWindowTimer(new MetricRegistry(), "requests.write.time");
    for (int i = 0; i < 1_000_000; i++) {
      Timer.Context ctx = timer.time();
      Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
      ctx.stop();
      futures.add(writeToTopic);
    }
    System.out.println("Sent all writes: " + (System.currentTimeMillis() - startMs));
    producer.finalizeRequest();
    for (Future<MemqWriteResult> future : futures) {
      future.get(30, TimeUnit.SECONDS);
    }
    System.out.println("Completed all writes: " + (System.currentTimeMillis() - startMs));
    Thread.sleep(500);
    producer.close();
    assertEquals(futures.size(), TestAuditor.getAuditMessageList().size());
    assertEquals(MemqNettyRequest.getAckedByteCounter(), MemqNettyRequest.getByteCounter());
    // consume back through the broker (non-direct consumer)
    Properties props = new Properties();
    props.setProperty(ConsumerConfigs.BOOTSTRAP_SERVERS,
        "localhost:" + nettyServerConfig.getPort());
    props.setProperty(ConsumerConfigs.DIRECT_CONSUMER, "false");
    props.setProperty(ConsumerConfigs.GROUP_ID, "test_111_" + System.currentTimeMillis());
    MemqConsumer<byte[], byte[]> consumer = new MemqConsumer<byte[], byte[]>(props);
    // subscribe to the topic and attempt a poll
    consumer.subscribe(topicConfig.getTopic());
    int c = 0;
    CloseableIterator<MemqLogMessage<byte[], byte[]>> poll = consumer.poll(Duration.ofSeconds(5));
    while (poll.hasNext()) {
      poll.next();
      c++;
    }
    // both passes must be readable: 1M + 1M messages
    assertEquals(2_000_000, c);
    assertNotNull(consumer.getStorageHandler());
    consumer.close();
    server.stop();
  }

  /**
   * Same produce/consume round trip as above, but with the filesystem storage
   * handler's sendfile optimization enabled; 100k messages are produced and
   * all must be consumable.
   */
  @Test
  public void testProduceConsumeWithReadBrokersSendFile() throws Exception {
    String notificationTopic = "notify_topic_2";
    TestMemqClientServerIntegration.createTopic(notificationTopic, "localhost:9092",
        "audit_topic_2");
    Files.write(new File("target/test_broker_read_producer_consumer_sendfile").toPath(),
        "localhost:9092".getBytes());
    long startMs = System.currentTimeMillis();
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) 23435);
    configuration.setNettyServerConfig(nettyServerConfig);
    configuration.setBrokerType(BrokerType.READ_WRITE);
    TopicConfig topicConfig = new TopicConfig("test", "filesystem");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties storageHandlerConfig = new Properties();
    storageHandlerConfig.setProperty(FileSystemStorageHandler.STORAGE_DIRS,
        folder.newFolder().getAbsolutePath());
    storageHandlerConfig.setProperty(FileSystemStorageHandler.NOTIFICATIONS_DISABLE, "false");
    storageHandlerConfig.setProperty(KafkaNotificationSink.NOTIFICATION_SERVERSET,
        "target/test_broker_read_producer_consumer_sendfile");
    storageHandlerConfig.setProperty("read.local.enabled", "false");
    // exercise the zero-copy sendfile read path
    storageHandlerConfig.setProperty(FileSystemStorageHandler.OPTIMIZATION_SENDFILE, "true");
    storageHandlerConfig.setProperty(KafkaNotificationSink.NOTIFICATION_TOPIC, notificationTopic);
    topicConfig.setStorageHandlerConfig(storageHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    // run tests
    int timeout = 60_000;
    MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", nettyServerConfig.getPort()), "test", 10, 1000000,
        Compression.GZIP, false, 10, timeout, "local", timeout, auditConfigs, null);
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    byte[] value = UUID.randomUUID().toString().getBytes();
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    Timer timer = MiscUtils.oneMinuteWindowTimer(new MetricRegistry(), "requests.write.time");
    // 1_00_000 == 100,000 messages
    for (int i = 0; i < 1_00_000; i++) {
      Timer.Context ctx = timer.time();
      Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
      ctx.stop();
      futures.add(writeToTopic);
    }
    System.out.println("Sent all writes: " + (System.currentTimeMillis() - startMs));
    producer.finalizeRequest();
    for (Future<MemqWriteResult> future : futures) {
      future.get(30, TimeUnit.SECONDS);
    }
    System.out.println("Completed all writes: " + (System.currentTimeMillis() - startMs));
    Thread.sleep(500);
    producer.close();
    Properties props = new Properties();
    props.setProperty(ConsumerConfigs.BOOTSTRAP_SERVERS,
        "localhost:" + nettyServerConfig.getPort());
    props.setProperty(ConsumerConfigs.DIRECT_CONSUMER, "false");
    props.setProperty(ConsumerConfigs.GROUP_ID, "test_111_" + System.currentTimeMillis());
    MemqConsumer<byte[], byte[]> consumer = new MemqConsumer<byte[], byte[]>(props);
    // subscribe to the topic and attempt a poll
    consumer.subscribe(topicConfig.getTopic());
    int c = 0;
    CloseableIterator<MemqLogMessage<byte[], byte[]>> poll = consumer.poll(Duration.ofSeconds(5));
    while (poll.hasNext()) {
      poll.next();
      c++;
    }
    assertEquals(1_00_000, c);
    assertNotNull(consumer.getStorageHandler());
    consumer.close();
    server.stop();
  }

  /**
   * Verifies offset-based reads against a read broker with sendfile enabled:
   * after producing 100k messages, getLogMessagesAtOffsets() at offset 0 must
   * return at least one message.
   */
  @Test
  public void testReadBrokerHeaders() throws Exception {
    String notificationTopic = "notify_topic_3";
    TestMemqClientServerIntegration.createTopic(notificationTopic, "localhost:9092",
        "audit_topic_3");
    Files.write(new File("target/test_broker_read_headers").toPath(), "localhost:9092".getBytes());
    long startMs = System.currentTimeMillis();
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    // NOTE(review): same port (23435) as the sendfile test above; fine when
    // tests run sequentially, but would clash if run in parallel — confirm
    nettyServerConfig.setPort((short) 23435);
    configuration.setNettyServerConfig(nettyServerConfig);
    configuration.setBrokerType(BrokerType.READ_WRITE);
    TopicConfig topicConfig = new TopicConfig("test", "filesystem");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties storageHandlerConfig = new Properties();
    storageHandlerConfig.setProperty(FileSystemStorageHandler.STORAGE_DIRS,
        folder.newFolder().getAbsolutePath());
    storageHandlerConfig.setProperty(FileSystemStorageHandler.NOTIFICATIONS_DISABLE, "false");
    storageHandlerConfig.setProperty(KafkaNotificationSink.NOTIFICATION_SERVERSET,
        "target/test_broker_read_headers");
    storageHandlerConfig.setProperty("read.local.enabled", "false");
    storageHandlerConfig.setProperty(FileSystemStorageHandler.OPTIMIZATION_SENDFILE, "true");
    storageHandlerConfig.setProperty(KafkaNotificationSink.NOTIFICATION_TOPIC, notificationTopic);
    topicConfig.setStorageHandlerConfig(storageHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    // run tests
    int timeout = 60_000;
    MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", nettyServerConfig.getPort()), "test", 10, 1000000,
        Compression.GZIP, false, 10, timeout, "local", timeout, auditConfigs, null);
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    byte[] value = UUID.randomUUID().toString().getBytes();
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    Timer timer = MiscUtils.oneMinuteWindowTimer(new MetricRegistry(), "requests.write.time");
    for (int i = 0; i < 1_00_000; i++) {
      Timer.Context ctx = timer.time();
      Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
      ctx.stop();
      futures.add(writeToTopic);
    }
    System.out.println("Sent all writes: " + (System.currentTimeMillis() - startMs));
    producer.finalizeRequest();
    for (Future<MemqWriteResult> future : futures) {
      future.get(30, TimeUnit.SECONDS);
    }
    System.out.println("Completed all writes: " + (System.currentTimeMillis() - startMs));
    Thread.sleep(500);
    producer.close();
    Properties props = new Properties();
    props.setProperty(ConsumerConfigs.BOOTSTRAP_SERVERS,
        "localhost:" + nettyServerConfig.getPort());
    props.setProperty(ConsumerConfigs.DIRECT_CONSUMER, "false");
    props.setProperty(ConsumerConfigs.GROUP_ID, "test_111_" + System.currentTimeMillis());
    MemqConsumer<byte[], byte[]> consumer = new MemqConsumer<byte[], byte[]>(props);
    // subscribe to the topic and attempt a poll
    consumer.subscribe(topicConfig.getTopic());
    ExecutorService es = Executors.newSingleThreadExecutor(new DaemonThreadFactory());
    // fetch messages starting at notification/offset 0 in every dimension
    List<MemqLogMessage<byte[], byte[]>> messages = consumer.getLogMessagesAtOffsets(
        Duration.ofMillis(5000), new int[] { 0 }, new long[] { 0 }, new int[] { 0 },
        new int[] { 0 }, es);
    assertTrue(messages.size() > 0);
    consumer.close();
    es.shutdown();
    server.stop();
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/integration/TestMemqClientServerPerf.java | memq/src/test/java/com/pinterest/memq/core/integration/TestMemqClientServerPerf.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.integration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.serde.ByteArraySerializer;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.client.producer.netty.MemqNettyProducer;
import com.pinterest.memq.client.producer.netty.MemqNettyRequest;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.storage.DelayedDevNullStorageHandler;
import com.pinterest.memq.core.MemqManager;
import com.pinterest.memq.core.clustering.MemqGovernor;
import com.pinterest.memq.core.config.EnvironmentProvider;
import com.pinterest.memq.core.config.MemqConfig;
import com.pinterest.memq.core.config.NettyServerConfig;
import com.pinterest.memq.core.rpc.MemqNettyServer;
import com.pinterest.memq.core.rpc.TestAuditor;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import com.pinterest.memq.core.utils.MiscUtils;
public class TestMemqClientServerPerf {

  // remove any stale topic cache so each run starts from a clean manager state
  @Before
  public void before() {
    new File(new MemqConfig().getTopicCacheFile()).delete();
  }

  // clean up the topic cache and reset shared static counters so later tests
  // in the JVM are not polluted by this run
  @After
  public void after() {
    new File(new MemqConfig().getTopicCacheFile()).delete();
    DelayedDevNullStorageHandler.reset();
    TestAuditor.reset();
    MemqNettyRequest.reset();
  }

  /**
   * Throughput smoke test: 4 producer threads each write 1M messages to a
   * local broker backed by the delayed dev-null storage handler. Asserts
   * that the bytes received by the server equal the bytes written to storage,
   * and that the whole run finishes in under 15 seconds.
   */
  @Test
  public void testServerPerf() throws Exception {
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) 23437);
    configuration.setNettyServerConfig(nettyServerConfig);
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    topicConfig.setBufferSize(4 * 1024 * 1024);
    topicConfig.setBatchSizeMB(4);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties handlerConfig = new Properties();
    // simulate a storage backend with 1-2 ms write latency
    handlerConfig.setProperty("delay.min.millis", "1");
    handlerConfig.setProperty("delay.max.millis", "2");
    topicConfig.setStorageHandlerConfig(handlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    int numOfProducerThreads = 4;
    ExecutorService es = Executors.newFixedThreadPool(numOfProducerThreads,
        DaemonThreadFactory.INSTANCE);
    final int TOTAL = 1_000_000; // messages per producer thread
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < numOfProducerThreads; i++) {
      es.submit(() -> {
        try {
          Properties auditConfigs = new Properties();
          auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
          // run tests
          MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster",
              new InetSocketAddress("localhost", 23437), "test", 10, 3000000, Compression.NONE,
              false, 10, 60_000, "local", 60_000, auditConfigs, null);
          producer.setKeySerializer(new ByteArraySerializer());
          producer.setValueSerializer(new ByteArraySerializer());
          producer.setDebug();
          byte[] value = UUID.randomUUID().toString().getBytes();
          Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
          for (int j = 0; j < TOTAL; j++) {
            Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
            futures.add(writeToTopic);
          }
          producer.finalizeRequest();
          for (Future<MemqWriteResult> future : futures) {
            future.get();
          }
          Thread.sleep(500);
          producer.close();
        } catch (Exception e) {
          e.printStackTrace();
        }
      });
    }
    // background reporter printing per-second server throughput until stopped
    final AtomicBoolean flg = new AtomicBoolean(true);
    Executors.newCachedThreadPool(DaemonThreadFactory.INSTANCE).submit(() -> {
      long prevBytes = 0;
      while (flg.get()) {
        long curBytes = DelayedDevNullStorageHandler.getByteCounter();
        System.out.println("Server Throughput: " + (curBytes - prevBytes) / 1024 / 1024 + "MB/s");
        prevBytes = curBytes;
        try {
          Thread.sleep(1_000);
        } catch (InterruptedException e) {
          break;
        }
      }
    });
    es.shutdown();
    es.awaitTermination(100, TimeUnit.SECONDS);
    long endTime = System.currentTimeMillis();
    long duration = (endTime - startTime) / 1000;
    System.out.println("Took " + duration + " seconds");
    // no bytes lost between the netty ingress and the storage handler
    assertEquals(MemqNettyRequest.getByteCounter(), DelayedDevNullStorageHandler.getByteCounter());
    flg.set(false);
    server.getChildGroup().shutdownGracefully().sync();
    server.getParentGroup().shutdownGracefully().sync();
    server.getServerChannelFuture().channel().closeFuture().sync();
    assertTrue("Performance test should take less than 15 seconds", duration < 15);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/integration/TestMemqClientServerIntegration.java | memq/src/test/java/com/pinterest/memq/core/integration/TestMemqClientServerIntegration.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.integration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.security.KeyStore;
import java.security.KeyStore.PrivateKeyEntry;
import java.security.cert.X509Certificate;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.ConsumerConfigs;
import com.pinterest.memq.client.commons.MemqCommonClient;
import com.pinterest.memq.client.commons.MemqNettyClientSideResponseHandler;
import com.pinterest.memq.client.commons.ProducerConfigs;
import com.pinterest.memq.client.commons.ResponseHandler;
import com.pinterest.memq.client.commons.serde.ByteArraySerializer;
import com.pinterest.memq.client.consumer.MemqConsumer;
import com.pinterest.memq.client.producer.MemqProducer;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.client.producer.netty.MemqNettyProducer;
import com.pinterest.memq.client.producer.netty.MemqNettyRequest;
import com.pinterest.memq.commons.BatchHeader;
import com.pinterest.memq.commons.BatchHeader.IndexEntry;
import com.pinterest.memq.commons.config.SSLConfig;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.TopicMetadata;
import com.pinterest.memq.commons.storage.DelayedDevNullStorageHandler;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.WriteFailedException;
import com.pinterest.memq.core.MemqManager;
import com.pinterest.memq.core.clustering.MemqGovernor;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.config.EnvironmentProvider;
import com.pinterest.memq.core.config.MemqConfig;
import com.pinterest.memq.core.config.NettyServerConfig;
import com.pinterest.memq.core.rpc.MemqNettyServer;
import com.pinterest.memq.core.rpc.MemqRequestDecoder;
import com.pinterest.memq.core.rpc.MemqResponseEncoder;
import com.pinterest.memq.core.rpc.TestAuditor;
import com.pinterest.memq.core.utils.MiscUtils;
import com.salesforce.kafka.test.junit4.SharedKafkaTestResource;
import com.salesforce.kafka.test.listeners.PlainListener;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.handler.ssl.util.SelfSignedCertificate;
/**
 * End-to-end integration tests that exercise the MemQ producer/consumer client
 * stack against a real in-process {@link MemqNettyServer} (plain TCP and SSL),
 * plus codec-level round-trip tests built on Netty {@link EmbeddedChannel}s.
 *
 * NOTE(review): the tests bind fixed localhost ports (22311, 23433-23436) and
 * rely on static counters in MemqNettyRequest / DelayedDevNullStorageHandler /
 * TestAuditor, so they are not safe to run concurrently within one JVM.
 */
public class TestMemqClientServerIntegration {

  // Keystore/truststore type used by the SSL test.
  private static final String JKS = "JKS";

  // Single-broker Kafka cluster on port 9092 shared by all tests in this class
  // (backs the notification/audit plumbing).
  @ClassRule
  public static final SharedKafkaTestResource sharedKafkaTestResource = new SharedKafkaTestResource()
      .withBrokers(1).registerListener(new PlainListener().onPorts(9092));

  /** Deletes the on-disk topic cache so every test starts from a clean state. */
  @Before
  public void before() {
    new File(new MemqConfig().getTopicCacheFile()).delete();
  }

  /** Clears the topic cache and resets all static test counters between tests. */
  @After
  public void after() {
    new File(new MemqConfig().getTopicCacheFile()).delete();
    DelayedDevNullStorageHandler.reset();
    TestAuditor.reset();
    MemqNettyRequest.reset();
  }

  /**
   * Round-trips one write request through the server-side codec pipeline and
   * then the client-side response pipeline using EmbeddedChannels only — no
   * sockets. The MemqRequestDecoder is built with null managers, so the server
   * side is expected to answer with an internal error (response code 500).
   */
  @Test
  public void testProducerAndServer() throws IOException {
    long currentRequestId = 1213323L;
    MemqNettyRequest request = new MemqNettyRequest("test", currentRequestId, Compression.ZSTD,
        null, false, 1024 * 1024 * 2, 100, null, null, 200, false);
    // 100KB all-zero payload.
    ByteBuffer buf = ByteBuffer.allocate(1024 * 100);
    byte[] payload = buf.array();
    RequestPacket requestPacket = request.getWriteRequestPacket(Unpooled.wrappedBuffer(payload));
    ByteBuf output = Unpooled.buffer(requestPacket.getSize(RequestType.PROTOCOL_VERSION));
    requestPacket.write(output, RequestType.PROTOCOL_VERSION);
    MetricRegistry registry = new MetricRegistry();
    // Server-side pipeline: length-framed decode -> request decode; encoder for
    // the outbound response.
    EmbeddedChannel ech = new EmbeddedChannel(new MemqResponseEncoder(registry),
        new LengthFieldBasedFrameDecoder(ByteOrder.BIG_ENDIAN, 2 * 1024 * 1024, 0, 4, 0, 0, false),
        new MemqRequestDecoder(null, null, null, registry));
    ech.writeInbound(output);
    ech.checkException();
    assertEquals(1, ech.outboundMessages().size());
    ByteBuf val = ech.readOutbound();
    ech.close();
    // Client-side pipeline: feed the encoded response back in and capture the
    // decoded ResponsePacket via the response-key callback map.
    String key = MemqCommonClient.makeResponseKey(requestPacket);
    ResponseHandler responseHandler = new ResponseHandler();
    final AtomicReference<ResponsePacket> p = new AtomicReference<>();
    Map<String, Consumer<ResponsePacket>> requestMap = new HashMap<>();
    requestMap.put(key, rp -> p.set(rp));
    responseHandler.setRequestMap(requestMap);
    ech = new EmbeddedChannel(new LengthFieldBasedFrameDecoder(ByteOrder.BIG_ENDIAN,
        4 * 1024 * 1024, 0, Integer.BYTES, 0, 0, false),
        new MemqNettyClientSideResponseHandler(responseHandler));
    ech.writeInbound(val);
    ech.checkException();
    // Null managers on the server side => internal server error expected.
    assertEquals(500, p.get().getResponseCode());
  }

  /**
   * Verifies that writes to a topic whose metadata points at a write broker
   * this server does not own (127.0.0.2) fail on the client with an
   * ExecutionException, i.e. the redirection path is exercised instead of a
   * local ack.
   *
   * NOTE(review): the server listens on 23433 but the producer targets 23434 —
   * confirm whether this mismatch is intentional (it would also make the
   * initial connection itself fail, which masks the redirection behavior).
   */
  @Test
  public void testRedirection() throws Exception {
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) 23433);
    configuration.setNettyServerConfig(nettyServerConfig);
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties outputHandlerConfig = new Properties();
    outputHandlerConfig.setProperty("delay.min.millis", "1");
    outputHandlerConfig.setProperty("delay.max.millis", "2");
    topicConfig.setStorageHandlerConfig(outputHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    // "test2" metadata deliberately advertises a foreign broker address so that
    // writes cannot be served locally.
    TopicMetadata md = new TopicMetadata("test2", "delayeddevnull", new Properties());
    md.getWriteBrokers()
        .add(new Broker("127.0.0.2", (short) 9092, "2xl", "us-east-1a", BrokerType.WRITE, new HashSet<>()));
    governor.getTopicMetadataMap().put("test2", md);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    // run tests
    MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", 23434), "test2", 10, 1000000, Compression.NONE, false,
        10, 60_000, "local", 60_000, auditConfigs, null);
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    byte[] value = UUID.randomUUID().toString().getBytes();
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    for (int i = 0; i < 100; i++) {
      Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
      futures.add(writeToTopic);
    }
    producer.finalizeRequest();
    System.out.println("Writes completed");
    try {
      for (Future<MemqWriteResult> future : futures) {
        future.get();
      }
      fail("Must throw execution exception since redirection is being invoked");
    } catch (ExecutionException e) {
      // expected: writes against the redirected topic must not be acked locally
    }
    producer.close();
    server.stop();
  }

  /**
   * Full produce path against a local server on port 23434. Runs two passes of
   * 1M writes (the second with the producer's boolean flag after the
   * compression argument flipped to true) and cross-checks the producer-side
   * byte/ack counters against the dev-null storage handler and audit stream.
   */
  @Test
  public void testLocalServer() throws Exception {
    long startMs = System.currentTimeMillis();
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) 23434);
    configuration.setNettyServerConfig(nettyServerConfig);
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties outputHandlerConfig = new Properties();
    outputHandlerConfig.setProperty("delay.min.millis", "1");
    outputHandlerConfig.setProperty("delay.max.millis", "2");
    topicConfig.setStorageHandlerConfig(outputHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    // run tests
    MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", 23434), "test", 10, 1000000, Compression.NONE, false, 10,
        60_000, "local", 60_000, auditConfigs, null);
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    byte[] value = UUID.randomUUID().toString().getBytes();
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    Timer timer = MiscUtils.oneMinuteWindowTimer(new MetricRegistry(), "requests.write.time");
    for (int i = 0; i < 1_000_000; i++) {
      Timer.Context ctx = timer.time();
      Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
      ctx.stop();
      // Periodically print the p99 write latency in milliseconds.
      if (i % 10000 == 0) {
        System.out.println(timer.getSnapshot().get99thPercentile() / 1_000_000);
      }
      futures.add(writeToTopic);
    }
    System.out.println("Sent all writes: " + (System.currentTimeMillis() - startMs));
    producer.finalizeRequest();
    for (Future<MemqWriteResult> future : futures) {
      future.get(30, TimeUnit.SECONDS);
    }
    System.out.println("Completed all writes: " + (System.currentTimeMillis() - startMs));
    // Give the async audit/storage paths a moment to drain before asserting.
    Thread.sleep(500);
    producer.close();
    // server should have written 55 batches
    assertEquals(futures.size(), DelayedDevNullStorageHandler.getCounter());
    assertEquals(futures.size(), TestAuditor.getAuditMessageList().size());
    assertEquals(MemqNettyRequest.getByteCounter(), DelayedDevNullStorageHandler.getByteCounter());
    assertEquals(MemqNettyRequest.getByteCounter(), DelayedDevNullStorageHandler.getInputStreamCounter());
    assertEquals(MemqNettyRequest.getAckedByteCounter(), MemqNettyRequest.getByteCounter());
    TestAuditor.reset();
    // Second pass: same workload with the boolean flag after the compression
    // argument set to true — NOTE(review): confirm which producer option this
    // positional flag controls.
    producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", 23434), "test", 10, 1000000, Compression.NONE, true, 10,
        60_000, "local", 60_000, auditConfigs, null);
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    value = UUID.randomUUID().toString().getBytes();
    futures = new LinkedHashSet<>();
    timer = MiscUtils.oneMinuteWindowTimer(new MetricRegistry(), "requests.write.time");
    for (int i = 0; i < 1_000_000; i++) {
      Timer.Context ctx = timer.time();
      Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
      ctx.stop();
      if (i % 10000 == 0) {
        System.out.println(timer.getSnapshot().get99thPercentile() / 1_000_000);
      }
      futures.add(writeToTopic);
    }
    System.out.println("Sent all writes: " + (System.currentTimeMillis() - startMs));
    producer.finalizeRequest();
    for (Future<MemqWriteResult> future : futures) {
      future.get(30, TimeUnit.SECONDS);
    }
    System.out.println("Completed all writes: " + (System.currentTimeMillis() - startMs));
    Thread.sleep(500);
    producer.close();
    assertEquals(futures.size(), TestAuditor.getAuditMessageList().size());
    assertEquals(MemqNettyRequest.getAckedByteCounter(), MemqNettyRequest.getByteCounter());
    // Tear down the Netty server event loops and wait for channel close.
    server.getChildGroup().shutdownGracefully().sync();
    server.getParentGroup().shutdownGracefully().sync();
    server.getServerChannelFuture().channel().closeFuture().sync();
  }

  /**
   * Same produce path as {@link #testLocalServer()} but over TLS: builds a
   * self-signed certificate, stores it in a JKS keystore used as both keystore
   * and truststore, and points client and server at it.
   */
  @Test
  public void testLocalServerSSL() throws Exception {
    int port = 23435;
    String keystorePath = "target/testks.jks";
    new File(keystorePath).delete();
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) port);
    // Generate a throwaway self-signed cert and persist it into a JKS keystore.
    SelfSignedCertificate cert = new SelfSignedCertificate("test-memq.pinterest.com");
    KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
    ks.load(null, null);
    ks.setCertificateEntry("default", cert.cert());
    ks.setEntry("default", new PrivateKeyEntry(cert.key(), new X509Certificate[] { cert.cert() }),
        new KeyStore.PasswordProtection("test".toCharArray()));
    String keystorePassword = "test";
    ks.store(new FileOutputStream(new File(keystorePath)), keystorePassword.toCharArray());
    SSLConfig sslConfig = new SSLConfig();
    sslConfig.setProtocols(Collections.singletonList("TLSv1.2"));
    sslConfig.setKeystorePassword(keystorePassword);
    sslConfig.setKeystoreType(JKS);
    sslConfig.setKeystorePath(keystorePath);
    sslConfig.setTruststorePassword(keystorePassword);
    sslConfig.setTruststorePath(keystorePath);
    sslConfig.setTruststoreType(JKS);
    nettyServerConfig.setSslConfig(sslConfig);
    configuration.setNettyServerConfig(nettyServerConfig);
    // Note: the topic name is the literal "test" (keystorePassword happens to
    // hold the same string and is reused here).
    TopicConfig topicConfig = new TopicConfig(keystorePassword, "delayeddevnull");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties outputHandlerConfig = new Properties();
    outputHandlerConfig.setProperty("delay.min.millis", "1");
    outputHandlerConfig.setProperty("delay.max.millis", "2");
    topicConfig.setStorageHandlerConfig(outputHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    // run tests
    MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", port), keystorePassword, 10, 1000000, Compression.NONE,
        false, 10, 60_000, "local", 60_000, auditConfigs, sslConfig);
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    byte[] value = UUID.randomUUID().toString().getBytes();
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    Timer timer = MiscUtils.oneMinuteWindowTimer(new MetricRegistry(), "requests.write.time");
    for (int i = 0; i < 1_000_000; i++) {
      Timer.Context ctx = timer.time();
      Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
      ctx.stop();
      if (i % 10000 == 0) {
        System.out.println("[" + i + "/1000000]" +
            "\twrite time: " + timer.getSnapshot().get99thPercentile() / 1_000_000);
      }
      futures.add(writeToTopic);
    }
    producer.finalizeRequest();
    for (Future<MemqWriteResult> future : futures) {
      future.get();
    }
    System.out.println("Completed all writes");
    Thread.sleep(1000);
    producer.close();
    // server should have written 55 batches
    assertEquals(futures.size(), DelayedDevNullStorageHandler.getCounter());
    assertEquals(futures.size(), TestAuditor.getAuditMessageList().size());
    server.getChildGroup().shutdownGracefully().sync();
    server.getParentGroup().shutdownGracefully().sync();
    server.getServerChannelFuture().channel().closeFuture().sync();
  }

  /**
   * Exercises topic-metadata lookup over the wire from both the consumer and
   * the producer, including the property-driven {@link MemqProducer#getInstance}
   * construction path.
   */
  @Test
  public void testMetadataRequests() throws Exception {
    short port = 22311;
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) port);
    configuration.setNettyServerConfig(nettyServerConfig);
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties outputHandlerConfig = new Properties();
    outputHandlerConfig.setProperty("delay.min.millis", "1");
    outputHandlerConfig.setProperty("delay.max.millis", "2");
    topicConfig.setStorageHandlerConfig(outputHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    // Register metadata for "test2" that points back at this server so lookups
    // succeed end to end.
    TopicMetadata md = new TopicMetadata("test2", "delayeddevnull", new Properties());
    md.getWriteBrokers()
        .add(new Broker("127.0.0.1", port, "2xl", "us-east-1a", BrokerType.WRITE, new HashSet<>()));
    governor.getTopicMetadataMap().put("test2", md);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    Properties properties = new Properties();
    properties.put(ConsumerConfigs.CLUSTER, "test");
    properties.put(ConsumerConfigs.CLIENT_ID, "test");
    properties.put(ConsumerConfigs.GROUP_ID, "test231231");
    properties.put(ConsumerConfigs.DRY_RUN_KEY, "true");
    properties.put(ConsumerConfigs.BOOTSTRAP_SERVERS, "localhost:" + port);
    properties.put(ConsumerConfigs.DIRECT_CONSUMER, "false");
    StorageHandler input = getEmptyTestInput();
    MemqConsumer<byte[], byte[]> consumer = new MemqConsumer<byte[], byte[]>(properties, input);
    long ts = System.currentTimeMillis();
    TopicMetadata topicMetadata = consumer.getTopicMetadata("test2", 10000);
    ts = System.currentTimeMillis() - ts;
    System.out.println("Fetched metadata in:" + ts + "ms");
    assertNotNull(topicMetadata);
    assertEquals("test2", topicMetadata.getTopicName());
    assertEquals(1, topicMetadata.getWriteBrokers().size());
    consumer.close();
    // test producer metadata requests
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    MemqProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", port), "test2", 10, 1000000, Compression.NONE, false, 10,
        60_000, "local", 60_000, auditConfigs, null);
    producer.awaitConnect(100, TimeUnit.SECONDS);
    producer.close();
    // Same flow via the properties-based factory entry point.
    properties.setProperty(ProducerConfigs.CLIENT_TYPE, "TCP");
    properties.setProperty(ProducerConfigs.KEY_SERIALIZER,
        ByteArraySerializer.class.getCanonicalName());
    properties.setProperty(ProducerConfigs.VALUE_SERIALIZER,
        ByteArraySerializer.class.getCanonicalName());
    properties.setProperty(ProducerConfigs.TOPIC_NAME, "test2");
    producer = MemqProducer.getInstance(properties);
    producer.close();
    server.getChildGroup().shutdownGracefully().sync();
    server.getParentGroup().shutdownGracefully().sync();
    server.getServerChannelFuture().channel().closeFuture().sync();
  }

  /**
   * Returns a no-op {@link StorageHandler} stub: all fetch methods return null
   * and writeOutput discards its input. Used where a consumer requires a
   * handler but no storage interaction is exercised.
   */
  public static StorageHandler getEmptyTestInput() {
    StorageHandler input = new StorageHandler() {
      @Override
      public InputStream fetchBatchStreamForNotification(JsonObject nextNotificationToProcess) {
        return null;
      }
      @Override
      public DataInputStream fetchMessageAtIndex(JsonObject objectNotification,
                                                 IndexEntry index) throws IOException {
        return null;
      }
      @Override
      public BatchHeader fetchHeaderForBatch(JsonObject nextNotificationToProcess) throws IOException {
        return null;
      }
      @Override
      public void writeOutput(int sizeInBytes,
                              int checksum,
                              List<Message> messages) throws WriteFailedException {
      }
      @Override
      public String getReadUrl() {
        return null;
      }
    };
    return input;
  }

  /**
   * Sends 50 requests while alternating the debug checksum override between a
   * corrupted (1) and valid (0) value, and asserts that the server rejects the
   * corrupted requests while the acked-byte counters stay consistent with what
   * the storage handler actually received.
   */
  @Test
  public void testInvalidChecksumRejection() throws Exception {
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    short port = 23436;
    MemqNettyServer server = createAndStartServer(topicConfig, port);
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    // run tests
    MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", port), "test", 10, 1000000, Compression.NONE, false, 10,
        60_000, "local", 60_000, auditConfigs, null);
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    // Debug mode enables the checksum-override hook used below.
    producer.setDebug();
    byte[] value = UUID.randomUUID().toString().getBytes();
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    for (int i = 0; i < 50; i++) {
      if (i % 2 == 0) {
        // Even requests: force a bogus checksum so the server must reject them.
        MemqNettyRequest.setOverrideDebugChecksum(1);
      } else {
        MemqNettyRequest.setOverrideDebugChecksum(0);
      }
      Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
      futures.add(writeToTopic);
      producer.finalizeRequest();
      Thread.sleep(10L);
    }
    int invalidRequests = 0;
    for (Future<MemqWriteResult> future : futures) {
      try {
        future.get();
      } catch (Exception e) {
        invalidRequests++;
      }
    }
    System.out.println("Number of invalid requests:" + invalidRequests);
    Thread.sleep(500);
    producer.close();
    assertTrue(invalidRequests > 0);
    // Some sent bytes must have gone un-acked, and acked bytes must match what
    // the storage handler persisted.
    assertTrue(MemqNettyRequest.getByteCounter() > MemqNettyRequest.getAckedByteCounter());
    assertEquals(MemqNettyRequest.getAckedByteCounter(), DelayedDevNullStorageHandler.getByteCounter());
    assertEquals(MemqNettyRequest.getAckedByteCounter(),
        DelayedDevNullStorageHandler.getInputStreamCounter());
    System.out
        .println("Number of bytes written on producer: " + MemqNettyRequest.getAckedByteCounter());
    server.getChildGroup().shutdownGracefully().sync();
    server.getParentGroup().shutdownGracefully().sync();
    server.getServerChannelFuture().channel().closeFuture().sync();
  }

  /**
   * Creates the notification and audit topics (1 partition, RF 1) on the given
   * Kafka cluster via AdminClient.
   *
   * @param notificationTopic   topic for storage-handler batch notifications
   * @param kafkaConnectString  bootstrap servers of the test Kafka cluster
   * @param auditTopic          topic for audit messages
   */
  public static void createTopic(String notificationTopic, String kafkaConnectString, String auditTopic) {
    Properties adminProps = new Properties();
    adminProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConnectString);
    adminProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
        ByteArrayDeserializer.class.getCanonicalName());
    adminProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
        ByteArrayDeserializer.class.getCanonicalName());
    adminProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "test12");
    adminProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    AdminClient admin = AdminClient.create(adminProps);
    admin.createTopics(Arrays.asList(new NewTopic(notificationTopic, 1, (short) 1)));
    admin.createTopics(Arrays.asList(new NewTopic(auditTopic, 1, (short) 1)));
    admin.close();
  }

  /**
   * Boots a MemqNettyServer on the given port serving the single supplied
   * topic, backed by a fresh MemqManager/MemqGovernor pair.
   * NOTE(review): the UnknownHostException in the throws clause is redundant —
   * it is already covered by Exception.
   */
  public static MemqNettyServer createAndStartServer(TopicConfig topicConfig,
                                                     short port) throws UnknownHostException, Exception {
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort(port);
    configuration.setNettyServerConfig(nettyServerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    return server;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/integration/TestEnvironmentProvider.java | memq/src/test/java/com/pinterest/memq/core/integration/TestEnvironmentProvider.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.integration;
import com.pinterest.memq.core.config.EnvironmentProvider;
public final class TestEnvironmentProvider extends EnvironmentProvider {
@Override
public String getRack() {
return "local";
}
@Override
public String getInstanceType() {
return "2xl";
}
@Override
public String getIP() {
return "127.0.0.1";
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/integration/CustomS3AsyncOutputHandlerIntegration.java | memq/src/test/java/com/pinterest/memq/core/integration/CustomS3AsyncOutputHandlerIntegration.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.integration;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.List;
import java.util.Properties;
import java.util.UUID;
import java.util.function.BiFunction;
import org.apache.commons.compress.utils.IOUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import com.codahale.metrics.MetricRegistry;
import com.google.common.io.Files;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqLogMessageIterator;
import com.pinterest.memq.client.commons.TestUtils;
import com.pinterest.memq.client.commons.serde.ByteArrayDeserializer;
import com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler;
import com.pinterest.memq.commons.storage.s3.CustomS3AsyncStorageHandler;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.utils.CoreUtils;
import com.pinterest.memq.core.utils.MemqUtils;
import com.salesforce.kafka.test.junit4.SharedKafkaTestResource;
import com.salesforce.kafka.test.listeners.PlainListener;
import io.netty.util.internal.ThreadLocalRandom;
import software.amazon.awssdk.core.ResponseInputStream;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.S3Object;
public class CustomS3AsyncOutputHandlerIntegration {
public static final String INTEGRATION_TEST_BUCKET = System.getProperty("integration.test.bucket");
public static final String BASEPATH = "testing/integration/memq";
@ClassRule
public static final SharedKafkaTestResource sharedKafkaTestResource = new SharedKafkaTestResource()
.withBrokers(1).registerListener(new PlainListener().onPorts(9092));
/**
 * Empties the integration-test S3 prefix before any test runs so leftover
 * objects from previous runs cannot pollute assertions.
 * <p>
 * Fix: the S3Client is AutoCloseable but was only closed on the success path;
 * try-with-resources now guarantees the client (and its HTTP connection pool)
 * is released even if listing or deletion throws.
 */
@BeforeClass
public static void beforeClass() {
  try (S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build()) {
    List<S3Object> listObjects = s3.listObjectsV2(ListObjectsV2Request.builder()
        .bucket(INTEGRATION_TEST_BUCKET).prefix(BASEPATH).build()).contents();
    for (S3Object s3ObjectSummary : listObjects) {
      s3.deleteObject(DeleteObjectRequest.builder().bucket(INTEGRATION_TEST_BUCKET)
          .key(s3ObjectSummary.key()).build());
      System.out.println(
          "Deleting old test data s3://" + INTEGRATION_TEST_BUCKET + "/" + s3ObjectSummary.key());
    }
  }
}
/**
 * End-to-end S3 upload test: writes one randomly-sized batch through the
 * CustomS3AsyncStorageHandler, consumes the Kafka notification it emits,
 * downloads the uploaded object, verifies its MD5 against the S3 ETag, and
 * re-reads every log message back out of the object.
 */
@Test
public void testSimpleS3Uploads() throws Exception {
  // Topic the storage handler publishes per-batch notifications to.
  String notificationTopic = "testnotifications";
  sharedKafkaTestResource.getKafkaTestUtils().createTopic(notificationTopic, 1, (short) 1);
  CustomS3AsyncStorageHandler handler = new CustomS3AsyncStorageHandler();
  Properties outputHandlerConfig = new Properties();
  outputHandlerConfig.put("bucket", INTEGRATION_TEST_BUCKET);
  outputHandlerConfig.put("path", BASEPATH + "/test-simpleuploads");
  outputHandlerConfig.put("disableNotifications", "false");
  outputHandlerConfig.put("enableHashing", "false");
  // Serverset file pointing the notification publisher at the test Kafka broker.
  Files.write("localhost:9092".getBytes(), new File("target/tests3outputhandlerserverset"));
  outputHandlerConfig.put("notificationServerset", "target/tests3outputhandlerserverset");
  outputHandlerConfig.put("notificationTopic", notificationTopic);
  handler.initWriter(outputHandlerConfig, "test", new MetricRegistry());
  List<Message> messages = new ArrayList<Message>();
  int totalMessages = publishMessages(messages);
  int checksum = CoreUtils.batchChecksum(messages);
  int objectSize = CoreUtils.batchSizeInBytes(messages);
  handler.writeOutput(objectSize, checksum, messages);
  // Pull the notification off Kafka and fetch the referenced S3 object.
  ConsumerResponse consumerResponse = extractConsumerObject(notificationTopic,
      "testSimpleS3Uploads");
  String md5 = md5ToBase64(consumerResponse);
  System.out.println("Result:" + md5 + " " + consumerResponse.getBucket() + "/"
      + consumerResponse.getKey() + " etag:" + consumerResponse.geteTag());
  // Local MD5 of the downloaded bytes must match the S3 ETag (single-part upload).
  String etagToBase64 = MemqUtils.etagToBase64(consumerResponse.geteTag());
  assertEquals(md5, etagToBase64);
  // Iterate every log message back out of the object and count them.
  MemqLogMessageIterator<byte[], byte[]> iterator = new MemqLogMessageIterator<>("test", "test",
      new DataInputStream(new ByteArrayInputStream(consumerResponse.getByteArray())),
      consumerResponse.getNotification(), new ByteArrayDeserializer(),
      new ByteArrayDeserializer(), new MetricRegistry(), false, null);
  int counts = 0;
  while (iterator.hasNext()) {
    iterator.next();
    counts++;
  }
  assertEquals(totalMessages, counts);
  handler.closeWriter();
}
/**
 * Appends a randomly-sized set of Message buffers to {@code messages}, each
 * carrying 100 serialized test log messages built by TestUtils.createMessage.
 * Client/server request ids are derived from a random base so runs don't
 * collide.
 * <p>
 * Fixes: the original loop condition was
 * {@code i < ThreadLocalRandom.current().nextInt(50)}, which (a) drew a NEW
 * random bound on every iteration, skewing the batch-count distribution, and
 * (b) could be 0 on the first check, producing an empty batch that made the
 * caller's assertions vacuously pass. The bound is now drawn once and is at
 * least 1.
 *
 * @param messages output list the generated messages are appended to
 * @return total number of log messages written across all Message buffers
 * @throws IOException if test message serialization fails
 */
private int publishMessages(List<Message> messages) throws IOException {
  BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> (base + k).getBytes();
  long baseRequestId = ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE);
  int totalMessages = 0;
  // Draw the batch count once, in [1, 50], so at least one message is produced.
  int batches = ThreadLocalRandom.current().nextInt(1, 51);
  for (int i = 0; i < batches; i++) {
    Message m = new Message(1024 * 100, true);
    m.setClientRequestId(baseRequestId + i);
    m.setServerRequestId(baseRequestId + 1000 + i);
    byte[] message = TestUtils.createMessage(UUID.randomUUID().toString(), getLogMessageBytes,
        100, true, Compression.NONE, null, false);
    m.getBuf().writeBytes(message);
    messages.add(m);
    totalMessages += 100;
  }
  return totalMessages;
}
public static ConsumerResponse extractConsumerObject(String notificationTopic,
String groupId) throws Exception {
Properties config = new Properties();
config.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
sharedKafkaTestResource.getKafkaConnectString());
config.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
config.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
StringDeserializer.class.getCanonicalName());
config.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
StringDeserializer.class.getCanonicalName());
config.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
KafkaConsumer<String, String> notificationConsumer = new KafkaConsumer<>(config);
notificationConsumer.subscribe(Arrays.asList(notificationTopic));
ConsumerRecords<String, String> poll = notificationConsumer.poll(Duration.ofSeconds(5));
Gson gson = new Gson();
JsonObject notification = null;
for (ConsumerRecord<String, String> consumerRecord : poll) {
notification = gson.fromJson(consumerRecord.value(), JsonObject.class);
}
notificationConsumer.close();
S3Client s3 = S3Client.builder().region(Region.US_EAST_1).build();
String bucket = notification.get(AbstractS3StorageHandler.BUCKET).getAsString();
String key = notification.get(AbstractS3StorageHandler.KEY).getAsString();
ResponseInputStream<GetObjectResponse> resp = s3
.getObject(GetObjectRequest.builder().bucket(bucket).key(key).build());
GetObjectResponse object = resp.response();
String eTag = object.eTag().replace("\"", "");
ByteArrayOutputStream os = new ByteArrayOutputStream();
IOUtils.copy(resp, os);
os.close();
byte[] byteArray = os.toByteArray();
s3.close();
return new ConsumerResponse(byteArray, notification, bucket, key, eTag);
}
private String md5ToBase64(ConsumerResponse consumerResponse) throws NoSuchAlgorithmException {
MessageDigest md = MessageDigest.getInstance("MD5");
byte[] digest = md.digest(consumerResponse.getByteArray());
String md5 = Base64.getEncoder().encodeToString(digest);
return md5;
}
public static class ConsumerResponse {
private byte[] byteArray;
private JsonObject notification;
private String bucket;
private String key;
private String eTag;
public ConsumerResponse(byte[] byteArray,
JsonObject notification,
String bucket,
String key,
String eTag) {
this.byteArray = byteArray;
this.notification = notification;
this.bucket = bucket;
this.key = key;
this.eTag = eTag;
}
public byte[] getByteArray() {
return byteArray;
}
public JsonObject getNotification() {
return notification;
}
public String getBucket() {
return bucket;
}
public String getKey() {
return key;
}
public String geteTag() {
return eTag;
}
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/integration/producer2/TestMemqClientServerPerf.java | memq/src/test/java/com/pinterest/memq/core/integration/producer2/TestMemqClientServerPerf.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.integration.producer2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.serde.ByteArraySerializer;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.client.producer.netty.MemqNettyRequest;
import com.pinterest.memq.client.producer2.MemqProducer;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.storage.DelayedDevNullStorageHandler;
import com.pinterest.memq.core.MemqManager;
import com.pinterest.memq.core.clustering.MemqGovernor;
import com.pinterest.memq.core.config.EnvironmentProvider;
import com.pinterest.memq.core.config.MemqConfig;
import com.pinterest.memq.core.config.NettyServerConfig;
import com.pinterest.memq.core.integration.TestEnvironmentProvider;
import com.pinterest.memq.core.rpc.MemqNettyServer;
import com.pinterest.memq.core.rpc.TestAuditor;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import com.codahale.metrics.MetricRegistry;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * Throughput-oriented integration test: boots a local MemQ netty server backed
 * by a delayed /dev/null storage handler, drives it with several concurrent
 * producers, then asserts byte-level parity between client and server counters
 * and a wall-clock budget.
 */
public class TestMemqClientServerPerf {

  /** Clears cached topic metadata so each run starts from a clean slate. */
  @Before
  public void before() {
    new File(new MemqConfig().getTopicCacheFile()).delete();
  }

  /** Clears caches and resets static fixtures so later tests are unaffected. */
  @After
  public void after() {
    new File(new MemqConfig().getTopicCacheFile()).delete();
    DelayedDevNullStorageHandler.reset();
    TestAuditor.reset();
    MemqNettyRequest.reset();
  }

  @Test
  public void testServerPerf() throws Exception {
    // --- server setup ---
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) 23437);
    configuration.setNettyServerConfig(nettyServerConfig);
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    topicConfig.setBufferSize(4 * 1024 * 1024);
    topicConfig.setBatchSizeMB(4);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties outputHandlerConfig = new Properties();
    outputHandlerConfig.setProperty("delay.min.millis", "1");
    outputHandlerConfig.setProperty("delay.max.millis", "2");
    topicConfig.setStorageHandlerConfig(outputHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();

    // --- producer setup: one producer per thread, each with its own registry ---
    int numOfProducerThreads = 4;
    ExecutorService es = Executors.newFixedThreadPool(numOfProducerThreads,
        DaemonThreadFactory.INSTANCE);
    final int TOTAL = 1_000_000;
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<byte[], byte[]>()
        .cluster("testcluster")
        .bootstrapServers("localhost:" + 23437)
        .topic("test")
        .maxInflightRequests(10)
        .maxPayloadBytes(3000000)
        .lingerMs(5000)
        .compression(Compression.NONE)
        .disableAcks(false)
        .sendRequestTimeout(60_000)
        .locality("local")
        .auditProperties(auditConfigs);
    List<MetricRegistry> registries = new ArrayList<>();
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < numOfProducerThreads; i++) {
      MetricRegistry registry = new MetricRegistry();
      registries.add(registry);
      es.submit(() -> {
        try {
          MemqProducer<byte[], byte[]> producer = new MemqProducer.Builder<>(builder)
              .metricRegistry(registry)
              .keySerializer(new ByteArraySerializer())
              .valueSerializer(new ByteArraySerializer())
              .maxInflightRequestsMemoryBytes(1024 * 1024 * 64)
              .build();
          byte[] value = UUID.randomUUID().toString().getBytes();
          Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
          for (int j = 0; j < TOTAL; j++) {
            futures.add(producer.write(null, value));
          }
          producer.flush();
          for (Future<MemqWriteResult> future : futures) {
            future.get();
          }
          Thread.sleep(500);
          producer.close();
        } catch (Exception e) {
          e.printStackTrace();
        }
      });
    }

    // --- throughput monitor ---
    // FIX: keep a handle on the monitor executor (it was previously created
    // inline and leaked) and stop it in a finally block, so a failing
    // assertion no longer leaves the reporter thread spinning.
    final AtomicBoolean flg = new AtomicBoolean(true);
    ExecutorService monitor = Executors.newCachedThreadPool(DaemonThreadFactory.INSTANCE);
    try {
      monitor.submit(() -> {
        long prevBytes = 0;
        while (flg.get()) {
          long curBytes = DelayedDevNullStorageHandler.getByteCounter();
          System.out.println("Server Throughput: " + (curBytes - prevBytes) / 1024 / 1024 + "MB/s");
          prevBytes = curBytes;
          try {
            Thread.sleep(1_000);
          } catch (InterruptedException e) {
            break;
          }
        }
      });
      es.shutdown();
      // FIX: fail explicitly on timeout instead of silently comparing
      // counters that were captured mid-flight.
      assertTrue("Producers did not finish within 100s",
          es.awaitTermination(100, TimeUnit.SECONDS));
      long endTime = System.currentTimeMillis();
      long duration = (endTime - startTime) / 1000;
      System.out.println("Took " + duration + " seconds");
      // every byte the clients reported sending must have reached storage
      assertEquals(
          registries.stream()
              .mapToLong(mr -> mr.getCounters().get("requests.sent.bytes").getCount()).sum(),
          DelayedDevNullStorageHandler.getByteCounter());
      server.getChildGroup().shutdownGracefully().sync();
      server.getParentGroup().shutdownGracefully().sync();
      server.getServerChannelFuture().channel().closeFuture().sync();
      assertTrue("Performance test should take less than 15 seconds", duration < 15);
    } finally {
      flg.set(false);
      monitor.shutdownNow();
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/integration/producer2/TestMemqClientServerIntegration.java | memq/src/test/java/com/pinterest/memq/core/integration/producer2/TestMemqClientServerIntegration.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.integration.producer2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileOutputStream;
import java.net.UnknownHostException;
import java.security.KeyStore;
import java.security.KeyStore.PrivateKeyEntry;
import java.security.cert.X509Certificate;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.serde.ByteArraySerializer;
import com.pinterest.memq.client.commons2.network.NetworkClient;
import com.pinterest.memq.client.commons2.network.netty.ClientChannelInitializer;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.client.producer2.MemqProducer;
import com.pinterest.memq.commons.config.SSLConfig;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.WriteRequestPacket;
import com.pinterest.memq.commons.storage.DelayedDevNullStorageHandler;
import com.pinterest.memq.core.MemqManager;
import com.pinterest.memq.core.clustering.MemqGovernor;
import com.pinterest.memq.core.config.EnvironmentProvider;
import com.pinterest.memq.core.config.MemqConfig;
import com.pinterest.memq.core.config.NettyServerConfig;
import com.pinterest.memq.core.integration.TestEnvironmentProvider;
import com.pinterest.memq.core.rpc.MemqNettyServer;
import com.pinterest.memq.core.rpc.TestAuditor;
import com.salesforce.kafka.test.junit4.SharedKafkaTestResource;
import com.salesforce.kafka.test.listeners.PlainListener;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
import io.netty.handler.ssl.util.SelfSignedCertificate;
/**
 * Client/server integration tests for the producer2 MemQ client: plaintext,
 * TLS, and checksum-tamper rejection paths against a local netty server
 * backed by a delayed /dev/null storage handler.
 */
public class TestMemqClientServerIntegration {

  private static final String JKS = "JKS";

  @ClassRule
  public static final SharedKafkaTestResource sharedKafkaTestResource = new SharedKafkaTestResource()
      .withBrokers(1).registerListener(new PlainListener().onPorts(9092));

  /** Runs both before and after each test to guarantee isolation. */
  @Before
  @After
  public void reset() {
    new File(new MemqConfig().getTopicCacheFile()).delete();
    DelayedDevNullStorageHandler.reset();
    TestAuditor.reset();
    // FIX: clear any wiretapper installed by a failed tamper test; previously
    // it was only cleared on the happy path of testInvalidChecksumRejection
    // and could corrupt every subsequent test's traffic.
    ClientChannelInitializer.setWiretapper(null);
  }

  /**
   * Plaintext round trip: 1M small writes with acks enabled, verifying
   * storage/audit/byte-counter parity, then the same with acks disabled.
   */
  @Test
  public void testLocalServer() throws Exception {
    long startMs = System.currentTimeMillis();
    DelayedDevNullStorageHandler.reset();
    // --- server setup ---
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) 23434);
    configuration.setNettyServerConfig(nettyServerConfig);
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties outputHandlerConfig = new Properties();
    outputHandlerConfig.setProperty("delay.min.millis", "1");
    outputHandlerConfig.setProperty("delay.max.millis", "2");
    topicConfig.setStorageHandlerConfig(outputHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    Properties networkConfigs = new Properties();
    networkConfigs.put(NetworkClient.CONFIG_CONNECT_TIMEOUT_MS, "5000");
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    // --- phase 1: acks enabled ---
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<byte[], byte[]>()
        .cluster("testcluster")
        .bootstrapServers("localhost:23434")
        .topic("test")
        .maxInflightRequests(40)
        .maxPayloadBytes(1000000)
        .maxInflightRequestsMemoryBytes(1024 * 1024 * 64) // 64 MB
        .lingerMs(500)
        .compression(Compression.NONE)
        .disableAcks(false)
        .sendRequestTimeout(60_000)
        .networkProperties(networkConfigs)
        .locality("local")
        .auditProperties(auditConfigs)
        .keySerializer(new ByteArraySerializer())
        .valueSerializer(new ByteArraySerializer())
        .metricRegistry(new MetricRegistry());
    MemqProducer<byte[], byte[]> producer = builder.build();
    Map<String, Timer> timers;
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    byte[] value = new byte[36];
    ThreadLocalRandom.current().nextBytes(value);
    for (int i = 0; i < 1_000_000; i++) {
      if (i % 10000 == 0) {
        // periodic progress/latency report
        timers = producer.getMetricRegistry().getTimers();
        System.out.println("[" + i + "/1000000]" +
            "\tTotal write time: " + timers.get("producer.write.time").getSnapshot().get99thPercentile() / 1_000_000 +
            "\tInflight requests: " + producer.getMetricRegistry().getGauges().get("requests.inflight").getValue() +
            (timers.get("requests.write.time") != null ? "\tActual write time: " + timers.get("requests.write.time").getSnapshot().get99thPercentile() / 1_000_000 : "" ) +
            ""
        );
      }
      Future<MemqWriteResult> writeToTopic = producer.write(null, value);
      futures.add(writeToTopic);
    }
    System.out.println("Sent all writes: " + (System.currentTimeMillis() - startMs));
    producer.flush();
    for (Future<MemqWriteResult> future : futures) {
      future.get(30, TimeUnit.SECONDS);
    }
    System.out.println("Completed all writes: " + (System.currentTimeMillis() - startMs));
    Thread.sleep(500);
    producer.close();
    assertEquals(futures.size(), DelayedDevNullStorageHandler.getCounter());
    assertEquals(futures.size(), TestAuditor.getAuditMessageList().size());
    Map<String, Counter> counters = producer.getMetricRegistry().getCounters();
    assertEquals(counters.get("requests.sent.bytes").getCount(), DelayedDevNullStorageHandler.getByteCounter());
    assertEquals(counters.get("requests.sent.bytes").getCount(), DelayedDevNullStorageHandler.getInputStreamCounter());
    assertEquals(counters.get("requests.acked.bytes").getCount(), counters.get("requests.sent.bytes").getCount());
    TestAuditor.reset();
    // --- phase 2: acks disabled, fresh registry ---
    builder.disableAcks(true);
    builder.metricRegistry(new MetricRegistry());
    producer = builder.build();
    futures = new LinkedHashSet<>();
    for (int i = 0; i < 1_000_000; i++) {
      if (i % 10000 == 0) {
        timers = producer.getMetricRegistry().getTimers();
        System.out.println("[" + i + "/1000000]" +
            "\tTotal write time: " + timers.get("producer.write.time").getSnapshot().get99thPercentile() / 1_000_000 +
            "\tInflight requests: " + producer.getMetricRegistry().getGauges().get("requests.inflight").getValue() +
            (timers.get("requests.write.time") != null ? "\tActual write time: " + timers.get("requests.write.time").getSnapshot().get99thPercentile() / 1_000_000 : "" ) +
            ""
        );
      }
      Future<MemqWriteResult> writeToTopic = producer.write(null, value);
      futures.add(writeToTopic);
    }
    System.out.println("Sent all writes: " + (System.currentTimeMillis() - startMs));
    producer.flush();
    for (Future<MemqWriteResult> future : futures) {
      future.get(30, TimeUnit.SECONDS);
    }
    System.out.println("Completed all writes: " + (System.currentTimeMillis() - startMs));
    Thread.sleep(500);
    producer.close();
    counters = producer.getMetricRegistry().getCounters();
    assertEquals(futures.size(), TestAuditor.getAuditMessageList().size());
    assertEquals(counters.get("requests.acked.bytes").getCount(), counters.get("requests.sent.bytes").getCount());
    server.getChildGroup().shutdownGracefully().sync();
    server.getParentGroup().shutdownGracefully().sync();
    server.getServerChannelFuture().channel().closeFuture().sync();
    server.stop();
  }

  /**
   * Same round trip as {@link #testLocalServer()} but over TLS, using a
   * self-signed certificate stored in a throwaway JKS keystore.
   */
  @Test
  public void testLocalServerSSL() throws Exception {
    int port = 23435;
    String keystorePath = "target/testks.jks";
    new File(keystorePath).delete();
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) port);
    // build a throwaway keystore containing a self-signed cert + key
    SelfSignedCertificate cert = new SelfSignedCertificate("test-memq.pinterest.com");
    KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
    ks.load(null, null);
    ks.setCertificateEntry("default", cert.cert());
    ks.setEntry("default", new PrivateKeyEntry(cert.key(), new X509Certificate[] { cert.cert() }),
        new KeyStore.PasswordProtection("test".toCharArray()));
    String keystorePassword = "test";
    // FIX: close the keystore stream (it was previously leaked)
    try (FileOutputStream fos = new FileOutputStream(new File(keystorePath))) {
      ks.store(fos, keystorePassword.toCharArray());
    }
    SSLConfig sslConfig = new SSLConfig();
    sslConfig.setProtocols(Collections.singletonList("TLSv1.2"));
    sslConfig.setKeystorePassword(keystorePassword);
    sslConfig.setKeystoreType(JKS);
    sslConfig.setKeystorePath(keystorePath);
    sslConfig.setTruststorePassword(keystorePassword);
    sslConfig.setTruststorePath(keystorePath);
    sslConfig.setTruststoreType(JKS);
    nettyServerConfig.setSslConfig(sslConfig);
    configuration.setNettyServerConfig(nettyServerConfig);
    // FIX: topic name is "test"; the original passed keystorePassword here,
    // which only worked because both values happened to be "test".
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties outputHandlerConfig = new Properties();
    outputHandlerConfig.setProperty("delay.min.millis", "1");
    outputHandlerConfig.setProperty("delay.max.millis", "2");
    topicConfig.setStorageHandlerConfig(outputHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<byte[], byte[]>()
        .cluster("testcluster")
        .bootstrapServers("localhost:" + port)
        .topic("test")
        .maxInflightRequests(120)
        .maxPayloadBytes(1000000)
        .maxInflightRequestsMemoryBytes(1024 * 1024 * 64) // 64 MB
        .lingerMs(500)
        .compression(Compression.NONE)
        .disableAcks(false)
        .sendRequestTimeout(60_000)
        .locality("local")
        .auditProperties(auditConfigs)
        .keySerializer(new ByteArraySerializer())
        .valueSerializer(new ByteArraySerializer())
        .sslConfig(sslConfig)
        .metricRegistry(new MetricRegistry());
    MemqProducer<byte[], byte[]> producer = builder.build();
    byte[] value = UUID.randomUUID().toString().getBytes();
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    Map<String, Timer> timers;
    for (int i = 0; i < 1_000_000; i++) {
      if (i % 10000 == 0) {
        timers = producer.getMetricRegistry().getTimers();
        System.out.println("[" + i + "/1000000]" +
            "\tTotal write time: " + timers.get("producer.write.time").getSnapshot().get99thPercentile() / 1_000_000 +
            (timers.get("requests.write.time") != null ? "\tActual write time: " + timers.get("requests.write.time").getSnapshot().get99thPercentile() / 1_000_000 : "" ) +
            ""
        );
      }
      Future<MemqWriteResult> writeToTopic = producer.write(null, value);
      futures.add(writeToTopic);
    }
    producer.flush();
    for (Future<MemqWriteResult> future : futures) {
      future.get();
    }
    System.out.println("Completed all writes");
    Thread.sleep(1000);
    producer.close();
    assertEquals(futures.size(), DelayedDevNullStorageHandler.getCounter());
    assertEquals(futures.size(), TestAuditor.getAuditMessageList().size());
    server.getChildGroup().shutdownGracefully().sync();
    server.getParentGroup().shutdownGracefully().sync();
    server.getServerChannelFuture().channel().closeFuture().sync();
    server.stop();
  }

  /**
   * Installs a wiretap that corrupts the CRC of every other write request and
   * verifies that the server rejects exactly those requests while byte
   * counters stay consistent for the accepted ones.
   */
  @Test
  public void testInvalidChecksumRejection() throws Exception {
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(128);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    short port = 23436;
    MemqNettyServer server = createAndStartServer(topicConfig, port);
    Properties auditConfigs = new Properties();
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<byte[], byte[]>()
        .cluster("testcluster")
        .bootstrapServers("localhost:" + port)
        .topic("test")
        .maxInflightRequests(60)
        .maxInflightRequestsMemoryBytes(1024 * 1024 * 64) // 64 MB
        .maxPayloadBytes(1000000)
        .lingerMs(500)
        .compression(Compression.NONE)
        .disableAcks(false)
        .sendRequestTimeout(60_000)
        .locality("local")
        .auditProperties(auditConfigs)
        .keySerializer(new ByteArraySerializer())
        .valueSerializer(new ByteArraySerializer())
        .metricRegistry(new MetricRegistry());
    // corrupt the checksum of every other outbound write request
    ChannelHandler wiretapper = new CRCTamperHandler();
    ClientChannelInitializer.setWiretapper(wiretapper);
    MemqProducer<byte[], byte[]> producer = builder.build();
    byte[] value = UUID.randomUUID().toString().getBytes();
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    for (int i = 0; i < 50; i++) {
      Future<MemqWriteResult> writeToTopic = producer.write(null, value);
      futures.add(writeToTopic);
      producer.flush();
      Thread.sleep(10L);
    }
    int invalidRequests = 0;
    for (Future<MemqWriteResult> future : futures) {
      try {
        future.get();
      } catch (Exception e) {
        // tampered requests are expected to fail
        invalidRequests++;
      }
    }
    System.out.println("Number of invalid requests:" + invalidRequests);
    Thread.sleep(500);
    producer.close();
    Map<String, Counter> counters = producer.getMetricRegistry().getCounters();
    long ackedBytes = counters.get("requests.acked.bytes").getCount();
    assertTrue(invalidRequests > 0);
    assertTrue(counters.get("requests.sent.bytes").getCount() > ackedBytes);
    // only the untampered (acked) bytes should have reached storage
    assertEquals(ackedBytes, DelayedDevNullStorageHandler.getByteCounter());
    assertEquals(ackedBytes, DelayedDevNullStorageHandler.getInputStreamCounter());
    System.out
        .println("Number of bytes written on producer: " + ackedBytes);
    ClientChannelInitializer.setWiretapper(null);
    server.getChildGroup().shutdownGracefully().sync();
    server.getParentGroup().shutdownGracefully().sync();
    server.getServerChannelFuture().channel().closeFuture().sync();
    server.stop();
  }

  /** Boots a minimal MemQ netty server hosting the given topic on {@code port}. */
  MemqNettyServer createAndStartServer(TopicConfig topicConfig,
                                       short port) throws Exception {
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort(port);
    configuration.setNettyServerConfig(nettyServerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    return server;
  }

  /**
   * Outbound wiretap that increments the CRC of every other WRITE request so
   * the server's checksum validation rejects it; all other traffic passes
   * through untouched.
   */
  @ChannelHandler.Sharable
  static class CRCTamperHandler extends ChannelOutboundHandlerAdapter {

    private final AtomicInteger count = new AtomicInteger(0);

    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
        throws Exception {
      RequestPacket req = new RequestPacket();
      ByteBuf b = (ByteBuf) msg;
      // duplicate shares the buffer but keeps independent indices, so dup
      // still points at the unread packet after readFields consumes b
      ByteBuf dup = b.duplicate();
      req.readFields(b, RequestType.PROTOCOL_VERSION);
      System.out.println(req.getRequestType());
      if (req.getRequestType().equals(RequestType.WRITE)) {
        WriteRequestPacket wrp = (WriteRequestPacket) req.getPayload();
        if (count.getAndIncrement() % 2 == 0) {
          // rebuild the packet with checksum+1 and serialize into a new buffer
          System.out.println("Tampering request: " + req.getClientRequestId());
          wrp = new WriteRequestPacket(wrp.isDisableAcks(), wrp.getTopicName().getBytes(), wrp.isChecksumExists(), wrp.getChecksum() + 1, wrp.getData());
          ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer(b.capacity());
          req = new RequestPacket(req.getProtocolVersion(), req.getClientRequestId(), req.getRequestType(), wrp);
          req.setPreAllocOutBuf(buf);
          msg = buf;
          b.release();
        } else {
          msg = dup;
        }
      } else {
        msg = dup;
      }
      super.write(ctx, msg, promise);
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/commons/TestMessageBufferInputStream.java | memq/src/test/java/com/pinterest/memq/core/commons/TestMessageBufferInputStream.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.commons;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;
import org.junit.Before;
import org.junit.Test;
import com.codahale.metrics.Counter;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.commons.MessageBufferInputStream;
import com.pinterest.memq.core.utils.CoreUtils;
@SuppressWarnings("unused")
public class TestMessageBufferInputStream {
private List<Message> list;
private AtomicInteger slotCounter;
@Before
public void before() {
slotCounter = new AtomicInteger();
String uuid = UUID.randomUUID().toString();
try {
Message m1 = new Message(1024, false);
m1.put(uuid.getBytes("utf-8"));
Message m2 = new Message(512, false);
m2.put("abcdefgh".getBytes("utf-8"));
Message m3 = new Message(128, false);
m3.put("123456789".getBytes("utf-8"));
list = Arrays.asList(m1, m2, m3);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Test
public void testBasicReads() throws IOException {
MessageBufferInputStream is = new MessageBufferInputStream(list, new Counter());
int count = 0;
byte b;
while ((b = (byte) is.read()) != -1) {
count++;
}
assertEquals(53, count);
is.close();
}
@Test
public void testMultiRead() throws Exception {
int size = CoreUtils.batchSizeInBytes(list);
for (int i = 0; i < 10; i++) {
MessageBufferInputStream is = new MessageBufferInputStream(list, new Counter());
int c = 0;
byte b;
while ((b = (byte) is.read()) != -1) {
c++;
}
assertEquals("Failed for:" + i, size, c);
is.close();
}
size = CoreUtils.batchSizeInBytes(list);
MessageBufferInputStream is = new MessageBufferInputStream(list, new Counter());
for (int i = 0; i < 10; i++) {
is.resetToBeginnging();
int c = 0;
byte b;
while ((b = (byte) is.read()) != -1) {
c++;
}
assertEquals("Failed for:" + i, size, c);
is.close();
}
}
  // Disabled test (annotation commented out): exercises mark()/reset()
  // semantics across multiple reads. NOTE(review): the expected values below
  // (63, then 43 twice) encode the behavior that reset() rewinds to the mark
  // set at byte 10, leaving 53 - 10 = 43 bytes to re-read — confirm against
  // MessageBufferInputStream before re-enabling.
  // @Test
  public void testMarkSupport() throws IOException {
    MessageBufferInputStream is = new MessageBufferInputStream(list, new Counter());
    assertEquals(true, is.markSupported());
    // fixture holds 36 + 8 + 9 = 53 bytes
    assertEquals(53, CoreUtils.batchSizeInBytes(list));
    int count = 0;
    // read 10 bytes, then place the mark
    for (int i = 0; i < 10; i++) {
      is.read();
      count++;
    }
    is.mark(1024 * 1024);
    // read 10 more bytes past the mark
    for (int i = 0; i < 10; i++) {
      is.read();
      count++;
    }
    assertEquals(20, count);
    is.reset();
    byte b;
    // drain from the mark (byte 10) to EOF: 43 more bytes, 20 + 43 = 63
    while ((b = (byte) is.read()) != -1) {
      count++;
    }
    assertEquals(63, count);
    is.reset();
    count = 0;
    // a second reset re-reads the same 43 bytes from the mark
    while ((b = (byte) is.read()) != -1) {
      count++;
    }
    assertEquals(43, count);
    is.reset();
    count = 0;
    // and a third reset behaves identically
    while ((b = (byte) is.read()) != -1) {
      count++;
    }
    assertEquals(43, count);
    is.close();
  }
// NOTE(review): not annotated with @Test, so JUnit never runs this method —
// confirm whether it was deliberately disabled before adding the annotation.
public void testMarkCombinations() throws IOException {
  // Three messages of different payload lengths (142 bytes total): a mark at
  // offset 0 must allow a full re-read after draining the stream once.
  Message m1 = new Message(1024, false);
  m1.put("asdaaq3erqddasdfqw3rqGdsasdfsadfgasdvadvasFDFDFsdsefasfdfsafSADfasdfasdfasdf"
      .getBytes("utf-8"));
  Message m2 = new Message(512, false);
  m2.put("asdferq3rfdsasdvavasdgwr34t45#@$@$!#@%#$%!@#$~@$!##E@#!$3".getBytes("utf-8"));
  Message m3 = new Message(128, false);
  m3.put("123456789".getBytes("utf-8"));
  list = Arrays.asList(m1, m2, m3);
  MessageBufferInputStream is = new MessageBufferInputStream(list, new Counter());
  assertEquals(142, CoreUtils.batchSizeInBytes(list));
  int count = 0;
  is.mark(0); // mark at the very beginning
  // Compare the int result of read() with -1; the original (byte) cast would
  // also bail out on a legitimate 0xFF data byte.
  while (is.read() != -1) {
    count++;
  }
  assertEquals(142, is.getBytesRead());
  assertEquals(142, count);
  is.reset();
  count = 0;
  while (is.read() != -1) {
    count++;
  }
  assertEquals(142, count);
  is.close();
}
// NOTE(review): not annotated with @Test — confirm whether it was
// deliberately disabled before adding the annotation.
public void testHeavyLoad() throws IOException {
  // Ten 100KB messages; for each of 100 rounds, drain with a single random
  // mid-stream reset() and verify getBytesRead() equals the re-read prefix
  // (nextInt bytes) plus one full pass over the batch.
  List<Message> list = new ArrayList<>();
  for (int i = 0; i < 10; i++) {
    Message m = new Message(1024 * 1024, true);
    m.put(new byte[1024 * 100]);
    list.add(m);
  }
  int BYTES = 1024 * 100 * 10;
  assertEquals(BYTES, CoreUtils.batchSizeInBytes(list));
  Random rand = new Random();
  for (int i = 0; i < 100; i++) {
    int nextInt = rand.nextInt(BYTES);
    rewindMessages(list);
    MessageBufferInputStream mis = new MessageBufferInputStream(list, new Counter());
    mis.mark(1024 * 1024 * 100);
    int k = 0;
    // int comparison with -1: the original (byte) cast pattern would have
    // misread a 0xFF data byte as EOF (harmless for this all-zero payload,
    // but incorrect in general).
    while (mis.read() != -1) {
      k++;
      if (k == nextInt) {
        mis.reset(); // triggers at most once per round
      }
    }
    assertEquals(nextInt + BYTES, mis.getBytesRead());
    mis.close();
  }
}
// NOTE(review): not annotated with @Test — confirm whether it was
// deliberately disabled before adding the annotation.
public void testEventResetRead() throws IOException {
  // Refill 100 messages with random-length UUID payloads across 10 passes and
  // verify each fresh stream delivers exactly batchSizeInBytes(list) bytes.
  int CNT = 100;
  List<Message> list = new ArrayList<>();
  for (int i = 0; i < CNT; i++) {
    Message m = new Message(1024 * 1024, true);
    list.add(m);
  }
  for (int p = 0; p < 10; p++) {
    for (int k = 0; k < CNT; k++) {
      Message m = list.get(k);
      m.reset();
      // Build the payload with StringBuilder instead of String '+=' in a
      // loop (the original was O(n^2) in payload length).
      StringBuilder data = new StringBuilder(UUID.randomUUID().toString());
      for (int j = 0; j < ThreadLocalRandom.current().nextInt(1000); j++) {
        data.append(UUID.randomUUID().toString());
      }
      m.put(data.toString().getBytes());
    }
    int batchSizeInBytes = CoreUtils.batchSizeInBytes(list);
    MessageBufferInputStream mis = new MessageBufferInputStream(list, new Counter());
    mis.mark(10);
    int k = 0;
    // UUID payloads are ASCII, but compare the int read() result with -1
    // anyway so a 0xFF byte could never be mistaken for EOF.
    while (mis.read() != -1) {
      k++;
    }
    System.out.println("Size in bytes:" + batchSizeInBytes);
    assertEquals(batchSizeInBytes, k);
    mis.close();
  }
}
@Test
public void testReRead() throws IOException {
  // Fresh streams created repeatedly over the same message buffers must keep
  // yielding the full batch on every one of 100 passes.
  List<Message> list = new ArrayList<>();
  int BYTES = 0;
  for (int i = 0; i < 10; i++) {
    Message m = new Message(1024 * 1024, true);
    int byteCount = 1024 * ThreadLocalRandom.current().nextInt(10);
    m.put(new byte[byteCount]);
    BYTES += byteCount;
    list.add(m);
  }
  assertEquals(BYTES, CoreUtils.batchSizeInBytes(list));
  int k = 0;
  for (int i = 0; i < 100; i++) {
    MessageBufferInputStream mis = new MessageBufferInputStream(list, new Counter());
    // Compare the int from read() with -1 directly; the original (byte) cast
    // would have treated a legitimate 0xFF data byte as EOF.
    while (mis.read() != -1) {
      k++;
    }
    mis.close();
  }
  assertEquals(BYTES * 100, k);
}
@Test
public void testMarkBatchReads() throws IOException {
  // Bulk read(byte[]) over ten 100KB messages must deliver every byte, even
  // with a chunk size that does not divide the total evenly.
  List<Message> messages = new ArrayList<>();
  for (int i = 0; i < 10; i++) {
    Message message = new Message(1024 * 1024, true);
    message.put(new byte[1024 * 100]);
    messages.add(message);
  }
  int expectedBytes = 1024 * 100 * 10;
  assertEquals(expectedBytes, CoreUtils.batchSizeInBytes(messages));
  MessageBufferInputStream stream = new MessageBufferInputStream(messages, new Counter());
  byte[] chunk = new byte[127];
  int totalRead = 0;
  for (int n = stream.read(chunk); n != -1; n = stream.read(chunk)) {
    totalRead += n;
  }
  assertEquals(expectedBytes, totalRead);
  stream.close();
}
@Test
public void testMd5SumCalculationCheck() throws IOException {
  // Verifies the stream delivers exactly batchSizeInBytes(list) bytes via
  // bulk read(byte[]), including after messages are reset and refilled with
  // new random-sized payloads.
  // NOTE(review): the MD5/length-check wrappers this test is named after are
  // commented out below, so no digest is actually verified — confirm whether
  // the wrapper streams should be restored or the test renamed.
  int BYTES = 0;
  List<Message> list = new ArrayList<>();
  for (int i = 0; i < 10; i++) {
    Message m = new Message(1024 * 1024, true);
    byte[] buf = new byte[1024 * ThreadLocalRandom.current().nextInt(10, 100)];
    BYTES += buf.length;
    m.put(buf);
    list.add(m);
  }
  assertEquals(BYTES, CoreUtils.batchSizeInBytes(list));
  MessageBufferInputStream mis = new MessageBufferInputStream(list, new Counter());
  // MD5DigestCalculatingInputStream stream = new MD5DigestCalculatingInputStream(
  // new LengthCheckInputStream(mis, BYTES, true));
  byte[] b = new byte[127]; // chunk size deliberately not a divisor of the total
  int k = 0;
  int t = 0;
  // Drain the stream in 127-byte chunks and count delivered bytes.
  while ((t = mis.read(b)) != -1) {
    k += t;
  }
  assertEquals(BYTES, k);
  // stream.close();
  // Repeat 100 times with refilled messages of new random sizes to catch
  // stale per-message state.
  for (int p = 0; p < 100; p++) {
    BYTES = 0;
    for (int i = 0; i < 10; i++) {
      Message m = list.get(i);
      m.reset();
      byte[] buf = new byte[1024 * ThreadLocalRandom.current().nextInt(10, 100)];
      BYTES += buf.length;
      m.put(buf);
    }
    assertEquals(BYTES, CoreUtils.batchSizeInBytes(list));
    mis = new MessageBufferInputStream(list, new Counter());
    // stream = new MD5DigestCalculatingInputStream(new LengthCheckInputStream(mis, BYTES, true));
    b = new byte[127];
    k = 0;
    t = 0;
    while ((t = mis.read(b)) != -1) {
      k += t;
    }
    assertEquals(BYTES, k);
    // stream.close();
  }
  mis.close();
}
// Rewinds both the reader and writer indices of every message's buffer so the
// underlying buffers can be consumed again from the start.
public void rewindMessages(List<Message> list) {
  list.forEach(message -> {
    message.getBuf().resetReaderIndex();
    message.getBuf().resetWriterIndex();
  });
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/processing/TestOutputHandler.java | memq/src/test/java/com/pinterest/memq/core/processing/TestOutputHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.processing;
import java.util.List;
import java.util.Properties;
import com.codahale.metrics.MetricRegistry;
import com.pinterest.memq.commons.storage.WriteFailedException;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.core.commons.Message;
/**
 * No-op {@link StorageHandler} for tests that need a topic processor wired to
 * a storage backend but do not care about the written output.
 */
public class TestOutputHandler implements StorageHandler {

  public TestOutputHandler() {
  }

  @Override
  public void initWriter(Properties outputHandlerConfig,
                         String topic,
                         MetricRegistry registry) throws Exception {
    // intentionally empty: there is no writer state to initialize
  }

  @Override
  public void writeOutput(int sizeInBytes,
                          int checksum,
                          List<Message> messages) throws WriteFailedException {
    // intentionally empty: messages are silently discarded
  }

  @Override
  public String getReadUrl() {
    // no backing store, hence no URL to read from
    return null;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/processing/bucketing/TestBucketingTopicProcessor.java | memq/src/test/java/com/pinterest/memq/core/processing/bucketing/TestBucketingTopicProcessor.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.processing.bucketing;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.buffer.ByteBufInputStream;
import io.netty.channel.Channel;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.embedded.EmbeddedChannel;
import org.junit.Test;
import com.codahale.metrics.MetricRegistry;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqMessageHeader;
import com.pinterest.memq.client.commons.serde.ByteArraySerializer;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.client.producer.TaskRequest;
import com.pinterest.memq.client.producer.netty.MemqNettyProducer;
import com.pinterest.memq.client.producer.netty.MemqNettyRequest;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.WriteRequestPacket;
import com.pinterest.memq.commons.storage.DelayedDevNullStorageHandler;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.WriteFailedException;
import com.pinterest.memq.core.MemqManager;
import com.pinterest.memq.core.clustering.MemqGovernor;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.commons.MessageBufferInputStream;
import com.pinterest.memq.core.config.EnvironmentProvider;
import com.pinterest.memq.core.config.MemqConfig;
import com.pinterest.memq.core.config.NettyServerConfig;
import com.pinterest.memq.core.integration.TestEnvironmentProvider;
import com.pinterest.memq.core.rpc.MemqNettyServer;
import com.pinterest.memq.core.rpc.TestAuditor;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
/**
 * Tests for {@link BucketingTopicProcessor}: one end-to-end run through a real
 * netty server plus focused tests of the time-, count-, size- and
 * header-validation dispatch paths against in-memory {@link StorageHandler}
 * stubs. Byte counts accumulated by the stubs are compared against what the
 * test wrote to prove no data is lost or duplicated.
 */
public class TestBucketingTopicProcessor {

  /**
   * Writes 1M records through a MemqNettyServer into the delayed dev-null
   * handler and verifies every message, byte and audit event is accounted for.
   * The 5-6s artificial storage delay (per the delay.*.millis properties)
   * exercises back-pressure on the ring buffer.
   */
  @Test
  public void testFullBatches() throws Exception {
    MemqConfig configuration = new MemqConfig();
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setPort((short) 23434);
    configuration.setNettyServerConfig(nettyServerConfig);
    TopicConfig topicConfig = new TopicConfig("test", "delayeddevnull");
    topicConfig.setEnableBucketing2Processor(true);
    topicConfig.setOutputParallelism(60);
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setBatchSizeMB(2);
    topicConfig.setRingBufferSize(4);
    topicConfig.setTickFrequencyMillis(10);
    topicConfig.setBatchMilliSeconds(2000);
    Properties outputHandlerConfig = new Properties();
    outputHandlerConfig.setProperty("delay.min.millis", "5000");
    outputHandlerConfig.setProperty("delay.max.millis", "6000");
    topicConfig.setStorageHandlerConfig(outputHandlerConfig);
    configuration.setTopicConfig(new TopicConfig[] { topicConfig });
    MemqManager memqManager = new MemqManager(null, configuration, new HashMap<>());
    memqManager.init();
    EnvironmentProvider provider = new TestEnvironmentProvider();
    MemqGovernor governor = new MemqGovernor(memqManager, configuration, provider);
    MemqNettyServer server = new MemqNettyServer(configuration, memqManager, governor,
        new HashMap<>(), null);
    server.initialize();
    Properties auditConfigs = new Properties();
    // route audit events to the in-memory TestAuditor so they can be counted
    auditConfigs.setProperty("class", "com.pinterest.memq.core.rpc.TestAuditor");
    // run tests
    MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster",
        new InetSocketAddress("localhost", 23434), "test", 10, 1000000, Compression.NONE, false, 10,
        60_000, "local", 60_000, auditConfigs, null);
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    byte[] value = UUID.randomUUID().toString().getBytes();
    Set<Future<MemqWriteResult>> futures = new LinkedHashSet<>();
    for (int i = 0; i < 1_000_000; i++) {
      Future<MemqWriteResult> writeToTopic = producer.writeToTopic(null, value);
      futures.add(writeToTopic);
    }
    producer.finalizeRequest();
    // every write must be acked within the timeout
    for (Future<MemqWriteResult> future : futures) {
      future.get(30, TimeUnit.SECONDS);
    }
    System.out.println("Completed all writes");
    Thread.sleep(500);
    producer.close();
    // server should have written 55 batches
    assertEquals(futures.size(), DelayedDevNullStorageHandler.getCounter());
    assertEquals(futures.size(), TestAuditor.getAuditMessageList().size());
    assertEquals(MemqNettyRequest.getByteCounter(), DelayedDevNullStorageHandler.getByteCounter());
    assertEquals(MemqNettyRequest.getByteCounter(),
        DelayedDevNullStorageHandler.getInputStreamCounter());
    assertEquals(MemqNettyRequest.getAckedByteCounter(), MemqNettyRequest.getByteCounter());
    server.getChildGroup().shutdownGracefully().sync();
    server.getParentGroup().shutdownGracefully().sync();
    server.getServerChannelFuture().channel().closeFuture().sync();
  }

  /**
   * With a 100ms batch window and 500ms gaps between writes, every record must
   * be dispatched by the time-based trigger: 20 time-based batches and zero
   * size- or count-based ones.
   */
  @Test
  public void testTimeBasedBatch() throws InterruptedException {
    MetricRegistry registry = new MetricRegistry();
    TopicConfig topicConfig = new TopicConfig();
    topicConfig.setTopic("test");
    topicConfig.setBatchMilliSeconds(100);
    topicConfig.setBatchSizeMB(10);
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setRingBufferSize(100);
    final AtomicInteger totalBytes = new AtomicInteger();
    // handler that simply drains each batch and tallies the bytes it received
    StorageHandler outputHandler = new StorageHandler() {
      @Override
      public void writeOutput(int sizeInBytes,
                              int checksum,
                              List<Message> messages) throws WriteFailedException {
        MessageBufferInputStream mis = new MessageBufferInputStream(messages, null);
        @SuppressWarnings("unused")
        int b;
        try {
          int counter = 0;
          while ((b = mis.read()) != -1) {
            counter++;
          }
          totalBytes.addAndGet(counter);
          System.out.println(counter + " " + sizeInBytes + " " + messages.size());
        } catch (IOException e) {
          // TODO Auto-generated catch block
          e.printStackTrace();
        }
      }

      @Override
      public String getReadUrl() {
        return null;
      }
    };
    ScheduledExecutorService timerService = Executors.newScheduledThreadPool(1,
        new DaemonThreadFactory());
    BucketingTopicProcessor processor = new BucketingTopicProcessor(registry, topicConfig,
        outputHandler, timerService, null);
    int bytesWritten = 0;
    try {
      for (int i = 0; i < 20; i++) {
        StringBuilder builder = new StringBuilder();
        for (int k = 0; k < ThreadLocalRandom.current().nextInt(1,20); k++) {
          builder.append(UUID.randomUUID().toString());
        }
        byte[] bytes = builder.toString().getBytes();
        ByteBuf buf = Unpooled.wrappedBuffer(bytes);
        WriteRequestPacket payload = new WriteRequestPacket(true, "test".getBytes(), false, 0, buf);
        RequestPacket packet = new RequestPacket(RequestType.PROTOCOL_VERSION,
            ThreadLocalRandom.current().nextLong(), RequestType.WRITE, payload);
        processor.write(packet, payload, getTestContext());
        bytesWritten += bytes.length;
        Thread.sleep(500);
      }
    } catch (Exception e) {
      // NOTE(review): exception swallowed — a failed write only surfaces
      // indirectly via the count assertions below
    }
    processor.stopAndAwait();
    assertEquals(0, registry.counter("batching.sizedbasedbatch").getCount());
    assertEquals(20, registry.counter("batching.timebasedbatch").getCount());
    assertEquals(0, registry.counter("batching.countbasedbatch").getCount());
    assertEquals(bytesWritten, totalBytes.get());
  }

  /**
   * With maxDispatchCount=1 each write must trigger a count-based dispatch:
   * 20 count-based batches and zero time- or size-based ones. Payloads carry a
   * real MemqMessageHeader because server-side header validation is enabled.
   */
  @Test
  public void testCountBasedBatch() throws InterruptedException {
    MetricRegistry registry = new MetricRegistry();
    TopicConfig topicConfig = new TopicConfig();
    topicConfig.setTopic("test");
    topicConfig.setBatchMilliSeconds(100);
    topicConfig.setBatchSizeMB(10);
    topicConfig.setMaxDispatchCount(1);
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setRingBufferSize(100);
    topicConfig.setEnableServerHeaderValidation(true);
    final AtomicInteger totalBytes = new AtomicInteger();
    // handler that drains each message's buffer directly and tallies bytes
    StorageHandler outputHandler = new StorageHandler() {
      @Override
      public void writeOutput(int sizeInBytes,
                              int checksum,
                              List<Message> messages) throws WriteFailedException {
        @SuppressWarnings("unused")
        int b;
        try {
          int counter = 0;
          for (Message m : messages) {
            ByteBufInputStream bbis = new ByteBufInputStream(m.getBuf());
            while ((b = bbis.read()) != -1) {
              counter++;
            }
            bbis.close();
          }
          totalBytes.addAndGet(counter);
          System.out.println(counter + " " + sizeInBytes + " " + messages.size());
        } catch (IOException e) {
          e.printStackTrace();
        }
      }

      @Override
      public String getReadUrl() {
        return null;
      }
    };
    ScheduledExecutorService timerService = Executors.newScheduledThreadPool(1,
        new DaemonThreadFactory());
    BucketingTopicProcessor processor = new BucketingTopicProcessor(registry, topicConfig,
        outputHandler, timerService, null);
    int bytesWritten = 0;
    try {
      for (int i = 0; i < 20; i++) {
        StringBuilder builder = new StringBuilder();
        for (int k = 0; k < ThreadLocalRandom.current().nextInt(1,20); k++) {
          builder.append(UUID.randomUUID().toString());
        }
        builder.append((char)0xff);
        byte[] bytes = builder.toString().getBytes();
        TaskRequest tr = new TestTaskRequest("test", i, 20000);
        MemqMessageHeader header = new MemqMessageHeader(tr);
        ByteBuf payloadBytes = Unpooled.buffer();
        // reserve space for the header, append the body, then back-fill the header
        payloadBytes.writerIndex(MemqMessageHeader.getHeaderLength());
        payloadBytes.writeBytes(bytes);
        header.writeHeader(payloadBytes);
        WriteRequestPacket payload = new WriteRequestPacket(true, "test".getBytes(), false, 0,
            payloadBytes);
        RequestPacket packet = new RequestPacket(RequestType.PROTOCOL_VERSION,
            ThreadLocalRandom.current().nextLong(), RequestType.WRITE, payload);
        processor.write(packet, payload, getTestContext());
        bytesWritten += payloadBytes.readableBytes();
        Thread.sleep(200);
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
    processor.stopAndAwait();
    assertEquals(0, registry.counter("batching.sizedbasedbatch").getCount());
    assertEquals(0, registry.counter("batching.timebasedbatch").getCount());
    assertEquals(20, registry.counter("batching.countbasedbatch").getCount());
    assertEquals(bytesWritten, totalBytes.get());
  }

  /**
   * With a 10s batch window that never elapses, forceDispatch() must flush all
   * buffered bytes before stopAndAwait().
   */
  @Test
  public void testWriteCompleteness() throws InterruptedException {
    MetricRegistry registry = new MetricRegistry();
    TopicConfig topicConfig = new TopicConfig();
    topicConfig.setTopic("test");
    topicConfig.setBatchMilliSeconds(10000);
    topicConfig.setBatchSizeMB(10);
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setRingBufferSize(100);
    final AtomicInteger totalBytes = new AtomicInteger();
    StorageHandler outputHandler = new StorageHandler() {
      @Override
      public void writeOutput(int sizeInBytes,
                              int checksum,
                              List<Message> messages) throws WriteFailedException {
        MessageBufferInputStream mis = new MessageBufferInputStream(messages, null);
        @SuppressWarnings("unused")
        int b;
        try {
          int counter = 0;
          while ((b = mis.read()) != -1) {
            counter++;
          }
          totalBytes.addAndGet(counter);
          System.out.println(counter + " " + sizeInBytes + " " + messages.size());
        } catch (IOException e) {
          // TODO Auto-generated catch block
          e.printStackTrace();
        }
      }

      @Override
      public String getReadUrl() {
        return null;
      }
    };
    ScheduledExecutorService timerService = Executors.newScheduledThreadPool(1,
        new DaemonThreadFactory());
    BucketingTopicProcessor processor = new BucketingTopicProcessor(registry, topicConfig,
        outputHandler, timerService, null);
    int bytesWritten = 0;
    try {
      for (int i = 0; i < 20; i++) {
        StringBuilder builder = new StringBuilder();
        for (int k = 0; k < ThreadLocalRandom.current().nextInt(1,20); k++) {
          builder.append(UUID.randomUUID().toString());
        }
        byte[] bytes = builder.toString().getBytes();
        ByteBuf buf = Unpooled.wrappedBuffer(bytes);
        WriteRequestPacket payload = new WriteRequestPacket(true, "test".getBytes(), false, 0, buf);
        RequestPacket packet = new RequestPacket(RequestType.PROTOCOL_VERSION,
            ThreadLocalRandom.current().nextLong(), RequestType.WRITE, payload);
        processor.write(packet, payload, getTestContext());
        bytesWritten += bytes.length;
      }
      processor.forceDispatch();
    } catch (Exception e) {
      // NOTE(review): exception swallowed — failures only surface via the
      // byte-count assertion below
    }
    processor.stopAndAwait();
    assertEquals(bytesWritten, totalBytes.get());
  }

  /**
   * With header validation enabled and deliberately garbage header bytes
   * (0x7F-filled), every one of the 20 writes must be flagged invalid, while
   * the payload bytes are still delivered to storage.
   */
  @Test
  public void testHeaderValidation() throws InterruptedException {
    MetricRegistry registry = new MetricRegistry();
    TopicConfig topicConfig = new TopicConfig();
    topicConfig.setTopic("test");
    topicConfig.setBatchMilliSeconds(100);
    topicConfig.setBatchSizeMB(10);
    topicConfig.setMaxDispatchCount(1);
    topicConfig.setBufferSize(1024 * 1024);
    topicConfig.setRingBufferSize(100);
    topicConfig.setEnableServerHeaderValidation(true);
    final AtomicInteger totalBytes = new AtomicInteger();
    StorageHandler outputHandler = new StorageHandler() {
      @Override
      public void writeOutput(int sizeInBytes,
                              int checksum,
                              List<Message> messages) throws WriteFailedException {
        MessageBufferInputStream mis = new MessageBufferInputStream(messages, null);
        @SuppressWarnings("unused")
        int b;
        try {
          int counter = 0;
          while ((b = mis.read()) != -1) {
            counter++;
          }
          totalBytes.addAndGet(counter);
          System.out.println(counter + " " + sizeInBytes + " " + messages.size());
        } catch (IOException e) {
          e.printStackTrace();
        }
      }

      @Override
      public String getReadUrl() {
        return null;
      }
    };
    ScheduledExecutorService timerService = Executors.newScheduledThreadPool(1,
        new DaemonThreadFactory());
    BucketingTopicProcessor processor = new BucketingTopicProcessor(registry, topicConfig,
        outputHandler, timerService, null);
    int bytesWritten = 0;
    try {
      for (int i = 0; i < 20; i++) {
        StringBuilder builder = new StringBuilder();
        for (int k = 0; k < ThreadLocalRandom.current().nextInt(1,20); k++) {
          builder.append(UUID.randomUUID().toString());
        }
        byte[] bytes = builder.toString().getBytes();
        TaskRequest tr = new TestTaskRequest("test", i, 20000);
        MemqMessageHeader header = new MemqMessageHeader(tr);
        ByteBuf payloadBytes = Unpooled.buffer();
        // fill the header region with garbage so validation must reject it
        for (int j = 0; j < MemqMessageHeader.getHeaderLength(); j++) {
          payloadBytes.writeByte(0x7F);
        }
        payloadBytes.writeBytes(bytes);
        WriteRequestPacket payload = new WriteRequestPacket(true, "test".getBytes(), false, 0,
            payloadBytes);
        RequestPacket packet = new RequestPacket(RequestType.PROTOCOL_VERSION,
            ThreadLocalRandom.current().nextLong(), RequestType.WRITE, payload);
        processor.write(packet, payload, getTestContext());
        bytesWritten += payloadBytes.readableBytes();
        Thread.sleep(500);
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
    processor.stopAndAwait();
    // the rejection can land in any of the three invalid-header buckets
    long invalidCount = registry.counter("tp.message.invalid.header.message_length_too_large")
        .getCount()
        + registry.counter("tp.message.invalid.header.message_length_negative").getCount()
        + registry.counter("tp.message.invalid.header.exception").getCount();
    assertEquals(20L, invalidCount);
    assertEquals(bytesWritten, totalBytes.get());
  }

  // Builds a throwaway netty context (EmbeddedChannel) for processor.write().
  private ChannelHandlerContext getTestContext() {
    Channel ch = new EmbeddedChannel();
    ch.pipeline().addLast(new ChannelDuplexHandler());
    return ch.pipeline().firstContext();
  }

  // Minimal TaskRequest so a MemqMessageHeader can be constructed in tests;
  // never actually executed.
  private static class TestTaskRequest extends TaskRequest {

    public TestTaskRequest(String topicName,
                           long currentRequestId,
                           int maxPayLoadBytes) throws IOException {
      super(topicName, currentRequestId, Compression.NONE, null, false, maxPayLoadBytes, 10000,
          null, 10000);
    }

    @Override
    protected MemqWriteResult runRequest() throws Exception {
      return null;
    }

    @Override
    public long getEpoch() {
      return 0;
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/rpc/TestAuditMessage.java | memq/src/test/java/com/pinterest/memq/core/rpc/TestAuditMessage.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.rpc;
/**
 * Value object capturing a single audit event recorded by {@code TestAuditor},
 * so tests can later assert on what was audited.
 *
 * Adds the previously missing {@code isProducer()} accessor and
 * {@code setMessageCount(int)} / {@code setProducer(boolean)} mutators so the
 * bean is consistent with the other fields (all of which had getter+setter);
 * existing callers are unaffected.
 */
public class TestAuditMessage {

  byte[] cluster, hash, topic, hostAddress;
  long epoch, id;
  int messageCount;
  boolean isProducer;

  public TestAuditMessage(byte[] cluster,
                          byte[] hash,
                          byte[] topic,
                          byte[] hostAddress,
                          long epoch,
                          long id,
                          int messageCount,
                          boolean isProducer) {
    this.cluster = cluster;
    this.hash = hash;
    this.topic = topic;
    this.hostAddress = hostAddress;
    this.epoch = epoch;
    this.id = id;
    this.messageCount = messageCount;
    this.isProducer = isProducer;
  }

  public byte[] getCluster() {
    return cluster;
  }

  public void setCluster(byte[] cluster) {
    this.cluster = cluster;
  }

  public byte[] getHash() {
    return hash;
  }

  public void setHash(byte[] hash) {
    this.hash = hash;
  }

  public byte[] getTopic() {
    return topic;
  }

  public void setTopic(byte[] topic) {
    this.topic = topic;
  }

  public byte[] getHostAddress() {
    return hostAddress;
  }

  public void setHostAddress(byte[] hostAddress) {
    this.hostAddress = hostAddress;
  }

  public long getEpoch() {
    return epoch;
  }

  public void setEpoch(long epoch) {
    this.epoch = epoch;
  }

  public long getId() {
    return id;
  }

  public void setId(long id) {
    this.id = id;
  }

  public int getMessageCount() {
    return messageCount;
  }

  public void setMessageCount(int messageCount) {
    this.messageCount = messageCount;
  }

  /** @return whether the audited event originated from a producer */
  public boolean isProducer() {
    return isProducer;
  }

  public void setProducer(boolean isProducer) {
    this.isProducer = isProducer;
  }
}
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/rpc/TestAuditor.java | memq/src/test/java/com/pinterest/memq/core/rpc/TestAuditor.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.rpc;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import com.pinterest.memq.client.commons.audit.Auditor;
public class TestAuditor extends Auditor {
private static List<TestAuditMessage> auditMessageList = new ArrayList<>();
@Override
public void init(Properties props) throws Exception {
}
@Override
public void auditMessage(byte[] cluster,
byte[] topic,
byte[] hostAddress,
long epoch,
long id,
byte[] hash,
int messageCount,
boolean isProducer,
String clientId) throws IOException {
synchronized(TestAuditor.class) {
auditMessageList.add(new TestAuditMessage(cluster, hash, topic, hostAddress, epoch, id,
messageCount, isProducer));
}
}
@Override
public void close() {
}
public static List<TestAuditMessage> getAuditMessageList() {
return auditMessageList;
}
public static void reset() {
auditMessageList.clear();
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/utils/TestCoreUtils.java | memq/src/test/java/com/pinterest/memq/core/utils/TestCoreUtils.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.utils;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.processing.ProcessingStatus;
public class TestCoreUtils {

  /**
   * batchSizeInBytes must equal the sum of the payload sizes of every message
   * in the batch, independent of each message's buffer capacity.
   */
  @Test
  public void testBatchSizeCalculator() {
    List<Message> batch = new ArrayList<>();
    int totalBytes = 0;
    for (int i = 0; i < 100; i++) {
      int size = ThreadLocalRandom.current().nextInt(800);
      totalBytes += size;
      byte[] testData = new byte[size];
      Message m = new Message(1024, false);
      m.put(testData);
      batch.add(m);
    }
    assertEquals(totalBytes, CoreUtils.batchSizeInBytes(batch));
  }

  /**
   * An entry in a Guava cache configured with expireAfterWrite(1s) must be
   * gone after a periodic cleanUp() runs past the expiry window.
   * NOTE(review): relies on wall-clock sleeps (3s) — may be flaky on a
   * heavily loaded CI host.
   */
  @Test
  public void testLRUExpiration() throws InterruptedException {
    LoadingCache<Long, ProcessingStatus> ackMap = CacheBuilder.newBuilder().maximumSize(10000)
        .expireAfterWrite(1, TimeUnit.SECONDS)
        .removalListener(new RemovalListener<Long, ProcessingStatus>() {
          @Override
          public void onRemoval(RemovalNotification<Long, ProcessingStatus> notification) {
            System.out.println("Removed");
          }
        }).build(new CacheLoader<Long, ProcessingStatus>() {
          @Override
          public ProcessingStatus load(Long key) throws Exception {
            // only invoked by get()-style reads; this test uses put(), so the
            // loader just logs and returns a default
            System.out.println("Loading:" + key);
            return ProcessingStatus.FAILED;
          }
        });
    ackMap.put(1L, ProcessingStatus.PENDING);
    assertEquals(1, ackMap.size());
    // periodic cleanUp() evicts expired entries eagerly (Guava otherwise
    // cleans up lazily on access)
    Executors.newScheduledThreadPool(1, DaemonThreadFactory.INSTANCE)
        .scheduleWithFixedDelay(() -> ackMap.cleanUp(), 0, 1, TimeUnit.SECONDS);
    Thread.sleep(3000);
    assertEquals(0, ackMap.size());
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/clustering/TestPartitionBalanceStrategy.java | memq/src/test/java/com/pinterest/memq/core/clustering/TestPartitionBalanceStrategy.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.clustering;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.junit.Test;
import com.google.common.collect.Sets;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import com.pinterest.memq.commons.protocol.TopicConfig;
public class TestPartitionBalanceStrategy {
/**
 * Nine write brokers across three AZs and three topics of differing traffic:
 * every broker must get at least one topic, and re-running the balancer with
 * one extra topic must not move any pre-existing assignment (stability).
 */
@Test
public void testPartitionBalanceStrategy() {
  short port = 9092;
  BalanceStrategy strategy = new ExpirationPartitionBalanceStrategy();
  Set<Broker> brokers = new HashSet<>(
      Arrays.asList(new Broker("1.1.1.9", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
          new Broker("1.1.1.8", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
          new Broker("1.1.1.7", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
          new Broker("1.1.1.6", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
          new Broker("1.1.1.5", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
          new Broker("1.1.1.4", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
          new Broker("1.1.1.3", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
          new Broker("1.1.1.2", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
          new Broker("1.1.1.1", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>())));
  Set<TopicConfig> topics = new HashSet<>(
      Arrays.asList(
          new TopicConfig(1, 1024 * 1024 * 2, 256, "test1", 15, 100, 2),
          new TopicConfig(0, 1024 * 1024 * 2, 256, "test2", 15, 200, 2),
          new TopicConfig(2, 1024 * 1024 * 2, 256, "test3", 50, 500, 2)));
  Set<Broker> newBrokers = strategy.balance(topics, brokers);
  for (Broker broker : newBrokers) {
    assertTrue(broker.getAssignedTopics().size() > 0);
  }
  // snapshot broker -> topic allocation before adding a new topic
  Map<String, Set<String>> one = buildAllocationMap(newBrokers);
  System.out.println("Rebalancing again");
  topics = new HashSet<>(Arrays.asList(
      new TopicConfig(1, 1024 * 1024 * 2, 256, "test1", 15, 100, 2),
      new TopicConfig(0, 1024 * 1024 * 2, 256, "test2", 15, 200, 2),
      new TopicConfig(2, 1024 * 1024 * 2, 256, "test3", 50, 500, 2),
      new TopicConfig(3, 1024 * 1024 * 2, 256, "test4", 5, 10, 2)));
  newBrokers = strategy.balance(topics, newBrokers);
  Map<String, Set<String>> two = buildAllocationMap(newBrokers);
  // every allocation from round one must survive round two unchanged
  for (Entry<String, Set<String>> entry : one.entrySet()) {
    Set<String> oneSet = entry.getValue();
    Set<String> twoSet = two.get(entry.getKey());
    assertTrue(
        "Broker " + entry.getKey() + " rebalancing should be stable for new topics being added, i.e. adding of new topics shouldn't cause existing topic allocations to be changed",
        Sets.difference(oneSet, twoSet).isEmpty());
  }
}
@Test
public void testUpdateConfigs() {
short port = 9092;
BalanceStrategy strategy = new ExpirationPartitionBalanceStrategy();
Set<Broker> brokers = new HashSet<>(
Arrays.asList(new Broker("1.1.1.9", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.8", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.7", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.6", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.5", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.4", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.3", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.2", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.1", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>())));
TopicConfig baseConfig = new TopicConfig(0, 1024 * 1024 * 2, 256, "", 15, 100, 2);
Set<TopicConfig> topics = new HashSet<>();
for (int i = 0; i < 3; i++) {
TopicConfig conf = new TopicConfig(baseConfig);
conf.setTopicOrder(i);
conf.setTopic("test" + i);
conf.setInputTrafficMB((i + 1) * 100);
conf.setStorageHandlerName("delayeddevnull");
conf.setClusteringMultiplier(2);
topics.add(conf);
}
Set<Broker> newBrokers = strategy.balance(topics, brokers);
Map<Broker, Set<TopicConfig>> firstBrokerConfig = new HashMap<>();
for (Broker broker : newBrokers) {
assertTrue(broker.getAssignedTopics().size() > 0);
assertTrue(broker.getAssignedTopics().stream().allMatch(tc -> tc.getStorageHandlerName().equals("delayeddevnull")));
firstBrokerConfig.put(broker, new HashSet<>(broker.getAssignedTopics()));
}
System.out.println("Rebalancing again");
topics.clear();
for (int i = 0; i < 3; i++) {
TopicConfig conf = new TopicConfig(baseConfig);
conf.setTopicOrder(i);
conf.setTopic("test" + i);
conf.setInputTrafficMB((i + 1) * 100);
conf.setStorageHandlerName("customs3aync2");
conf.setClusteringMultiplier(2);
topics.add(conf);
}
newBrokers = strategy.balance(topics, newBrokers);
for (Broker broker : newBrokers) {
assertEquals(firstBrokerConfig.get(broker), broker.getAssignedTopics());
assertTrue(broker.getAssignedTopics().stream().allMatch(tc -> tc.getStorageHandlerName().equals("customs3aync2")));
}
}
@Test
public void testPartitionBalanceStrategyShrink() {
short port = 9092;
BalanceStrategy strategy = new ExpirationPartitionBalanceStrategy();
Set<Broker> brokers = new HashSet<>(
Arrays.asList(new Broker("1.1.1.9", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.8", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.7", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.6", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.5", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.4", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.3", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.2", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.1", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>())));
Set<TopicConfig> topics = new HashSet<>(
Arrays.asList(
new TopicConfig(1, 1024 * 1024 * 2, 256, "test1", 15, 150, 2),
new TopicConfig(0, 1024 * 1024 * 2, 256, "test2", 15, 200, 2),
new TopicConfig(2, 1024 * 1024 * 2, 256, "test3", 50, 500, 2)));
Set<Broker> newBrokers = strategy.balance(topics, brokers);
for (Broker broker : newBrokers) {
assertTrue(broker.getAssignedTopics().size() > 0);
}
Map<String, Set<String>> one = buildAllocationMap(newBrokers);
int min1 = newBrokers.stream().mapToInt(Broker::getAvailableCapacity).min().getAsInt();
System.out.println("Rebalancing again");
topics = new HashSet<>(Arrays.asList(
new TopicConfig(1, 1024 * 1024 * 2, 256, "test1", 15, 150, 2),
new TopicConfig(0, 1024 * 1024 * 2, 256, "test2", 15, 200, 2),
new TopicConfig(2, 1024 * 1024 * 2, 256, "test3", 50, 100, 2)));
newBrokers = strategy.balance(topics, newBrokers);
Map<String, Set<String>> two = buildAllocationMap(newBrokers);
int min2 = newBrokers.stream().mapToInt(Broker::getAvailableCapacity).min().getAsInt();
Map<String, Integer> oneTopicCount = new HashMap<>();
for(Set<String> assignments : one.values()) {
assignments.forEach(t -> oneTopicCount.compute(t, (k, v) -> v == null ? 1 : (v + 1)));
}
Map<String, Integer> twoTopicCount = new HashMap<>();
for(Set<String> assignments : two.values()) {
assignments.forEach(t -> twoTopicCount.compute(t, (k, v) -> v == null ? 1 : (v + 1)));
}
assertTrue(min2 > min1);
assertEquals(0, oneTopicCount.get("test1") - twoTopicCount.get("test1"));
assertEquals(0, oneTopicCount.get("test2") - twoTopicCount.get("test2"));
assertEquals(3, oneTopicCount.get("test3") - twoTopicCount.get("test3"));
}
private Map<String, Set<String>> buildAllocationMap(Set<Broker> brokers) {
Map<String, Set<String>> brokerAllocationMap = new HashMap<>();
for (Broker broker : brokers) {
Set<String> set = new HashSet<>();
brokerAllocationMap.put(broker.getBrokerIP(), set);
for (TopicConfig topicConfig : broker.getAssignedTopics()) {
set.add(topicConfig.getTopic());
}
}
return brokerAllocationMap;
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/clustering/TestExpirationPartitionBalanceStrategy.java | memq/src/test/java/com/pinterest/memq/core/clustering/TestExpirationPartitionBalanceStrategy.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.clustering;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import com.google.common.collect.Sets;
import com.pinterest.memq.core.config.MemqConfig;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.mockito.Mockito;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
@RunWith(Parameterized.class)
public class TestExpirationPartitionBalanceStrategy {
@Parameterized.Parameters(name = "Class: {0}")
public static BalanceStrategy[] strategies() {
MemqConfig mockMemqConfig = Mockito.mock(MemqConfig.class);
Mockito.when(mockMemqConfig.getOpenTsdbConfig()).thenReturn(null);
return new BalanceStrategy[] {new ExpirationPartitionBalanceStrategy(), new ExpirationPartitionBalanceStrategyWithAssignmentFreeze(mockMemqConfig)};
}
private final BalanceStrategy strategy;
public TestExpirationPartitionBalanceStrategy(BalanceStrategy strategy) {
this.strategy = strategy;
}
@Test
public void testExpirationPartitionBalanceStrategy() {
short port = 9092;
Set<Broker> brokers = new HashSet<>(
Arrays.asList(new Broker("1.1.1.9", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.8", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.7", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.6", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.5", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.4", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.3", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.2", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.1", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>())));
Set<TopicConfig> topics = new HashSet<>(
Arrays.asList(
new TopicConfig(1, 1024 * 1024 * 2, 256, "test1", 15, 100, 2),
new TopicConfig(0, 1024 * 1024 * 2, 256, "test2", 15, 200, 2),
new TopicConfig(2, 1024 * 1024 * 2, 256, "test3", 50, 500, 2)));
Set<Broker> newBrokers = strategy.balance(topics, brokers);
for (Broker broker : newBrokers) {
assertTrue(broker.getAssignedTopics().size() > 0);
}
Map<String, Set<String>> one = buildAllocationMap(newBrokers);
System.out.println("Rebalancing again");
topics = new HashSet<>(Arrays.asList(
new TopicConfig(1, 1024 * 1024 * 2, 256, "test1", 15, 100, 2),
new TopicConfig(0, 1024 * 1024 * 2, 256, "test2", 15, 200, 2),
new TopicConfig(2, 1024 * 1024 * 2, 256, "test3", 50, 500, 2),
new TopicConfig(3, 1024 * 1024 * 2, 256, "test4", 5, 10, 2)));
newBrokers = strategy.balance(topics, newBrokers);
Map<String, Set<String>> two = buildAllocationMap(newBrokers);
for (Entry<String, Set<String>> entry : one.entrySet()) {
Set<String> oneSet = entry.getValue();
Set<String> twoSet = two.get(entry.getKey());
assertTrue(
"Broker " + entry.getKey() + " rebalancing should be stable for new topics being added, i.e. adding of new topics shouldn't cause existing topic allocations to be changed",
Sets.difference(oneSet, twoSet).isEmpty());
}
}
@Test
public void testUpdateConfigs() {
short port = 9092;
Set<Broker> brokers = new HashSet<>(
Arrays.asList(new Broker("1.1.1.9", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.8", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.7", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.6", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.5", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.4", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.3", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.2", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.1", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>())));
TopicConfig baseConfig = new TopicConfig(0, 1024 * 1024 * 2, 256, "", 15, 100, 2);
Set<TopicConfig> topics = new HashSet<>();
for (int i = 0; i < 3; i++) {
TopicConfig conf = new TopicConfig(baseConfig);
conf.setTopicOrder(i);
conf.setTopic("test" + i);
conf.setInputTrafficMB(i * 100);
conf.setStorageHandlerName("delayeddevnull");
topics.add(conf);
}
Set<Broker> newBrokers = strategy.balance(topics, brokers);
Map<Broker, Set<TopicConfig>> firstBrokerConfig = new HashMap<>();
for (Broker broker : newBrokers) {
assertTrue(broker.getAssignedTopics().size() > 0);
assertTrue(broker.getAssignedTopics().stream().allMatch(tc -> tc.getStorageHandlerName().equals("delayeddevnull")));
firstBrokerConfig.put(broker, new HashSet<>(broker.getAssignedTopics()));
}
System.out.println("Rebalancing again");
topics.clear();
for (int i = 0; i < 3; i++) {
TopicConfig conf = new TopicConfig(baseConfig);
conf.setTopicOrder(i);
conf.setTopic("test" + i);
conf.setInputTrafficMB(i * 100);
conf.setStorageHandlerName("customs3aync2");
conf.setClusteringMultiplier(2);
topics.add(conf);
}
newBrokers = strategy.balance(topics, brokers);
for (Broker broker : newBrokers) {
assertEquals(firstBrokerConfig.get(broker), broker.getAssignedTopics());
assertTrue(broker.getAssignedTopics().stream().allMatch(tc -> tc.getStorageHandlerName().equals("customs3aync2")));
}
}
@Test
public void testPartitionBalanceStrategyShrink() {
short port = 9092;
Set<Broker> brokers = new HashSet<>(
Arrays.asList(new Broker("1.1.1.9", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.8", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.7", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.6", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.5", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.4", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.3", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.2", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.1", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>())));
Set<TopicConfig> topics = new HashSet<>(
Arrays.asList(
new TopicConfig(1, 1024 * 1024 * 2, 256, "test1", 15, 150, 2),
new TopicConfig(0, 1024 * 1024 * 2, 256, "test2", 15, 200, 2),
new TopicConfig(2, 1024 * 1024 * 2, 256, "test3", 50, 500, 2)));
Set<Broker> newBrokers = strategy.balance(topics, brokers);
for (Broker broker : newBrokers) {
assertTrue(broker.getAssignedTopics().size() > 0);
}
Map<String, Set<String>> one = buildAllocationMap(newBrokers);
int min1 = newBrokers.stream().mapToInt(Broker::getAvailableCapacity).min().getAsInt();
System.out.println("Rebalancing again");
topics = new HashSet<>(Arrays.asList(
new TopicConfig(1, 1024 * 1024 * 2, 256, "test1", 15, 150, 2),
new TopicConfig(0, 1024 * 1024 * 2, 256, "test2", 15, 200, 2),
new TopicConfig(2, 1024 * 1024 * 2, 256, "test3", 50, 100, 2)));
newBrokers = strategy.balance(topics, newBrokers);
Map<String, Set<String>> two = buildAllocationMap(newBrokers);
int min2 = newBrokers.stream().mapToInt(Broker::getAvailableCapacity).min().getAsInt();
Map<String, Integer> oneTopicCount = new HashMap<>();
for(Set<String> assignments : one.values()) {
assignments.forEach(t -> oneTopicCount.compute(t, (k, v) -> v == null ? 1 : (v + 1)));
}
Map<String, Integer> twoTopicCount = new HashMap<>();
for(Set<String> assignments : two.values()) {
assignments.forEach(t -> twoTopicCount.compute(t, (k, v) -> v == null ? 1 : (v + 1)));
}
assertTrue(min2 > min1);
assertEquals(0, oneTopicCount.get("test1") - twoTopicCount.get("test1"));
assertEquals(0, oneTopicCount.get("test2") - twoTopicCount.get("test2"));
assertEquals(3, oneTopicCount.get("test3") - twoTopicCount.get("test3"));
}
@Test
public void testExpiringAssignments() throws Exception {
short port = 9092;
if (strategy instanceof ExpirationPartitionBalanceStrategy)
((ExpirationPartitionBalanceStrategy) strategy).setDefaultExpirationTime(1000);
else if (strategy instanceof ExpirationPartitionBalanceStrategyWithAssignmentFreeze)
((ExpirationPartitionBalanceStrategyWithAssignmentFreeze) strategy).setDefaultExpirationTime(1000);
Set<Broker> brokers = new HashSet<>(
Arrays.asList(new Broker("1.1.1.9", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.8", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.7", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.6", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.5", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.4", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.3", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.2", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.1", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>())));
TopicConfig baseConfig = new TopicConfig(0, 1024 * 1024 * 2, 256, "", 15, 100, 2);
Set<TopicConfig> topics = new HashSet<>();
for (int i = 0; i < 3; i++) {
TopicConfig conf = new TopicConfig(baseConfig);
conf.setTopicOrder(i);
conf.setTopic("test" + i);
conf.setInputTrafficMB((i + 1) * 100);
conf.setStorageHandlerName("delayeddevnull");
conf.setClusteringMultiplier(2);
topics.add(conf);
}
Set<Broker> newBrokers = strategy.balance(topics, brokers);
System.out.println(buildAllocationMap(newBrokers));
Map<Broker, Set<TopicConfig>> firstBrokerConfig = new HashMap<>();
for (Broker broker : newBrokers) {
assertTrue(broker.getAssignedTopics().size() > 0);
assertTrue(broker.getAssignedTopics().stream().allMatch(tc -> tc.getStorageHandlerName().equals("delayeddevnull")));
firstBrokerConfig.put(broker, new HashSet<>(broker.getAssignedTopics()));
}
System.out.println("Rebalancing again");
topics.clear();
for (int i = 0; i < 3; i++) {
TopicConfig conf = new TopicConfig(baseConfig);
conf.setTopicOrder(i);
conf.setTopic("test" + i);
conf.setInputTrafficMB((i + 1) * 100);
conf.setStorageHandlerName("customs3aync2");
conf.setClusteringMultiplier(2);
topics.add(conf);
}
Set<Broker> additionalBrokers = new HashSet<>(Arrays.asList(
new Broker("1.1.1.12", port, "c5.2xlarge", "us-east-1c", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.11", port, "c5.2xlarge", "us-east-1b", BrokerType.WRITE, new HashSet<>()),
new Broker("1.1.1.10", port, "c5.2xlarge", "us-east-1a", BrokerType.WRITE, new HashSet<>())
));
newBrokers.addAll(additionalBrokers);
newBrokers = strategy.balance(topics, newBrokers);
System.out.println(buildAllocationMap(newBrokers));
for (Broker broker : newBrokers) {
if (additionalBrokers.contains(broker)) {
assertTrue(broker.getAssignedTopics().isEmpty());
} else {
assertEquals(firstBrokerConfig.get(broker), broker.getAssignedTopics());
assertTrue(broker.getAssignedTopics().stream().allMatch(tc -> tc.getStorageHandlerName().equals("customs3aync2")));
}
}
Thread.sleep(1500);
newBrokers.addAll(additionalBrokers);
newBrokers = strategy.balance(topics, newBrokers);
System.out.println(buildAllocationMap(newBrokers));
for (Broker broker : newBrokers) {
assertFalse(broker.getAssignedTopics().isEmpty());
assertTrue(broker.getAssignedTopics().stream().allMatch(tc -> tc.getStorageHandlerName().equals("customs3aync2")));
}
}
private Map<String, Set<String>> buildAllocationMap(Set<Broker> brokers) {
Map<String, Set<String>> brokerAllocationMap = new HashMap<>();
for (Broker broker : brokers) {
Set<String> set = new HashSet<>();
brokerAllocationMap.put(broker.getBrokerIP(), set);
for (TopicConfig topicConfig : broker.getAssignedTopics()) {
set.add(topicConfig.getTopic());
}
}
return brokerAllocationMap;
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/clustering/TestExpirationPartitionBalanceStrategyWithAssignmentFreeze.java | memq/src/test/java/com/pinterest/memq/core/clustering/TestExpirationPartitionBalanceStrategyWithAssignmentFreeze.java | package com.pinterest.memq.core.clustering;
import com.google.common.collect.Sets;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.TopicAssignment;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.core.config.MemqConfig;
import org.junit.Test;
import org.mockito.Mockito;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestExpirationPartitionBalanceStrategyWithAssignmentFreeze {
  /**
   * Single topic on an AZ-balanced fleet with spare capacity: the assignment
   * must grow and shrink with the topic's traffic while staying AZ-balanced.
   */
  @Test
  public void testSingleTopicSufficientBrokersBalancedAz() throws Exception {
    long expirationTime = 500;
    BalanceStrategy strategy = getExpirationBalanceStrategyWithFreeze(expirationTime);
    // 6 brokers, 1 topic with 450MB input traffic
    TopicConfig topicConfig = generateTopicConfig(0);
    topicConfig.setInputTrafficMB(450);
    Set<TopicConfig> topics = new HashSet<>(Arrays.asList(topicConfig));
    Set<Broker> brokers = getBrokers(2, 2, 2);
    // topic assigned to 3 brokers
    // (450MB appears to map to 3 brokers, i.e. ~150MB per broker — confirm
    // against the strategy's per-broker capacity assumption)
    brokers = strategy.balance(topics, brokers);
    Set<Broker> assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(3, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
    // topic requires 6 brokers
    topicConfig.setInputTrafficMB(900);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(6, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
    // after expiration, still keep the assignment
    Thread.sleep(expirationTime * 2);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(6, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
    // reduce input traffic to 450MB, topic should be assigned to 3 brokers
    topicConfig.setInputTrafficMB(450);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(3, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
  }
  /**
   * Single topic while brokers are removed unevenly across AZs: the frozen
   * assignment should survive single-broker losses, shrink only when too few
   * brokers remain, and recover once brokers return and the freeze expires.
   */
  @Test
  public void testSingleTopicSufficientBrokersImbalancedAz() throws Exception {
    long expirationTime = 500;
    BalanceStrategy strategy = getExpirationBalanceStrategyWithFreeze(expirationTime);
    // 6 brokers, 1 topic with 450MB input traffic
    TopicConfig topicConfig = generateTopicConfig(0);
    topicConfig.setInputTrafficMB(450);
    Set<TopicConfig> topics = new HashSet<>(Arrays.asList(topicConfig));
    Set<Broker> brokers = getBrokers(2, 2, 2);
    // topic assigned to 3 brokers
    brokers = strategy.balance(topics, brokers);
    Set<Broker> assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(3, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
    // let the frozen assignment expire before mutating the fleet
    Thread.sleep(expirationTime * 2);
    // remove one broker from a
    Set<Broker> removedA = removeBrokers(brokers, "a", 1);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    // still 3 brokers — a spare broker can replace the lost one
    assertEquals(3, assignedBrokers.size());
    Thread.sleep(expirationTime * 2);
    // remove one broker from b
    Set<Broker> removedB = removeBrokers(brokers, "b", 1);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(3, assignedBrokers.size());
    // remove another broker from b
    Set<Broker> removedB2 = removeBrokers(brokers, "b", 1);
    assertEquals(3, brokers.size());
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(2, assignedBrokers.size()); // only 2 brokers left from previous assignment and it's frozen
    Thread.sleep(expirationTime * 2);
    // add removed brokers from b back
    brokers.addAll(removedB);
    brokers.addAll(removedB2);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    // back to full assignment once capacity returns and the freeze expired
    assertEquals(3, assignedBrokers.size());
    // add removed broker from a back
    brokers.addAll(removedA);
    Thread.sleep(expirationTime * 2);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(3, assignedBrokers.size());
  }
  /**
   * Single topic on an undersized fleet: when demand exceeds capacity the
   * existing assignment is kept as-is, and the topic expands only after more
   * brokers join; it contracts again when traffic drops.
   */
  @Test
  public void testSingleTopicInsufficientBrokersBalancedAz() throws Exception {
    long expirationTime = 500;
    BalanceStrategy strategy = getExpirationBalanceStrategyWithFreeze(expirationTime);
    // 3 brokers, 1 topic with 450MB input traffic
    TopicConfig topicConfig = generateTopicConfig(0);
    topicConfig.setInputTrafficMB(450);
    Set<TopicConfig> topics = new HashSet<>(Arrays.asList(topicConfig));
    Set<Broker> brokers = getBrokers(1, 1, 1);
    // topic assigned to all 3 brokers
    brokers = strategy.balance(topics, brokers);
    Set<Broker> assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(3, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
    // topic requires 6 brokers, but only 3 available, still keep the assignment
    topicConfig.setInputTrafficMB(900);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(3, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
    // after expiration, still keep the assignment
    Thread.sleep(expirationTime * 2);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(3, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
    // add 3 more brokers, topic should be assigned to all 6 brokers
    brokers.add(generateBroker(4, "a"));
    brokers.add(generateBroker(5, "b"));
    brokers.add(generateBroker(6, "c"));
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(6, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
    // reduce input traffic to 450MB, topic should be assigned to 3 brokers
    topicConfig.setInputTrafficMB(450);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(3, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
  }
  /**
   * Single topic consuming the whole fleet: losing a broker shrinks the
   * assignment (no spare to promote), and restoring the broker restores the
   * full, AZ-balanced assignment.
   */
  @Test
  public void testSingleTopicInsufficientBrokersImbalancedAz() throws Exception {
    long expirationTime = 500;
    BalanceStrategy strategy = getExpirationBalanceStrategyWithFreeze(expirationTime);
    // 6 brokers, 1 topic with 900MB input traffic
    TopicConfig topicConfig = generateTopicConfig(0);
    topicConfig.setInputTrafficMB(900);
    Set<TopicConfig> topics = new HashSet<>(Arrays.asList(topicConfig));
    Set<Broker> brokers = getBrokers(2, 2, 2);
    brokers = strategy.balance(topics, brokers);
    Set<Broker> assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(6, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
    // let the frozen assignment expire before removing a broker
    Thread.sleep(expirationTime * 2);
    Set<Broker> removed = removeBrokers(brokers, "a", 1);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    // only 5 brokers remain — there is no spare to backfill the lost one
    assertEquals(5, assignedBrokers.size());
    Thread.sleep(expirationTime * 2);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(5, assignedBrokers.size());
    // add removed brokers back
    brokers.addAll(removed);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(6, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
    // assignment remains stable through another expiration window
    Thread.sleep(expirationTime * 2);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers = getAssignedBrokersForTopic(brokers, topicConfig.getTopic());
    assertEquals(6, assignedBrokers.size());
    assertLocalityBalance(assignedBrokers);
  }
  /**
   * Two topics competing for an AZ-balanced fleet with spare capacity: the
   * higher-priority topic (lower topicOrder) is placed first, dead brokers'
   * shares move to fresh brokers, and growth stays AZ-balanced.
   */
  @Test
  public void testMultiTopicSufficientBrokersBalancedAz() throws Exception {
    long expirationTime = 500;
    BalanceStrategy strategy = getExpirationBalanceStrategyWithFreeze(expirationTime);
    // 12 brokers, 2 topics with 450 and 900MB input traffic, 900MB topic has priority
    TopicConfig topicConfig1 = generateTopicConfig(0);
    topicConfig1.setInputTrafficMB(450);
    topicConfig1.setTopicOrder(1000);
    TopicConfig topicConfig2 = generateTopicConfig(1);
    topicConfig2.setInputTrafficMB(900);
    // lower topicOrder = higher priority — TODO confirm ordering semantics
    topicConfig2.setTopicOrder(0);
    Set<TopicConfig> topics = new HashSet<>(Arrays.asList(topicConfig1, topicConfig2));
    Set<Broker> brokers = getBrokers(4, 4, 4);
    // topic 2 assigned to 6 brokers, topic 1 assigned to 3 brokers
    brokers = strategy.balance(topics, brokers);
    Set<Broker> assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
    Set<Broker> assignedBrokers2 = getAssignedBrokersForTopic(brokers, topicConfig2.getTopic());
    assertEquals(3, assignedBrokers1.size());
    assertEquals(6, assignedBrokers2.size());
    assertLocalityBalance(assignedBrokers1);
    assertLocalityBalance(assignedBrokers2);
    // remove all assigned brokers from each AZ for topic 2 and reassign with inputTrafficMB = 450
    brokers.removeAll(assignedBrokers2);
    Set<Broker> removed = new HashSet<>(assignedBrokers2);
    topicConfig2.setInputTrafficMB(450);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers2 = getAssignedBrokersForTopic(brokers, topicConfig2.getTopic());
    assertEquals(3, assignedBrokers2.size());
    assertLocalityBalance(assignedBrokers2);
    // the topic must land entirely on brokers that were never removed
    assertTrue(Sets.intersection(assignedBrokers2, removed).isEmpty());
    topicConfig2.setInputTrafficMB(1350);
    brokers.addAll(removed);
    // topic 2 assigned to 9 brokers, topic 1 assigned to 3 brokers
    brokers = strategy.balance(topics, brokers);
    assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
    assignedBrokers2 = getAssignedBrokersForTopic(brokers, topicConfig2.getTopic());
    assertEquals(3, assignedBrokers1.size());
    assertEquals(9, assignedBrokers2.size());
    assertLocalityBalance(assignedBrokers1);
    assertLocalityBalance(assignedBrokers2);
    // assignments remain stable after the freeze window expires
    Thread.sleep(expirationTime * 2);
    brokers = strategy.balance(topics, brokers);
    assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
    assignedBrokers2 = getAssignedBrokersForTopic(brokers, topicConfig2.getTopic());
    assertEquals(3, assignedBrokers1.size());
    assertEquals(9, assignedBrokers2.size());
    assertLocalityBalance(assignedBrokers1);
    assertLocalityBalance(assignedBrokers2);
  }
/**
 * Verifies priority-ordered assignment when two topics together need more brokers
 * than exist, with brokers evenly spread across three AZs. topic0 (lower topicOrder)
 * must be satisfied first; topic1 absorbs the shortfall. Also exercises assignment
 * freeze: while assignments are unexpired, broker loss is not rebalanced away.
 */
@Test
public void testMultiTopicInsufficientBrokersBalancedAz() throws Exception {
long expirationTime = 500;
BalanceStrategy strategy = getExpirationBalanceStrategyWithFreeze(expirationTime);
// 12 brokers, 2 topics with 450 and 900MB input traffic, topic0 has priority over topic1
TopicConfig topicConfig0 = generateTopicConfig(0);
topicConfig0.setInputTrafficMB(450);
topicConfig0.setTopicOrder(0);
TopicConfig topicConfig1 = generateTopicConfig(1);
topicConfig1.setInputTrafficMB(900);
topicConfig1.setTopicOrder(1000);
Set<TopicConfig> topics = new HashSet<>(Arrays.asList(topicConfig0, topicConfig1));
Set<Broker> brokers = getBrokers(4, 4, 4);
brokers = strategy.balance(topics, brokers);
Set<Broker> assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
Set<Broker> assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
// Initially both topics fit: 3 + 6 of the 12 brokers, each AZ-balanced.
assertEquals(3, assignedBrokers0.size());
assertEquals(6, assignedBrokers1.size());
assertLocalityBalance(assignedBrokers0);
assertLocalityBalance(assignedBrokers1);
// Let the current assignments expire so the next balance() may reassign.
Thread.sleep(expirationTime * 2);
// topic0 needs 9 brokers, topic1 still needs 6 brokers, but only 12 available in total
topicConfig0.setInputTrafficMB(1350); // setting this to 1800 (all 12 brokers) will result in topic1 being fully dropped
brokers = strategy.balance(topics, brokers);
assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
// Higher-priority topic0 is satisfied fully; topic1 keeps only the remainder.
assertEquals(9, assignedBrokers0.size());
assertEquals(3, assignedBrokers1.size()); // TODO: this scenario is concerning because topic1's assignment got dropped
// Add one broker per AZ so both topics can be fully satisfied again.
brokers.add(generateBroker(13, "a"));
brokers.add(generateBroker(14, "b"));
brokers.add(generateBroker(15, "c"));
// topic 1 needs 9 brokers, topic 2 needs 6 brokers, 15 brokers available
brokers = strategy.balance(topics, brokers);
assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(9, assignedBrokers0.size());
assertEquals(6, assignedBrokers1.size());
assertLocalityBalance(assignedBrokers0);
assertLocalityBalance(assignedBrokers1);
// After another expiration, a rebalance must be a no-op (assignments stable).
Thread.sleep(expirationTime * 2);
brokers = strategy.balance(topics, brokers);
assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(9, assignedBrokers0.size());
assertEquals(6, assignedBrokers1.size());
assertLocalityBalance(assignedBrokers0);
assertLocalityBalance(assignedBrokers1);
// remove 3 brokers, 1 from each AZ
Set<Broker> removed = removeBrokers(brokers, "a", 1);
removed = Sets.union(removed, removeBrokers(brokers, "b", 1));
removed = Sets.union(removed, removeBrokers(brokers, "c", 1));
assertEquals(12, brokers.size());
assertEquals(3, removed.size());
Set<Broker> removed0 = Sets.intersection(removed, assignedBrokers0);
Set<Broker> removed1 = Sets.intersection(removed, assignedBrokers1);
long now = System.currentTimeMillis();
// While assignments are frozen (unexpired), lost brokers are NOT replaced:
// counts stay reduced by exactly the number of assigned brokers removed.
while (System.currentTimeMillis() - now < expirationTime * 2) {
brokers = strategy.balance(topics, brokers);
assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(9 - removed0.size(), assignedBrokers0.size());
assertEquals(6 - removed1.size(), assignedBrokers1.size());
Thread.sleep(50);
}
// Returning the brokers restores full assignments.
brokers.addAll(removed);
brokers = strategy.balance(topics, brokers);
assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(9, assignedBrokers0.size());
assertEquals(6, assignedBrokers1.size());
}
/**
 * Verifies behavior when an entire AZ worth of capacity ("a", 3 brokers) is lost
 * while total capacity is still sufficient: during the freeze window assignments
 * shrink by exactly the removed members, and after the brokers return both topics
 * regain their full, AZ-balanced assignments.
 */
@Test
public void testMultiTopicSufficientBrokersImbalancedAz() throws Exception {
long expirationTime = 500;
BalanceStrategy strategy = getExpirationBalanceStrategyWithFreeze(expirationTime);
// 12 brokers, 2 topics with 450 and 900MB input traffic, topic0 has priority over topic1
TopicConfig topicConfig0 = generateTopicConfig(0);
topicConfig0.setInputTrafficMB(450);
topicConfig0.setTopicOrder(0);
TopicConfig topicConfig1 = generateTopicConfig(1);
topicConfig1.setInputTrafficMB(900);
topicConfig1.setTopicOrder(1000);
Set<TopicConfig> topics = new HashSet<>(Arrays.asList(topicConfig0, topicConfig1));
Set<Broker> brokers = getBrokers(4, 4, 4);
brokers = strategy.balance(topics, brokers);
Set<Broker> assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
Set<Broker> assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(3, assignedBrokers0.size());
assertEquals(6, assignedBrokers1.size());
assertLocalityBalance(assignedBrokers0);
assertLocalityBalance(assignedBrokers1);
// Drop three brokers from AZ "a", creating an AZ imbalance.
Set<Broker> removed = removeBrokers(brokers, "a", 3);
Set<Broker> removed0 = Sets.intersection(removed, assignedBrokers0);
Set<Broker> removed1 = Sets.intersection(removed, assignedBrokers1);
assertEquals(9, brokers.size());
long now = System.currentTimeMillis();
// Freeze window: no reassignment — each topic loses only its removed brokers.
while (System.currentTimeMillis() - now < expirationTime * 2) {
brokers = strategy.balance(topics, brokers);
assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(3 - removed0.size(), assignedBrokers0.size());
assertEquals(6 - removed1.size(), assignedBrokers1.size());
Thread.sleep(50);
}
// Restore the removed brokers; full AZ-balanced assignments come back.
brokers.addAll(removed);
brokers = strategy.balance(topics, brokers);
assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(3, assignedBrokers0.size());
assertEquals(6, assignedBrokers1.size());
assertLocalityBalance(assignedBrokers0);
assertLocalityBalance(assignedBrokers1);
// After expiration the assignment must remain stable (idempotent rebalance).
Thread.sleep(expirationTime * 2);
brokers = strategy.balance(topics, brokers);
assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(3, assignedBrokers0.size());
assertEquals(6, assignedBrokers1.size());
assertLocalityBalance(assignedBrokers0);
assertLocalityBalance(assignedBrokers1);
}
/**
 * Verifies the combined worst case: exactly enough brokers (9) for both topics,
 * then two brokers removed from one AZ. During the freeze window assignments only
 * shrink by the removed members; once capacity returns, both topics are fully
 * assigned and AZ-balanced again.
 */
@Test
public void testMultiTopicInsufficientBrokersImbalancedAz() throws Exception {
long expirationTime = 500;
BalanceStrategy strategy = getExpirationBalanceStrategyWithFreeze(expirationTime);
// 12 brokers, 2 topics with 450 and 900MB input traffic, topic0 has priority over topic1
TopicConfig topicConfig0 = generateTopicConfig(0);
topicConfig0.setInputTrafficMB(450);
topicConfig0.setTopicOrder(0);
TopicConfig topicConfig1 = generateTopicConfig(1);
topicConfig1.setInputTrafficMB(900);
topicConfig1.setTopicOrder(1000);
Set<TopicConfig> topics = new HashSet<>(Arrays.asList(topicConfig0, topicConfig1));
// 9 brokers: exactly the combined demand (3 + 6), no spare capacity.
Set<Broker> brokers = getBrokers(3, 3, 3);
brokers = strategy.balance(topics, brokers);
Set<Broker> assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
Set<Broker> assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(3, assignedBrokers0.size());
assertEquals(6, assignedBrokers1.size());
assertLocalityBalance(assignedBrokers0);
assertLocalityBalance(assignedBrokers1);
// Remove 2 of the 3 brokers in AZ "a"; with zero spare capacity every removal
// must hit an assigned broker.
Set<Broker> removed = removeBrokers(brokers, "a", 2);
Set<Broker> removed0 = Sets.intersection(removed, assignedBrokers0);
Set<Broker> removed1 = Sets.intersection(removed, assignedBrokers1);
assertEquals(7, brokers.size());
assertTrue(removed0.size() + removed1.size() > 0);
long now = System.currentTimeMillis();
// Freeze window: assignments only shrink by the removed members.
while (System.currentTimeMillis() - now < expirationTime * 2) {
brokers = strategy.balance(topics, brokers);
assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(3 - removed0.size(), assignedBrokers0.size());
assertEquals(6 - removed1.size(), assignedBrokers1.size());
Thread.sleep(50);
}
// Capacity restored: full, balanced assignments return.
brokers.addAll(removed);
brokers = strategy.balance(topics, brokers);
assignedBrokers0 = getAssignedBrokersForTopic(brokers, topicConfig0.getTopic());
assignedBrokers1 = getAssignedBrokersForTopic(brokers, topicConfig1.getTopic());
assertEquals(3, assignedBrokers0.size());
assertEquals(6, assignedBrokers1.size());
assertLocalityBalance(assignedBrokers0);
assertLocalityBalance(assignedBrokers1);
}
/**
 * Builds an assignment-freezing expiration balance strategy for tests, with
 * metrics disabled (the mocked MemqConfig returns no OpenTSDB config).
 *
 * @param expirationTime assignment expiration in milliseconds
 */
private static BalanceStrategy getExpirationBalanceStrategyWithFreeze(long expirationTime) {
MemqConfig mockMemqConfig = Mockito.mock(MemqConfig.class);
Mockito.when(mockMemqConfig.getOpenTsdbConfig()).thenReturn(null);
ExpirationPartitionBalanceStrategyWithAssignmentFreeze strategy = new ExpirationPartitionBalanceStrategyWithAssignmentFreeze(mockMemqConfig);
strategy.setDefaultExpirationTime(expirationTime); // caller-supplied; 500ms in these tests
return strategy;
}
/**
 * Builds a plain (non-freezing) expiration balance strategy for tests.
 *
 * @param expirationTime assignment expiration in milliseconds
 */
private static BalanceStrategy getExpirationBalanceStrategy(long expirationTime) {
ExpirationPartitionBalanceStrategy strategy = new ExpirationPartitionBalanceStrategy();
strategy.setDefaultExpirationTime(expirationTime); // caller-supplied; 500ms in these tests
return strategy;
}
/**
 * Builds a broker set containing exactly the requested number of brokers in each
 * of the three test AZs (us-east-1a/b/c), with globally unique indices.
 *
 * Bug fix: the previous implementation ignored the per-AZ count parameters and
 * always assigned localities round-robin by index (i % 3), which produced wrong
 * distributions whenever the three counts were not equal. The interleaved loop
 * below honors the counts while reproducing the historical broker layout exactly
 * for balanced inputs such as (4, 4, 4) and (3, 3, 3).
 */
private static Set<Broker> getBrokers(int numBrokersA, int numBrokersB, int numBrokersC) throws Exception {
  Set<Broker> brokers = new HashSet<>();
  int index = 1;
  int remainingA = numBrokersA;
  int remainingB = numBrokersB;
  int remainingC = numBrokersC;
  // Interleave a/b/c so indices cycle through AZs, skipping an AZ once its
  // quota has been met.
  while (remainingA + remainingB + remainingC > 0) {
    if (remainingA > 0) {
      brokers.add(generateBroker(index++, "a"));
      remainingA--;
    }
    if (remainingB > 0) {
      brokers.add(generateBroker(index++, "b"));
      remainingB--;
    }
    if (remainingC > 0) {
      brokers.add(generateBroker(index++, "c"));
      remainingC--;
    }
  }
  return brokers;
}
/**
 * Creates a WRITE-type test broker with IP "1.1.1.&lt;index&gt;", port 9092,
 * instance type c6i.2xlarge, locality "us-east-1&lt;localityLetter&gt;", and an
 * empty (mutable) topic-assignment set.
 */
private static Broker generateBroker(int index, String localityLetter) throws Exception {
return new Broker(
"1.1.1." + index,
(short) 9092,
"c6i.2xlarge",
"us-east-1" + localityLetter,
Broker.BrokerType.WRITE,
new HashSet<>()
);
}
/**
 * Creates a test topic config named "test_topic_&lt;index&gt;".
 *
 * NOTE(review): the positional constructor arguments (50MB, 256, 50, 0, 3) are
 * interpreted here only by position; their semantics (buffer sizes, batch time,
 * replication/partition factor, etc.) should be confirmed against the
 * TopicConfig constructor declaration.
 */
private static TopicConfig generateTopicConfig(int index) throws Exception {
return new TopicConfig(
index,
1024 * 1024 * 50,
256,
"test_topic_" + index,
50,
0,
3
);
}
/**
 * Returns the subset of {@code brokers} that currently hold an assignment for
 * the given topic.
 */
private static Set<Broker> getAssignedBrokersForTopic(Set<Broker> brokers, String topic) {
  // A broker qualifies as soon as any of its assignments matches the topic name.
  return brokers.stream()
      .filter(candidate -> candidate.getAssignedTopics().stream()
          .anyMatch(assignment -> assignment.getTopic().equals(topic)))
      .collect(Collectors.toSet());
}
/**
 * Removes up to {@code numBrokersToRemove} brokers located in
 * "us-east-1&lt;localityOfBrokersToRemove&gt;" from {@code brokers} (mutating it)
 * and returns the removed brokers. If fewer matching brokers exist, all of them
 * are removed.
 */
private static Set<Broker> removeBrokers(Set<Broker> brokers, String localityOfBrokersToRemove, int numBrokersToRemove) {
  String targetLocality = "us-east-1" + localityOfBrokersToRemove;
  // Take the first matches in iteration order, capped at the requested count.
  Set<Broker> victims = brokers.stream()
      .filter(candidate -> candidate.getLocality().equals(targetLocality))
      .limit(numBrokersToRemove)
      .collect(Collectors.toSet());
  brokers.removeAll(victims);
  return victims;
}
/**
 * Removes every broker whose IP equals {@code ipToRemove} from {@code brokers}
 * (mutating it) and returns the removed brokers.
 */
private static Set<Broker> removeBrokers(Set<Broker> brokers, String ipToRemove) {
  Set<Broker> victims = brokers.stream()
      .filter(candidate -> candidate.getBrokerIP().equals(ipToRemove))
      .collect(Collectors.toSet());
  brokers.removeAll(victims);
  return victims;
}
/**
 * Asserts the broker set is AZ-balanced: every locality present must host the
 * same number of brokers (i.e. the per-locality counts collapse to one value).
 */
private static void assertLocalityBalance(Set<Broker> brokers) {
  Map<String, Integer> perLocality = new HashMap<>();
  for (Broker broker : brokers) {
    perLocality.merge(broker.getLocality(), 1, Integer::sum);
  }
  assertEquals(1, new HashSet<>(perLocality.values()).size());
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/test/java/com/pinterest/memq/core/clustering/TestMemqGovernor.java | memq/src/test/java/com/pinterest/memq/core/clustering/TestMemqGovernor.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.clustering;
import static org.junit.Assert.assertEquals;
import java.io.FileNotFoundException;
import java.io.FileReader;
import org.junit.Test;
import com.google.gson.Gson;
import com.google.gson.JsonIOException;
import com.google.gson.JsonSyntaxException;
import com.pinterest.memq.commons.protocol.TopicConfig;
public class TestMemqGovernor {

  /**
   * Ensures a topic config serialized with the old JSON schema and one with the
   * new schema both deserialize to the same storage handler name, i.e. the
   * schema change is backwards compatible.
   */
  @Test
  public void testBackwardsCompatibility() throws JsonSyntaxException, JsonIOException,
      FileNotFoundException, java.io.IOException {
    Gson gson = new Gson();
    // try-with-resources: the previous version leaked both FileReader handles.
    try (FileReader oldReader = new FileReader("src/test/resources/old.test_topic.json");
         FileReader newReader = new FileReader("src/test/resources/new.test_topic.json")) {
      TopicConfig oldConf = gson.fromJson(oldReader, TopicConfig.class);
      TopicConfig newConf = gson.fromJson(newReader, TopicConfig.class);
      assertEquals("customs3aync2", oldConf.getStorageHandlerName());
      assertEquals("customs3aync2", newConf.getStorageHandlerName());
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/MemqMain.java | memq/src/main/java/com/pinterest/memq/core/MemqMain.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.codahale.metrics.jvm.CachedThreadStatesGaugeSet;
import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
import com.pinterest.memq.commons.mon.OpenTSDBClient;
import com.pinterest.memq.commons.mon.OpenTSDBReporter;
import com.pinterest.memq.core.clustering.MemqGovernor;
import com.pinterest.memq.core.config.EnvironmentProvider;
import com.pinterest.memq.core.config.MemqConfig;
import com.pinterest.memq.core.mon.MemqMgrHealthCheck;
import com.pinterest.memq.core.mon.MonitorEndpoint;
import com.pinterest.memq.core.rpc.MemqNettyServer;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import com.pinterest.memq.core.utils.MiscUtils;
import io.dropwizard.Application;
import io.dropwizard.setup.Environment;
public class MemqMain extends Application<MemqConfig> {
private static final Logger logger = Logger.getLogger(MemqMain.class.getName());
/**
 * Dropwizard entry point: wires up metrics, the topic manager, governor and
 * netty server, registers health checks and shutdown hooks. Ordering matters —
 * metrics must exist before the manager, and the manager before the governor
 * and server that depend on it.
 */
@Override
public void run(MemqConfig configuration, Environment environment) throws Exception {
MiscUtils.printAllLines(ClassLoader.getSystemResourceAsStream("logo.txt"));
// Per-topic metric registries keyed by topic name; "_"-prefixed keys hold
// non-topic registries (JVM, misc).
Map<String, MetricRegistry> metricsRegistryMap = new HashMap<>();
enableJVMMetrics(metricsRegistryMap);
MetricRegistry misc = new MetricRegistry();
emitStartMetrics(misc);
metricsRegistryMap.put("_misc", misc);
OpenTSDBClient client = initializeMetricsTransmitter(configuration, environment,
metricsRegistryMap);
MemqManager memqManager = new MemqManager(client, configuration, metricsRegistryMap);
memqManager.init();
environment.lifecycle().manage(memqManager);
addAPIs(environment, memqManager);
if (configuration.isResetEnabled()) {
// Deliberate hourly exit; the process supervisor is expected to restart us.
Executors.newScheduledThreadPool(1, new DaemonThreadFactory()).schedule(() -> {
logger.info("Memq scheduled restart, this is by design to reset heap.\n"
+ "Supervisord should restart Memq instance");
System.exit(0);
}, 1, TimeUnit.HOURS);
}
environment.healthChecks().register("base", new MemqMgrHealthCheck(memqManager));
MemqGovernor memqGovernor = initializeGovernor(configuration, memqManager);
MemqNettyServer nettyServer = initializeNettyServer(configuration, memqManager, memqGovernor,
metricsRegistryMap, client);
logger.info("Memq started");
initializeShutdownHooks(memqManager, memqGovernor, nettyServer);
// Extension point for subclasses (no-op in the base class).
initializeAdditionalModules(configuration, environment, memqManager, memqGovernor);
}
/**
 * Registers a JVM shutdown hook that tears the server down in dependency order:
 * stop accepting traffic (netty), stop cluster coordination (governor), then
 * stop topic processing (manager).
 */
private void initializeShutdownHooks(MemqManager memqManager,
                                     MemqGovernor memqGovernor,
                                     MemqNettyServer nettyServer) {
  Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    nettyServer.stop();
    memqGovernor.stop();
    try {
      memqManager.stop();
    } catch (Exception e) {
      // Route through the logger instead of printStackTrace() so the failure
      // reaches the configured log handlers even during shutdown.
      logger.log(java.util.logging.Level.SEVERE, "Failed to stop MemqManager during shutdown", e);
    }
  }));
}
/**
 * Registers JVM-level metric sets (GC, thread states sampled every 10s, memory
 * pools) under the reserved "_jvm" registry key.
 */
private void enableJVMMetrics(Map<String, MetricRegistry> metricsRegistryMap) {
MetricRegistry registry = new MetricRegistry();
registry.register("gc", new GarbageCollectorMetricSet());
registry.register("threads", new CachedThreadStatesGaugeSet(10, TimeUnit.SECONDS));
registry.register("memory", new MemoryUsageGaugeSet());
metricsRegistryMap.put("_jvm", registry);
}
/**
 * Creates the MemqGovernor using the {@link EnvironmentProvider} implementation
 * named in the configuration. The governor's clustering machinery is only
 * initialized when a clustering config is present.
 *
 * @throws Exception if the environment provider class cannot be loaded or
 *                   instantiated (the original cause is attached)
 */
public MemqGovernor initializeGovernor(MemqConfig configuration,
                                       MemqManager memqManager) throws Exception {
  EnvironmentProvider provider;
  try {
    // getDeclaredConstructor().newInstance() replaces the deprecated
    // Class.newInstance(), which propagates checked constructor exceptions
    // without wrapping them.
    provider = Class.forName(configuration.getEnvironmentProvider())
        .asSubclass(EnvironmentProvider.class).getDeclaredConstructor().newInstance();
  } catch (Exception e) {
    throw new Exception("Failed to initialize environment provider", e);
  }
  MemqGovernor memqGovernor = new MemqGovernor(memqManager, configuration, provider);
  if (configuration.getClusteringConfig() != null) {
    memqGovernor.init();
  }
  return memqGovernor;
}
/**
 * Builds and initializes the netty front-end that serves client traffic,
 * wired to the manager (topic processing), governor (cluster state) and the
 * shared metric registries.
 */
public MemqNettyServer initializeNettyServer(MemqConfig configuration,
MemqManager memqManager,
MemqGovernor memqGovernor,
Map<String, MetricRegistry> metricsRegistryMap,
OpenTSDBClient client) throws Exception {
MemqNettyServer server = new MemqNettyServer(configuration, memqManager, memqGovernor,
metricsRegistryMap, client);
server.initialize();
return server;
}
/**
 * Extension hook invoked at the end of {@code run()}; intentionally a no-op in
 * the base class so deployments can subclass MemqMain and register extra modules.
 */
public void initializeAdditionalModules(MemqConfig config, Environment environment, MemqManager memqManager, MemqGovernor memqGovernor) throws Exception {
}
/**
 * Starts an OpenTSDB reporter for every registry in {@code metricsRegistryMap}
 * if OpenTSDB is configured; returns the shared client, or null when metrics
 * export is disabled. Registry keys starting with "_" are treated as non-topic
 * registries and reported without a topic tag.
 */
private OpenTSDBClient initializeMetricsTransmitter(MemqConfig configuration,
Environment environment,
Map<String, MetricRegistry> metricsRegistryMap) throws UnknownHostException {
if (configuration.getOpenTsdbConfig() != null) {
OpenTSDBClient client = new OpenTSDBClient(configuration.getOpenTsdbConfig().getHost(),
configuration.getOpenTsdbConfig().getPort());
String localHostname = MiscUtils.getHostname();
for (Entry<String, MetricRegistry> entry : metricsRegistryMap.entrySet()) {
ScheduledReporter reporter;
if (entry.getKey().startsWith("_")) { // non-topic metrics
reporter = OpenTSDBReporter.createReporter("", entry.getValue(), entry.getKey(),
(String name, Metric metric) -> true, TimeUnit.SECONDS, TimeUnit.SECONDS, client,
localHostname);
} else { // topic metrics
// Topic registries carry a "topic" tag equal to the registry key.
reporter = OpenTSDBReporter.createReporterWithTags("", entry.getValue(), entry.getKey(),
(String name, Metric metric) -> true, TimeUnit.SECONDS, TimeUnit.SECONDS, client,
localHostname, Collections.singletonMap("topic", entry.getKey()));
}
reporter.start(configuration.getOpenTsdbConfig().getFrequencyInSeconds(), TimeUnit.SECONDS);
}
return client;
}
// Metrics export disabled: callers must tolerate a null client.
return null;
}
/** Mounts the Jersey REST endpoints (monitoring) under the /api/* path. */
private void addAPIs(Environment environment, MemqManager memqManager) {
environment.jersey().setUrlPattern("/api/*");
environment.jersey().register(new MonitorEndpoint(memqManager.getRegistry()));
}
/**
 * Registers a "start" gauge that reads 1 for the first 3 minutes after process
 * start and 0 afterwards, giving dashboards/alerts a restart indicator.
 */
private void emitStartMetrics(MetricRegistry registry) {
final long startTs = System.currentTimeMillis();
registry.gauge("start", () -> (Gauge<Integer>) () -> System.currentTimeMillis() - startTs < 3 * 60_000 ? 1 : 0);
}
/** JVM entry point; delegates to the Dropwizard application runner. */
public static void main(String[] args) throws Exception {
new MemqMain().run(args);
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/MemqManager.java | memq/src/main/java/com/pinterest/memq/core/MemqManager.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core;
import java.io.File;
import java.io.IOException;
import java.net.UnknownHostException;
import java.nio.file.Files;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.ws.rs.BadRequestException;
import javax.ws.rs.InternalServerErrorException;
import javax.ws.rs.NotFoundException;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.google.gson.Gson;
import com.pinterest.memq.commons.mon.OpenTSDBClient;
import com.pinterest.memq.commons.mon.OpenTSDBReporter;
import com.pinterest.memq.commons.protocol.TopicAssignment;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.StorageHandlerTable;
import com.pinterest.memq.core.config.MemqConfig;
import com.pinterest.memq.core.processing.TopicProcessor;
import com.pinterest.memq.core.processing.TopicProcessorState;
import com.pinterest.memq.core.processing.bucketing.BucketingTopicProcessor;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import com.pinterest.memq.core.utils.MiscUtils;
import io.dropwizard.lifecycle.Managed;
public class MemqManager implements Managed {
private static final Logger logger = Logger.getLogger(MemqManager.class.getName());
private static final Gson gson = new Gson();
// Live processors keyed by topic name; concurrent because REST handlers and
// the cleanup executor mutate it.
private Map<String, TopicProcessor> processorMap = new ConcurrentHashMap<>();
// Current topic assignments keyed by topic name; persisted via updateTopicCache().
private Map<String, TopicAssignment> topicMap = new ConcurrentHashMap<>();
private MemqConfig configuration;
// Shared scheduler passed to topic processors for periodic ticks.
private ScheduledExecutorService timerService;
// Executor that runs asynchronous topic-processor teardown.
private ScheduledExecutorService cleanupService;
private Map<String, MetricRegistry> metricsRegistryMap;
// Soft kill-switch toggled via setDisabled(); read by request handling.
private AtomicBoolean disabled;
// Path of the JSON file caching topic assignments across restarts.
private String topicCacheFile;
// May be null when OpenTSDB metrics export is disabled.
private OpenTSDBClient client;
/**
 * Creates the manager and its daemon executors; no topics are loaded until
 * {@link #init()} is called.
 *
 * @param client OpenTSDB client for metrics export, or null if disabled
 */
public MemqManager(OpenTSDBClient client,
MemqConfig configuration,
Map<String, MetricRegistry> metricsRegistryMap) throws UnknownHostException {
this.configuration = configuration;
this.topicCacheFile = configuration.getTopicCacheFile();
this.metricsRegistryMap = metricsRegistryMap;
timerService = Executors.newScheduledThreadPool(1, DaemonThreadFactory.INSTANCE);
cleanupService = Executors.newScheduledThreadPool(configuration.getCleanerThreadCount(),
DaemonThreadFactory.INSTANCE);
this.disabled = new AtomicBoolean();
this.client = client;
}
/**
 * Loads storage handlers, restores topic assignments from the on-disk cache if
 * present, overlays statically configured topics (static config wins for
 * duplicate names since it is applied second), and starts a processor for each
 * resulting topic.
 */
public void init() throws Exception {
loadStorageHandlers(configuration);
File file = new File(topicCacheFile);
if (file.exists()) {
byte[] bytes = Files.readAllBytes(file.toPath());
// NOTE(review): decodes with the platform default charset — assumed to be
// UTF-8 on deployment hosts; confirm, or pass an explicit charset.
TopicAssignment[] topics = gson.fromJson(new String(bytes), TopicAssignment[].class);
topicMap = new ConcurrentHashMap<>();
for (TopicAssignment topicConfig : topics) {
topicMap.put(topicConfig.getTopic(), topicConfig);
}
}
if (configuration.getTopicConfig() != null) {
for (TopicConfig topicConfig : configuration.getTopicConfig()) {
// -1 marks a statically configured (non-cluster-assigned) topic.
topicMap.put(topicConfig.getTopic(), new TopicAssignment(topicConfig, -1));
}
}
for (Entry<String, TopicAssignment> entry : topicMap.entrySet()) {
createTopicProcessor(entry.getValue());
}
}
/**
 * Creates and starts a {@link BucketingTopicProcessor} for the topic if one is
 * not already running: sets up the topic's metric registry and (optionally)
 * OpenTSDB reporter, instantiates and initializes the configured storage
 * handler, applies default buffer sizes, then registers the processor. No-op if
 * a processor for the topic already exists.
 *
 * @throws BadRequestException          if the assignment has no storage handler
 *                                      name or the handler cannot be instantiated
 * @throws InternalServerErrorException if the storage handler writer fails to
 *                                      initialize
 */
public void createTopicProcessor(TopicAssignment topicConfig) throws BadRequestException,
UnknownHostException {
if (processorMap.containsKey(topicConfig.getTopic())) {
return;
}
if (topicConfig.getStorageHandlerName() == null) {
throw new BadRequestException("Missing handler " + topicConfig.toString());
}
MetricRegistry registry = new MetricRegistry();
metricsRegistryMap.put(topicConfig.getTopic(), registry);
ScheduledReporter reporter = null;
if (client != null) {
// Metrics export enabled: report this topic's registry tagged with its name.
String localHostname = MiscUtils.getHostname();
reporter = OpenTSDBReporter.createReporterWithTags("", registry, topicConfig.getTopic(),
(String name, com.codahale.metrics.Metric metric) -> true, TimeUnit.SECONDS,
TimeUnit.SECONDS, client, localHostname,
Collections.singletonMap("topic", topicConfig.getTopic()));
reporter.start(configuration.getOpenTsdbConfig().getFrequencyInSeconds(), TimeUnit.SECONDS);
}
StorageHandler storageHandler;
try {
storageHandler = StorageHandlerTable.getClass(topicConfig.getStorageHandlerName()).newInstance();
} catch (InstantiationException | IllegalAccessException e) {
throw new BadRequestException("Invalid output class", e);
}
try {
storageHandler.initWriter(topicConfig.getStorageHandlerConfig(), topicConfig.getTopic(), registry);
} catch (Exception e) {
throw new InternalServerErrorException(e);
}
// Fall back to cluster-wide defaults when the assignment leaves sizes unset.
if (topicConfig.getBufferSize() == 0) {
topicConfig.setBufferSize(configuration.getDefaultBufferSize());
}
if (topicConfig.getRingBufferSize() == 0) {
topicConfig.setRingBufferSize(configuration.getDefaultRingBufferSize());
}
TopicProcessor tp = new BucketingTopicProcessor(registry, topicConfig, storageHandler, timerService, reporter);
processorMap.put(topicConfig.getTopic(), tp);
topicMap.put(topicConfig.getTopic(), topicConfig);
logger.info("Configured and started TopicProcessor for:" + topicConfig.getTopic());
}
/**
 * Persists the current topic assignments as JSON to the topic cache file so
 * they survive restarts; failures are logged, not propagated.
 */
public void updateTopicCache() {
String json = gson.toJson(topicMap.values());
try {
// NOTE(review): writes with the platform default charset — assumed UTF-8,
// matching the decode in init(); confirm on deployment hosts.
Files.write(new File(topicCacheFile).toPath(), json.getBytes());
} catch (IOException e) {
logger.log(Level.SEVERE, "Failed to update topic cache", e);
}
}
/**
 * Returns the lifecycle state of the processor serving {@code topic}.
 *
 * @throws NotFoundException if no processor for the topic exists on this instance
 */
public TopicProcessorState getProcessorState(String topic) {
  return checkAndGetTopicProcessor(topic).getState();
}
/**
 * Asynchronously stops and removes the processor for {@code topic}: the
 * returned task awaits processor shutdown (logging if it fails to reach
 * STOPPED) and then unregisters the topic from both maps.
 *
 * @param topic topic whose processor should be torn down
 * @return future task to track deletion
 * @throws NotFoundException if no processor for the topic exists on this instance
 */
public Future<?> deleteTopicProcessor(String topic) {
final TopicProcessor topicProcessor = checkAndGetTopicProcessor(topic);
return cleanupService.submit(() -> {
try {
topicProcessor.stopAndAwait();
} catch (InterruptedException e) {
logger.severe("Termination failed");
}
if (topicProcessor.getState() != TopicProcessorState.STOPPED) {
logger.severe("Topic processor not stopped for topic:" + topic);
}
// Unregister regardless; the processor is no longer serviceable.
processorMap.remove(topic);
topicMap.remove(topic);
});
}
/**
 * Looks up the processor for {@code topic}, failing with 404 semantics when
 * this instance does not host it.
 */
private TopicProcessor checkAndGetTopicProcessor(String topic) {
  TopicProcessor processor = processorMap.get(topic);
  if (processor != null) {
    return processor;
  }
  throw new NotFoundException(
      "Topic processor for:" + topic + " doesn't exist on this instance");
}
/** Returns the live server configuration (not a copy). */
public MemqConfig getConfiguration() {
return configuration;
}
/** Returns the live topic-to-processor map (not a copy; concurrent). */
public Map<String, TopicProcessor> getProcessorMap() {
return processorMap;
}
/** Returns a snapshot copy of the current topic assignments. */
public Set<TopicAssignment> getTopicAssignment() {
return new HashSet<>(topicMap.values());
}
/** Returns the live map of metric registries keyed by topic (not a copy). */
public Map<String, MetricRegistry> getRegistry() {
return metricsRegistryMap;
}
/** Dropwizard lifecycle hook; initialization happens in init(), so only logs. */
@Override
public void start() throws Exception {
logger.info("Memq manager started");
}
/**
 * Dropwizard lifecycle hook: removes and hard-stops every topic processor
 * (via Iterator.remove to mutate safely during iteration), then shuts down both
 * executors. Processor-stop failures are logged, not propagated, so the
 * executors are always shut down.
 */
@Override
public void stop() throws Exception {
try {
for (Iterator<Entry<String, TopicProcessor>> iterator = processorMap.entrySet()
.iterator(); iterator.hasNext();) {
Entry<String, TopicProcessor> entry = iterator.next();
iterator.remove();
entry.getValue().stopNow();
}
} catch (Exception e) {
logger.log(Level.SEVERE, "Processor stop failed", e);
}
timerService.shutdownNow();
cleanupService.shutdownNow();
logger.info("Memq Manager stopped");
}
/** True while neither internal executor has been shut down (i.e. before stop()). */
public boolean isRunning() {
return !timerService.isShutdown() && !cleanupService.isShutdown();
}
/** Returns the soft kill-switch state set via {@link #setDisabled(boolean)}. */
public boolean isDisabled() {
return disabled.get();
}
/** Sets the soft kill-switch; does not stop processors or executors. */
public void setDisabled(boolean disabled) {
this.disabled.set(disabled);
}
/**
 * Scans any configured packages for storage handler implementations and
 * registers them with the {@link StorageHandlerTable}. No-op when no scan
 * packages are configured.
 */
public void loadStorageHandlers(MemqConfig configuration) {
  String[] scanPackages = configuration.getStorageHandlerPackageNames();
  if (scanPackages == null) {
    return;
  }
  for (String scanPackage : scanPackages) {
    StorageHandlerTable.findAndRegisterOutputHandlers(scanPackage);
  }
}
/**
 * Applies an updated assignment to an existing topic and reconfigures its
 * processor.
 *
 * @return true if the processor was reconfigured; false when the topic is
 *         unknown, the config is unchanged, or no processor exists for it
 */
public boolean updateTopic(TopicAssignment topicConfig) {
String topic = topicConfig.getTopic();
TopicConfig existingConfig = topicMap.get(topic);
// return since topic doesn't exist
if (existingConfig == null) {
return false;
}
// return since topic isn't updated
if (!existingConfig.isDifferentFrom(topicConfig)) {
return false;
}
topicMap.put(topic, topicConfig);
TopicProcessor processor = processorMap.get(topic);
if (processor == null) {
// TODO: throw exception since processor should exist
return false;
}
logger.info("Topic config of topic " + topic + " changed, original: " + existingConfig + ", new: " + topicConfig);
processor.reconfigure(topicConfig);
return true;
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/tools/TopicAdmin.java | memq/src/main/java/com/pinterest/memq/core/tools/TopicAdmin.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.tools;
import java.io.File;
import java.nio.file.Files;
import java.util.concurrent.TimeUnit;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import com.google.gson.Gson;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.core.clustering.MemqGovernor;
/**
 * CLI that reads a {@link TopicConfig} from a JSON file and creates the topic in
 * the MemQ cluster rooted at the supplied ZooKeeper ensemble.
 */
public class TopicAdmin {

  private static final String ZOOKEEPER_CONNECTION = "zk";
  private static final String TOPIC_CONFIG_JSON_FILE = "tcjf";

  public static void main(String[] args) throws Exception {
    // create Options object
    Options options = new Options();
    options.addOption(Option.builder().required(true).hasArg().longOpt(ZOOKEEPER_CONNECTION)
        .argName("zookeeper connection string").build());
    options.addOption(Option.builder().required(true).hasArg().longOpt(TOPIC_CONFIG_JSON_FILE)
        .argName("topic configuration json file").build());
    CommandLineParser parser = new DefaultParser();
    CommandLine cmd;
    try {
      cmd = parser.parse(options, args);
    } catch (Exception e) {
      printHelp(options);
      return;
    }
    String zookeeperConnectionString = cmd.getOptionValue(ZOOKEEPER_CONNECTION);
    String topicConfigFile = cmd.getOptionValue(TOPIC_CONFIG_JSON_FILE);

    RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
    CuratorFramework client = CuratorFrameworkFactory.newClient(zookeeperConnectionString,
        retryPolicy);
    client.start();
    try {
      // Fail fast on connection timeout; the previous version ignored the
      // boolean returned by blockUntilConnected and proceeded regardless.
      if (!client.blockUntilConnected(100, TimeUnit.SECONDS)) {
        System.err.println("Timed out connecting to ZooKeeper: " + zookeeperConnectionString);
        return;
      }
      Gson gson = new Gson();
      // Decode explicitly as UTF-8 instead of the platform default charset.
      TopicConfig config = gson.fromJson(
          new String(Files.readAllBytes(new File(topicConfigFile).toPath()),
              java.nio.charset.StandardCharsets.UTF_8),
          TopicConfig.class);
      MemqGovernor.createTopic(client, config);
    } finally {
      // Always release the Curator client, even if topic creation throws.
      client.close();
    }
  }

  private static void printHelp(Options options) {
    HelpFormatter formatter = new HelpFormatter();
    formatter.printHelp("memqcli", options);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/processing/TopicProcessor.java | memq/src/main/java/com/pinterest/memq/core/processing/TopicProcessor.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.processing;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.pinterest.memq.commons.protocol.ReadRequestPacket;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.WriteRequestPacket;
import com.pinterest.memq.core.utils.MiscUtils;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
/**
 * Base class for per-topic request processors. A TopicProcessor accepts client
 * write (and optionally read) requests for a single topic and moves the data
 * through the broker pipeline; concrete subclasses decide how messages are
 * batched, dispatched and acknowledged.
 *
 * The protected metric fields are shared with subclasses and are populated by
 * {@link #initializeMetrics(MetricRegistry)}.
 */
public abstract class TopicProcessor {
  // lifecycle state; volatile so callers observe transitions without locking
  protected volatile TopicProcessorState state = TopicProcessorState.STOPPED;
  // end-to-end latency of a write request
  protected Timer totalWriteLatency;
  // time a writer waits for a slot in the current batch/buffer
  protected Timer slotWaitLatency;
  protected Counter checkProcessedCounter;
  // total write requests accepted
  protected Counter writeCounter;
  // writes rejected up front (e.g. payload too large)
  protected Counter writeRejectCounter;
  // writes that failed during processing
  protected Counter writeErrorCounter;
  protected Timer tickerLatency;
  // ack bookkeeping counters
  protected Counter processedCounter;
  protected Counter failedCounter;
  protected Counter pendingCounter;
  protected Counter slotCheckCounter;

  /**
   * Registers the shared metrics on the supplied registry; subclasses that
   * override this should call super to keep the base metrics populated.
   */
  protected void initializeMetrics(MetricRegistry registry) {
    totalWriteLatency = MiscUtils.oneMinuteWindowTimer(registry, "tp.totalWriteLatency");
    slotWaitLatency = MiscUtils.oneMinuteWindowTimer(registry, "tp.slotWaitLatency");
    tickerLatency = MiscUtils.oneMinuteWindowTimer(registry, "tp.tickPublishLatency");
    writeCounter = registry.counter("tp.writeCounter");
    writeErrorCounter = registry.counter("tp.writeErrors");
    writeRejectCounter = registry.counter("tp.writeRejectCounter");
    checkProcessedCounter = registry.counter("tp.checkProcessedCounter");
    processedCounter = registry.counter("tp.ackProcessed");
    failedCounter = registry.counter("tp.ackFailed");
    pendingCounter = registry.counter("tp.ackPending");
    slotCheckCounter = registry.counter("tp.slotCheckCounter");
  }

  /**
   * Applies an updated topic configuration. The default implementation is a
   * no-op that reports success; subclasses override to apply changes.
   */
  public boolean reconfigure(TopicConfig topicConfig) {
    return true;
  }

  /**
   * Processes one write request.
   *
   * @return a server-side request id assigned to this write
   */
  public abstract long write(RequestPacket basePacket,
                             WriteRequestPacket writePacket,
                             ChannelHandlerContext ctx);

  /** Stops immediately without draining in-flight work. */
  public abstract void stopNow();

  /** Stops gracefully, waiting for in-flight work to complete. */
  public abstract void stopAndAwait() throws InterruptedException;

  public TopicProcessorState getState() {
    return state;
  }

  /** @return the acker used for delivery tracking, or null if not supported */
  public abstract Ackable getAcker();

  public abstract int getRemaining();

  public abstract float getAvailableCapacity();

  public abstract TopicConfig getTopicConfig();

  /** Hook invoked when a client channel attaches to this topic; default no-op. */
  public void registerChannel(Channel channel) {
  }

  /** Processes one read request. */
  public abstract void read(RequestPacket requestPacket,
                            ReadRequestPacket readPacket,
                            ChannelHandlerContext ctx);
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/processing/Ackable.java | memq/src/main/java/com/pinterest/memq/core/processing/Ackable.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.processing;
/**
 * Records the delivery outcome of a message identified by its ack id.
 * Implementations (e.g. MapAcker) keep a status per id that producers or
 * internal components can later query.
 */
public interface Ackable {
  /** Marks the message with the given ack id as successfully processed. */
  void markProcessed(long ackId);
  /** Marks the message with the given ack id as still in flight. */
  void markPending(long ackId);
  /** Marks the message with the given ack id as failed. */
  void markFailed(long ackId);
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/processing/ProcessingStatus.java | memq/src/main/java/com/pinterest/memq/core/processing/ProcessingStatus.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.processing;
/**
 * Delivery outcome of a message as tracked by an {@code Ackable}
 * implementation (see MapAcker, where a missing/expired entry loads as FAILED).
 */
public enum ProcessingStatus {
  FAILED,
  PENDING,
  PROCESSED
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/processing/TopicProcessorState.java | memq/src/main/java/com/pinterest/memq/core/processing/TopicProcessorState.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.processing;
/**
 * Lifecycle states of a {@code TopicProcessor}; processors start in STOPPED
 * and transition through INITIALIZING/RUNNING/STOPPING as they are managed.
 */
public enum TopicProcessorState {
  INITIALIZING,
  RUNNING,
  STOPPING,
  STOPPED
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/processing/MapAcker.java | memq/src/main/java/com/pinterest/memq/core/processing/MapAcker.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.processing;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
/**
 * {@link Ackable} backed by a Guava {@link LoadingCache}: each message's ack id
 * maps to its current {@link ProcessingStatus}. Entries expire after
 * {@value #DEFAULT_ACK_MAP_TIMEOUT_MINUTES} minutes without access, and a
 * lookup for a missing or expired id loads as FAILED (counted via the
 * missing-entry counter).
 */
public class MapAcker implements Ackable {

  private static final Logger logger = Logger.getLogger(MapAcker.class.getName());
  private static final int DEFAULT_ACK_MAP_TIMEOUT_MINUTES = 10;
  // Maximum time (milliseconds) stop() waits for the ack map to drain.
  // Bug fix: a minute is 60_000 ms; the previous multiplier of 3600_000
  // (ms per HOUR) turned this "10 minute" timeout into 10 hours.
  private static final int DEFAULT_ACKER_SHUTDOWN_TIMEOUT = DEFAULT_ACK_MAP_TIMEOUT_MINUTES
      * 60_000;

  private LoadingCache<Long, ProcessingStatus> ackMap;
  private String topicName;
  private MetricRegistry registry;
  private Counter missingAckEntryCounter;

  public MapAcker(String topicName, MetricRegistry registry) {
    this.topicName = topicName;
    this.registry = registry;
    initializeAckerMap();
  }

  /**
   * Builds the ack cache (bounded at 10000 entries, expire-after-access) and
   * registers its size gauge and missing-entry counter on the registry.
   */
  public void initializeAckerMap() {
    missingAckEntryCounter = registry.counter("acker.missingAckEntryCounter");
    registry.register("acker.ackMapSize", new Gauge<Integer>() {
      @Override
      public Integer getValue() {
        return (int) ackMap.size();
      }
    });
    ackMap = CacheBuilder.newBuilder().maximumSize(10000)
        .expireAfterAccess(DEFAULT_ACK_MAP_TIMEOUT_MINUTES, TimeUnit.MINUTES)
        .build(new CacheLoader<Long, ProcessingStatus>() {
          @Override
          public ProcessingStatus load(Long key) throws Exception {
            // a lookup for an id that was never stored (or expired) counts as failed
            missingAckEntryCounter.inc();
            return ProcessingStatus.FAILED;
          }
        });
    logger.info("Initialized acker map");
  }

  public LoadingCache<Long, ProcessingStatus> getAckMap() {
    return ackMap;
  }

  public void setAckMap(LoadingCache<Long, ProcessingStatus> ackMap) {
    this.ackMap = ackMap;
  }

  /**
   * Blocks until the ack map drains (entries expire) or the shutdown timeout
   * elapses, polling once a second. Returns early, with interrupt status
   * restored, if the waiting thread is interrupted.
   */
  public void stop() {
    int timer = 0;
    while (ackMap.size() > 0) {
      try {
        ackMap.cleanUp();
        if (timer > DEFAULT_ACKER_SHUTDOWN_TIMEOUT) {
          break;
        }
        Thread.sleep(1000);
        timer += 1000;
      } catch (InterruptedException e) {
        logger.log(Level.SEVERE, "Stop thread interrupted for:" + topicName, e);
        // restore interrupt status and stop waiting instead of spinning forever
        Thread.currentThread().interrupt();
        break;
      }
    }
    logger.fine("Successfully cleaned up ack map");
  }

  public void markProcessed(long ackKey) {
    ackMap.put(ackKey, ProcessingStatus.PROCESSED);
  }

  public void markFailed(long ackKey) {
    ackMap.put(ackKey, ProcessingStatus.FAILED);
  }

  public void markPending(long ackKey) {
    ackMap.put(ackKey, ProcessingStatus.PENDING);
  }

  /**
   * @return the recorded status for the ack key; FAILED when the key was never
   *         recorded or has expired (via the cache loader)
   */
  public ProcessingStatus getProcessingStatus(long ackKey) throws ExecutionException {
    return ackMap.get(ackKey);
  }

  @Override
  public String toString() {
    return "Acker [ackMap=" + ackMap.asMap() + "]";
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/processing/bucketing/Batch.java | memq/src/main/java/com/pinterest/memq/core/processing/bucketing/Batch.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.processing.bucketing;
import java.io.IOException;
import java.time.Duration;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import java.util.logging.Logger;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.SlidingTimeWindowArrayReservoir;
import com.codahale.metrics.Timer;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.WriteRequestPacket;
import com.pinterest.memq.commons.protocol.WriteResponsePacket;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.WriteFailedException;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.utils.CoreUtils;
import com.pinterest.memq.core.utils.MiscUtils;
import io.netty.channel.ChannelHandlerContext;
/**
 * A pooled, single-use message batch. Writer threads append messages until the
 * batch is "sealed" by one of three triggers — accumulated bytes reaching
 * sizeDispatchThreshold, message count reaching countDispatchThreshold, or
 * timeDispatchThreshold elapsing — after which a DispatchTask uploads the
 * messages through the StorageHandler, acks the producers with the outcome,
 * and recycles the batch back to the owning BatchManager.
 */
public class Batch {
  private static final Logger logger = Logger.getLogger(Batch.class.getName());
  // runs the time-based dispatch timer
  private final ScheduledExecutorService scheduler;
  // runs the upload (DispatchTask)
  private final ExecutorService executor;
  private final StorageHandler handler;
  // thresholds are non-final: reset() reapplies them so config updates take effect
  private long sizeDispatchThreshold;
  private int countDispatchThreshold;
  private Duration timeDispatchThreshold;
  private Message[] messages;
  // bytes reserved so far by writers
  private final AtomicInteger usedCapacity = new AtomicInteger();
  // writers currently inside write(); upload may only start once this reaches 0
  private final AtomicInteger activeWrites = new AtomicInteger();
  // next slot index handed out to a writer
  private final AtomicInteger messageIdx = new AtomicInteger();
  // messages actually stored (may trail messageIdx while a writer fills its slot)
  private final AtomicInteger messagesCount = new AtomicInteger();
  // true while the batch accepts new writes; flipped off by seal()
  private final AtomicBoolean available = new AtomicBoolean(true);
  private final MetricRegistry registry;
  private final BatchManager manager;
  // guarded by synchronized(this) in tryDispatch() so the batch dispatches once
  private boolean dispatching = false;
  private volatile Future<?> timeDispatchTask;
  // wall-clock time the batch was (re)opened; drives the time-based trigger
  private volatile long startTime;
  private Counter sizeBasedBatchCounter;
  private Counter timeBasedBatchCounter;
  private Counter countBasedBatchCounter;
  private Timer accumulationTime;

  public Batch(BatchManager manager,
               int countDispatchThreshold,
               long sizeDispatchThreshold,
               Duration timeDispatchThreshold,
               ScheduledExecutorService scheduler,
               ExecutorService executor,
               StorageHandler handler,
               MetricRegistry registry) {
    this.sizeDispatchThreshold = sizeDispatchThreshold;
    this.countDispatchThreshold = countDispatchThreshold;
    this.timeDispatchThreshold = timeDispatchThreshold;
    this.scheduler = scheduler;
    this.executor = executor;
    this.handler = handler;
    this.messages = new Message[countDispatchThreshold];
    this.registry = registry;
    this.manager = manager;
    initializeMetrics(registry);
  }

  protected void initializeMetrics(MetricRegistry registry) {
    this.sizeBasedBatchCounter = registry.counter("batching.sizedbasedbatch");
    this.timeBasedBatchCounter = registry.counter("batching.timebasedbatch");
    this.countBasedBatchCounter = registry.counter("batching.countbasedbatch");
    this.accumulationTime = MiscUtils.oneMinuteWindowTimer(registry,"batching.accumulation.time");
  }

  /**
   * Re-opens a recycled batch: restarts the time-based dispatch timer, recycles
   * previously stored messages, applies (possibly updated) thresholds and marks
   * the batch available again.
   */
  public void reset(long sizeDispatchThreshold, int countDispatchThreshold, Duration timeDispatchThreshold) {
    startTime = System.currentTimeMillis();
    scheduleTimeBasedDispatch();
    usedCapacity.set(0);
    messageIdx.set(0);
    for (int i = 0; i < messagesCount.get(); i++) {
      messages[i].recycle();
      messages[i] = null;
    }
    messagesCount.set(0);
    available.set(true);
    this.sizeDispatchThreshold = sizeDispatchThreshold;
    if (this.countDispatchThreshold != countDispatchThreshold) {
      // reset messages[] size to new countDispatchThreshold
      messages = new Message[countDispatchThreshold];
      this.countDispatchThreshold = countDispatchThreshold;
    }
    this.timeDispatchThreshold = timeDispatchThreshold;
    synchronized (this) {
      dispatching = false;
    }
  }

  // post-dispatch cleanup; activeWrites must be 0 before the next reuse
  protected void clear() {
    activeWrites.set(0);
  }

  /**
   * (Re)schedules the timer that seals and dispatches the batch once
   * timeDispatchThreshold has elapsed since startTime. Cancels any previously
   * scheduled task first.
   */
  protected void scheduleTimeBasedDispatch() {
    if (timeDispatchTask != null) {
      timeDispatchTask.cancel(true);
    }
    timeDispatchTask = scheduler.schedule(() -> {
      if (!Thread.interrupted()) {
        if (System.currentTimeMillis() - startTime >= timeDispatchThreshold.toMillis()) {
          // if wasAvailable == true, the payload was sealed due to time threshold, so we should try to dispatch
          // if it was false, it means that a write has been initiated and sealed the payload, so the dispatching is on that write
          boolean wasAvailable = seal();
          if (wasAvailable && isReadyToUpload()) {
            tryDispatch(true);
          }
        }
      }
    }, timeDispatchThreshold.toMillis(), TimeUnit.MILLISECONDS);
  }

  /**
   * Attempts to append one write request to this batch.
   *
   * @return true if the message was stored; false if the batch is sealed or
   *         this write crossed a size/count threshold — the caller should then
   *         acquire a fresh batch and retry
   */
  public boolean write(WriteRequestPacket writePacket,
                       long serverRequestId,
                       long clientRequestId,
                       short protocolVersion,
                       ChannelHandlerContext ctx) {
    int dataLength = writePacket.getDataLength();
    activeWrites.incrementAndGet();
    try {
      if (!available.get()) {
        return false;
      }
      int usage = usedCapacity.addAndGet(dataLength);
      if (usage < sizeDispatchThreshold) {
        int idx = messageIdx.getAndIncrement();
        if (idx < countDispatchThreshold) {
          // retainedSlice: the batch keeps its own reference to the payload buffer
          messages[idx] = Message.newInstance(
              writePacket.getData().retainedSlice(),
              clientRequestId,
              serverRequestId,
              ctx,
              protocolVersion
          );
          messagesCount.getAndIncrement();
          if (idx == countDispatchThreshold - 1) {
            // last message should shut the door and finalize the batch
            seal();
          }
          return true;
        }
      }
      // size or count threshold exceeded by this write: seal so callers move on
      seal();
      return false;
    } finally {
      activeWrites.decrementAndGet();
      // if payload is not available
      if (!isAvailable() && isReadyToUpload()) {
        tryDispatch(false);
      }
    }
  }

  /**
   * Marks the batch unavailable for further writes.
   *
   * @return the previous availability (true means this call did the sealing)
   */
  public boolean seal() {
    return available.getAndSet(false);
  }

  /**
   * Dispatches the batch at most once (double-checked locking on
   * {@code dispatching}) and increments the counter for whichever trigger
   * (time, count or size) caused the dispatch.
   */
  protected void tryDispatch(boolean isTimeBased) {
    if (!dispatching) {
      synchronized (this) {
        if (!dispatching) {
          if (!isTimeBased) {
            timeDispatchTask.cancel(true);
          }
          dispatching = true;
          if (isTimeBased) {
            timeBasedBatchCounter.inc();
          } else if (messagesCount.get() >= countDispatchThreshold) {
            countBasedBatchCounter.inc();
          } else {
            sizeBasedBatchCounter.inc();
          }
          accumulationTime.update(System.currentTimeMillis() - startTime, TimeUnit.MILLISECONDS);
          dispatch(isTimeBased);
        }
      }
    }
  }

  // hands the upload off to the dispatcher pool
  protected void dispatch(boolean isTimeBased) {
    timeDispatchTask.cancel(true);
    executor.execute(new DispatchTask(isTimeBased));
  }

  // true when no writer is mid-append, i.e. messages[] is stable for upload
  protected boolean isReadyToUpload() {
    return activeWrites.get() == 0;
  }

  public boolean isAvailable() {
    return available.get();
  }

  /**
   * Uploads the sealed batch via the storage handler, acks every producer with
   * the resulting response code, and recycles the batch back to the manager.
   */
  protected class DispatchTask implements Runnable {
    private Counter uploadBatchCounter;
    private Counter uploadBytesCounter;
    private Timer uploadLatency;
    private Counter outputErrorCounter;
    private Counter uploadMessageCounter;
    private Counter ackChannelWriteError;
    private Timer ackLatency;
    private Counter activeParallelTasks;
    private Histogram batchSizeBytes;
    private final boolean isTimeBased;
    private Histogram batchMessageCountHistogram;

    public DispatchTask(boolean isTimeBased) {
      initializeMetrics();
      this.isTimeBased = isTimeBased;
    }

    private void initializeMetrics() {
      this.uploadBatchCounter = registry.counter("output.batchCount");
      this.activeParallelTasks = registry.counter("output.activeParallelTasks");
      this.uploadMessageCounter = registry.counter("output.messageCount");
      this.uploadBytesCounter = registry.counter("output.uploadBytes");
      this.outputErrorCounter = registry.counter("output.error");
      this.ackChannelWriteError = registry.counter("acker.ackerror");
      this.uploadLatency = MiscUtils.oneMinuteWindowTimer(registry,"output.uploadLatency");
      this.ackLatency = MiscUtils.oneMinuteWindowTimer(registry,"acker.push.latency");
      this.batchSizeBytes = registry.histogram("output.batchSizeBytes", () ->
          new Histogram(new SlidingTimeWindowArrayReservoir(1, TimeUnit.MINUTES)));
      this.batchMessageCountHistogram = registry.histogram("output.batchMessageCount", () ->
          new Histogram(new SlidingTimeWindowArrayReservoir(1, TimeUnit.MINUTES)));
    }

    @Override
    public void run() {
      // snapshot of the stored messages; safe because the batch is sealed and
      // activeWrites hit 0 before dispatch
      final List<Message> messageList = Arrays.asList(messages).subList(0, messagesCount.get());
      if (messageList.isEmpty()) {
        clear();
        return;
      }
      short responseCode = ResponseCodes.INTERNAL_SERVER_ERROR;
      int sizeInBytes = CoreUtils.batchSizeInBytes(messageList);
      activeParallelTasks.inc();
      try {
        int checksum = CoreUtils.batchChecksum(messageList);
        Timer.Context uploadTimer = uploadLatency.time();
        handler.writeOutput(sizeInBytes, checksum, messageList);
        uploadTimer.stop();
        responseCode = ResponseCodes.OK;
        updateSuccessMetrics(messageList, sizeInBytes);
      } catch (WriteFailedException | IOException e) {
        logger.log(Level.SEVERE, "Failed to upload batch: ", e);
        responseCode = ResponseCodes.REQUEST_FAILED;
        outputErrorCounter.inc();
      } finally {
        // always release buffers, ack producers (success or failure) and recycle
        activeParallelTasks.dec();
        clearMessageBuffers(messageList);
        ackMessages(messageList, responseCode);
        clear();
        manager.recycle(Batch.this, isTimeBased);
      }
    }

    private void updateSuccessMetrics(List<Message> messageList, int sizeInBytes) {
      uploadBytesCounter.inc(sizeInBytes);
      uploadMessageCounter.inc(messageList.size());
      batchMessageCountHistogram.update(messageList.size());
      uploadBatchCounter.inc();
      batchSizeBytes.update(sizeInBytes);
    }

    // releases the retained payload slices taken in Batch.write()
    protected void clearMessageBuffers(List<Message> messages) {
      messages.forEach(message -> {
        message.getBuf().release();
      });
    }

    // sends one response per message back to its producer channel, if any
    protected void ackMessages(List<Message> messages, short responseCode) {
      Timer.Context ackTimer = ackLatency.time();
      for (Message m : messages) {
        ChannelHandlerContext channelRef = m.getPipelineReference();
        if (channelRef != null) {
          try {
            channelRef.writeAndFlush(new ResponsePacket(m.getClientProtocolVersion(),
                m.getClientRequestId(), RequestType.WRITE, responseCode,
                new WriteResponsePacket()));
          } catch (Exception e2) {
            ackChannelWriteError.inc();
          }
        }
      }
      ackTimer.stop();
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/processing/bucketing/BatchManager.java | memq/src/main/java/com/pinterest/memq/core/processing/bucketing/BatchManager.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.processing.bucketing;
import com.pinterest.memq.commons.protocol.WriteRequestPacket;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.core.commons.MemqProcessingThreadFactory;
import com.pinterest.memq.core.utils.MiscUtils;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import java.nio.ByteBuffer;
import java.time.Duration;
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.zip.CRC32;
import javax.ws.rs.BadRequestException;
/**
 * Owns the currently-filling {@link Batch} for a topic: hands writers the open
 * batch, creates/recycles batches through a bounded pool
 * ({@value #PAYLOAD_CACHE_SIZE_LIMIT} entries), validates client-supplied
 * checksums, and owns the dispatcher thread pool used for uploads.
 */
public class BatchManager {

  private volatile Batch currentBatch;
  private final Queue<Batch> recycledBatches;
  private final ScheduledExecutorService scheduler;
  private final ExecutorService dispatcher;
  // volatile: written by reconfigure(), read by writer threads
  private volatile Duration timeDispatchThreshold;
  private volatile long sizeDispatchThreshold;
  private volatile int countDispatchThreshold;
  private final StorageHandler handler;
  private final MetricRegistry registry;
  private static final int PAYLOAD_CACHE_SIZE_LIMIT = 10;
  private Histogram payloadRetries;
  private Timer payloadWriteTime;
  private Timer payloadAcquireTime;
  private Timer payloadValidationTime;
  private Counter payloadCreation;

  public BatchManager(long sizeDispatchThreshold, int countDispatchThreshold,
                      Duration timeDispatchThreshold,
                      ScheduledExecutorService scheduler, StorageHandler handler,
                      int outputParallelism, MetricRegistry registry) {
    this.sizeDispatchThreshold = sizeDispatchThreshold;
    this.countDispatchThreshold = countDispatchThreshold;
    this.timeDispatchThreshold = timeDispatchThreshold;
    this.scheduler = scheduler;
    this.handler = handler;
    this.dispatcher = Executors.newFixedThreadPool(outputParallelism, new MemqProcessingThreadFactory("processing-"));
    this.registry = registry;
    this.recycledBatches = new ArrayBlockingQueue<>(PAYLOAD_CACHE_SIZE_LIMIT);
    initializeMetrics(registry);
  }

  /**
   * Applies updated batching thresholds; individual batches pick the new values
   * up on their next {@code reset(...)} when they are (re)acquired.
   */
  public boolean reconfigure(long sizeDispatchThreshold, int countDispatchThreshold, Duration timeDispatchThreshold) {
    if (sizeDispatchThreshold != this.sizeDispatchThreshold) {
      this.sizeDispatchThreshold = sizeDispatchThreshold;
    }
    if (countDispatchThreshold != this.countDispatchThreshold) {
      this.countDispatchThreshold = countDispatchThreshold;
    }
    if (!timeDispatchThreshold.equals(this.timeDispatchThreshold)) {
      this.timeDispatchThreshold = timeDispatchThreshold;
    }
    // the batches will be updated during batch.reset(sizeDispatchThreshold, countDispatchThreshold, timeDispatchThreshold)
    return true;
  }

  protected void initializeMetrics(MetricRegistry registry) {
    this.payloadRetries = registry.histogram("batching.payload.retries");
    registry.gauge("batching.payload.cache.size", () ->
        (Gauge<Integer>) recycledBatches::size
    );
    this.payloadCreation = registry.counter("batching.payload.creation");
    this.payloadWriteTime = MiscUtils.oneMinuteWindowTimer(registry,"batching.payload.write");
    this.payloadAcquireTime = MiscUtils.oneMinuteWindowTimer(registry, "batching.payload.acquire");
    this.payloadValidationTime = MiscUtils.oneMinuteWindowTimer(registry, "batching.payload.validate");
  }

  /**
   * Validates the packet checksum (when present) and writes the message into an
   * available batch, retrying with a fresh batch whenever the current one is
   * sealed or full.
   *
   * @throws BadRequestException on checksum mismatch or when no batch accepts
   *         the message
   */
  public void write(WriteRequestPacket writePacket,
                    long serverRequestId,
                    long clientRequestId,
                    short protocolVersion,
                    ChannelHandlerContext ctx) {
    if (writePacket.isChecksumExists()) {
      Timer.Context payloadValidationTimer = payloadValidationTime.time();
      try {
        validateChecksumAndRejectMessage(writePacket.getData().slice(), writePacket.getChecksum());
      } catch (Exception e) {
        throw new BadRequestException(clientRequestId + " : " + e.getMessage());
      } finally {
        payloadValidationTimer.stop();
      }
    }
    int retries = 0;
    Batch batch = getAvailablePayload();
    Timer.Context payloadWriteTimeTimer = payloadWriteTime.time();
    try {
      while (batch != null) {
        if(batch.write(writePacket, serverRequestId, clientRequestId, protocolVersion, ctx)) {
          payloadRetries.update(retries);
          return;
        } else {
          // batch was sealed/full; acquire a (possibly new) batch and retry
          batch = getAvailablePayload();
          retries++;
        }
      }
      throw new BadRequestException(
          "Failed to write message " + clientRequestId + " : no available payload"
      );
    } finally {
      payloadWriteTimeTimer.stop();
    }
  }

  /**
   * Returns the current open batch, creating or recycling one (double-checked
   * locking on {@code currentBatch}) when the current batch is sealed.
   */
  protected Batch getAvailablePayload() {
    Timer.Context acquirePayloadTimeTimer = payloadAcquireTime.time();
    try {
      if (currentBatch == null || !currentBatch.isAvailable()) {
        synchronized (this) {
          if (currentBatch == null || !currentBatch.isAvailable()) {
            Batch batch = recycledBatches.poll();
            if (batch == null) {
              batch = new Batch(
                  this,
                  countDispatchThreshold,
                  sizeDispatchThreshold,
                  timeDispatchThreshold,
                  scheduler,
                  dispatcher,
                  handler,
                  registry
              );
              payloadCreation.inc();
            }
            batch.reset(sizeDispatchThreshold, countDispatchThreshold, timeDispatchThreshold); // reset thresholds in case configs are updated
            currentBatch = batch;
          }
          return currentBatch;
        }
      }
      return currentBatch;
    } finally {
      acquirePayloadTimeTimer.stop();
    }
  }

  /**
   * Returns a dispatched batch to the bounded pool; when the pool is full the
   * batch is simply dropped for GC (offer's false return is intentional).
   */
  public void recycle(Batch p, boolean isTimeBased) {
    recycledBatches.offer(p);
  }

  /**
   * Recomputes CRC32 over the payload and compares its low 32 bits against the
   * client-supplied checksum.
   *
   * @throws Exception when the checksums disagree
   */
  private void validateChecksumAndRejectMessage(ByteBuf checksumBuffer,
                                                int payloadChecksum) throws Exception {
    ByteBuffer byteBuffer = checksumBuffer.nioBuffer();
    CRC32 crc32 = new CRC32();
    crc32.update(byteBuffer);
    // CRC32.getValue() is an unsigned 32-bit value in a long; truncate to int
    // so it compares directly with the 32-bit checksum from the wire
    int localChecksum = (int) crc32.getValue();
    if (localChecksum != payloadChecksum) {
      throw new Exception(
          "Invalid checksum - header: " + payloadChecksum + " payload: " + localChecksum);
    }
  }

  /** Stops the dispatcher pool immediately, abandoning queued uploads. */
  public void stopNow() {
    dispatcher.shutdownNow();
  }

  /** Graceful stop: drain in-flight dispatches, then close the storage writer. */
  public void stop() throws InterruptedException {
    dispatcher.shutdown();
    dispatcher.awaitTermination(100, TimeUnit.SECONDS);
    handler.closeWriter();
  }

  /**
   * Seals and dispatches the current batch immediately, bypassing the
   * size/count/time thresholds. No-op when no batch has been created yet
   * (previously this dereferenced a null {@code currentBatch}).
   */
  public void forceDispatch() {
    Batch batch = currentBatch;
    if (batch == null) {
      return;
    }
    batch.seal();
    batch.dispatch(false);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/processing/bucketing/BucketingTopicProcessor.java | memq/src/main/java/com/pinterest/memq/core/processing/bucketing/BucketingTopicProcessor.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.processing.bucketing;
import java.io.File;
import java.io.IOException;
import java.time.Duration;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
import javax.ws.rs.BadRequestException;
import javax.ws.rs.InternalServerErrorException;
import javax.ws.rs.core.Response;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.codahale.metrics.SlidingTimeWindowArrayReservoir;
import com.codahale.metrics.Timer;
import com.pinterest.memq.client.commons.MemqMessageHeader;
import com.pinterest.memq.client.commons2.DataNotFoundException;
import com.pinterest.memq.commons.protocol.BatchData;
import com.pinterest.memq.commons.protocol.ReadRequestPacket;
import com.pinterest.memq.commons.protocol.ReadResponsePacket;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.WriteRequestPacket;
import com.pinterest.memq.commons.protocol.WriteResponsePacket;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.core.processing.Ackable;
import com.pinterest.memq.core.processing.TopicProcessor;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.DefaultFileRegion;
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.util.concurrent.GlobalEventExecutor;
public class BucketingTopicProcessor extends TopicProcessor {
private static final BatchData EMPTY_BATCH_DATA = new BatchData();
private static final Logger logger = Logger.getLogger(BucketingTopicProcessor.class.getName());
private final BatchManager batchManager;
private final AtomicLong serverRequestIdGenerator = new AtomicLong(
ThreadLocalRandom.current().nextLong());
private final ScheduledReporter reporter;
private final String topicName;
private final StorageHandler storageHandler;
private volatile long sizeDispatchThreshold;
private volatile boolean enableHeaderValidation;
private ChannelGroup channelGroup;
private Histogram messageSizeHistogram;
private Counter invalidHeaderTooLargeCounter;
private Counter invalidHeaderNegativeCounter;
private Counter invalidHeaderExceptionCounter;
private Counter emptyDataCounter;
  /**
   * Creates a processor for the given topic, wiring a BatchManager configured
   * from the topic's batching settings (batch size, max dispatch count, batch
   * interval, output parallelism).
   */
  public BucketingTopicProcessor(MetricRegistry registry,
                                 TopicConfig topicConfig,
                                 StorageHandler storageHandler,
                                 ScheduledExecutorService timerService,
                                 ScheduledReporter reporter) {
    this.sizeDispatchThreshold = topicConfig.getBatchSizeBytes();
    this.enableHeaderValidation = topicConfig.isEnableServerHeaderValidation();
    this.reporter = reporter;
    this.topicName = topicConfig.getTopic();
    this.storageHandler = storageHandler;
    // channel group named after the topic; presumably used to track attached
    // client channels — confirm against registerChannel/read paths
    this.channelGroup = new DefaultChannelGroup(topicName, GlobalEventExecutor.INSTANCE);
    this.batchManager = new BatchManager(sizeDispatchThreshold, topicConfig.getMaxDispatchCount(),
        Duration.ofMillis(topicConfig.getBatchMilliSeconds()), timerService, storageHandler,
        topicConfig.getOutputParallelism(), registry);
    initializeMetrics(registry);
  }
@Override
public boolean reconfigure(TopicConfig topicConfig) {
long newSizeDispatchThreshold = topicConfig.getBatchSizeBytes();
if (newSizeDispatchThreshold != sizeDispatchThreshold) {
sizeDispatchThreshold = newSizeDispatchThreshold;
}
if (topicConfig.isEnableServerHeaderValidation() != enableHeaderValidation) {
enableHeaderValidation = topicConfig.isEnableServerHeaderValidation();
}
batchManager.reconfigure(sizeDispatchThreshold, topicConfig.getMaxDispatchCount(),
Duration.ofMillis(topicConfig.getBatchMilliSeconds()));
storageHandler.reconfigure(topicConfig.getStorageHandlerConfig());
return true;
}
  /**
   * Accepts a single write request: optionally pre-acks it, validates size (and, when
   * enabled, the embedded message header), then enqueues the payload into the BatchManager.
   *
   * @param ctx channel to ack on; nulled locally when the producer disabled acks
   * @return the server-side request id assigned to this write
   * @throws BadRequestException if the payload exceeds the dispatch threshold
   */
  @Override
  public long write(RequestPacket basePacket,
                    WriteRequestPacket writePacket,
                    ChannelHandlerContext ctx) {
    if (writePacket.isDisableAcks()) {
      // send an OK to producer even if ack is disabled
      ctx.writeAndFlush(new ResponsePacket(basePacket.getProtocolVersion(),
          basePacket.getClientRequestId(), basePacket.getRequestType(), ResponseCodes.OK,
          new WriteResponsePacket()));
      // context no longer needed during the write, set it to null so acks won't be sent
      ctx = null;
    }
    messageSizeHistogram.update(writePacket.getDataLength());
    if (writePacket.getDataLength() > sizeDispatchThreshold) {
      // Oversized payloads are rejected up front; they could never fit in a batch.
      writeRejectCounter.inc();
      throw new BadRequestException(
          "Payload too big for " + basePacket.getClientRequestId() + " " + sizeDispatchThreshold);
    }
    if (enableHeaderValidation) {
      // Validation only logs and counts; it never rejects the write.
      validateHeader(ctx, basePacket, writePacket);
    }
    long serverRequestId = serverRequestIdGenerator.getAndIncrement();
    writeCounter.inc();
    if (writePacket.getDataLength() == MemqMessageHeader.getHeaderLength()) {
      // empty data, immediately respond without writing anything
      emptyDataCounter.inc();
      if (ctx != null) {
        ctx.writeAndFlush(
            new ResponsePacket(basePacket.getProtocolVersion(), basePacket.getClientRequestId(),
                basePacket.getRequestType(), ResponseCodes.OK, new WriteResponsePacket()));
      }
      return serverRequestId;
    }
    // NOTE(review): the timer context is not closed if batchManager.write throws, so the
    // latency sample for a failed enqueue is simply dropped — confirm this is intended.
    Timer.Context totalWriteLatencyTimer = totalWriteLatency.time();
    batchManager.write(writePacket, serverRequestId, basePacket.getClientRequestId(),
        basePacket.getProtocolVersion(), ctx);
    totalWriteLatencyTimer.stop();
    return serverRequestId;
  }
  /** Immediately stops batching for this topic, delegating to the BatchManager. */
  @Override
  public void stopNow() {
    batchManager.stopNow();
  }
  /**
   * Graceful shutdown: closes the per-topic metrics reporter (if any), then stops the
   * BatchManager, which may block until in-flight work completes.
   */
  @Override
  public void stopAndAwait() throws InterruptedException {
    if (reporter != null) {
      reporter.close();
    }
    batchManager.stop();
  }
  /** Forces the BatchManager to dispatch whatever is currently batched. */
  protected void forceDispatch() {
    batchManager.forceDispatch();
  }
  /** Not supported by this processor; acks are handled inline in write(). Always null. */
  @Override
  public Ackable getAcker() {
    return null;
  }
  /** Not tracked by this processor; always reports 0 remaining. */
  @Override
  public int getRemaining() {
    return 0;
  }
  /** Not tracked by this processor; always reports 0 available capacity. */
  @Override
  public float getAvailableCapacity() {
    return 0;
  }
  /** Not retained by this processor; the config is consumed in the constructor. Always null. */
  @Override
  public TopicConfig getTopicConfig() {
    return null;
  }
  /**
   * Registers this processor's metrics on top of the superclass set: a one-minute sliding
   * message-size histogram, counters for the header-validation failure modes and empty
   * payloads, and a gauge over the registered channel group size.
   */
  @Override
  protected void initializeMetrics(MetricRegistry registry) {
    super.initializeMetrics(registry);
    messageSizeHistogram = registry.histogram("tp.message.size",
        () -> new Histogram(new SlidingTimeWindowArrayReservoir(1, TimeUnit.MINUTES)));
    invalidHeaderTooLargeCounter = registry
        .counter("tp.message.invalid.header.message_length_too_large");
    invalidHeaderNegativeCounter = registry
        .counter("tp.message.invalid.header.message_length_negative");
    invalidHeaderExceptionCounter = registry.counter("tp.message.invalid.header.exception");
    emptyDataCounter = registry.counter("tp.message.empty.data");
    registry.gauge("tp.channel.group.size", () -> channelGroup::size);
  }
protected void validateHeader(ChannelHandlerContext ctx,
RequestPacket basePacket,
WriteRequestPacket writePacket) {
MemqMessageHeader header;
try {
header = new MemqMessageHeader(writePacket.getData().slice());
} catch (Exception e) {
logger.log(
Level.SEVERE, "Failed to parse message header from: " + getRemoteAddressFromCtx(ctx)
+ ", topic: " + topicName + ", clientRequestId: " + basePacket.getClientRequestId(),
e);
invalidHeaderExceptionCounter.inc();
return;
}
if (header.getMessageLength() > sizeDispatchThreshold) {
logger.severe("Received message with invalid header message length: "
+ header.getMessageLength() + " from " + getRemoteAddressFromCtx(ctx) + ", topic: "
+ topicName + ", clientRequestId: " + basePacket.getClientRequestId());
invalidHeaderTooLargeCounter.inc();
} else if (header.getMessageLength() < 0) {
logger.severe("Received message with invalid header message length: "
+ header.getMessageLength() + " from " + getRemoteAddressFromCtx(ctx) + ", topic: "
+ topicName + ", clientRequestId: " + basePacket.getClientRequestId());
invalidHeaderNegativeCounter.inc();
}
}
private String getRemoteAddressFromCtx(ChannelHandlerContext ctx) {
return (ctx != null && ctx.channel() != null) ? ctx.channel().remoteAddress().toString()
: "n/a";
}
  /**
   * Serves a read request against the storage backend. Three fetch modes, chosen from the
   * packet: header-only, full batch stream, or a single message at a given index. The
   * result is either sent inline in the response packet (when a buffer is present) or
   * streamed zero-copy via a FileRegion (when only a file reference is present).
   *
   * @throws BadRequestException          if no fetch mode matches the request
   * @throws InternalServerErrorException on backend I/O failure
   */
  @Override
  public void read(RequestPacket requestPacket,
                   ReadRequestPacket readPacket,
                   ChannelHandlerContext ctx) {
    try {
      BatchData data = null;
      if (readPacket.isReadHeaderOnly()) {
        data = storageHandler.fetchHeaderForBatchBuf(readPacket.getNotification());
      } else if (readPacket.getReadIndex().getOffset() == ReadRequestPacket.DISABLE_READ_AT_INDEX) {
        data = storageHandler.fetchBatchStreamForNotificationBuf(readPacket.getNotification());
      } else if (readPacket.getReadIndex().getOffset() > ReadRequestPacket.DISABLE_READ_AT_INDEX) {
        data = storageHandler.fetchMessageAtIndexBuf(readPacket.getNotification(),
            readPacket.getReadIndex());
      } else {
        // unknown request type
        throw new BadRequestException("Invalid read request condition");
      }
      if (data.getDataAsBuf() != null) {
        // if send file is disabled then data will be sent in read packets
        ctx.writeAndFlush(
            new ResponsePacket(RequestType.PROTOCOL_VERSION, requestPacket.getClientRequestId(),
                requestPacket.getRequestType(), ResponseCodes.OK, new ReadResponsePacket(data)));
      } else {
        // Response header first (unflushed), then the file contents as a zero-copy region.
        ctx.write(
            new ResponsePacket(RequestType.PROTOCOL_VERSION, requestPacket.getClientRequestId(),
                requestPacket.getRequestType(), ResponseCodes.OK, new ReadResponsePacket(data)));
        File file = (File) data.getSendFileRef();
        ctx.writeAndFlush(new DefaultFileRegion(file, 0, file.length()));
      }
      logger.fine(() -> "Read completed for topic:" + readPacket.getTopicName() + " "
          + readPacket.getNotification());
    } catch (DataNotFoundException e) {
      // Absent data is a normal outcome: respond NO_DATA with an empty batch.
      logger.fine("Data not found");
      ctx.writeAndFlush(new ResponsePacket(RequestType.PROTOCOL_VERSION,
          requestPacket.getClientRequestId(), requestPacket.getRequestType(), ResponseCodes.NO_DATA,
          new ReadResponsePacket(EMPTY_BATCH_DATA)));
    } catch (IOException e) {
      throw new InternalServerErrorException(Response.serverError().build(), e);
    }
  }
  /** Tracks a client channel in the topic's channel group (feeds the size gauge). */
  @Override
  public void registerChannel(Channel channel) {
    channelGroup.add(channel);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/rpc/MemqNettyServer.java | memq/src/main/java/com/pinterest/memq/core/rpc/MemqNettyServer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.rpc;
import java.net.UnknownHostException;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.TrustManagerFactory;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.pinterest.memq.commons.config.SSLConfig;
import com.pinterest.memq.commons.mon.OpenTSDBClient;
import com.pinterest.memq.commons.mon.OpenTSDBReporter;
import com.pinterest.memq.core.MemqManager;
import com.pinterest.memq.core.clustering.MemqGovernor;
import com.pinterest.memq.core.config.AuthorizerConfig;
import com.pinterest.memq.core.config.MemqConfig;
import com.pinterest.memq.core.config.NettyServerConfig;
import com.pinterest.memq.core.security.Authorizer;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import com.pinterest.memq.core.utils.MemqUtils;
import com.pinterest.memq.core.utils.MiscUtils;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PoolArenaMetric;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.handler.ssl.ClientAuth;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.SslHandler;
import io.netty.handler.timeout.IdleStateHandler;
/**
 * Netty front-end for a MemQ broker: bootstraps the server socket (epoll when available),
 * assembles the per-channel pipeline (idle handling, optional traffic shaping, optional
 * mutual-TLS, frame decoding, request/response codecs) and publishes allocator metrics.
 */
public class MemqNettyServer {

  public static final String SSL_HANDLER_NAME = "ssl";
  private static final Logger logger = Logger.getLogger(MemqNettyServer.class.getName());
  // Include the metric names that are dependent on the Netty server but are not initialized in this class.
  private static final List<String> DEPENDENCY_METRIC_NAMES = Arrays.asList(
      BrokerTrafficShapingHandler.READ_LIMIT_METRIC_NAME
  );
  private EventLoopGroup childGroup;
  private EventLoopGroup parentGroup;
  private ChannelFuture serverChannelFuture;
  private MemqConfig configuration;
  private MemqManager memqManager;
  private Map<String, MetricRegistry> metricsRegistryMap;
  private OpenTSDBClient client = null;
  private boolean useEpoll;
  private MemqGovernor memqGovernor;

  /**
   * @param client OpenTSDB client; when null, metrics are registered but not reported
   */
  public MemqNettyServer(MemqConfig configuration,
                         MemqManager memqManager,
                         MemqGovernor governor,
                         Map<String, MetricRegistry> metricsRegistryMap,
                         OpenTSDBClient client) {
    this.configuration = configuration;
    this.memqManager = memqManager;
    this.memqGovernor = governor;
    this.metricsRegistryMap = metricsRegistryMap;
    this.client = client;
  }

  /**
   * Starts the server: initializes metrics and (optional) authorization, builds the
   * bootstrap and binds synchronously. Event loop groups are created before the
   * try-block so they exist even if binding fails.
   */
  public void initialize() throws Exception {
    MetricRegistry registry = initializeMetrics();
    Authorizer authorizer = enableAuthenticationAuthorizationAuditing(configuration);
    NettyServerConfig nettyServerConfig = configuration.getNettyServerConfig();
    this.useEpoll = Epoll.isAvailable() && nettyServerConfig.isEnableEpoll();
    childGroup = getEventLoopGroup(nettyServerConfig.getNumEventLoopThreads());
    // there can only be maximum of 1 acceptor threads
    parentGroup = getEventLoopGroup(1);
    logger.info("Starting Netty Server with epoll:" + useEpoll);
    try {
      ServerBootstrap serverBootstrap = new ServerBootstrap();
      serverBootstrap.group(parentGroup, childGroup);
      // -1 is the sentinel for "no broker-level input rate limit".
      boolean isTrafficShapingEnabled = nettyServerConfig.getMaxBrokerInputTrafficMbPerSec() != -1;
      long readLimit = 0;
      long checkIntervalMs = nettyServerConfig.getBrokerInputTrafficShapingCheckIntervalMs();
      if (isTrafficShapingEnabled) {
        // Long literals avoid int overflow for limits above 2047 MB/s.
        readLimit = nettyServerConfig.getMaxBrokerInputTrafficMbPerSec() * 1024L * 1024L;
        logger.info(String.format(
            "Broker traffic shaping is enabled with read limit: %d MB/s and check interval: %d ms",
            nettyServerConfig.getMaxBrokerInputTrafficMbPerSec(),
            checkIntervalMs
        ));
      }
      // One shared (channel-global) shaping handler; write limit 0 = unlimited.
      BrokerTrafficShapingHandler trafficShapingHandler = new BrokerTrafficShapingHandler(
          childGroup,
          0,
          readLimit,
          checkIntervalMs,
          registry
      );
      if (isTrafficShapingEnabled) {
        trafficShapingHandler.setMetricsReportingIntervalSec(
            nettyServerConfig.getBrokerInputTrafficShapingMetricsReportIntervalSec());
        trafficShapingHandler.startPeriodicMetricsReporting(childGroup);
      }
      if (useEpoll) {
        serverBootstrap.channel(EpollServerSocketChannel.class);
      } else {
        serverBootstrap.channel(NioServerSocketChannel.class);
      }
      serverBootstrap.localAddress(nettyServerConfig.getPort());
      serverBootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        protected void initChannel(SocketChannel channel) throws Exception {
          SSLConfig sslConfig = nettyServerConfig.getSslConfig();
          ChannelPipeline pipeline = channel.pipeline();
          // Jitter the idle timeout so a fleet of clients doesn't reconnect in lockstep.
          int idleTimeoutSec = ThreadLocalRandom.current().nextInt(
              configuration.getServerConnectionIdleTimeoutDeltaSec())
              + configuration.getServerConnectionIdleTimeoutSec();
          pipeline.addLast(new IdleStateHandler(0, 0, idleTimeoutSec, TimeUnit.SECONDS));
          pipeline.addLast(new ServerConnectionLifecycleHandler());
          if (isTrafficShapingEnabled) {
            pipeline.addLast(trafficShapingHandler);
            logger.info("Attach traffic shaping handler to channel: " + channel.id().asShortText());
          }
          if (sslConfig != null) {
            // Mutual TLS: client certificates are required when SSL is configured.
            KeyManagerFactory kmf = MemqUtils.extractKMFFromSSLConfig(sslConfig);
            TrustManagerFactory tmf = MemqUtils.extractTMPFromSSLConfig(sslConfig);
            SslContext ctx = SslContextBuilder.forServer(kmf).clientAuth(ClientAuth.REQUIRE)
                .trustManager(tmf).protocols(sslConfig.getProtocols()).build();
            SslHandler sslHandler = ctx.newHandler(channel.alloc());
            pipeline.addLast(SSL_HANDLER_NAME, sslHandler);
          }
          // Frames are length-prefixed with a big-endian int; the length field is retained.
          pipeline.addLast(new LengthFieldBasedFrameDecoder(ByteOrder.BIG_ENDIAN,
              nettyServerConfig.getMaxFrameByteLength(), 0, Integer.BYTES, 0, 0, false));
          pipeline.addLast(new MemqResponseEncoder(registry));
          pipeline.addLast(new MemqRequestDecoder(memqManager, memqGovernor, authorizer, registry));
        }
      });
      serverChannelFuture = serverBootstrap.bind().sync();
    } catch (Exception e) {
      // NOTE(review): startup failures are swallowed here, leaving serverChannelFuture null
      // (stop()/getServerChannelFuture() would then NPE) and the "started" log below still
      // prints — confirm whether rethrowing is the intended behavior.
      logger.log(Level.SEVERE, "Failed to start Netty server", e);
    }
    logger.info("\n\nNetty server has started on port:" + nettyServerConfig.getPort() + "\n");
  }

  /**
   * Registers Netty allocator gauges under the "_netty" registry and, when an OpenTSDB
   * client is present, starts one reporter per metric name (including dependency metrics
   * emitted elsewhere, e.g. by the traffic shaping handler).
   */
  private MetricRegistry initializeMetrics() throws UnknownHostException {
    MetricRegistry registry = new MetricRegistry();
    metricsRegistryMap.put("_netty", registry);
    registry.gauge("unpooled.direct.used.bytes",
        () -> (Gauge<Long>) () -> UnpooledByteBufAllocator.DEFAULT.metric().usedDirectMemory());
    registry.gauge("unpooled.heap.used.bytes",
        () -> (Gauge<Long>) () -> UnpooledByteBufAllocator.DEFAULT.metric().usedHeapMemory());
    registry.gauge("pooled.direct.used.bytes",
        () -> (Gauge<Long>) () -> PooledByteBufAllocator.DEFAULT.metric().usedDirectMemory());
    registry.gauge("pooled.heap.used.bytes",
        () -> (Gauge<Long>) () -> PooledByteBufAllocator.DEFAULT.metric().usedHeapMemory());
    registry.gauge("pooled.direct.arenas",
        () -> (Gauge<Integer>) () -> PooledByteBufAllocator.DEFAULT.metric().numDirectArenas());
    registry.gauge("pooled.direct.total.active.allocation.count",
        () -> (Gauge<Long>) () -> PooledByteBufAllocator.DEFAULT.metric().directArenas().stream()
            .mapToLong(PoolArenaMetric::numActiveAllocations).sum());
    registry.gauge("pooled.direct.total.active.allocation.bytes",
        () -> (Gauge<Long>) () -> PooledByteBufAllocator.DEFAULT.metric().directArenas().stream()
            .mapToLong(PoolArenaMetric::numActiveBytes).sum());
    if (client != null) {
      String localHostname = MiscUtils.getHostname();
      List<String> metricsNames = new ArrayList<>(registry.getNames());
      metricsNames.addAll(DEPENDENCY_METRIC_NAMES);
      for (String metricName : metricsNames) {
        ScheduledReporter reporter = OpenTSDBReporter.createReporter("netty", registry, metricName,
            (String name, Metric metric) -> true, TimeUnit.SECONDS, TimeUnit.SECONDS, client,
            localHostname);
        reporter.start(configuration.getOpenTsdbConfig().getFrequencyInSeconds(), TimeUnit.SECONDS);
      }
    }
    return registry;
  }

  /**
   * Instantiates and initializes the configured {@link Authorizer}, or returns null when
   * authorization is not configured.
   */
  private Authorizer enableAuthenticationAuthorizationAuditing(MemqConfig configuration) throws Exception {
    AuthorizerConfig authorizerConfig = configuration.getAuthorizerConfig();
    if (authorizerConfig != null) {
      // NOTE(review): asSubclass() is called on the *config* object's class, which only
      // works if AuthorizerConfig itself is an Authorizer — confirm this isn't meant to
      // instantiate a class named by the config instead.
      Authorizer authorizer = authorizerConfig.getClass().asSubclass(Authorizer.class)
          .getDeclaredConstructor().newInstance(); // replaces deprecated Class.newInstance()
      authorizer.init(authorizerConfig);
      return authorizer;
    }
    return null;
  }

  /** Builds a daemon-threaded event loop group, epoll-backed when enabled and available. */
  private EventLoopGroup getEventLoopGroup(int nThreads) {
    if (useEpoll) {
      logger.info("Epoll is available and will be used");
      return new EpollEventLoopGroup(nThreads, new DaemonThreadFactory());
    } else {
      return new NioEventLoopGroup(nThreads, new DaemonThreadFactory());
    }
  }

  public EventLoopGroup getChildGroup() {
    return childGroup;
  }

  public EventLoopGroup getParentGroup() {
    return parentGroup;
  }

  public ChannelFuture getServerChannelFuture() {
    return serverChannelFuture;
  }

  /** Closes the server channel and, if present, its parent (acceptor) channel. */
  public void stop() {
    serverChannelFuture.channel().close();
    if (serverChannelFuture.channel().parent() != null) {
      serverChannelFuture.channel().parent().close();
    }
  }

  public MemqGovernor getMemqGovernor() {
    return memqGovernor;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq/src/main/java/com/pinterest/memq/core/rpc/WriteResponse.java | memq/src/main/java/com/pinterest/memq/core/rpc/WriteResponse.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.rpc;
import com.pinterest.memq.commons.protocol.RequestType;
public class WriteResponse extends Response {
public WriteResponse(long requestId, short responseCode) {
super(requestId, responseCode);
}
@Override
public RequestType getRequestType() {
return RequestType.WRITE;
}
@Override
public byte[] getSerializedBytes() {
return null;
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.