repo stringclasses 1k
values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 6
values | commit_sha stringclasses 1k
values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
andrewauclair/ModernDocking | https://github.com/andrewauclair/ModernDocking/blob/cb69cbcf9bad74d73b1e32de5f813e86872d20e6/docking-single-app/src/io/github/andrewauclair/moderndocking/app/LayoutPersistence.java | docking-single-app/src/io/github/andrewauclair/moderndocking/app/LayoutPersistence.java | /*
Copyright (c) 2023 Andrew Auclair
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package io.github.andrewauclair.moderndocking.app;
import io.github.andrewauclair.moderndocking.api.LayoutPersistenceAPI;
import io.github.andrewauclair.moderndocking.exception.DockingLayoutException;
import io.github.andrewauclair.moderndocking.layouts.ApplicationLayout;
import io.github.andrewauclair.moderndocking.layouts.WindowLayout;
import java.io.File;
/**
* Persist and restore Application and Window layouts to/from files
*/
public class LayoutPersistence {
// Shared API instance bound to the single-app Docking instance. The empty
// anonymous-subclass body {} is presumably required because LayoutPersistenceAPI
// cannot be instantiated directly (abstract or protected ctor) — TODO confirm.
private static final LayoutPersistenceAPI instance = new LayoutPersistenceAPI(Docking.getSingleInstance()){};
/**
 * Not instantiable; this class exposes only static delegating methods.
 */
private LayoutPersistence() {
}
/**
* saves a docking layout to the given file
*
* @param file File to save the docking layout into
* @param layout The layout to save
* @throws DockingLayoutException Thrown if we failed to save the layout to the file
*/
public static void saveLayoutToFile(File file, ApplicationLayout layout) throws DockingLayoutException {
instance.saveLayoutToFile(file, layout);
}
/**
* Load an ApplicationLayout from the specified file
*
* @param file File to load the ApplicationLayout from
* @return ApplicationLayout loaded from the file
* @throws DockingLayoutException Thrown if we failed to read from the file or something went wrong with loading the layout
*/
public static ApplicationLayout loadApplicationLayoutFromFile(File file) throws DockingLayoutException {
return instance.loadApplicationLayoutFromFile(file);
}
/**
* Save a WindowLayout to the given file.
*
* @param file File to save the window layout into
* @param layout The WindowLayout to save
* @return result of the underlying LayoutPersistenceAPI call — presumably true on success; verify against the API
*/
public static boolean saveWindowLayoutToFile(File file, WindowLayout layout) {
return instance.saveWindowLayoutToFile(file, layout);
}
/**
* Load a WindowLayout from an XML file
*
* @param file File to load WindowLayout from
* @return The loaded WindowLayout
*/
public static WindowLayout loadWindowLayoutFromFile(File file) {
return instance.loadWindowLayoutFromFile(file);
}
}
| java | MIT | cb69cbcf9bad74d73b1e32de5f813e86872d20e6 | 2026-01-05T02:41:37.174237Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/test/java/com/stephan/tof/jmxmon/bean/JVMContextTest.java | src/test/java/com/stephan/tof/jmxmon/bean/JVMContextTest.java | package com.stephan.tof.jmxmon.bean;
import static org.assertj.core.api.Assertions.assertThat;
import java.io.File;
import java.io.IOException;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class JVMContextTest {
    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
    }

    @Before
    public void setUp() throws Exception {
    }

    /**
     * Round-trips a JVMContext through a JSON file and verifies that the GC
     * collection counts survive serialization while the never-set collection
     * times come back as their default (0).
     *
     * @throws IOException if reading or writing the JSON file fails
     */
    @Test
    public void testJsonTransfer() throws IOException {
        JVMContext context1 = new JVMContext();
        context1.getJvmData(1111).getGcData("1a").setCollectionCount(11);
        context1.getJvmData(1111).getGcData("1b").setCollectionCount(12);
        context1.getJvmData(2222).getGcData("2a").setCollectionCount(21);
        context1.getJvmData(2222).getGcData("2b").setCollectionCount(22);
        context1.getJvmData(2222).getGcData("2c").setCollectionCount(23);

        // BUGFIX: use a temp file instead of "test.jvmcontext.json" in the
        // working directory, which was left behind after every test run.
        File f = File.createTempFile("test.jvmcontext", ".json");
        f.deleteOnExit();
        try {
            JacksonUtil.writeBeanToFile(f, context1, true);
            System.out.println("json file path=" + f.getAbsolutePath());

            JVMContext context2 = JacksonUtil.readBeanFromFile(f, JVMContext.class);
            assertThat(context2.getJvmData(1111).getGcData("1b").getCollectionCount()).isEqualTo(12);
            assertThat(context2.getJvmData(2222).getGcData("2a").getCollectionCount()).isEqualTo(21);
            assertThat(context2.getJvmData(2222).getGcData("2c").getCollectionCount()).isEqualTo(23);
            assertThat(context2.getJvmData(2222).getGcData("2a").getCollectionTime()).isEqualTo(0);
            assertThat(context2.getJvmData(2222).getGcData("2b").getCollectionTime()).isEqualTo(0);
            assertThat(context2.getJvmData(2222).getGcData("2c").getCollectionTime()).isEqualTo(0);
        } finally {
            // best-effort cleanup; deleteOnExit covers the failure path
            f.delete();
        }
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/test/java/com/stephan/tof/jmxmon/bean/FalconPostDataTest.java | src/test/java/com/stephan/tof/jmxmon/bean/FalconPostDataTest.java | package com.stephan.tof.jmxmon.bean;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.tuple;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import com.stephan.tof.jmxmon.Constants.CounterType;
public class FalconPostDataTest {
    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
    }

    @Before
    public void setUp() throws Exception {
    }

    /**
     * Round-trips a list of FalconItem beans through a JSON file and verifies
     * the fields survive. Note: reading back as raw List.class yields items
     * whose numeric values are deserialized as double (123D, 156789.12D).
     *
     * @throws IOException if reading or writing the JSON file fails
     */
    @Test
    public void testJsonTransfer() throws IOException {
        FalconItem item1 = new FalconItem("m1", "ep1",
                System.currentTimeMillis() / 1000, 60, 123,
                CounterType.COUNTER.toString(), "tag1=a,tag2=b");
        FalconItem item2 = new FalconItem("m2", "ep2",
                System.currentTimeMillis() / 1000, 60, 156789.123456F,
                CounterType.GAUGE.toString(), "tag3=a,tag4=b");
        List<FalconItem> writeItems = new ArrayList<FalconItem>();
        writeItems.add(item1);
        writeItems.add(item2);

        // BUGFIX: use a temp file instead of "test.falconPostData.json" in the
        // working directory, which was left behind after every test run.
        File f = File.createTempFile("test.falconPostData", ".json");
        f.deleteOnExit();
        try {
            JacksonUtil.writeBeanToFile(f, writeItems, true);
            System.out.println("json file path=" + f.getAbsolutePath());

            @SuppressWarnings("unchecked")
            List<FalconItem> readItems = JacksonUtil
                    .readBeanFromFile(f, List.class);
            assertThat(readItems).hasSize(2);
            assertThat(readItems).extracting(
                    "metric", "endpoint", "step", "value", "counterType", "tags").containsExactly(
                    tuple("m1", "ep1", 60, 123D, CounterType.COUNTER.toString(), "tag1=a,tag2=b"),
                    tuple("m2", "ep2", 60, 156789.12D, CounterType.GAUGE.toString(), "tag3=a,tag4=b"));
        } finally {
            // best-effort cleanup; deleteOnExit covers the failure path
            f.delete();
        }
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/JVMGCGenInfoExtractor.java | src/main/java/com/stephan/tof/jmxmon/JVMGCGenInfoExtractor.java | package com.stephan.tof.jmxmon;
import java.io.IOException;
import java.lang.management.GarbageCollectorMXBean;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import com.stephan.tof.jmxmon.Constants.CounterType;
import com.stephan.tof.jmxmon.JVMGCGenInfoExtractor.GCGenInfo;
import com.stephan.tof.jmxmon.bean.FalconItem;
import com.stephan.tof.jmxmon.bean.GCData;
import com.stephan.tof.jmxmon.bean.JVMContext;
import com.stephan.tof.jmxmon.jmxutil.ProxyClient;
public class JVMGCGenInfoExtractor extends JVMDataExtractor<Map<String, GCGenInfo>> {

    public JVMGCGenInfoExtractor(ProxyClient proxyClient, int jmxPort) throws IOException {
        super(proxyClient, jmxPort);
    }

    /**
     * Computes, for each garbage collector MXBean, the average collection time
     * and the number of collections inside the current reporting window, by
     * diffing the cumulative JMX counters against the values persisted from
     * the previous run.
     *
     * @return map of gcMXBean name -> GCGenInfo(avg GC time, GC count)
     * @throws Exception if the JMX calls fail
     */
    @Override
    public Map<String, GCGenInfo> call() throws Exception {
        Map<String, GCGenInfo> result = new HashMap<String, GCGenInfo>();
        JVMContext c = Config.I.getJvmContext();
        for (GarbageCollectorMXBean gcMXBean : getGcMXBeanList()) {
            long gcTotalTime = gcMXBean.getCollectionTime();
            long gcTotalCount = gcMXBean.getCollectionCount();
            GCData gcData = c.getJvmData(getJmxPort()).getGcData(gcMXBean.getName());
            long lastGCTotalTime = gcData.getCollectionTime();
            long lastGCTotalCount = gcData.getCollectionCount();
            long tmpGCTime = gcTotalTime - lastGCTotalTime;
            long gcCount = gcTotalCount - lastGCTotalCount;
            // First run (no persisted baseline) or counter went backwards
            // (e.g. the target JVM restarted): mark the window count invalid.
            if (lastGCTotalCount <= 0 || gcCount < 0) {
                gcCount = -1;
            }
            // BUGFIX: cast before dividing. "tmpGCTime / gcCount" was long
            // integer division, so the fractional milliseconds were silently
            // truncated before the assignment to double.
            double avgGCTime = gcCount > 0 ? (double) tmpGCTime / gcCount : 0;
            GCGenInfo gcGenInfo = new GCGenInfo(avgGCTime, gcCount);
            result.put(gcMXBean.getName(), gcGenInfo);
            logger.debug("mxbean=" + gcMXBean.getName() +
                    ", gcTotalTime=" + gcTotalTime + ", gcTotalCount=" + gcTotalCount +
                    ", lastGCTotalTime=" + lastGCTotalTime + ", lastGCTotalCount=" + lastGCTotalCount +
                    ", avgGCTime=" + avgGCTime + ", gcCount=" + gcCount);

            // persist current cumulative values as the baseline for next run
            gcData.setCollectionTime(gcTotalTime);
            gcData.setCollectionCount(gcTotalCount);
            gcData.setUnitTimeCollectionCount(gcCount);
        }
        return result;
    }

    /**
     * Wraps the per-collector results into Open-Falcon GAUGE items: one
     * "&lt;bean&gt;.gcAvgTime" and one "&lt;bean&gt;.gcCount" item per collector.
     *
     * @param jmxResultData output of {@link #call()}
     * @return the Open-Falcon items to post
     * @throws Exception never thrown here; declared by the base contract
     */
    @Override
    public List<FalconItem> build(Map<String, GCGenInfo> jmxResultData)
            throws Exception {
        List<FalconItem> items = new ArrayList<FalconItem>();
        for (String gcMXBeanName : jmxResultData.keySet()) {
            FalconItem avgTimeItem = new FalconItem();
            avgTimeItem.setCounterType(CounterType.GAUGE.toString());
            avgTimeItem.setEndpoint(Config.I.getHostname());
            avgTimeItem.setMetric(StringUtils.lowerCase(gcMXBeanName + Constants.metricSeparator + Constants.gcAvgTime));
            avgTimeItem.setStep(Constants.defaultStep);
            avgTimeItem.setTags(StringUtils.lowerCase("jmxport=" + getJmxPort()));
            avgTimeItem.setTimestamp(System.currentTimeMillis() / 1000);
            avgTimeItem.setValue(jmxResultData.get(gcMXBeanName).getGcAvgTime());
            items.add(avgTimeItem);

            FalconItem countItem = new FalconItem();
            countItem.setCounterType(CounterType.GAUGE.toString());
            countItem.setEndpoint(Config.I.getHostname());
            countItem.setMetric(StringUtils.lowerCase(gcMXBeanName + Constants.metricSeparator + Constants.gcCount));
            countItem.setStep(Constants.defaultStep);
            countItem.setTags(StringUtils.lowerCase("jmxport=" + getJmxPort()));
            countItem.setTimestamp(System.currentTimeMillis() / 1000);
            countItem.setValue(jmxResultData.get(gcMXBeanName).getGcCount());
            items.add(countItem);
        }
        return items;
    }

    /**
     * Immutable value holder: average GC time and GC count for one collector
     * inside the current reporting window.
     */
    class GCGenInfo {
        private final double gcAvgTime;
        private final long gcCount;

        public GCGenInfo(double gcAvgTime, long gcCount) {
            this.gcAvgTime = gcAvgTime;
            this.gcCount = gcCount;
        }

        /**
         * @return the gcAvgTime
         */
        public double getGcAvgTime() {
            return gcAvgTime;
        }

        /**
         * @return the gcCount
         */
        public long getGcCount() {
            return gcCount;
        }
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/JVMDataExtractor.java | src/main/java/com/stephan/tof/jmxmon/JVMDataExtractor.java | package com.stephan.tof.jmxmon;
import java.io.IOException;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.RuntimeMXBean;
import java.lang.management.ThreadMXBean;
import java.util.Collection;
import com.stephan.tof.jmxmon.jmxutil.MemoryPoolProxy;
import com.stephan.tof.jmxmon.jmxutil.ProxyClient;
/**
 * Base class for JVM metric extractors. Captures snapshots of the proxy
 * client's MXBean handles (GC, runtime, memory pools, threads) at
 * construction time so subclasses can read them in {@code call()}.
 */
public abstract class JVMDataExtractor<T> extends JMXCall<T> {
// MXBean handles fetched once from the proxy client in the constructor.
private final Collection<GarbageCollectorMXBean> gcMXBeanList;
private final RuntimeMXBean runtimeMXBean;
private final Collection<MemoryPoolProxy> memoryPoolList;
private final ThreadMXBean threadMXBean;
/**
 * @param proxyClient connected JMX proxy to read MXBeans from
 * @param jmxPort port of the monitored JVM (must be positive, enforced by JMXCall)
 * @throws IOException if fetching any MXBean over JMX fails
 */
public JVMDataExtractor(ProxyClient proxyClient, int jmxPort) throws IOException {
super(proxyClient, jmxPort);
gcMXBeanList = proxyClient.getGarbageCollectorMXBeans();
runtimeMXBean = proxyClient.getRuntimeMXBean();
memoryPoolList = proxyClient.getMemoryPoolProxies();
threadMXBean = proxyClient.getThreadMXBean();
}
/**
 * @return the gcMXBeanList
 */
public Collection<GarbageCollectorMXBean> getGcMXBeanList() {
return gcMXBeanList;
}
/**
 * @return the runtimeMXBean
 */
public RuntimeMXBean getRuntimeMXBean() {
return runtimeMXBean;
}
/**
 * @return the memoryPool
 */
public Collection<MemoryPoolProxy> getMemoryPoolList() {
return memoryPoolList;
}
/**
 * @return the threadMXBean
 */
public ThreadMXBean getThreadMXBean() {
return threadMXBean;
}
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/JMXMonitor.java | src/main/java/com/stephan/tof/jmxmon/JMXMonitor.java | package com.stephan.tof.jmxmon;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.stephan.tof.jmxmon.HttpClientUtils.HttpResult;
import com.stephan.tof.jmxmon.JVMGCGenInfoExtractor.GCGenInfo;
import com.stephan.tof.jmxmon.JVMMemoryUsedExtractor.MemoryUsedInfo;
import com.stephan.tof.jmxmon.JVMThreadExtractor.ThreadInfo;
import com.stephan.tof.jmxmon.bean.FalconItem;
import com.stephan.tof.jmxmon.bean.JacksonUtil;
import com.stephan.tof.jmxmon.jmxutil.ProxyClient;
public class JMXMonitor {
    private static Logger logger = LoggerFactory.getLogger(JMXMonitor.class);

    /**
     * Entry point. Expects exactly one argument: the path to the properties
     * config file. Initializes the global Config and schedules
     * {@link #runTask()} at a fixed rate of {@code Config.I.getStep()} seconds.
     *
     * @param args args[0] is the config file path
     */
    public static void main(String[] args) {
        if (args.length != 1) {
            throw new IllegalArgumentException("Usage: configFile");
        }

        try {
            Config.I.init(args[0]);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            // rethrow so an external launcher script can detect startup failure
            throw new IllegalStateException(e);
        }

        ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
        executor.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                runTask();
            }
        }, 0, Config.I.getStep(), TimeUnit.SECONDS);
    }

    /**
     * One collection cycle: for every configured JMX port, connects, runs all
     * extractors, disconnects; then posts the collected items to the
     * Open-Falcon agent and flushes the persisted JVM context. All errors are
     * caught and logged so the scheduled task keeps running.
     */
    private static void runTask() {
        try {
            List<FalconItem> items = new ArrayList<FalconItem>();

            for (int jmxPort : Config.I.getJmxPorts()) {
                // Collect JVM information over JMX
                ProxyClient proxyClient = null;
                try {
                    proxyClient = ProxyClient.getProxyClient(Config.I.getJmxHost(), jmxPort, null, null);
                    proxyClient.connect();

                    JMXCall<Map<String, GCGenInfo>> gcGenInfoExtractor = new JVMGCGenInfoExtractor(proxyClient, jmxPort);
                    Map<String, GCGenInfo> genInfoMap = gcGenInfoExtractor.call();
                    items.addAll(gcGenInfoExtractor.build(genInfoMap));

                    JMXCall<Double> gcThroughputExtractor = new JVMGCThroughputExtractor(proxyClient, jmxPort);
                    Double gcThroughput = gcThroughputExtractor.call();
                    items.addAll(gcThroughputExtractor.build(gcThroughput));

                    JMXCall<MemoryUsedInfo> memoryUsedExtractor = new JVMMemoryUsedExtractor(proxyClient, jmxPort);
                    MemoryUsedInfo memoryUsedInfo = memoryUsedExtractor.call();
                    items.addAll(memoryUsedExtractor.build(memoryUsedInfo));

                    JMXCall<ThreadInfo> threadExtractor = new JVMThreadExtractor(proxyClient, jmxPort);
                    ThreadInfo threadInfo = threadExtractor.call();
                    items.addAll(threadExtractor.build(threadInfo));
                } finally {
                    if (proxyClient != null) {
                        proxyClient.disconnect();
                    }
                }
            }

            // Send the items to the Open-Falcon agent
            String content = JacksonUtil.writeBeanToString(items, false);
            HttpResult postResult = HttpClientUtils.getInstance().post(Config.I.getAgentPostUrl(), content);
            logger.info("post status=" + postResult.getStatusCode() +
                    ", post url=" + Config.I.getAgentPostUrl() + ", content=" + content);
            if (postResult.getStatusCode() != HttpClientUtils.okStatusCode ||
                    postResult.getT() != null) {
                // BUGFIX: getT() can be null when only the status code is bad;
                // "throw postResult.getT()" then threw null, which surfaced as
                // an uninformative NullPointerException.
                if (postResult.getT() != null) {
                    throw postResult.getT();
                }
                throw new IllegalStateException(
                        "post to agent failed, status=" + postResult.getStatusCode());
            }

            // Write the context data back to file
            Config.I.flush();
        } catch (Throwable e) {
            logger.error(e.getMessage(), e);
        }
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/JVMMemoryUsedExtractor.java | src/main/java/com/stephan/tof/jmxmon/JVMMemoryUsedExtractor.java | package com.stephan.tof.jmxmon;
import java.io.IOException;
import java.lang.management.GarbageCollectorMXBean;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import com.stephan.tof.jmxmon.Constants.CounterType;
import com.stephan.tof.jmxmon.JVMMemoryUsedExtractor.MemoryUsedInfo;
import com.stephan.tof.jmxmon.bean.FalconItem;
import com.stephan.tof.jmxmon.bean.GCData;
import com.stephan.tof.jmxmon.bean.JVMContext;
import com.stephan.tof.jmxmon.jmxutil.MemoryPoolProxy;
import com.stephan.tof.jmxmon.jmxutil.ProxyClient;
public class JVMMemoryUsedExtractor extends JVMDataExtractor<MemoryUsedInfo> {

    public JVMMemoryUsedExtractor(ProxyClient proxyClient, int jmxPort)
            throws IOException {
        super(proxyClient, jmxPort);
    }

    /**
     * Reads old-gen usage from the memory pools and derives the new-gen
     * promotion metrics by diffing old-gen usage against the value persisted
     * from the previous reporting cycle.
     *
     * @return old-gen used bytes/ratio plus new-gen (avg) promotion for this window
     * @throws Exception if the underlying JMX reads fail
     */
    @Override
    public MemoryUsedInfo call() throws Exception {
        long oldGenUsed = 0;
        long maxOldGenMemory = 0;
        Collection<MemoryPoolProxy> memoryPoolList = getMemoryPoolList();
        for (MemoryPoolProxy memoryPool : memoryPoolList) {
            String poolName = memoryPool.getStat().getPoolName();
            // see: http://stackoverflow.com/questions/16082004/how-to-identify-tenured-space/16083569#16083569
            if (poolName.contains("Old Gen") || poolName.contains("Tenured Gen")) {
                oldGenUsed = memoryPool.getStat().getUsage().getUsed();
                maxOldGenMemory = memoryPool.getStat().getUsage().getMax();
                break;
            }
        }
        double oldGenUsedRatio = maxOldGenMemory > 0 ? oldGenUsed * 100d / maxOldGenMemory : 0;

        // see: http://stackoverflow.com/questions/32002001/how-to-get-minor-and-major-garbage-collection-count-in-jdk-7-and-jdk-8
        JVMContext c = Config.I.getJvmContext();
        GarbageCollectorMXBean[] gcMXBeanArray = getGcMXBeanList().toArray(new GarbageCollectorMXBean[0]);

        // Defaults when promotion cannot be computed: -1 marks an invalid delta.
        long newGenPromotion = -1;
        long newGenAvgPromotion = 0;

        // BUGFIX: the old code indexed gcMXBeanArray[1]/[0] unconditionally and
        // threw ArrayIndexOutOfBoundsException on JVMs exposing fewer than two
        // collector beans. The index assumption ([0]=minor, [1]=major) is kept
        // for compatibility but only applied when both beans exist.
        if (gcMXBeanArray.length >= 2) {
            // Within one reporting cycle, old-gen growth ~= new-gen promotion.
            GarbageCollectorMXBean majorGCMXBean = gcMXBeanArray[1];
            GCData majorGcData = c.getJvmData(getJmxPort()).getGcData(majorGCMXBean.getName());
            long lastOldGenMemoryUsed = majorGcData.getMemoryUsed();
            newGenPromotion = oldGenUsed - lastOldGenMemoryUsed;
            if (lastOldGenMemoryUsed <= 0 || newGenPromotion < 0) {
                newGenPromotion = -1;
            }

            // Young-GC count within the reporting cycle (stored by the GC extractor).
            GarbageCollectorMXBean minorGCMXBean = gcMXBeanArray[0];
            GCData minorGcData = c.getJvmData(getJmxPort()).getGcData(minorGCMXBean.getName());
            long gcCount = minorGcData.getUnitTimeCollectionCount();
            if (gcCount > 0 && newGenPromotion > 0) {
                newGenAvgPromotion = newGenPromotion / gcCount;
            }

            // persist current old-gen usage as the baseline for the next cycle
            majorGcData.setMemoryUsed(oldGenUsed);
        }

        return new MemoryUsedInfo(oldGenUsed, oldGenUsedRatio, newGenPromotion, newGenAvgPromotion);
    }

    /**
     * Creates a GAUGE FalconItem pre-filled with the common endpoint / step /
     * tag / timestamp fields; the caller only sets metric value.
     */
    private FalconItem newGaugeItem(String metric) {
        FalconItem item = new FalconItem();
        item.setCounterType(CounterType.GAUGE.toString());
        item.setEndpoint(Config.I.getHostname());
        item.setMetric(StringUtils.lowerCase(metric));
        item.setStep(Constants.defaultStep);
        item.setTags(StringUtils.lowerCase("jmxport=" + getJmxPort()));
        item.setTimestamp(System.currentTimeMillis() / 1000);
        return item;
    }

    /**
     * Wraps the memory metrics into four Open-Falcon GAUGE items.
     *
     * @param jmxResultData output of {@link #call()}
     * @return the Open-Falcon items to post
     * @throws Exception never thrown here; declared by the base contract
     */
    @Override
    public List<FalconItem> build(MemoryUsedInfo jmxResultData) throws Exception {
        List<FalconItem> items = new ArrayList<FalconItem>();

        FalconItem oldGenUsedItem = newGaugeItem(Constants.oldGenMemUsed);
        oldGenUsedItem.setValue(jmxResultData.getOldGenUsed());
        items.add(oldGenUsedItem);

        FalconItem oldGenUsedRatioItem = newGaugeItem(Constants.oldGenMemRatio);
        oldGenUsedRatioItem.setValue(jmxResultData.getOldGenUsedRatio());
        items.add(oldGenUsedRatioItem);

        FalconItem newGenPromotionItem = newGaugeItem(Constants.newGenPromotion);
        newGenPromotionItem.setValue(jmxResultData.getNewGenPromotion());
        items.add(newGenPromotionItem);

        FalconItem newGenAvgPromotionItem = newGaugeItem(Constants.newGenAvgPromotion);
        newGenAvgPromotionItem.setValue(jmxResultData.getNewGenAvgPromotion());
        items.add(newGenAvgPromotionItem);

        return items;
    }

    /**
     * Immutable value holder for the old-gen / promotion metrics of one cycle.
     */
    class MemoryUsedInfo {
        private final double oldGenUsedRatio;
        private final long oldGenUsed;
        private final long newGenPromotion;
        private final long newGenAvgPromotion;

        public MemoryUsedInfo(long oldGenUsed, double oldGenUsedRatio, long newGenPromotion, long newGenAvgPromotion) {
            this.oldGenUsed = oldGenUsed;
            this.oldGenUsedRatio = oldGenUsedRatio;
            this.newGenPromotion = newGenPromotion;
            this.newGenAvgPromotion = newGenAvgPromotion;
        }

        /**
         * @return the oldGenUsedRatio
         */
        public double getOldGenUsedRatio() {
            return oldGenUsedRatio;
        }

        /**
         * @return the oldGenUsed
         */
        public long getOldGenUsed() {
            return oldGenUsed;
        }

        /**
         * @return the newGenPromotion
         */
        public long getNewGenPromotion() {
            return newGenPromotion;
        }

        /**
         * @return the newGenAvgPromotion
         */
        public long getNewGenAvgPromotion() {
            return newGenAvgPromotion;
        }
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/Config.java | src/main/java/com/stephan/tof/jmxmon/Config.java | package com.stephan.tof.jmxmon;
import java.io.File;
import java.io.IOException;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.stephan.tof.jmxmon.bean.JVMContext;
import com.stephan.tof.jmxmon.bean.JacksonUtil;
/**
 * Global configuration singleton. Loads the properties file, the persisted
 * JVM context file, and exposes read-only accessors for the rest of the app.
 */
public class Config {
    public static final Config I = new Config();

    private Logger logger = LoggerFactory.getLogger(getClass());

    private String workDir;
    // JVM data file (last total GC time/count etc.), kept under workDir
    private File jvmContextFile;
    private JVMContext jvmContext = new JVMContext();

    private String hostname;
    private String agentPostUrl;
    private int step;
    private String jmxHost;
    private int[] jmxPorts;

    private Config(){}

    /**
     * Loads configuration from the given properties file and restores the
     * persisted JVM context if present.
     *
     * @param configPath path to the properties configuration file
     * @throws ConfigurationException if the properties file cannot be read or a required key is missing
     * @throws IOException if reading the JVM context file fails
     * @throws IllegalArgumentException if workDir is not an existing directory
     */
    public void init(String configPath) throws ConfigurationException, IOException {
        logger.info("init config");
        PropertiesConfiguration config = new PropertiesConfiguration(configPath);
        config.setThrowExceptionOnMissing(true);

        this.workDir = config.getString("workDir");
        if (!new File(workDir).isDirectory()) {
            throw new IllegalArgumentException("workDir is not a directory: " + workDir);
        }

        this.hostname = config.getString("hostname", Utils.getHostNameForLinux());

        this.jvmContextFile = new File(workDir, "jmxmon.jvm.context.json");
        if (jvmContextFile.exists() && jvmContextFile.isFile() &&
                jvmContextFile.length() > 0) {
            logger.info(jvmContextFile.getAbsolutePath() + " is exist, start loading...");
            this.jvmContext = JacksonUtil.readBeanFromFile(jvmContextFile, JVMContext.class);
        } else {
            logger.info(jvmContextFile.getAbsolutePath() + " is not exist");
        }

        this.agentPostUrl = config.getString("agent.posturl");
        this.step = config.getInt("step", Constants.defaultStep);

        // jmxHost defaults to localhost unless overridden with -Ddebug.jmx.host.
        // Remote collection is discouraged in production: deploy one agent per
        // machine so the agents scale horizontally.
        this.jmxHost = System.getProperty("debug.jmx.host");
        if (this.jmxHost == null) {
            this.jmxHost = "localhost";
        }

        String[] jmxPortArray = config.getStringArray("jmx.ports");
        jmxPorts = new int[jmxPortArray.length];
        for (int i = 0; i < jmxPortArray.length; i++) {
            jmxPorts[i] = Integer.parseInt(jmxPortArray[i]);
        }

        logger.info("init config ok");
    }

    /**
     * Persists the in-memory JVM context back to its data file.
     *
     * @throws IOException if writing the file fails
     */
    public void flush() throws IOException {
        JacksonUtil.writeBeanToFile(jvmContextFile, jvmContext, true);
    }

    /**
     * @return the workDir
     */
    public String getWorkDir() {
        return workDir;
    }

    /**
     * @return the hostname
     */
    public String getHostname() {
        return hostname;
    }

    /**
     * @return the agentPostUrl
     */
    public String getAgentPostUrl() {
        return agentPostUrl;
    }

    /**
     * @return the step
     */
    public int getStep() {
        return step;
    }

    /**
     * @return the jmxHost
     */
    public String getJmxHost() {
        return jmxHost;
    }

    /**
     * @return the jmxPorts
     */
    public int[] getJmxPorts() {
        return jmxPorts;
    }

    /**
     * @return the jvmContext
     */
    public JVMContext getJvmContext() {
        return jvmContext;
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/Utils.java | src/main/java/com/stephan/tof/jmxmon/Utils.java | package com.stephan.tof.jmxmon;
import java.net.InetAddress;
import java.net.UnknownHostException;
public class Utils {

    /** Utility class; not instantiable. */
    private Utils(){}

    /**
     * Best-effort lookup of the local host name.
     *
     * <p>Falls back to parsing the {@link UnknownHostException} message, which
     * has the form {@code "hostname: hostname"}, and finally to the literal
     * {@code "UnknownHost"} if nothing usable is available. Never returns null.
     *
     * @return the local host name, or "UnknownHost" when it cannot be determined
     */
    public static String getHostNameForLinux() {
        try {
            return InetAddress.getLocalHost().getHostName();
        } catch (UnknownHostException uhe) {
            String message = uhe.getMessage();
            if (message == null) {
                return "UnknownHost";
            }
            int separator = message.indexOf(':');
            return separator > 0 ? message.substring(0, separator) : "UnknownHost";
        }
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/JMXCall.java | src/main/java/com/stephan/tof/jmxmon/JMXCall.java | package com.stephan.tof.jmxmon;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.stephan.tof.jmxmon.bean.FalconItem;
import com.stephan.tof.jmxmon.jmxutil.ProxyClient;
/**
 * Base class for a single JMX-backed metric extraction: {@link #call()}
 * fetches raw data over JMX and {@link #build(Object)} converts it into
 * Open-Falcon items.
 */
public abstract class JMXCall<T> {
protected final Logger logger = LoggerFactory.getLogger(getClass());
private final ProxyClient proxyClient;
private final int jmxPort;
/**
 * @param proxyClient JMX proxy for the target JVM
 * @param jmxPort port of the monitored JVM; must be positive
 * @throws IllegalStateException if jmxPort is not positive
 */
public JMXCall(final ProxyClient proxyClient, int jmxPort) {
this.proxyClient = proxyClient;
// rejects all non-positive ports; the message text only mentions 0
if (jmxPort <= 0) {
throw new IllegalStateException("jmxPort is 0, client=" + proxyClient.getUrl());
}
this.jmxPort = jmxPort;
}
/**
 * Calls the JMX interface and fetches the raw data.
 *
 * @return the extracted raw data
 * @throws Exception if the JMX call fails
 */
public abstract T call() throws Exception;
/**
 * Assembles the data fetched from JMX into Open-Falcon items.
 *
 * @param jmxResultData the data returned by {@link #call()}
 * @return the Open-Falcon items to post
 * @throws Exception if building the items fails
 */
public abstract List<FalconItem> build(T jmxResultData) throws Exception;
/**
 * @return the proxyClient
 */
public ProxyClient getProxyClient() {
return proxyClient;
}
/**
 * @return the jmxPort
 */
public int getJmxPort() {
return jmxPort;
}
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/JVMThreadExtractor.java | src/main/java/com/stephan/tof/jmxmon/JVMThreadExtractor.java | package com.stephan.tof.jmxmon;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import com.stephan.tof.jmxmon.Constants.CounterType;
import com.stephan.tof.jmxmon.JVMThreadExtractor.ThreadInfo;
import com.stephan.tof.jmxmon.bean.FalconItem;
import com.stephan.tof.jmxmon.jmxutil.ProxyClient;
public class JVMThreadExtractor extends JVMDataExtractor<ThreadInfo> {

    public JVMThreadExtractor(ProxyClient proxyClient, int jmxPort)
            throws IOException {
        super(proxyClient, jmxPort);
    }

    /**
     * Reads the current and peak live thread counts from the ThreadMXBean.
     *
     * @return current and peak thread counts
     * @throws Exception if the underlying JMX reads fail
     */
    @Override
    public ThreadInfo call() throws Exception {
        int threadNum = getThreadMXBean().getThreadCount();
        int peakThreadNum = getThreadMXBean().getPeakThreadCount();
        return new ThreadInfo(threadNum, peakThreadNum);
    }

    /**
     * Creates a GAUGE FalconItem pre-filled with the common endpoint / step /
     * tag / timestamp fields; the caller only sets the metric value.
     */
    private FalconItem newGaugeItem(String metric) {
        FalconItem item = new FalconItem();
        item.setCounterType(CounterType.GAUGE.toString());
        item.setEndpoint(Config.I.getHostname());
        item.setMetric(StringUtils.lowerCase(metric));
        item.setStep(Constants.defaultStep);
        item.setTags(StringUtils.lowerCase("jmxport=" + getJmxPort()));
        item.setTimestamp(System.currentTimeMillis() / 1000);
        return item;
    }

    /**
     * Wraps the thread counts into two Open-Falcon GAUGE items.
     * Refactored: the duplicated item-construction boilerplate is now in
     * {@link #newGaugeItem(String)}.
     *
     * @param jmxResultData output of {@link #call()}
     * @return the Open-Falcon items to post
     * @throws Exception never thrown here; declared by the base contract
     */
    @Override
    public List<FalconItem> build(ThreadInfo jmxResultData) throws Exception {
        List<FalconItem> items = new ArrayList<FalconItem>();

        FalconItem threadNumItem = newGaugeItem(Constants.threadActiveCount);
        threadNumItem.setValue(jmxResultData.getThreadNum());
        items.add(threadNumItem);

        FalconItem peakThreadNumItem = newGaugeItem(Constants.threadPeakCount);
        peakThreadNumItem.setValue(jmxResultData.getPeakThreadNum());
        items.add(peakThreadNumItem);

        return items;
    }

    /**
     * Immutable value holder for the current and peak thread counts.
     */
    class ThreadInfo {
        private final int threadNum;
        private final int peakThreadNum;

        public ThreadInfo(int threadNum, int peakThreadNum) {
            this.threadNum = threadNum;
            this.peakThreadNum = peakThreadNum;
        }

        /**
         * @return the threadNum
         */
        public int getThreadNum() {
            return threadNum;
        }

        /**
         * @return the peakThreadNum
         */
        public int getPeakThreadNum() {
            return peakThreadNum;
        }
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/JVMGCThroughputExtractor.java | src/main/java/com/stephan/tof/jmxmon/JVMGCThroughputExtractor.java | package com.stephan.tof.jmxmon;
import java.io.IOException;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.RuntimeMXBean;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import com.stephan.tof.jmxmon.Constants.CounterType;
import com.stephan.tof.jmxmon.bean.FalconItem;
import com.stephan.tof.jmxmon.jmxutil.ProxyClient;
public class JVMGCThroughputExtractor extends JVMDataExtractor<Double> {

    public JVMGCThroughputExtractor(ProxyClient proxyClient, int jmxPort)
            throws IOException {
        super(proxyClient, jmxPort);
    }

    /**
     * Computes GC throughput since JVM start: the percentage of uptime NOT
     * spent in garbage collection, i.e. (uptime - totalGCTime) * 100 / uptime.
     *
     * @return throughput percentage in [0, 100] (0 if uptime is not yet positive)
     * @throws Exception if the underlying JMX reads fail
     */
    @Override
    public Double call() throws Exception {
        RuntimeMXBean rbean = getRuntimeMXBean();
        long upTime = rbean.getUptime();

        long totalGCTime = 0;
        Collection<GarbageCollectorMXBean> list = getGcMXBeanList();
        for (GarbageCollectorMXBean bean : list) {
            totalGCTime += bean.getCollectionTime();
        }

        // BUGFIX: guard against upTime == 0 (division by zero -> NaN/Infinity
        // would otherwise be posted as a metric value).
        if (upTime <= 0) {
            return 0d;
        }
        return (double) (upTime - totalGCTime) * 100 / (double) upTime;
    }

    /**
     * Wraps the throughput value into a single Open-Falcon GAUGE item.
     *
     * @param jmxResultData output of {@link #call()}
     * @return the Open-Falcon items to post
     * @throws Exception never thrown here; declared by the base contract
     */
    @Override
    public List<FalconItem> build(Double jmxResultData) throws Exception {
        List<FalconItem> items = new ArrayList<FalconItem>();

        FalconItem item = new FalconItem();
        item.setCounterType(CounterType.GAUGE.toString());
        item.setEndpoint(Config.I.getHostname());
        item.setMetric(StringUtils.lowerCase(Constants.gcThroughput));
        item.setStep(Constants.defaultStep);
        item.setTags(StringUtils.lowerCase("jmxport=" + getJmxPort()));
        item.setTimestamp(System.currentTimeMillis() / 1000);
        item.setValue(jmxResultData);
        items.add(item);

        return items;
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/HttpClientUtils.java | src/main/java/com/stephan/tof/jmxmon/HttpClientUtils.java | package com.stephan.tof.jmxmon;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Simple HTTP POST client backed by {@link HttpURLConnection}, retrying a
 * fixed number of times. The returned {@link HttpResult} carries either the
 * response body with {@link #okStatusCode}, or {@link #errorStatusCode} plus
 * the last exception when every attempt failed.
 *
 * <p>Note: the status code is synthetic (200/900), not the server's actual
 * HTTP response code.
 *
 * @since 2015-08-17 <br>
 * @author stevenDing <br>
 */
public class HttpClientUtils {

    private static final int readTimeout = 20000; // ms
    private static final int connTimeout = 5000;  // ms

    private Logger logger = LoggerFactory.getLogger(this.getClass());

    private static final HttpClientUtils httpClient = new HttpClientUtils();

    private static final int defaultRetryTimes = 3;

    public static final int errorStatusCode = 900;
    public static final int okStatusCode = 200;
    public static final String defaultContentType = "application/json; charset=utf-8";
    public static final String urlencodedContentType = "application/x-www-form-urlencoded";

    /** Returns the shared instance. */
    public static HttpClientUtils getInstance() {
        return httpClient;
    }

    /**
     * POSTs {@code content} (written as UTF-8) to {@code url}, retrying up to
     * {@link #defaultRetryTimes} times until the response is read successfully.
     *
     * @param url         target URL
     * @param content     request body, written as UTF-8
     * @param contentType Content-Type header; {@link #defaultContentType} is
     *                    used when null or empty
     * @return result holding the body and a synthetic status code
     */
    public HttpResult post(String url, String content, String contentType) {
        HttpResult result = new HttpResult();

        int attempt = 0;
        while (attempt < defaultRetryTimes
                && result.getStatusCode() != okStatusCode) {
            attempt++;

            // Fresh buffer per attempt so a partially-read failed attempt
            // cannot leak stale bytes into the next attempt's content.
            // (Previously a single buffer was shared across all retries.)
            StringBuilder body = new StringBuilder();
            BufferedReader reader = null;
            HttpURLConnection conn = null;
            try {
                URL postUrl = new URL(url);
                conn = (HttpURLConnection) postUrl.openConnection();
                conn.setConnectTimeout(connTimeout);
                conn.setReadTimeout(readTimeout);
                // POST body goes in the request entity, so output must be on.
                conn.setDoOutput(true);
                conn.setDoInput(true);
                conn.setRequestMethod("POST");
                // POST responses must not be served from cache.
                conn.setUseCaches(false);
                // setInstanceFollowRedirects only affects this connection.
                conn.setInstanceFollowRedirects(true);
                if (contentType != null && contentType.length() > 0) {
                    conn.setRequestProperty("Content-Type", contentType);
                } else {
                    conn.setRequestProperty("Content-Type", defaultContentType);
                }

                // getOutputStream() implicitly connects, so every header and
                // option above must be configured before this point.
                conn.connect();
                DataOutputStream out = new DataOutputStream(conn.getOutputStream());
                out.write(content.getBytes("UTF-8"));
                out.flush();
                out.close(); // flush and close

                reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
                String line;
                while ((line = reader.readLine()) != null) {
                    body.append(line);
                }
                result.setContent(body.toString());
                result.setStatusCode(okStatusCode);
            } catch (Exception e) {
                // MalformedURLException / IOException / anything else: record
                // the failure and let the loop condition decide on a retry.
                logger.error(e.getMessage(), e);
                result.setStatusCode(errorStatusCode);
                result.setT(e);
            } finally {
                if (reader != null) {
                    try {
                        reader.close();
                    } catch (IOException e) {
                        logger.error(e.getMessage(), e);
                    }
                }
                if (conn != null) {
                    conn.disconnect();
                }
            }
        }
        return result;
    }

    /** POST with the default JSON content type. */
    public HttpResult post(String url, String content) {
        return post(url, content, defaultContentType);
    }

    /** Mutable result holder: body, synthetic status code, last error. */
    public class HttpResult {
        private String content;
        // Pessimistic default: only flipped to okStatusCode on a full read.
        private int statusCode = errorStatusCode;
        private Throwable t;

        public Throwable getT() {
            return t;
        }

        public void setT(Throwable t) {
            this.t = t;
        }

        public String getContent() {
            return content;
        }

        public void setContent(String content) {
            this.content = content;
        }

        public int getStatusCode() {
            return statusCode;
        }

        public void setStatusCode(int statusCode) {
            this.statusCode = statusCode;
        }

        @Override
        public String toString() {
            return "statusCode :" + statusCode + " content :" + content;
        }
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/Constants.java | src/main/java/com/stephan/tof/jmxmon/Constants.java | package com.stephan.tof.jmxmon;
/**
 * Metric names and shared constants used when reporting JMX-sampled JVM
 * data to open-falcon.
 */
public class Constants {

    /** Constant holder; not meant to be instantiated. */
    private Constants() {
    }

    /** open-falcon counter semantics: cumulative counter vs. sampled gauge. */
    public static enum CounterType { COUNTER, GAUGE }

    public static final String gcAvgTime = "gc.avg.time";
    public static final String gcCount = "gc.count";
    public static final String gcThroughput = "gc.throughput";
    public static final String newGenPromotion = "new.gen.promotion";
    public static final String newGenAvgPromotion = "new.gen.avg.promotion";
    public static final String oldGenMemUsed = "old.gen.mem.used";
    public static final String oldGenMemRatio = "old.gen.mem.ratio";
    public static final String threadActiveCount = "thread.active.count";
    public static final String threadPeakCount = "thread.peak.count";

    public static final String tagSeparator = ",";
    public static final String metricSeparator = ".";

    /** Reporting step interval, in seconds. */
    public static final int defaultStep = 60;
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/bean/JacksonUtil.java | src/main/java/com/stephan/tof/jmxmon/bean/JacksonUtil.java | /**
*
*/
package com.stephan.tof.jmxmon.bean;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Thin static facade over a shared Jackson {@link ObjectMapper} for reading
 * and writing JSON beans from files, streams and strings.
 *
 * <p>The shared mapper can be swapped via {@link #setObjectMapper}; do not
 * reconfigure it while other threads are using it.
 *
 * @author Stephan gao
 * @since April 20, 2015
 */
public class JacksonUtil {

    private static ObjectMapper om = new ObjectMapper();

    /** Static utility class; not instantiable. */
    private JacksonUtil() {
    }

    /** Replaces the shared mapper (e.g. to relax unknown-property handling). */
    public static void setObjectMapper(ObjectMapper om) {
        JacksonUtil.om = om;
    }

    /** Deserializes a JSON file into an instance of {@code clazz}. */
    public static <T> T readBeanFromFile(File srcFile, Class<T> clazz)
            throws IOException {
        return om.readValue(srcFile, clazz);
    }

    /** Deserializes a JSON string into an instance of {@code clazz}. */
    public static <T> T readBeanFromString(String jsonStr, Class<T> clazz)
            throws IOException {
        return om.readValue(jsonStr, clazz);
    }

    /** Deserializes JSON read from {@code input} into an instance of {@code clazz}. */
    public static <T> T readBeanFromStream(InputStream input, Class<T> clazz) throws JsonParseException, JsonMappingException, IOException {
        return om.readValue(input, clazz);
    }

    /**
     * Serializes {@code t} to {@code dstFile} as JSON.
     *
     * @param withPrettyPrinter true to indent the output
     */
    public static <T> void writeBeanToFile(File dstFile, T t,
            boolean withPrettyPrinter) throws IOException {
        if (withPrettyPrinter) {
            om.writerWithDefaultPrettyPrinter().writeValue(dstFile, t);
        } else {
            om.writeValue(dstFile, t);
        }
    }

    /**
     * Serializes {@code t} to a JSON string.
     *
     * @param withPrettyPrinter true to indent the output
     */
    public static <T> String writeBeanToString(T t, boolean withPrettyPrinter)
            throws JsonProcessingException {
        if (withPrettyPrinter) {
            return om.writerWithDefaultPrettyPrinter().writeValueAsString(t);
        } else {
            return om.writeValueAsString(t);
        }
    }

    /** Parses a JSON object string into a generic map. */
    @SuppressWarnings("unchecked")
    public static Map<String, Object> readMapFromString(String jsonStr)
            throws JsonParseException, JsonMappingException, IOException {
        // Raw Map.class entails an unchecked conversion; safe because Jackson
        // maps JSON object fields to String keys.
        return om.readValue(jsonStr, Map.class);
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/bean/JVMContext.java | src/main/java/com/stephan/tof/jmxmon/bean/JVMContext.java | package com.stephan.tof.jmxmon.bean;
import java.util.LinkedHashMap;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Cache of JVM data sampled over JMX, keyed by JMX port. The whole context
 * is serialized to a file when the program exits and reloaded on the next
 * start so cached values survive restarts.
 *
 * @author Stephan gao
 * @since April 26, 2016
 */
public class JVMContext {

    /** jmx port -> cached data for the JVM listening on that port */
    @JsonProperty
    private Map<Integer, JVMData> jvmDatas = new LinkedHashMap<Integer, JVMData>();

    /**
     * Returns the cached data for the given port, creating and registering
     * an empty entry on first access.
     */
    public JVMData getJvmData(Integer jmxPort) {
        if (!jvmDatas.containsKey(jmxPort)) {
            jvmDatas.put(jmxPort, new JVMData());
        }
        return jvmDatas.get(jmxPort);
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/bean/FalconItem.java | src/main/java/com/stephan/tof/jmxmon/bean/FalconItem.java | package com.stephan.tof.jmxmon.bean;
import org.apache.commons.lang.builder.ToStringBuilder;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
/**
 * One open-falcon push item, e.g.:
 * { "metric": "metric.demo", "endpoint": "qd-open-falcon-judge01.hd",
 *   "timestamp": $ts, "step": 60, "value": 9, "counterType": "GAUGE",
 *   "tags": "project=falcon,module=judge" }
 *
 * @author Stephan gao
 * @since April 28, 2016
 */
public class FalconItem {

    @JsonProperty
    private String metric;

    @JsonProperty
    private String endpoint;

    @JsonProperty
    private long timestamp;

    @JsonProperty
    private int step;

    /** Rendered with two decimal places via {@code CustomDoubleSerialize}. */
    @JsonProperty
    @JsonSerialize(using = CustomDoubleSerialize.class)
    private double value;

    @JsonProperty
    private String counterType;

    @JsonProperty
    private String tags;

    /** No-arg constructor for frameworks and builder-style population. */
    public FalconItem() {
    }

    /** All-field constructor. */
    public FalconItem(String metric, String endpoint, long timestamp, int step,
            double value, String counterType, String tags) {
        this.metric = metric;
        this.endpoint = endpoint;
        this.timestamp = timestamp;
        this.step = step;
        this.value = value;
        this.counterType = counterType;
        this.tags = tags;
    }

    @Override
    public String toString() {
        return ToStringBuilder.reflectionToString(this);
    }

    public String getMetric() {
        return metric;
    }

    public void setMetric(String metric) {
        this.metric = metric;
    }

    public String getEndpoint() {
        return endpoint;
    }

    public void setEndpoint(String endpoint) {
        this.endpoint = endpoint;
    }

    public long getTimestamp() {
        return timestamp;
    }

    public void setTimestamp(long timestamp) {
        this.timestamp = timestamp;
    }

    public int getStep() {
        return step;
    }

    public void setStep(int step) {
        this.step = step;
    }

    public double getValue() {
        return value;
    }

    public void setValue(double value) {
        this.value = value;
    }

    public String getCounterType() {
        return counterType;
    }

    public void setCounterType(String counterType) {
        this.counterType = counterType;
    }

    public String getTags() {
        return tags;
    }

    public void setTags(String tags) {
        this.tags = tags;
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/bean/JVMData.java | src/main/java/com/stephan/tof/jmxmon/bean/JVMData.java | package com.stephan.tof.jmxmon.bean;
import java.util.LinkedHashMap;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonProperty;
/** Per-JVM cache entry: GC data keyed by GarbageCollectorMXBean name. */
public class JVMData {

    /** GC MXBean name -> cached data for that collector */
    @JsonProperty
    private Map<String, GCData> gcDatas = new LinkedHashMap<String, GCData>();

    /**
     * Returns the cached data for the named collector, creating and
     * registering an empty record on first access.
     */
    public GCData getGcData(String beanName) {
        if (!gcDatas.containsKey(beanName)) {
            gcDatas.put(beanName, new GCData());
        }
        return gcDatas.get(beanName);
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/bean/CustomDoubleSerialize.java | src/main/java/com/stephan/tof/jmxmon/bean/CustomDoubleSerialize.java | package com.stephan.tof.jmxmon.bean;
import java.io.IOException;
import java.text.DecimalFormat;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializerProvider;
/**
 * Jackson serializer that writes doubles rounded to two decimal places
 * (pattern "##.00"; DecimalFormat's default HALF_EVEN rounding).
 */
public class CustomDoubleSerialize extends JsonSerializer<Double> {

    @Override
    public void serialize(Double value, JsonGenerator jgen,
            SerializerProvider provider) throws IOException,
            JsonProcessingException {
        // DecimalFormat is not thread-safe and Jackson shares serializer
        // instances across threads, so build the format per call instead of
        // keeping it in a shared instance field (previous code did the latter).
        DecimalFormat df = new DecimalFormat("##.00");
        jgen.writeNumber(Double.parseDouble(df.format(value)));
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/bean/GCData.java | src/main/java/com/stephan/tof/jmxmon/bean/GCData.java | package com.stephan.tof.jmxmon.bean;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
 * Cached statistics for a single garbage collector, persisted between
 * monitor runs (plain JSON bean).
 */
public class GCData {

    @JsonProperty
    private long collectionTime;

    @JsonProperty
    private long collectionCount;

    @JsonProperty
    private long memoryUsed;

    @JsonProperty
    private long unitTimeCollectionCount;

    public long getCollectionTime() {
        return collectionTime;
    }

    public void setCollectionTime(long collectionTime) {
        this.collectionTime = collectionTime;
    }

    public long getCollectionCount() {
        return collectionCount;
    }

    public void setCollectionCount(long collectionCount) {
        this.collectionCount = collectionCount;
    }

    public long getMemoryUsed() {
        return memoryUsed;
    }

    public void setMemoryUsed(long memoryUsed) {
        this.memoryUsed = memoryUsed;
    }

    public long getUnitTimeCollectionCount() {
        return unitTimeCollectionCount;
    }

    public void setUnitTimeCollectionCount(long unitTimeCollectionCount) {
        this.unitTimeCollectionCount = unitTimeCollectionCount;
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/jmxutil/MemoryPoolStat.java | src/main/java/com/stephan/tof/jmxmon/jmxutil/MemoryPoolStat.java | /*
* %W% %E%
*
* Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*/
package com.stephan.tof.jmxmon.jmxutil;
import java.lang.management.MemoryUsage;
public class MemoryPoolStat {
private String poolName;
private long usageThreshold;
private MemoryUsage usage;
private long lastGcId;
private long lastGcStartTime;
private long lastGcEndTime;
private long collectThreshold;
private MemoryUsage beforeGcUsage;
private MemoryUsage afterGcUsage;
MemoryPoolStat(String name,
long usageThreshold,
MemoryUsage usage,
long lastGcId,
long lastGcStartTime,
long lastGcEndTime,
long collectThreshold,
MemoryUsage beforeGcUsage,
MemoryUsage afterGcUsage) {
this.poolName = name;
this.usageThreshold = usageThreshold;
this.usage = usage;
this.lastGcId = lastGcId;
this.lastGcStartTime = lastGcStartTime;
this.lastGcEndTime = lastGcEndTime;
this.collectThreshold = collectThreshold;
this.beforeGcUsage = beforeGcUsage;
this.afterGcUsage = afterGcUsage;
}
/**
* Returns the memory pool name.
*/
public String getPoolName() {
return poolName;
}
/**
* Returns the current memory usage.
*/
public MemoryUsage getUsage() {
return usage;
}
/**
* Returns the current usage threshold.
* -1 if not supported.
*/
public long getUsageThreshold() {
return usageThreshold;
}
/**
* Returns the current collection usage threshold.
* -1 if not supported.
*/
public long getCollectionUsageThreshold() {
return collectThreshold;
}
/**
* Returns the Id of GC.
*/
public long getLastGcId() {
return lastGcId;
}
/**
* Returns the start time of the most recent GC on
* the memory pool for this statistics in milliseconds.
*
* Return 0 if no GC occurs.
*/
public long getLastGcStartTime() {
return lastGcStartTime;
}
/**
* Returns the end time of the most recent GC on
* the memory pool for this statistics in milliseconds.
*
* Return 0 if no GC occurs.
*/
public long getLastGcEndTime() {
return lastGcEndTime;
}
/**
* Returns the memory usage before the most recent GC started.
* null if no GC occurs.
*/
public MemoryUsage getBeforeGcUsage() {
return beforeGcUsage;
}
/**
* Returns the memory usage after the most recent GC finished.
* null if no GC occurs.
*/
public MemoryUsage getAfterGcUsage() {
return beforeGcUsage;
}
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/jmxutil/LocalVirtualMachine.java | src/main/java/com/stephan/tof/jmxmon/jmxutil/LocalVirtualMachine.java | /*
* %W% %E%
*
* Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*/
package com.stephan.tof.jmxmon.jmxutil;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import sun.jvmstat.monitor.HostIdentifier;
import sun.jvmstat.monitor.MonitorException;
import sun.jvmstat.monitor.MonitoredHost;
import sun.jvmstat.monitor.MonitoredVm;
import sun.jvmstat.monitor.MonitoredVmUtil;
import sun.jvmstat.monitor.VmIdentifier;
// Sun private
import sun.management.ConnectorAddressLink;
import com.sun.tools.attach.AgentInitializationException;
import com.sun.tools.attach.AgentLoadException;
import com.sun.tools.attach.AttachNotSupportedException;
// Sun specific
import com.sun.tools.attach.VirtualMachine;
import com.sun.tools.attach.VirtualMachineDescriptor;
/**
 * Descriptor for a JVM running on the local machine, discovered either via
 * jvmstat monitoring or via the attach API. Holds the pid, command line and
 * (when available) the JMX connector address, and can dynamically load the
 * management agent into an attachable VM.
 *
 * NOTE(review): depends on Sun/Oracle-internal APIs (sun.jvmstat.*,
 * sun.management.ConnectorAddressLink, com.sun.tools.attach.*); requires a
 * JDK runtime and may break on newer JVM versions — confirm against the
 * deployment JDK.
 */
public class LocalVirtualMachine {
    // JMX connector address string, or null while no management agent is
    // known to be running in the target VM.
    private String address;
    private String commandLine;
    private String displayName;
    // process id of the target VM
    private int vmid;
    private boolean isAttachSupported;

    public LocalVirtualMachine(int vmid, String commandLine, boolean canAttach, String connectorAddress) {
        this.vmid = vmid;
        this.commandLine = commandLine;
        this.address = connectorAddress;
        this.isAttachSupported = canAttach;
        this.displayName = getDisplayName(commandLine);
    }

    // Derives a short display name from the command line: for "foo.jar args"
    // keeps only the jar's file name plus the arguments; otherwise returns
    // the command line unchanged.
    private static String getDisplayName(String commandLine) {
        // trim the pathname of jar file if it's a jar
        String[] res = commandLine.split(" ", 2);
        if (res[0].endsWith(".jar")) {
            File jarfile = new File(res[0]);
            String displayName = jarfile.getName();
            if (res.length == 2) {
                displayName += " " + res[1];
            }
            return displayName;
        }
        return commandLine;
    }

    /** Returns the process id of this VM. */
    public int vmid() {
        return vmid;
    }

    /** A VM is manageable once a JMX connector address is known. */
    public boolean isManageable() {
        return (address != null);
    }

    /** Whether the attach API can be used on this VM. */
    public boolean isAttachable() {
        return isAttachSupported;
    }

    /**
     * Ensures the management agent is running in the target VM, loading it
     * via the attach API if it has not been started yet.
     *
     * @throws IOException if the VM is not attachable or the agent fails to start
     */
    public void startManagementAgent() throws IOException {
        if (address != null) {
            // already started
            return;
        }
        if (!isAttachable()) {
            throw new IOException("This virtual machine \"" + vmid +
                "\" does not support dynamic attach.");
        }
        loadManagementAgent();
        // fails to load or start the management agent
        if (address == null) {
            // should never reach here
            throw new IOException("Fails to find connector address");
        }
    }

    /** Returns the JMX connector address for this VM. */
    public String connectorAddress() {
        // return null if not available or no JMX agent
        return address;
    }

    /** Returns the short display name derived from the command line. */
    public String displayName() {
        return displayName;
    }

    public String toString() {
        return commandLine;
    }

    // This method returns the list of all virtual machines currently
    // running on the machine
    public static Map<Integer, LocalVirtualMachine> getAllVirtualMachines() {
        Map<Integer, LocalVirtualMachine> map =
            new HashMap<Integer, LocalVirtualMachine>();
        getMonitoredVMs(map);
        getAttachableVMs(map);
        return map;
    }

    // Discovers VMs via the jvmstat monitoring interface and adds them to
    // map, keyed by pid. Connector addresses come from the per-process
    // ConnectorAddressLink; failures for individual VMs are ignored so one
    // bad VM cannot hide the rest.
    private static void getMonitoredVMs(Map<Integer, LocalVirtualMachine> map) {
        MonitoredHost host;
        Set vms;
        try {
            host = MonitoredHost.getMonitoredHost(new HostIdentifier((String)null));
            vms = host.activeVms();
        } catch (java.net.URISyntaxException sx) {
            throw new InternalError(sx.getMessage());
        } catch (MonitorException mx) {
            throw new InternalError(mx.getMessage());
        }
        for (Object vmid: vms) {
            if (vmid instanceof Integer) {
                int pid = ((Integer) vmid).intValue();
                String name = vmid.toString(); // default to pid if name not available
                boolean attachable = false;
                String address = null;
                try {
                    MonitoredVm mvm = host.getMonitoredVm(new VmIdentifier(name));
                    // use the command line as the display name
                    name = MonitoredVmUtil.commandLine(mvm);
                    attachable = MonitoredVmUtil.isAttachable(mvm);
                    address = ConnectorAddressLink.importFrom(pid);
                    mvm.detach();
                } catch (Exception x) {
                    // ignore
                }
                map.put((Integer) vmid,
                        new LocalVirtualMachine(pid, name, attachable, address));
            }
        }
    }

    // Agent property under which a VM publishes its local JMX connector address.
    private static final String LOCAL_CONNECTOR_ADDRESS_PROP =
        "com.sun.management.jmxremote.localConnectorAddress";

    // Discovers VMs via the attach API that jvmstat did not already report,
    // attaching briefly to each to read its local connector address.
    private static void getAttachableVMs(Map<Integer, LocalVirtualMachine> map) {
        List<VirtualMachineDescriptor> vms = VirtualMachine.list();
        for (VirtualMachineDescriptor vmd : vms) {
            try {
                Integer vmid = Integer.valueOf(vmd.id());
                if (!map.containsKey(vmid)) {
                    boolean attachable = false;
                    String address = null;
                    try {
                        VirtualMachine vm = VirtualMachine.attach(vmd);
                        attachable = true;
                        Properties agentProps = vm.getAgentProperties();
                        address = (String) agentProps.get(LOCAL_CONNECTOR_ADDRESS_PROP);
                        vm.detach();
                    } catch (AttachNotSupportedException x) {
                        // not attachable
                    } catch (IOException x) {
                        // ignore
                    }
                    map.put(vmid, new LocalVirtualMachine(vmid.intValue(),
                                                          vmd.displayName(),
                                                          attachable,
                                                          address));
                }
            } catch (NumberFormatException e) {
                // do not support vmid different than pid
            }
        }
    }

    /**
     * Looks up a single local VM by pid. If it is not in the discovered list
     * (e.g. it runs under a different security context), falls back to
     * attaching directly by pid.
     *
     * @throws IllegalStateException if the fallback attach fails
     */
    public static LocalVirtualMachine getLocalVirtualMachine(int vmid) {
        Map<Integer, LocalVirtualMachine> map = getAllVirtualMachines();
        LocalVirtualMachine lvm = map.get(vmid);
        if (lvm == null) {
            // Check if the VM is attachable but not included in the list
            // if it's running with a different security context.
            // For example, Windows services running
            // local SYSTEM account are attachable if you have Adminstrator
            // privileges.
            boolean attachable = false;
            String address = null;
            String name = String.valueOf(vmid); // default display name to pid
            try {
                VirtualMachine vm = VirtualMachine.attach(name);
                attachable = true;
                Properties agentProps = vm.getAgentProperties();
                address = (String) agentProps.get(LOCAL_CONNECTOR_ADDRESS_PROP);
                vm.detach();
                lvm = new LocalVirtualMachine(vmid, name, attachable, address);
            } catch (AttachNotSupportedException x) {
                // not attachable
                throw new IllegalStateException(x);
            } catch (IOException x) {
                // ignore
                throw new IllegalStateException(x);
            }
        }
        return lvm;
    }

    // load the management agent into the target VM
    private void loadManagementAgent() throws IOException {
        VirtualMachine vm = null;
        String name = String.valueOf(vmid);
        try {
            vm = VirtualMachine.attach(name);
        } catch (AttachNotSupportedException x) {
            IOException ioe = new IOException(x.getMessage());
            ioe.initCause(x);
            throw ioe;
        }
        String home = vm.getSystemProperties().getProperty("java.home");
        // Normally in ${java.home}/jre/lib/management-agent.jar but might
        // be in ${java.home}/lib in build environments.
        String agent = home + File.separator + "jre" + File.separator +
            "lib" + File.separator + "management-agent.jar";
        File f = new File(agent);
        if (!f.exists()) {
            agent = home + File.separator + "lib" + File.separator +
                "management-agent.jar";
            f = new File(agent);
            if (!f.exists()) {
                throw new IOException("Management agent not found");
            }
        }
        agent = f.getCanonicalPath();
        try {
            vm.loadAgent(agent, "com.sun.management.jmxremote");
        } catch (AgentLoadException x) {
            IOException ioe = new IOException(x.getMessage());
            ioe.initCause(x);
            throw ioe;
        } catch (AgentInitializationException x) {
            IOException ioe = new IOException(x.getMessage());
            ioe.initCause(x);
            throw ioe;
        }
        // get the connector address
        Properties agentProps = vm.getAgentProperties();
        address = (String) agentProps.get(LOCAL_CONNECTOR_ADDRESS_PROP);
        vm.detach();
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/jmxutil/NamedThreadFactory.java | src/main/java/com/stephan/tof/jmxmon/jmxutil/NamedThreadFactory.java | package com.stephan.tof.jmxmon.jmxutil;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * {@link ThreadFactory} that names created threads
 * "&lt;prefix&gt;-&lt;pool#&gt;-thread-&lt;n&gt;" so thread dumps reveal
 * which pool a thread belongs to.
 *
 * @author <a href="mailto:bluedavy@gmail.com">bluedavy</a>
 */
public class NamedThreadFactory implements ThreadFactory {

    // Global counter so each factory instance gets a distinct pool number.
    static final AtomicInteger poolNumber = new AtomicInteger(1);

    // Per-factory counter for the thread suffix.
    final AtomicInteger threadNumber = new AtomicInteger(1);
    final ThreadGroup group;
    final String namePrefix;
    final boolean isDaemon;

    public NamedThreadFactory() {
        this("pool");
    }

    public NamedThreadFactory(String name) {
        this(name, false);
    }

    public NamedThreadFactory(String prefix, boolean daemon) {
        SecurityManager securityManager = System.getSecurityManager();
        group = (securityManager == null)
                ? Thread.currentThread().getThreadGroup()
                : securityManager.getThreadGroup();
        namePrefix = prefix + "-" + poolNumber.getAndIncrement() + "-thread-";
        isDaemon = daemon;
    }

    /** Creates a named, normal-priority thread with the configured daemon flag. */
    public Thread newThread(Runnable r) {
        Thread thread = new Thread(group, r,
                namePrefix + threadNumber.getAndIncrement(), 0);
        thread.setDaemon(isDaemon);
        if (thread.getPriority() != Thread.NORM_PRIORITY) {
            thread.setPriority(Thread.NORM_PRIORITY);
        }
        return thread;
    }
}
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/jmxutil/ProxyClient.java | src/main/java/com/stephan/tof/jmxmon/jmxutil/ProxyClient.java | /*
* %W% %E%
*
* Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*/
package com.stephan.tof.jmxmon.jmxutil;
import static java.lang.management.ManagementFactory.CLASS_LOADING_MXBEAN_NAME;
import static java.lang.management.ManagementFactory.COMPILATION_MXBEAN_NAME;
import static java.lang.management.ManagementFactory.GARBAGE_COLLECTOR_MXBEAN_DOMAIN_TYPE;
import static java.lang.management.ManagementFactory.MEMORY_MXBEAN_NAME;
import static java.lang.management.ManagementFactory.MEMORY_POOL_MXBEAN_DOMAIN_TYPE;
import static java.lang.management.ManagementFactory.OPERATING_SYSTEM_MXBEAN_NAME;
import static java.lang.management.ManagementFactory.RUNTIME_MXBEAN_NAME;
import static java.lang.management.ManagementFactory.THREAD_MXBEAN_NAME;
import static java.lang.management.ManagementFactory.newPlatformMXBeanProxy;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import java.io.IOException;
import java.lang.management.BufferPoolMXBean;
import java.lang.management.ClassLoadingMXBean;
import java.lang.management.CompilationMXBean;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.RuntimeMXBean;
import java.lang.management.ThreadMXBean;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.rmi.NotBoundException;
import java.rmi.Remote;
import java.rmi.registry.LocateRegistry;
import java.rmi.registry.Registry;
import java.rmi.server.RMIClientSocketFactory;
import java.rmi.server.RemoteObject;
import java.rmi.server.RemoteObjectInvocationHandler;
import java.rmi.server.RemoteRef;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
import javax.management.IntrospectionException;
import javax.management.InvalidAttributeValueException;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanOperationInfo;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import javax.management.remote.rmi.RMIConnector;
import javax.management.remote.rmi.RMIServer;
import javax.rmi.ssl.SslRMIClientSocketFactory;
import javax.swing.event.SwingPropertyChangeSupport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sun.rmi.server.UnicastRef2;
import sun.rmi.transport.LiveRef;
import com.sun.management.HotSpotDiagnosticMXBean;
public class ProxyClient implements JConsoleContext {
// --- logging and connection-state plumbing ---------------------------------
private static Logger logger = LoggerFactory.getLogger(ProxyClient.class);
private ConnectionState connectionState = ConnectionState.DISCONNECTED;
// The SwingPropertyChangeSupport will fire events on the EDT
private SwingPropertyChangeSupport propertyChangeSupport =
new SwingPropertyChangeSupport(this, true);
// Cache of ProxyClient instances, keyed by the getCacheKey(...) overloads.
// The map itself is synchronized, but callers still do get-then-put.
private static Map<String, ProxyClient> cache =
Collections.synchronizedMap(new HashMap<String, ProxyClient>());
// true until tryConnect() succeeds; reset by disconnect().
private volatile boolean isDead = true;
// --- target identification (host/port or credentials) ----------------------
private String hostName = null;
private int port = 0;
private String userName = null;
private String password = null;
// --- capability flags discovered in tryConnect() ---------------------------
private boolean hasPlatformMXBeans = false;
private boolean hasHotSpotDiagnosticMXBean= false;
private boolean hasCompilationMXBean = false;
// true when ThreadMXBean exposes findDeadlockedThreads (JDK 6+ API).
private boolean supportsLockUsage = false;
// REVISIT: VMPanel and other places relying using getUrl().
// set only if it's created for local monitoring
private LocalVirtualMachine lvm;
// set only if it's created from a given URL via the Advanced tab
private String advancedUrl = null;
// --- live connection handles (null while disconnected) ---------------------
private JMXServiceURL jmxUrl = null;
private MBeanServerConnection mbsc = null;
private SnapshotMBeanServerConnection server = null;
private JMXConnector jmxc = null;
private RMIServer stub = null;
// Shared SSL socket factory used when probing the RMI registry.
private static final SslRMIClientSocketFactory sslRMIClientSocketFactory =
new SslRMIClientSocketFactory();
private String registryHostName = null;
private int registryPort = 0;
// true when this client talks to a JMX VM agent via an RMI registry lookup.
private boolean vmConnector = false;
// Results of checkSslConfig(): is the registry / the stub SSL-protected?
private boolean sslRegistry = false;
private boolean sslStub = false;
final private String connectionName;
final private String displayName;
// --- lazily created platform MXBean proxies (reset by disconnect()) --------
private ClassLoadingMXBean classLoadingMBean = null;
private CompilationMXBean compilationMBean = null;
private MemoryMXBean memoryMBean = null;
private OperatingSystemMXBean operatingSystemMBean = null;
private RuntimeMXBean runtimeMBean = null;
private ThreadMXBean threadMBean = null;
private com.sun.management.OperatingSystemMXBean sunOperatingSystemMXBean = null;
private HotSpotDiagnosticMXBean hotspotDiagnosticMXBean = null;
private List<MemoryPoolProxy> memoryPoolProxies = null;
private List<GarbageCollectorMXBean> garbageCollectorMBeans = null;
private List<BufferPoolMXBean> bufferPoolMXBeans = null;
final static private String HOTSPOT_DIAGNOSTIC_MXBEAN_NAME =
"com.sun.management:type=HotSpotDiagnostic";
/**
 * Creates a client for a remote {@code hostname:port} target. The special
 * combination {@code ("localhost", 0)} means "monitor this VM" and skips
 * the RMI connector setup entirely.
 */
private ProxyClient(String hostName, int port,
String userName, String password) throws IOException {
this.connectionName = getConnectionName(hostName, port, userName);
this.displayName = connectionName;
if (hostName.equals("localhost") && port == 0) {
// Monitor self
this.hostName = hostName;
this.port = port;
} else {
// Create an RMI connector client and connect it to
// the RMI connector server
final String urlPath = "/jndi/rmi://" + hostName + ":" + port +
"/jmxrmi";
JMXServiceURL url = new JMXServiceURL("rmi", "", 0, urlPath);
setParameters(url, userName, password);
vmConnector = true;
registryHostName = hostName;
registryPort = port;
// Probe up-front whether the registry and stub are SSL-protected.
checkSslConfig();
}
}
/**
 * Creates a client from an explicit JMX service URL string (the
 * "Advanced" connection path; see the advancedUrl field).
 */
private ProxyClient(String url,
String userName, String password) throws IOException {
this.advancedUrl = url;
this.connectionName = getConnectionName(url, userName);
this.displayName = connectionName;
setParameters(new JMXServiceURL(url), userName, password);
}
/**
 * Creates a client for a local virtual machine. The JMX service URL is
 * resolved later, in tryConnect(), once the management agent is started.
 */
private ProxyClient(LocalVirtualMachine lvm) throws IOException {
this.lvm = lvm;
this.connectionName = getConnectionName(lvm);
this.displayName = "pid: " + lvm.vmid() + " " + lvm.displayName();
}
/**
 * Records the JMX service URL and credentials for this connection, deriving
 * the host name and port directly from the URL.
 */
private void setParameters(JMXServiceURL url, String userName, String password) {
    this.jmxUrl = url;
    this.hostName = url.getHost();
    this.port = url.getPort();
    this.userName = userName;
    this.password = password;
}
/**
 * Validates that the remote RMIServer stub is trustworthy for SSL use:
 * it must be of the expected stub class (or a dynamic proxy backed by a
 * RemoteObjectInvocationHandler), its RemoteRef must be a
 * sun.rmi.server.UnicastRef2, and its client socket factory must be an
 * SslRMIClientSocketFactory. Any mismatch throws SecurityException;
 * checkSslConfig() uses that to decide sslStub.
 */
private static void checkStub(Remote stub,
Class<? extends Remote> stubClass) {
// Check remote stub is from the expected class.
//
if (stub.getClass() != stubClass) {
if (!Proxy.isProxyClass(stub.getClass())) {
throw new SecurityException(
"Expecting a " + stubClass.getName() + " stub!");
} else {
InvocationHandler handler = Proxy.getInvocationHandler(stub);
if (handler.getClass() != RemoteObjectInvocationHandler.class) {
throw new SecurityException(
"Expecting a dynamic proxy instance with a " +
RemoteObjectInvocationHandler.class.getName() +
" invocation handler!");
} else {
// RemoteObjectInvocationHandler extends RemoteObject, so the
// handler itself carries the RemoteRef inspected below.
stub = (Remote) handler;
}
}
}
// Check RemoteRef in stub is from the expected class
// "sun.rmi.server.UnicastRef2".
//
RemoteRef ref = ((RemoteObject)stub).getRef();
if (ref.getClass() != UnicastRef2.class) {
throw new SecurityException(
"Expecting a " + UnicastRef2.class.getName() +
" remote reference in stub!");
}
// Check RMIClientSocketFactory in stub is from the expected class
// "javax.rmi.ssl.SslRMIClientSocketFactory".
//
LiveRef liveRef = ((UnicastRef2)ref).getLiveRef();
RMIClientSocketFactory csf = liveRef.getClientSocketFactory();
if (csf == null || csf.getClass() != SslRMIClientSocketFactory.class) {
throw new SecurityException(
"Expecting a " + SslRMIClientSocketFactory.class.getName() +
" RMI client socket factory in stub!");
}
}
private static final String rmiServerImplStubClassName =
"javax.management.remote.rmi.RMIServerImpl_Stub";
// Resolved reflectively in the static initializer below; passed to
// checkStub() as the expected stub type.
private static final Class<? extends Remote> rmiServerImplStubClass;
static {
// FIXME: RMIServerImpl_Stub is generated at build time
// after jconsole is built. We need to investigate if
// the Makefile can be fixed to build jconsole in the
// right order. As a workaround for now, we dynamically
// load RMIServerImpl_Stub class instead of statically
// referencing it.
Class<? extends Remote> serverStubClass = null;
try {
serverStubClass = Class.forName(rmiServerImplStubClassName).asSubclass(Remote.class);
} catch (ClassNotFoundException e) {
// should never reach here
throw (InternalError) new InternalError(e.getMessage()).initCause(e);
}
rmiServerImplStubClass = serverStubClass;
}
/**
 * Looks up the "jmxrmi" RMIServer stub from the target's RMI registry,
 * first through an SSL client socket factory and, if that fails, through
 * a plain connection. Records whether the registry (sslRegistry) and the
 * retrieved stub itself (sslStub, via checkStub) are SSL-protected.
 *
 * @throws IOException if the registry cannot be reached or "jmxrmi" is
 *         not bound in it
 */
private void checkSslConfig() throws IOException {
// Get the reference to the RMI Registry and lookup RMIServer stub
//
Registry registry;
try {
registry =
LocateRegistry.getRegistry(registryHostName, registryPort,
sslRMIClientSocketFactory);
try {
stub = (RMIServer) registry.lookup("jmxrmi");
} catch (NotBoundException nbe) {
throw (IOException)
new IOException(nbe.getMessage()).initCause(nbe);
}
sslRegistry = true;
} catch (IOException e) {
// SSL lookup failed: retry over a plain (non-SSL) registry connection.
registry =
LocateRegistry.getRegistry(registryHostName, registryPort);
try {
stub = (RMIServer) registry.lookup("jmxrmi");
} catch (NotBoundException nbe) {
throw (IOException)
new IOException(nbe.getMessage()).initCause(nbe);
}
sslRegistry = false;
}
// Perform the checks for secure stub
//
try {
checkStub(stub, rmiServerImplStubClass);
sslStub = true;
} catch (SecurityException e) {
sslStub = false;
}
}
/**
 * Verifies this client is a JMX connector for a JMX VM agent, throwing the
 * supplied message otherwise. Shared guard for the SSL query methods below.
 */
private void ensureVmConnector(String message) {
    if (!isVmConnector()) {
        throw new UnsupportedOperationException(message);
    }
}
/**
 * Returns true if the underlying RMI registry is SSL-protected.
 *
 * @exception UnsupportedOperationException If this {@code ProxyClient}
 * does not denote a JMX connector for a JMX VM agent.
 */
public boolean isSslRmiRegistry() {
    ensureVmConnector(
        "ProxyClient.isSslRmiRegistry() is only supported if this " +
        "ProxyClient is a JMX connector for a JMX VM agent");
    return sslRegistry;
}
/**
 * Returns true if the retrieved RMI stub is SSL-protected.
 *
 * @exception UnsupportedOperationException If this {@code ProxyClient}
 * does not denote a JMX connector for a JMX VM agent.
 */
public boolean isSslRmiStub() {
    ensureVmConnector(
        "ProxyClient.isSslRmiStub() is only supported if this " +
        "ProxyClient is a JMX connector for a JMX VM agent");
    return sslStub;
}
/**
 * Returns true if this {@code ProxyClient} denotes
 * a JMX connector for a JMX VM agent.
 */
public boolean isVmConnector() {
    return this.vmConnector;
}
/**
 * Transitions to the given connection state and notifies listeners (on the
 * EDT, courtesy of SwingPropertyChangeSupport).
 */
private void setConnectionState(ConnectionState state) {
    final ConnectionState previous = this.connectionState;
    this.connectionState = state;
    propertyChangeSupport.firePropertyChange(CONNECTION_STATE_PROPERTY,
                                             previous, state);
}
/** @return the current connection state. */
public ConnectionState getConnectionState() {
    return connectionState;
}
/** Flushes the snapshot connection's cached attribute values, if connected. */
void flush() {
    if (server == null) {
        return;
    }
    server.flush();
}
/**
 * Connects to the target, moving through CONNECTING to CONNECTED, or to
 * DISCONNECTED (wrapping the failure in IllegalStateException) on error.
 */
public void connect() {
    setConnectionState(ConnectionState.CONNECTING);
    try {
        tryConnect();
        setConnectionState(ConnectionState.CONNECTED);
    } catch (Exception failure) {
        setConnectionState(ConnectionState.DISCONNECTED);
        throw new IllegalStateException(failure);
    }
}
/**
 * Establishes the actual connection: either grabs the in-process platform
 * MBean server (self-monitoring), or starts/locates the target's management
 * agent and opens a JMX connector (RMI-based for VM agents, generic
 * otherwise), with or without credentials. Afterwards it probes the target
 * for platform MXBeans, the HotSpot diagnostic MXBean, the compilation
 * MXBean, and the JDK 6 findDeadlockedThreads operation.
 *
 * @throws IOException if the target cannot be made manageable or connected
 */
private void tryConnect() throws IOException {
if (jmxUrl == null && "localhost".equals(hostName) && port == 0) {
// Monitor self
this.jmxc = null;
this.mbsc = ManagementFactory.getPlatformMBeanServer();
this.server = Snapshot.newSnapshot(mbsc);
} else {
// Monitor another process
if (lvm != null) {
if (!lvm.isManageable()) {
lvm.startManagementAgent();
if (!lvm.isManageable()) {
// FIXME: what to throw
// NOTE(review): message is missing a space ("<vm>not manageable").
throw new IOException(lvm + "not manageable");
}
}
if (this.jmxUrl == null) {
this.jmxUrl = new JMXServiceURL(lvm.connectorAddress());
}
}
// Need to pass in credentials ?
if (userName == null && password == null) {
if (isVmConnector()) {
// Check for SSL config on reconnection only
if (stub == null) {
checkSslConfig();
}
this.jmxc = new RMIConnector(stub, null);
jmxc.connect();
} else {
this.jmxc = JMXConnectorFactory.connect(jmxUrl);
}
} else {
Map<String, String[]> env = new HashMap<String, String[]>();
env.put(JMXConnector.CREDENTIALS,
new String[] {userName, password});
if (isVmConnector()) {
// Check for SSL config on reconnection only
if (stub == null) {
checkSslConfig();
}
this.jmxc = new RMIConnector(stub, null);
jmxc.connect(env);
} else {
this.jmxc = JMXConnectorFactory.connect(jmxUrl, env);
}
}
this.mbsc = jmxc.getMBeanServerConnection();
this.server = Snapshot.newSnapshot(mbsc);
}
this.isDead = false;
// Capability probing: which MXBeans does the target expose?
try {
ObjectName on = new ObjectName(THREAD_MXBEAN_NAME);
this.hasPlatformMXBeans = server.isRegistered(on);
this.hasHotSpotDiagnosticMXBean =
server.isRegistered(new ObjectName(HOTSPOT_DIAGNOSTIC_MXBEAN_NAME));
// check if it has 6.0 new APIs
if (this.hasPlatformMXBeans) {
MBeanOperationInfo[] mopis = server.getMBeanInfo(on).getOperations();
// look for findDeadlockedThreads operations;
for (MBeanOperationInfo op : mopis) {
if (op.getName().equals("findDeadlockedThreads")) {
this.supportsLockUsage = true;
break;
}
}
on = new ObjectName(COMPILATION_MXBEAN_NAME);
this.hasCompilationMXBean = server.isRegistered(on);
}
} catch (MalformedObjectNameException e) {
// should not reach here
throw new InternalError(e.getMessage());
} catch (IntrospectionException e) {
InternalError ie = new InternalError(e.getMessage());
ie.initCause(e);
throw ie;
} catch (InstanceNotFoundException e) {
InternalError ie = new InternalError(e.getMessage());
ie.initCause(e);
throw ie;
} catch (ReflectionException e) {
InternalError ie = new InternalError(e.getMessage());
ie.initCause(e);
throw ie;
}
if (hasPlatformMXBeans) {
// WORKAROUND for bug 5056632
// Check if the access role is correct by getting a RuntimeMXBean
getRuntimeMXBean();
}
}
/**
 * Gets a proxy client for a given local virtual machine, reusing a cached
 * instance when one exists for the same VM id.
 */
public static ProxyClient getProxyClient(LocalVirtualMachine lvm)
    throws IOException {
    final String key = getCacheKey(lvm);
    ProxyClient client = cache.get(key);
    if (client == null) {
        client = new ProxyClient(lvm);
        cache.put(key, client);
    }
    return client;
}
/** @return the display/connection name for a local VM (its pid). */
public static String getConnectionName(LocalVirtualMachine lvm) {
    return String.valueOf(lvm.vmid());
}
/** @return the cache key for a local VM (its pid). */
private static String getCacheKey(LocalVirtualMachine lvm) {
    return String.valueOf(lvm.vmid());
}
/**
 * Gets a proxy client for a given JMXServiceURL, reusing a cached instance
 * when one exists for the same (url, user, password) triple.
 */
public static ProxyClient getProxyClient(String url,
                                         String userName, String password)
    throws IOException {
    final String key = getCacheKey(url, userName, password);
    ProxyClient client = cache.get(key);
    if (client == null) {
        client = new ProxyClient(url, userName, password);
        cache.put(key, client);
    }
    return client;
}
/** @return "user@url" when a user name is present, otherwise the url. */
public static String getConnectionName(String url,
                                       String userName) {
    final boolean hasUser = userName != null && userName.length() > 0;
    return hasUser ? (userName + "@" + url) : url;
}
/** Builds the cache key "url:user:password", with nulls mapped to "". */
private static String getCacheKey(String url,
                                  String userName, String password) {
    StringBuilder key = new StringBuilder();
    key.append(url == null ? "" : url).append(":");
    key.append(userName == null ? "" : userName).append(":");
    key.append(password == null ? "" : password);
    return key.toString();
}
/**
 * Gets a proxy client for a given "hostname:port", reusing a cached
 * instance when one exists for the same (host, port, user, password).
 *
 * @param hostName target host name
 * @param port target port
 * @param userName optional user name (may be null)
 * @param password optional password (may be null)
 * @return a cached or newly created {@code ProxyClient}
 * @throws IOException if the connector client cannot be created
 */
public static ProxyClient getProxyClient(String hostName, int port,
                                         String userName, String password)
    throws IOException {
    final String key = getCacheKey(hostName, port, userName, password);
    ProxyClient proxyClient = cache.get(key);
    if (proxyClient == null) {
        proxyClient = new ProxyClient(hostName, port, userName, password);
        cache.put(key, proxyClient);
    }
    // Return the instance we just looked up or created, instead of the
    // original "return cache.get(key)": the extra lookup was inconsistent
    // with the other getProxyClient overloads and could observe a value
    // written by a concurrent thread.
    return proxyClient;
}
/** @return "user@host:port" when a user name is present, else "host:port". */
public static String getConnectionName(String hostName, int port,
                                       String userName) {
    final String hostAndPort = hostName + ":" + port;
    if (userName == null || userName.length() == 0) {
        return hostAndPort;
    }
    return userName + "@" + hostAndPort;
}
/** Builds the cache key "host:port:user:password", with nulls mapped to "". */
private static String getCacheKey(String hostName, int port,
                                  String userName, String password) {
    StringBuilder key = new StringBuilder();
    key.append(hostName == null ? "" : hostName).append(":");
    key.append(port).append(":");
    key.append(userName == null ? "" : userName).append(":");
    key.append(password == null ? "" : password);
    return key.toString();
}
/** @return the canonical connection name computed at construction time. */
public String connectionName() {
    return this.connectionName;
}
/** @return the human-readable display name for this connection. */
public String getDisplayName() {
    return this.displayName;
}
// NOTE(review): returning null from toString() violates the Object.toString()
// contract and can NPE callers that concatenate or display this object; the
// commented-out line suggests a "(disconnected)" label was the original
// intent. Left unchanged because existing callers may test for null.
public String toString() {
if (!isConnected()) {
return null;
//return Resources.getText("ConnectionName (disconnected)", displayName);
} else {
return displayName;
}
}
/** @return the live MBean server connection (null until connected). */
public MBeanServerConnection getMBeanServerConnection() {
    return this.mbsc;
}
/** @return the caching snapshot view over the MBean server connection. */
public SnapshotMBeanServerConnection getSnapshotMBeanServerConnection() {
    return this.server;
}
/** @return the JMX service URL string from the Advanced path, or null. */
public String getUrl() {
    return this.advancedUrl;
}
/** @return the target host name (may be null). */
public String getHostName() {
    return this.hostName;
}
/** @return the target port (0 when monitoring this VM). */
public int getPort() {
    return this.port;
}
/** @return the local VM id, or 0 when not attached to a local VM. */
public int getVmid() {
    if (this.lvm == null) {
        return 0;
    }
    return this.lvm.vmid();
}
/** @return the user name used for authentication, or null. */
public String getUserName() {
    return this.userName;
}
/** @return the password used for authentication, or null. */
public String getPassword() {
    return this.password;
}
/**
 * Tears the connection down: forgets the RMI stub (so a reconnect re-runs
 * the SSL checks), closes the JMX connector, drops every cached platform
 * MBean proxy, and — if we were alive — publishes a DISCONNECTED event.
 */
public void disconnect() {
    // Reset remote stub
    stub = null;
    // Close MBeanServer connection
    if (jmxc != null) {
        try {
            jmxc.close();
        } catch (IOException ignored) {
            // Best effort: the connection may already be gone.
        }
    }
    // Reset platform MBean references
    classLoadingMBean = null;
    compilationMBean = null;
    memoryMBean = null;
    operatingSystemMBean = null;
    runtimeMBean = null;
    threadMBean = null;
    sunOperatingSystemMXBean = null;
    garbageCollectorMBeans = null;
    memoryPoolProxies = null;
    // Set connection state to DISCONNECTED
    if (!isDead) {
        isDead = true;
        setConnectionState(ConnectionState.DISCONNECTED);
    }
}
/**
 * Returns the list of domains in which any MBean is
 * currently registered.
 *
 * @throws IOException if the remote call fails
 */
public String[] getDomains() throws IOException {
    return this.server.getDomains();
}
/**
 * Returns a map of MBeans with ObjectName as the key and MBeanInfo value
 * of a given domain. If domain is <tt>null</tt>, all MBeans
 * are returned. If no MBean found, an empty map is returned.
 *
 * @param domain the JMX domain to query, or null for all domains
 * @return map from ObjectName to MBeanInfo; never null
 * @throws IOException if the remote query fails
 */
public Map<ObjectName, MBeanInfo> getMBeans(String domain)
    throws IOException {
    ObjectName name = null;
    if (domain != null) {
        try {
            name = new ObjectName(domain + ":*");
        } catch (MalformedObjectNameException e) {
            // should not reach here: "<domain>:*" is always a valid pattern
            assert(false);
        }
    }
    // queryNames is specified to return Set<ObjectName>; iterate it directly
    // instead of through a raw Iterator with an instanceof check.
    Set<ObjectName> mbeans = server.queryNames(name, null);
    Map<ObjectName, MBeanInfo> result =
        new HashMap<ObjectName, MBeanInfo>(mbeans.size());
    for (ObjectName o : mbeans) {
        try {
            result.put(o, server.getMBeanInfo(o));
        } catch (IntrospectionException e) {
            logger.error(e.getMessage(), e);
        } catch (InstanceNotFoundException e) {
            // The MBean may have been unregistered between query and lookup.
            logger.error(e.getMessage(), e);
        } catch (ReflectionException e) {
            logger.error(e.getMessage(), e);
        }
    }
    return result;
}
/**
 * Returns a list of attributes of a named MBean.
 *
 * @return the attribute list, or null if the MBean vanished or could not
 *         be introspected (the error is logged)
 */
public AttributeList getAttributes(ObjectName name, String[] attributes)
    throws IOException {
    try {
        return server.getAttributes(name, attributes);
    } catch (InstanceNotFoundException e) {
        // TODO: A MBean may have been unregistered.
        // need to set up listener to listen for MBeanServerNotification.
        logger.error(e.getMessage(), e);
    } catch (ReflectionException e) {
        logger.error(e.getMessage(), e);
    }
    return null;
}
/**
 * Set the value of a specific attribute of a named MBean.
 *
 * @throws InvalidAttributeValueException if the value is rejected
 * @throws MBeanException wraps an exception from the MBean's setter
 * @throws IOException if the remote call fails
 */
public void setAttribute(ObjectName name, Attribute attribute)
    throws InvalidAttributeValueException,
           MBeanException,
           IOException {
    try {
        server.setAttribute(name, attribute);
    } catch (InstanceNotFoundException e) {
        // The MBean may have been unregistered concurrently; log and move on.
        logger.error(e.getMessage(), e);
    } catch (AttributeNotFoundException e) {
        // A missing attribute indicates a caller-side programming error.
        throw new IllegalStateException(e);
    } catch (ReflectionException e) {
        logger.error(e.getMessage(), e);
    }
}
/**
 * Invokes an operation of a named MBean.
 *
 * @return the operation result, or null if the MBean vanished or could
 *         not be invoked (the error is logged)
 * @throws MBeanException Wraps an exception thrown by
 * the MBean's invoked method.
 */
public Object invoke(ObjectName name, String operationName,
                     Object[] params, String[] signature)
    throws IOException, MBeanException {
    Object result = null;
    try {
        result = server.invoke(name, operationName, params, signature);
    } catch (InstanceNotFoundException e) {
        // The MBean may have been unregistered. Log it — previously this
        // was silently swallowed, unlike every other accessor in this class.
        logger.error(e.getMessage(), e);
    } catch (ReflectionException e) {
        logger.error(e.getMessage(), e);
    }
    return result;
}
/** Lazily creates and returns the remote ClassLoadingMXBean proxy. */
public synchronized ClassLoadingMXBean getClassLoadingMXBean() throws IOException {
    if (classLoadingMBean == null && hasPlatformMXBeans) {
        classLoadingMBean = newPlatformMXBeanProxy(
            server, CLASS_LOADING_MXBEAN_NAME, ClassLoadingMXBean.class);
    }
    return classLoadingMBean;
}
/** Lazily creates and returns the remote CompilationMXBean proxy. */
public synchronized CompilationMXBean getCompilationMXBean() throws IOException {
    if (compilationMBean == null && hasCompilationMXBean) {
        compilationMBean = newPlatformMXBeanProxy(
            server, COMPILATION_MXBEAN_NAME, CompilationMXBean.class);
    }
    return compilationMBean;
}
/**
 * Returns proxies for all memory pool MBeans of the target VM, creating
 * them lazily on first call.
 * <p>
 * Made {@code synchronized} for consistency with the other lazily
 * initialized collection getters in this class (buffer pools, garbage
 * collectors); without it two threads could race and build the list twice.
 */
public synchronized Collection<MemoryPoolProxy> getMemoryPoolProxies()
    throws IOException {
    // TODO: How to deal with changes to the list??
    if (memoryPoolProxies == null) {
        ObjectName poolName = null;
        try {
            poolName = new ObjectName(MEMORY_POOL_MXBEAN_DOMAIN_TYPE + ",*");
        } catch (MalformedObjectNameException e) {
            // should not reach here: the pattern above is always valid
            assert(false);
        }
        Set<ObjectName> mbeans = server.queryNames(poolName, null);
        if (mbeans != null) {
            memoryPoolProxies = new ArrayList<MemoryPoolProxy>();
            for (ObjectName objName : mbeans) {
                memoryPoolProxies.add(new MemoryPoolProxy(this, objName));
            }
        }
    }
    return memoryPoolProxies;
}
/**
 * Lazily discovers the java.nio buffer pool MXBeans of the target VM and
 * returns proxies for them.
 */
public synchronized Collection<BufferPoolMXBean> getBufferPoolMXBeans()
    throws IOException {
    final String domainAndType = "java.nio:type=BufferPool";
    // TODO: How to deal with changes to the list??
    if (bufferPoolMXBeans == null) {
        ObjectName pattern = null;
        try {
            pattern = new ObjectName(domainAndType + ",*");
        } catch (MalformedObjectNameException e) {
            // should not reach here
            assert(false);
        }
        Set mbeans = server.queryNames(pattern, null);
        if (mbeans != null) {
            bufferPoolMXBeans = new ArrayList<BufferPoolMXBean>();
            for (Object obj : mbeans) {
                ObjectName on = (ObjectName) obj;
                String proxyName = domainAndType + ",name=" + on.getKeyProperty("name");
                bufferPoolMXBeans.add(
                    newPlatformMXBeanProxy(server, proxyName, BufferPoolMXBean.class));
            }
        }
    }
    return bufferPoolMXBeans;
}
/**
 * Lazily discovers the garbage collector MXBeans of the target VM and
 * returns proxies for them.
 */
public synchronized Collection<GarbageCollectorMXBean> getGarbageCollectorMXBeans()
    throws IOException {
    // TODO: How to deal with changes to the list??
    if (garbageCollectorMBeans == null) {
        ObjectName pattern = null;
        try {
            pattern = new ObjectName(GARBAGE_COLLECTOR_MXBEAN_DOMAIN_TYPE + ",*");
        } catch (MalformedObjectNameException e) {
            // should not reach here
            assert(false);
        }
        Set mbeans = server.queryNames(pattern, null);
        if (mbeans != null) {
            garbageCollectorMBeans = new ArrayList<GarbageCollectorMXBean>();
            for (Object obj : mbeans) {
                ObjectName on = (ObjectName) obj;
                String proxyName = GARBAGE_COLLECTOR_MXBEAN_DOMAIN_TYPE +
                    ",name=" + on.getKeyProperty("name");
                garbageCollectorMBeans.add(
                    newPlatformMXBeanProxy(server, proxyName, GarbageCollectorMXBean.class));
            }
        }
    }
    return garbageCollectorMBeans;
}
/** Lazily creates and returns the remote MemoryMXBean proxy. */
public synchronized MemoryMXBean getMemoryMXBean() throws IOException {
    if (memoryMBean == null && hasPlatformMXBeans) {
        memoryMBean = newPlatformMXBeanProxy(
            server, MEMORY_MXBEAN_NAME, MemoryMXBean.class);
    }
    return memoryMBean;
}
/** Lazily creates and returns the remote RuntimeMXBean proxy. */
public synchronized RuntimeMXBean getRuntimeMXBean() throws IOException {
    if (runtimeMBean == null && hasPlatformMXBeans) {
        runtimeMBean = newPlatformMXBeanProxy(
            server, RUNTIME_MXBEAN_NAME, RuntimeMXBean.class);
    }
    return runtimeMBean;
}
/** Lazily creates and returns the remote ThreadMXBean proxy. */
public synchronized ThreadMXBean getThreadMXBean() throws IOException {
    if (threadMBean == null && hasPlatformMXBeans) {
        threadMBean = newPlatformMXBeanProxy(
            server, THREAD_MXBEAN_NAME, ThreadMXBean.class);
    }
    return threadMBean;
}
/** Lazily creates and returns the remote OperatingSystemMXBean proxy. */
public synchronized OperatingSystemMXBean getOperatingSystemMXBean() throws IOException {
    if (operatingSystemMBean == null && hasPlatformMXBeans) {
        operatingSystemMBean = newPlatformMXBeanProxy(
            server, OPERATING_SYSTEM_MXBEAN_NAME, OperatingSystemMXBean.class);
    }
    return operatingSystemMBean;
}
public synchronized com.sun.management.OperatingSystemMXBean
getSunOperatingSystemMXBean() throws IOException {
try {
ObjectName on = new ObjectName(OPERATING_SYSTEM_MXBEAN_NAME);
if (sunOperatingSystemMXBean == null) {
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | true |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/jmxutil/JConsoleContext.java | src/main/java/com/stephan/tof/jmxmon/jmxutil/JConsoleContext.java | /*
* %W% %E%
*
* Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*/
package com.stephan.tof.jmxmon.jmxutil;
import java.beans.PropertyChangeListener;
import javax.management.MBeanServerConnection;
/**
 * {@code JConsoleContext} represents a JConsole connection to a target
 * application.
 * <p>
 * {@code JConsoleContext} notifies any {@code PropertyChangeListeners}
 * about the {@linkplain #CONNECTION_STATE_PROPERTY <i>ConnectionState</i>}
 * property change to {@link ConnectionState#CONNECTED CONNECTED} and
 * {@link ConnectionState#DISCONNECTED DISCONNECTED}.
 * The {@code JConsoleContext} instance will be the source for
 * any generated events.
 * <p>
 *
 * @since 1.6
 */
public interface JConsoleContext {
/**
 * The {@link ConnectionState ConnectionState} bound property name.
 */
// NOTE: as an interface field this is implicitly public static final.
public static String CONNECTION_STATE_PROPERTY = "connectionState";
/**
 * Values for the {@linkplain #CONNECTION_STATE_PROPERTY
 * <i>ConnectionState</i>} bound property.
 */
public enum ConnectionState {
/**
 * The connection has been successfully established.
 */
CONNECTED,
/**
 * No connection present.
 */
DISCONNECTED,
/**
 * The connection is being attempted.
 */
CONNECTING
}
/**
 * Returns the {@link MBeanServerConnection MBeanServerConnection} for the
 * connection to an application. The returned
 * {@code MBeanServerConnection} object becomes invalid when
 * the connection state is changed to the
 * {@link ConnectionState#DISCONNECTED DISCONNECTED} state.
 *
 * @return the {@code MBeanServerConnection} for the
 * connection to an application.
 */
public MBeanServerConnection getMBeanServerConnection();
/**
 * Returns the current connection state.
 * @return the current connection state.
 */
public ConnectionState getConnectionState();
/**
 * Add a {@link java.beans.PropertyChangeListener PropertyChangeListener}
 * to the listener list.
 * The listener is registered for all properties.
 * The same listener object may be added more than once, and will be called
 * as many times as it is added.
 * If {@code listener} is {@code null}, no exception is thrown and
 * no action is taken.
 *
 * @param listener The {@code PropertyChangeListener} to be added
 */
public void addPropertyChangeListener(PropertyChangeListener listener);
/**
 * Removes a {@link java.beans.PropertyChangeListener PropertyChangeListener}
 * from the listener list. This
 * removes a {@code PropertyChangeListener} that was registered for all
 * properties. If {@code listener} was added more than once to the same
 * event source, it will be notified one less time after being removed. If
 * {@code listener} is {@code null}, or was never added, no exception is
 * thrown and no action is taken.
 *
 * @param listener the {@code PropertyChangeListener} to be removed
 */
public void removePropertyChangeListener(PropertyChangeListener listener);
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
toomanyopenfiles/jmxmon | https://github.com/toomanyopenfiles/jmxmon/blob/75c885a32dd706aeeb30547cb3d6bfb9fedf33f4/src/main/java/com/stephan/tof/jmxmon/jmxutil/MemoryPoolProxy.java | src/main/java/com/stephan/tof/jmxmon/jmxutil/MemoryPoolProxy.java | /*
* %W% %E%
*
* Copyright (c) 2006, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*/
package com.stephan.tof.jmxmon.jmxutil;
import static java.lang.management.ManagementFactory.GARBAGE_COLLECTOR_MXBEAN_DOMAIN_TYPE;
import java.lang.management.MemoryPoolMXBean;
import java.lang.management.MemoryUsage;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import javax.management.ObjectName;
import com.sun.management.GarbageCollectorMXBean;
import com.sun.management.GcInfo;
/**
 * Proxy for one memory pool of a monitored VM. Tracks which garbage
 * collector MBeans manage the pool and samples usage/GC statistics on
 * demand via {@link #getStat()}.
 */
public class MemoryPoolProxy {
    private String poolName;
    private ProxyClient client;
    private ObjectName objName;
    private MemoryPoolMXBean pool;
    // GC MBean -> last observed collection count for that collector.
    private Map<ObjectName,Long> gcMBeans;
    private GcInfo lastGcInfo;

    /**
     * Creates a proxy for the memory pool named by {@code poolName} and
     * records the garbage collector MBeans that manage it.
     *
     * @param client connection to the target VM
     * @param poolName ObjectName of the MemoryPoolMXBean to proxy
     * @throws java.io.IOException if the remote MXBean cannot be reached
     */
    public MemoryPoolProxy(ProxyClient client, ObjectName poolName) throws java.io.IOException {
        this.client = client;
        // BUG FIX: this was "this.objName = objName;" — a self-assignment that
        // left the field null, so getObjectName() always returned null.
        this.objName = poolName;
        this.pool = client.getMXBean(poolName, MemoryPoolMXBean.class);
        this.poolName = this.pool.getName();
        this.gcMBeans = new HashMap<ObjectName,Long>();
        this.lastGcInfo = null;
        String[] mgrNames = pool.getMemoryManagerNames();
        for (String name : mgrNames) {
            try {
                ObjectName mbeanName = new ObjectName(GARBAGE_COLLECTOR_MXBEAN_DOMAIN_TYPE +
                    ",name=" + name);
                if (client.isRegistered(mbeanName)) {
                    // Seed with zero collections observed so far
                    // (Long.valueOf instead of the deprecated new Long(0)).
                    gcMBeans.put(mbeanName, Long.valueOf(0));
                }
            } catch (Exception e) {
                throw new IllegalStateException(e);
            }
        }
    }

    /** @return true if at least one garbage collector manages this pool. */
    public boolean isCollectedMemoryPool() {
        return (gcMBeans.size() != 0);
    }

    /** @return the ObjectName of the proxied MemoryPoolMXBean. */
    public ObjectName getObjectName() {
        return objName;
    }

    /**
     * Samples the pool: current usage, the usage/collection thresholds, and
     * details of the most recent garbage collection that touched this pool
     * (if any collector reports a new collection since the last call).
     */
    public MemoryPoolStat getStat() throws java.io.IOException {
        long usageThreshold = (pool.isUsageThresholdSupported()
                                   ? pool.getUsageThreshold()
                                   : -1);
        long collectThreshold = (pool.isCollectionUsageThresholdSupported()
                                     ? pool.getCollectionUsageThreshold()
                                     : -1);
        long lastGcStartTime = 0;
        long lastGcEndTime = 0;
        MemoryUsage beforeGcUsage = null;
        MemoryUsage afterGcUsage = null;
        long gcId = 0;
        if (lastGcInfo != null) {
            gcId = lastGcInfo.getId();
            lastGcStartTime = lastGcInfo.getStartTime();
            lastGcEndTime = lastGcInfo.getEndTime();
            beforeGcUsage = lastGcInfo.getMemoryUsageBeforeGc().get(poolName);
            afterGcUsage = lastGcInfo.getMemoryUsageAfterGc().get(poolName);
        }
        Set<Map.Entry<ObjectName,Long>> set = gcMBeans.entrySet();
        for (Map.Entry<ObjectName,Long> e : set) {
            GarbageCollectorMXBean gc =
                client.getMXBean(e.getKey(),
                                 com.sun.management.GarbageCollectorMXBean.class);
            Long gcCount = e.getValue();
            Long newCount = gc.getCollectionCount();
            if (newCount > gcCount) {
                gcMBeans.put(e.getKey(), Long.valueOf(newCount));
                GcInfo info = gc.getLastGcInfo();
                // Guard against a null GcInfo: getLastGcInfo() may return null
                // if no details are available yet; previously this NPE'd and
                // also clobbered a previously valid lastGcInfo with null.
                if (info != null) {
                    lastGcInfo = info;
                    if (lastGcInfo.getEndTime() > lastGcEndTime) {
                        gcId = lastGcInfo.getId();
                        lastGcStartTime = lastGcInfo.getStartTime();
                        lastGcEndTime = lastGcInfo.getEndTime();
                        beforeGcUsage = lastGcInfo.getMemoryUsageBeforeGc().get(poolName);
                        afterGcUsage = lastGcInfo.getMemoryUsageAfterGc().get(poolName);
                        assert(beforeGcUsage != null);
                        assert(afterGcUsage != null);
                    }
                }
            }
        }
        MemoryUsage usage = pool.getUsage();
        return new MemoryPoolStat(poolName,
                                  usageThreshold,
                                  usage,
                                  gcId,
                                  lastGcStartTime,
                                  lastGcEndTime,
                                  collectThreshold,
                                  beforeGcUsage,
                                  afterGcUsage);
    }
}
| java | Apache-2.0 | 75c885a32dd706aeeb30547cb3d6bfb9fedf33f4 | 2026-01-05T02:41:45.155873Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/test/java/com/pinterest/memq/commons/protocol/TestTopicMetadataPacket.java | memq-commons/src/test/java/com/pinterest/memq/commons/protocol/TestTopicMetadataPacket.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.util.HashSet;
import java.util.Properties;
import org.junit.Test;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
/**
 * Round-trip serialization tests for the topic-metadata request/response
 * packets.
 */
public class TestTopicMetadataPacket {

    /** Request packet survives a write/read round trip. */
    @Test
    public void testTopicMetadataRequest() throws IOException {
        TopicMetadataRequestPacket request = new TopicMetadataRequestPacket("test");
        ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer();
        request.write(buf, RequestType.PROTOCOL_VERSION);
        request = new TopicMetadataRequestPacket();
        request.readFields(buf, RequestType.PROTOCOL_VERSION);
        assertEquals("test", request.getTopic());
    }

    /** Response packet survives round trips with and without storage properties. */
    @Test
    public void testTopicMetadataResponse() throws IOException {
        Properties storageProperties = new Properties();
        storageProperties.setProperty("prop1", "xyz");
        storageProperties.setProperty("prop2", String.valueOf(212));
        TopicMetadata md = new TopicMetadata("test23", "delayeddevnull", storageProperties);
        assertEquals(0, md.getWriteBrokers().size());
        md.getWriteBrokers().add(new Broker("127.0.0.1", (short) 9092, "2xl", "us-east-1a",
            BrokerType.WRITE, new HashSet<>()));
        TopicMetadataResponsePacket response = new TopicMetadataResponsePacket(md);
        ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer();
        response.write(buf, RequestType.PROTOCOL_VERSION);
        response = new TopicMetadataResponsePacket();
        response.readFields(buf, RequestType.PROTOCOL_VERSION);
        TopicMetadata metadata = response.getMetadata();
        assertEquals("test23", metadata.getTopicName());
        assertEquals("delayeddevnull", metadata.getStorageHandlerName());
        assertEquals(2, metadata.getStorageHandlerConfig().size());
        assertEquals(1, metadata.getWriteBrokers().size());

        // Second round trip: metadata with empty storage properties.
        // BUG FIX: previously the freshly built "md" was never wrapped in a
        // new response packet, so the old response was re-serialized and the
        // assignment was dead — the empty-properties case was never tested.
        md = new TopicMetadata("test23", "delayeddevnull", new Properties());
        response = new TopicMetadataResponsePacket(md);
        buf = PooledByteBufAllocator.DEFAULT.buffer();
        response.write(buf, RequestType.PROTOCOL_VERSION);
        response = new TopicMetadataResponsePacket();
        response.readFields(buf, RequestType.PROTOCOL_VERSION);
        metadata = response.getMetadata();
        assertEquals("test23", metadata.getTopicName());
        assertEquals("delayeddevnull", metadata.getStorageHandlerName());
        assertEquals(0, metadata.getStorageHandlerConfig().size());
    }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/MemqLogMessage.java | memq-commons/src/main/java/com/pinterest/memq/commons/MemqLogMessage.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons;
import java.util.Map;
public class MemqLogMessage<K, V> {
// Keys under which internal metadata is exposed (e.g. via headers/props).
public static final String INTERNAL_FIELD_TOPIC = "topic";
public static final String INTERNAL_FIELD_WRITE_TIMESTAMP = "wts";
public static final String INTERNAL_FIELD_NOTIFICATION_PARTITION_ID = "npi";
public static final String INTERNAL_FIELD_NOTIFICATION_READ_TIMESTAMP = "nrts";
public static final String INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET = "npo";
public static final String INTERNAL_FIELD_OBJECT_SIZE = "objectSize";
public static final String INTERNAL_FIELD_MESSAGE_OFFSET = "mo";
// Immutable payload set at construction time.
private final K key;
private final V value;
private final MessageId messageId;
private final Map<String, byte[]> headers;
// Mutable positional/metadata fields populated after construction
// via the setters below.
private long writeTimestamp;
private long messageOffsetInBatch;
private int notificationPartitionId;
private long notificationPartitionOffset;
private long notificationReadTimestamp;
// True when this is the final message of its batch.
private final boolean endOfBatch;
public MemqLogMessage(MessageId messageId, Map<String, byte[]> headers, K key, V value, boolean endOfBatch) {
this.messageId = messageId;
this.headers = headers;
this.value = value;
this.key = key;
this.endOfBatch = endOfBatch;
}
public MessageId getMessageId() {
return messageId;
}
public Map<String, byte[]> getHeaders() {
return headers;
}
public K getKey() {
return key;
}
public V getValue() {
return value;
}
public long getWriteTimestamp() {
return writeTimestamp;
}
public void setWriteTimestamp(long writeTimestamp) {
this.writeTimestamp = writeTimestamp;
}
public long getMessageOffsetInBatch() {
return messageOffsetInBatch;
}
public void setMessageOffsetInBatch(long messageOffsetInBatch) {
this.messageOffsetInBatch = messageOffsetInBatch;
}
public int getNotificationPartitionId() {
return notificationPartitionId;
}
public void setNotificationPartitionId(int notificationPartitionId) {
this.notificationPartitionId = notificationPartitionId;
}
public long getNotificationPartitionOffset() {
return notificationPartitionOffset;
}
public void setNotificationPartitionOffset(long notificationPartitionOffset) {
this.notificationPartitionOffset = notificationPartitionOffset;
}
public long getNotificationReadTimestamp() {
return notificationReadTimestamp;
}
public void setNotificationReadTimestamp(long notificationReadTimestamp) {
this.notificationReadTimestamp = notificationReadTimestamp;
}
public boolean isEndOfBatch() {
return endOfBatch;
}
@Override
public String toString() {
return "[key=" + key + ", value=" + value + ", messageId=" + messageId + ", headers=" + headers
+ ", writeTimestamp=" + writeTimestamp + ", messageOffsetInBatch=" + messageOffsetInBatch
+ ", notificationPartitionId=" + notificationPartitionId + ", notificationPartitionOffset="
+ notificationPartitionOffset + ", notificationReadTimestamp=" + notificationReadTimestamp
+ "]";
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/MessageId.java | memq-commons/src/main/java/com/pinterest/memq/commons/MessageId.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons;
/**
 * Opaque identifier of a MemQ message, represented as a raw byte array.
 * The array is held by reference (no defensive copy); callers needing
 * isolation must copy it themselves. Subclasses may reinterpret the encoding.
 */
public class MessageId {

  // Raw identifier bytes, exposed to subclasses.
  protected byte[] array;

  /** @param array backing bytes for this id (stored by reference) */
  public MessageId(byte[] array) {
    this.array = array;
  }

  /** @return the backing byte array (not a copy) */
  public byte[] toByteArray() {
    return array;
  }

  /** Replaces the backing byte array with {@code array} (not copied). */
  public void fromByteArray(byte[] array) {
    this.array = array;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/CloseableIterator.java | memq-commons/src/main/java/com/pinterest/memq/commons/CloseableIterator.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons;
import java.io.Closeable;
import java.util.Iterator;
/**
 * An {@link Iterator} that owns an underlying resource and must therefore be
 * closed by the caller once iteration is finished — ideally via
 * try-with-resources, since it is also {@link Closeable}.
 */
public interface CloseableIterator<T> extends Iterator<T>, Closeable {
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/BatchHeader.java | memq-commons/src/main/java/com/pinterest/memq/commons/BatchHeader.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.logging.Logger;
import com.pinterest.memq.commons.protocol.Packet;
import io.netty.buffer.ByteBuf;
/**
* Header is used for improving speed of seeks in a batch.
*
* This is used to maintain a SortedMap of Relative Id to IndexEntry where
* {@link IndexEntry} contains the offset and size of the Message.
*
* This functionality is usually used by any consumer that looking to perform a
* peek style lookup of data. e.g. check {@link MemqLogMessage} at specific
* position.
*/
public class BatchHeader {

  private static final Logger logger = Logger.getLogger(BatchHeader.class.getName());

  // Relative message id -> (offset, size) of that message within the batch,
  // kept sorted so ranged/seek lookups are cheap.
  private SortedMap<Integer, IndexEntry> messageIndex;
  // Serialized header length in bytes, as read off the stream.
  private int headerLength;

  /**
   * Deserializes a batch header from {@code stream}.
   *
   * Wire layout: [int headerLength][int numEntries] followed by
   * numEntries triplets of [int relativeId][int offset][int size].
   *
   * @param stream input positioned at the start of a batch header
   * @throws IOException if the stream cannot be read or ends prematurely
   */
  public BatchHeader(DataInputStream stream) throws IOException {
    messageIndex = new TreeMap<>();
    headerLength = stream.readInt();
    int numIndexEntries = stream.readInt();// number of entries
    logger.fine(() -> "header len: " + headerLength + ", num entries: " + numIndexEntries);
    for (int i = 0; i < numIndexEntries; i++) {
      int idx = stream.readInt();
      int offset = stream.readInt();
      int size = stream.readInt();
      messageIndex.put(idx, new IndexEntry(offset, size));
    }
  }

  /**
   * Serializes this header into {@code buf} using the same layout readFields
   * consumes, in ascending relative-id order (TreeMap iteration order).
   *
   * NOTE(review): writes the {@code headerLength} captured at read time; if
   * {@code messageIndex} is mutated afterwards the two can disagree — confirm
   * callers never mutate between read and write.
   *
   * @param buf destination buffer
   * @return the same {@code buf}, for chaining
   */
  public ByteBuf writeHeaderToByteBuf(ByteBuf buf) {
    buf.writeInt(headerLength);
    buf.writeInt(messageIndex.size());
    for (Entry<Integer, IndexEntry> entry : messageIndex.entrySet()) {
      buf.writeInt(entry.getKey());
      buf.writeInt(entry.getValue().getOffset());
      buf.writeInt(entry.getValue().getSize());
    }
    return buf;
  }

  /** @return the live (mutable) index of relative message id to entry. */
  public SortedMap<Integer, IndexEntry> getMessageIndex() {
    return messageIndex;
  }

  @Override
  public String toString() {
    return "BatchHeader [messageIndex=" + messageIndex + "]";
  }

  /**
   * Used by {@link BatchHeader} to locate the byte offset (seek position) and
   * length of the Message in Batch so seeks and access can be performed in an
   * optimal fashion.
   */
  public static class IndexEntry implements Packet {

    // Byte offset of the message within the batch payload.
    private int offset;
    // Length of the message in bytes.
    private int size;

    /** No-arg constructor for deserialization via {@link #readFields}. */
    public IndexEntry() {
    }

    public IndexEntry(int offset, int size) {
      this.offset = offset;
      this.size = size;
    }

    @Override
    public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
      // Wire order: offset first, then size — must mirror write().
      offset = buf.readInt();
      size = buf.readInt();
    }

    @Override
    public void write(ByteBuf buf, short protocolVersion) {
      buf.writeInt(offset);
      buf.writeInt(size);
    }

    /** @return serialized size: two 4-byte ints. */
    @Override
    public int getSize(short protocolVersion) {
      return Integer.BYTES * 2;
    }

    public int getOffset() {
      return offset;
    }

    public void setOffset(int offset) {
      this.offset = offset;
    }

    public int getSize() {
      return size;
    }

    public void setSize(int size) {
      this.size = size;
    }

    @Override
    public String toString() {
      return "IndexEntry [offset=" + offset + ", size=" + size + "]";
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/mon/OpenTSDBReporter.java | memq-commons/src/main/java/com/pinterest/memq/commons/mon/OpenTSDBReporter.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.mon;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.Map;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import com.pinterest.memq.commons.mon.OpenTSDBClient.MetricsBuffer;
public class OpenTSDBReporter extends ScheduledReporter {
private static final Logger logger = Logger.getLogger(OpenTSDBClient.class.getName());
private OpenTSDBClient client;
private String[] tags;
private String baseName;
private static final int RETRY_COUNT = 3;
protected OpenTSDBReporter(String baseName,
MetricRegistry registry,
String registryName,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit,
OpenTSDBClient client,
String localHostAddress,
Map<String, Object> tags) throws UnknownHostException {
super(registry, registryName, filter, rateUnit, durationUnit);
if (baseName == null || baseName.isEmpty()) {
this.baseName = "";
} else {
this.baseName = baseName + ".";
}
this.tags = new String[tags.size() + 1];
this.tags[tags.size()] = "host=" + localHostAddress;
int i = 0;
for (Map.Entry<String, Object> e : tags.entrySet()) {
this.tags[i] = e.getKey() + "=" + e.getValue().toString();
i++;
}
this.client = client;
}
public static ScheduledReporter createReporter(String baseName,
MetricRegistry registry,
String name,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit,
OpenTSDBClient client,
String localHostAddress) throws UnknownHostException {
return new OpenTSDBReporter(baseName, registry, name, filter, rateUnit, durationUnit, client,
localHostAddress, Collections.emptyMap());
}
public static ScheduledReporter createReporterWithTags(String baseName,
MetricRegistry registry,
String name,
MetricFilter filter,
TimeUnit rateUnit,
TimeUnit durationUnit,
OpenTSDBClient client,
String localHostAddress,
Map<String, Object> tags) throws UnknownHostException {
return new OpenTSDBReporter(baseName, registry, name, filter, rateUnit, durationUnit, client,
localHostAddress, tags);
}
@Override
public void report(@SuppressWarnings("rawtypes") SortedMap<String, Gauge> gauges,
SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms,
SortedMap<String, Meter> meters,
SortedMap<String, Timer> timers) {
try {
int epochSecs = (int) (System.currentTimeMillis() / 1000);
MetricsBuffer buffer = new MetricsBuffer("memq." + baseName);
for (Entry<String, Counter> entry : counters.entrySet()) {
buffer.addMetric(entry.getKey(), epochSecs, entry.getValue().getCount(), tags);
}
for (Entry<String, Meter> entry : meters.entrySet()) {
buffer.addMetric(entry.getKey(), epochSecs, entry.getValue().getCount(), tags);
}
for (Entry<String, Histogram> entry : histograms.entrySet()) {
Snapshot snapshot = entry.getValue().getSnapshot();
buffer.addMetric(entry.getKey() + ".avg", epochSecs, snapshot.getMean(), tags);
buffer.addMetric(entry.getKey() + ".min", epochSecs, snapshot.getMin(), tags);
buffer.addMetric(entry.getKey() + ".median", epochSecs, snapshot.getMedian(), tags);
buffer.addMetric(entry.getKey() + ".p50", epochSecs, snapshot.getMedian(), tags);
buffer.addMetric(entry.getKey() + ".p75", epochSecs, snapshot.get75thPercentile(), tags);
buffer.addMetric(entry.getKey() + ".p95", epochSecs, snapshot.get95thPercentile(), tags);
buffer.addMetric(entry.getKey() + ".p98", epochSecs, snapshot.get98thPercentile(), tags);
buffer.addMetric(entry.getKey() + ".p99", epochSecs, snapshot.get99thPercentile(), tags);
buffer.addMetric(entry.getKey() + ".p999", epochSecs, snapshot.get999thPercentile(), tags);
buffer.addMetric(entry.getKey() + ".max", epochSecs, snapshot.getMax(), tags);
buffer.addMetric(entry.getKey() + ".stddev", epochSecs, snapshot.getStdDev(), tags);
}
for (Entry<String, Timer> entry : timers.entrySet()) {
Snapshot snapshot = entry.getValue().getSnapshot();
buffer.addMetric(entry.getKey() + ".avg", epochSecs, snapshot.getMean(), tags);
buffer.addMetric(entry.getKey() + ".min", epochSecs, snapshot.getMin(), tags);
buffer.addMetric(entry.getKey() + ".median", epochSecs, snapshot.getMedian(), tags);
buffer.addMetric(entry.getKey() + ".p50", epochSecs, snapshot.getMedian(), tags);
buffer.addMetric(entry.getKey() + ".p75", epochSecs, snapshot.get75thPercentile(), tags);
buffer.addMetric(entry.getKey() + ".p95", epochSecs, snapshot.get95thPercentile(), tags);
buffer.addMetric(entry.getKey() + ".p98", epochSecs, snapshot.get98thPercentile(), tags);
buffer.addMetric(entry.getKey() + ".p99", epochSecs, snapshot.get99thPercentile(), tags);
buffer.addMetric(entry.getKey() + ".p999", epochSecs, snapshot.get999thPercentile(), tags);
buffer.addMetric(entry.getKey() + ".max", epochSecs, snapshot.getMax(), tags);
buffer.addMetric(entry.getKey() + ".stddev", epochSecs, snapshot.getStdDev(), tags);
}
for (@SuppressWarnings("rawtypes")
Entry<String, Gauge> entry : gauges.entrySet()) {
if (entry.getValue().getValue() instanceof Long) {
buffer.addMetric(entry.getKey(), epochSecs, (Long) entry.getValue().getValue(), tags);
} else if (entry.getValue().getValue() instanceof Double) {
buffer.addMetric(entry.getKey(), epochSecs, (Double) entry.getValue().getValue(), tags);
} else {
String val = entry.getValue().getValue().toString();
if (!val.contains("[")) {
buffer.addMetric(entry.getKey(), epochSecs, Double.parseDouble(val), tags);
}
}
}
int retryCounter = RETRY_COUNT;
while (retryCounter > 0) {
try {
client.sendMetrics(buffer);
break;
} catch (Exception ex) {
if (retryCounter == 1) {
logger.log(Level.SEVERE, "Failed to send metrics to OpenTSDB after " + RETRY_COUNT + " retries", ex);
throw ex;
}
retryCounter--;
}
}
} catch (Exception e) {
logger.log(Level.SEVERE, "Failed to write metrics to opentsdb", e);
}
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/mon/OpenTSDBClient.java | memq-commons/src/main/java/com/pinterest/memq/commons/mon/OpenTSDBClient.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.mon;
import com.google.common.base.Joiner;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.UnknownHostException;
import java.util.logging.Level;
import java.util.logging.Logger;
/*
* A client library for sending metrics to an OpenTSDB server.
*
* The API for sending data to OpenTSDB is documented at:
*
* http://opentsdb.net/docs/build/html/user_guide/writing.html
*
* put <metric> <timestamp> <value> <tagk1=tagv1[ tagk2=tagv2 ...tagkN=tagvN]>
*
*/
public class OpenTSDBClient {
private static final Logger LOG = Logger.getLogger(OpenTSDBClient.class.getName());
private final SocketAddress address;
public static final int CONNECT_TIMEOUT_MS = 1000;
public static final class MetricsBuffer {
private String prefix;
public MetricsBuffer(String prefix) {
this.prefix = prefix;
this.buffer = new StringBuilder();
}
/**
* Add a single metric to the buffer.
*
* @param name the name of the metric, like "foo.bar.sprockets".
* @param epochSecs the UNIX epoch time in seconds.
* @param value the value of the metric at this epoch time.
* @param tags a list of one or more tags, each of which must be formatted
* as "name=value".
*/
public void addMetric(String name, int epochSecs, float value, String... tags) {
addMetric(name, epochSecs, value, SPACE_JOINER.join(tags));
}
public void addMetric(String name, int epochSecs, Double value, String... tags) {
addMetric(name, epochSecs, value.floatValue(), tags);
}
public void addMetric(String name, int epochSecs, float value, String tags) {
buffer.append("put " + prefix).append(name).append(" ").append(epochSecs).append(" ")
.append(value).append(" ").append(tags).append("\n");
}
/**
* Reset the metrics buffer for reuse, this discards all previous data.
*/
public void reset() {
buffer.setLength(0);
}
@Override
public String toString() {
return buffer.toString();
}
private final StringBuilder buffer;
private static final Joiner SPACE_JOINER = Joiner.on(" ");
}
  /** Base type for failures while talking to OpenTSDB; always wraps the underlying cause. */
  public static class OpenTsdbClientException extends Exception {
    private static final long serialVersionUID = 1L;

    public OpenTsdbClientException(Throwable causedBy) {
      super(causedBy);
    }
  }

  /** Thrown when the TCP connection to OpenTSDB cannot be established. */
  public static final class ConnectionFailedException extends OpenTsdbClientException {
    private static final long serialVersionUID = 1L;

    public ConnectionFailedException(Throwable causedBy) {
      super(causedBy);
    }
  }

  /** Thrown when the connection succeeded but writing the payload failed. */
  public static final class SendFailedException extends OpenTsdbClientException {
    private static final long serialVersionUID = 1L;

    public SendFailedException(Throwable causedBy) {
      super(causedBy);
    }
  }
  /**
   * @param host OpenTSDB host name; resolved eagerly so a bad host fails here
   * @param port OpenTSDB telnet-style "put" port
   * @throws UnknownHostException if {@code host} cannot be resolved
   */
  public OpenTSDBClient(String host, int port) throws UnknownHostException {
    InetAddress address = InetAddress.getByName(host);
    this.address = new InetSocketAddress(address, port);
  }

  /**
   * Sends the buffered metrics over a fresh TCP connection (one connection per
   * call). Connect failures and write failures are distinguished by exception
   * type; a failure to close the socket is only logged.
   *
   * NOTE(review): {@code getBytes()} uses the platform default charset —
   * confirm the payload is ASCII-only or pin UTF-8 explicitly.
   *
   * @param buffer metrics to send, rendered by {@link MetricsBuffer}
   * @throws ConnectionFailedException if the connect fails or times out
   * @throws SendFailedException if writing the payload fails after connecting
   */
  public void sendMetrics(MetricsBuffer buffer) throws ConnectionFailedException,
                                                SendFailedException {
    Socket socket = null;
    try {
      try {
        socket = new Socket();
        socket.connect(address, CONNECT_TIMEOUT_MS);
      } catch (IOException ioex) {
        throw new ConnectionFailedException(ioex);
      }
      try {
        // There is no way to set a time out for blocking send calls. Thanks Java!
        socket.getOutputStream().write(buffer.toString().getBytes());
      } catch (IOException ioex) {
        throw new SendFailedException(ioex);
      }
    } finally {
      if (socket != null) {
        try {
          socket.close();
        } catch (IOException e) {
          LOG.log(Level.WARNING, "Failed to close socket to OpenTSDB", e);
        }
      }
    }
  }
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/config/SSLConfig.java | memq-commons/src/main/java/com/pinterest/memq/commons/config/SSLConfig.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.config;
import java.util.Collections;
import java.util.List;
/**
* listeners=PLAINTEXT://:9092,SSL://:9093
* security.inter.broker.protocol=PLAINTEXT ssl.client.auth=required
* ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
* ssl.endpoint.identification.algorithm=HTTPS ssl.key.password=pintastic
* ssl.keystore.location=/var/lib/normandie/fuse/jks/generic
* ssl.keystore.password=pintastic ssl.keystore.type=JKS
* ssl.secure.random.implementation=SHA1PRNG
* ssl.truststore.location=/var/lib/normandie/fuse/jkstrust/generic
* ssl.truststore.password=pintastic ssl.truststore.type=JKS
* authorizer.class.name=com.pinterest.commons.kafka.authorizers.PastisAuthorizer
* kafka.authorizer.pastis_policy=kafka
*/
/**
 * Mutable holder for TLS keystore/truststore settings used when configuring
 * secure listeners. Defaults to the TLSv1.2 protocol only; all other fields
 * start null and are populated via setters (e.g. by a config loader).
 */
public class SSLConfig {

  // Keystore (server identity) settings.
  private String keystorePath;
  private String keystoreType;
  private String keystorePassword;

  // Truststore (peer verification) settings.
  private String truststorePath;
  private String truststoreType;
  private String truststorePassword;

  // Enabled TLS protocol versions; defaults to TLSv1.2 only.
  private List<String> protocols = Collections.singletonList("TLSv1.2");

  public String getKeystorePath() {
    return keystorePath;
  }

  public void setKeystorePath(String keystorePath) {
    this.keystorePath = keystorePath;
  }

  public String getKeystoreType() {
    return keystoreType;
  }

  public void setKeystoreType(String keystoreType) {
    this.keystoreType = keystoreType;
  }

  public String getKeystorePassword() {
    return keystorePassword;
  }

  public void setKeystorePassword(String keystorePassword) {
    this.keystorePassword = keystorePassword;
  }

  public String getTruststorePath() {
    return truststorePath;
  }

  public void setTruststorePath(String truststorePath) {
    this.truststorePath = truststorePath;
  }

  public String getTruststoreType() {
    return truststoreType;
  }

  public void setTruststoreType(String truststoreType) {
    this.truststoreType = truststoreType;
  }

  public String getTruststorePassword() {
    return truststorePassword;
  }

  public void setTruststorePassword(String truststorePassword) {
    this.truststorePassword = truststorePassword;
  }

  public List<String> getProtocols() {
    return protocols;
  }

  public void setProtocols(List<String> protocols) {
    this.protocols = protocols;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TransportPacket.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TransportPacket.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
/**
 * Base class for packets that travel over the MemQ transport. Carries the
 * common envelope fields shared by all concrete request/response packets:
 * the protocol version, a client-assigned request id (presumably used to
 * correlate responses with requests — confirm against the transport layer),
 * and the request type discriminator.
 */
public abstract class TransportPacket implements Packet {

  // Wire-protocol version this packet was (or will be) serialized with.
  protected short protocolVersion;
  // Client-assigned id echoed back so responses can be matched to requests.
  protected long clientRequestId;
  // Discriminator identifying which concrete packet payload follows.
  protected RequestType requestType;

  /** No-arg constructor for deserialization; fields are set afterwards. */
  public TransportPacket() {
  }

  public TransportPacket(short protocolVersion, long clientRequestId,
                         RequestType requestType) {
    this.protocolVersion = protocolVersion;
    this.clientRequestId = clientRequestId;
    this.requestType = requestType;
  }

  public short getProtocolVersion() {
    return protocolVersion;
  }

  public void setProtocolVersion(short protocolVersion) {
    this.protocolVersion = protocolVersion;
  }

  public long getClientRequestId() {
    return clientRequestId;
  }

  public void setClientRequestId(long clientRequestId) {
    this.clientRequestId = clientRequestId;
  }

  public RequestType getRequestType() {
    return requestType;
  }

  public void setRequestType(RequestType requestType) {
    this.requestType = requestType;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TopicMetadataRequestPacket.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TopicMetadataRequestPacket.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import io.netty.buffer.ByteBuf;
/**
 * Request payload asking the broker for the metadata of a single topic.
 * Serialized as one two-byte-length-prefixed string.
 */
public class TopicMetadataRequestPacket implements Packet {

  // Topic whose metadata is being requested.
  private String topic;

  /** No-arg constructor for deserialization via {@link #readFields}. */
  public TopicMetadataRequestPacket() {
  }

  public TopicMetadataRequestPacket(String topic) {
    this.topic = topic;
  }

  @Override
  public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
    topic = ProtocolUtils.readStringWithTwoByteEncoding(buf);
  }

  @Override
  public void write(ByteBuf buf, short protocolVersion) {
    ProtocolUtils.writeStringWithTwoByteEncoding(buf, topic);
  }

  /** @return serialized size: the length-prefixed topic string. */
  @Override
  public int getSize(short protocolVersion) {
    return ProtocolUtils.getStringSerializedSizeWithTwoByteEncoding(topic);
  }

  public String getTopic() {
    return topic;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/WriteResponsePacket.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/WriteResponsePacket.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import io.netty.buffer.ByteBuf;
public class WriteResponsePacket implements Packet {
  // A write acknowledgement carries no payload beyond the common transport
  // envelope, so (de)serialization is a no-op and the body size is zero.
  @Override
  public void readFields(ByteBuf buf, short protocolVersion) {
  }

  @Override
  public void write(ByteBuf buf, short protocolVersion) {
  }

  /** @return 0 — this packet has no body. */
  @Override
  public int getSize(short protocolVersion) {
    return 0;
  }
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ClusterMetadataResponse.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ClusterMetadataResponse.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import io.netty.buffer.ByteBuf;
/**
 * Response carrying the metadata of every topic served by the cluster, as a
 * map of topic name to {@link TopicMetadata}.
 *
 * Wire layout: [short topicCount] then, per topic, a two-byte-length-prefixed
 * topic name followed by the serialized {@link TopicMetadata}.
 */
public class ClusterMetadataResponse implements Packet {

  // Topic name -> metadata; populated by the constructor or readFields.
  private Map<String, TopicMetadata> topicMetadataMap = new HashMap<>();

  /** No-arg constructor for deserialization via {@link #readFields}. */
  public ClusterMetadataResponse() {
  }

  public ClusterMetadataResponse(Map<String, TopicMetadata> topicMetadataMap) {
    this.topicMetadataMap = topicMetadataMap;
  }

  @Override
  public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
    short topicCount = buf.readShort();
    for (int i = 0; i < topicCount; i++) {
      String topicName = ProtocolUtils.readStringWithTwoByteEncoding(buf);
      TopicMetadata md = new TopicMetadata();
      md.readFields(buf, protocolVersion);
      topicMetadataMap.put(topicName, md);
    }
  }

  @Override
  public void write(ByteBuf buf, short protocolVersion) {
    // Mirror of readFields. Previously this was an empty no-op, which made
    // the packet impossible to serialize even though getSize() advertised a
    // non-zero payload.
    buf.writeShort(topicMetadataMap.size());
    for (Map.Entry<String, TopicMetadata> entry : topicMetadataMap.entrySet()) {
      ProtocolUtils.writeStringWithTwoByteEncoding(buf, entry.getKey());
      entry.getValue().write(buf, protocolVersion);
    }
  }

  @Override
  public int getSize(short protocolVersion) {
    // Two-byte topic count (previously omitted, making the size disagree with
    // the bytes readFields consumes) plus each length-prefixed name and its
    // metadata payload.
    return Short.BYTES + topicMetadataMap.entrySet().stream()
        .mapToInt(e -> ProtocolUtils.getStringSerializedSizeWithTwoByteEncoding(e.getKey())
            + e.getValue().getSize(protocolVersion))
        .sum();
  }

  /** @return live map of topic name to metadata (previously inaccessible to callers). */
  public Map<String, TopicMetadata> getTopicMetadataMap() {
    return topicMetadataMap;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ReadRequestPacket.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ReadRequestPacket.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.Arrays;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.pinterest.memq.commons.BatchHeader.IndexEntry;
import io.netty.buffer.ByteBuf;
public class ReadRequestPacket implements Packet {
  // Sentinel meaning "read the whole object, not a single indexed message".
  public static final int DISABLE_READ_AT_INDEX = -1;
  private static final Gson gson = new Gson();
  // Topic name and notification JSON, both held as UTF-8 bytes so they can be
  // written to the wire without re-encoding.
  protected byte[] topicName;
  protected byte[] notification;
  // for header requests only
  protected boolean readHeaderOnly;
  // Which (offset,size) slice to read; defaults to the disabled sentinel.
  protected IndexEntry readIndex = new IndexEntry(DISABLE_READ_AT_INDEX, DISABLE_READ_AT_INDEX);

  /** No-arg constructor for deserialization via {@link #readFields}. */
  public ReadRequestPacket() {
  }

  /** Full-object read request for {@code topicName} described by {@code notification}. */
  public ReadRequestPacket(String topicName, JsonObject notification) {
    this.topicName = topicName.getBytes(Charset.forName("utf-8"));
    this.notification = gson.toJson(notification).getBytes(Charset.forName("utf-8"));
  }

  /** As above, optionally requesting only the batch header. */
  public ReadRequestPacket(String topicName, JsonObject notification, boolean readHeaderOnly) {
    this(topicName, notification);
    this.readHeaderOnly = readHeaderOnly;
  }

  /** As above, additionally restricting the read to a single index entry. */
  public ReadRequestPacket(String topicName,
                           JsonObject notification,
                           boolean readHeaderOnly,
                           IndexEntry readIndex) {
    this(topicName, notification, readHeaderOnly);
    this.readIndex = readIndex;
  }
@Override
public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
short topicNameLength = buf.readShort();
topicName = new byte[topicNameLength];
buf.readBytes(topicName);
short notificationLength = buf.readShort();
notification = new byte[notificationLength];
buf.readBytes(notification);
readHeaderOnly = buf.readBoolean();
readIndex = new IndexEntry();
readIndex.readFields(buf, protocolVersion);
}
@Override
public void write(ByteBuf buf, short protocolVersion) {
buf.writeShort(topicName.length);
buf.writeBytes(topicName);
buf.writeShort(notification.length);
buf.writeBytes(notification);
buf.writeBoolean(readHeaderOnly);
readIndex.write(buf, protocolVersion);
}
@Override
public int getSize(short protocolVersion) {
return Short.BYTES + topicName.length + Short.BYTES + notification.length + 1
+ readIndex.getSize(protocolVersion);
}
public String getTopicName() {
return new String(topicName, Charset.forName("utf-8"));
}
public void setTopicName(String topicName) {
this.topicName = topicName.getBytes(Charset.forName("utf-8"));
}
public boolean isReadHeaderOnly() {
return readHeaderOnly;
}
public JsonObject getNotification() {
return gson.fromJson(new String(notification, Charset.forName("utf-8")), JsonObject.class);
}
public void setNotification(JsonObject notification) {
this.notification = notification.toString().getBytes(Charset.forName("utf-8"));
}
public IndexEntry getReadIndex() {
return readIndex;
}
@Override
public String toString() {
return "ReadRequestPacket [topicName=" + Arrays.toString(topicName) + ", notification="
+ Arrays.toString(notification) + ", readHeaderOnly=" + readHeaderOnly + ", readIndex="
+ readIndex + "]";
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/RequestPacket.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/RequestPacket.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import io.netty.buffer.ByteBuf;
/**
 * Envelope for an outbound request: a fixed header (total size, protocol version, client request
 * id, request type) followed by a type-specific {@link Packet} payload.
 */
public class RequestPacket extends TransportPacket {
  private Packet payload;
  // Optional pre-serialized buffer; when set, release() frees this instead of the payload.
  private ByteBuf preAllocOutBuf;

  public RequestPacket() {
  }

  public RequestPacket(short protocolVersion,
                       long clientRequestId,
                       RequestType requestType,
                       Packet payload) {
    super(protocolVersion, clientRequestId, requestType);
    this.payload = payload;
  }

  /** Size of the fixed header: 4-byte total-size prefix + version + request id + type byte. */
  public static int getHeaderSize() {
    return Integer.BYTES + Short.BYTES + Long.BYTES + Byte.BYTES;
  }

  @Override
  public void readFields(ByteBuf inBuffer, short pv) throws IOException {
    // Leading int is the total packet size; it is consumed and discarded here
    // (framing is assumed to have been handled upstream).
    inBuffer.readInt();
    this.protocolVersion = inBuffer.readShort();
    clientRequestId = inBuffer.readLong();
    requestType = RequestType.extractPacketType(inBuffer);
    // The request type knows how to construct its own payload implementation.
    payload = requestType.requestImplementationSupplier.get();
    payload.readFields(inBuffer, this.protocolVersion);
  }

  @Override
  public void write(ByteBuf outBuf, short protocolVersion) {
    writeHeader(outBuf, protocolVersion);
    payload.write(outBuf, protocolVersion);
  }

  public void setPreAllocOutBuf(ByteBuf preAllocOutBuf) {
    this.preAllocOutBuf = preAllocOutBuf;
  }

  public ByteBuf getPreAllocOutBuf() {
    return preAllocOutBuf;
  }

  @Override
  public int getSize(short protocolVersion) {
    // Note: excludes the 4-byte size prefix itself; writeHeader() writes this value
    // as the leading int, so bytes-on-wire = Integer.BYTES + getSize().
    return Short.BYTES + Long.BYTES + Byte.BYTES + payload.getSize(protocolVersion);
  }

  @Override
  public void writeHeader(ByteBuf headerBuf, short protocolVersion) {
    headerBuf.writeInt(getSize(protocolVersion));
    headerBuf.writeShort(protocolVersion);
    headerBuf.writeLong(clientRequestId);
    headerBuf.writeByte(requestType.ordinal());
  }

  public Packet getPayload() {
    return payload;
  }

  public void setPayload(Packet payload) {
    this.payload = payload;
  }

  /**
   * Bumps the refcount of the pre-serialized buffer so the packet can be re-sent.
   * NOTE(review): throws NPE if preAllocOutBuf was never set — confirm callers only
   * retry packets that were serialized through the pre-alloc path.
   */
  public RequestPacket retry() {
    preAllocOutBuf.retain();
    return this;
  }

  @Override
  public void release() throws IOException {
    // Either the pre-serialized buffer or the payload owns the underlying memory, never both.
    if (preAllocOutBuf != null) {
      preAllocOutBuf.release();
    } else {
      payload.release();
    }
    super.release();
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ProtocolUtils.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ProtocolUtils.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import io.netty.buffer.ByteBuf;
/**
 * Helpers for the two-byte length-prefixed string encoding used throughout the wire protocol.
 *
 * NOTE(review): these methods use the JVM platform-default charset (bare {@code String.getBytes()}
 * and {@code new String(byte[])}); on a non-UTF-8 platform the bytes on the wire would differ
 * from peers that assume UTF-8 — confirm whether an explicit charset should be pinned here.
 */
public class ProtocolUtils {
  /** Reads a short length prefix, then that many bytes, decoded with the default charset. */
  public static String readStringWithTwoByteEncoding(ByteBuf buf) {
    short length = buf.readShort();
    byte[] str = new byte[length];
    buf.readBytes(str);
    return new String(str);
  }

  /** Writes a short length prefix followed by the string bytes; null/empty writes length 0. */
  public static void writeStringWithTwoByteEncoding(ByteBuf buf, String str) {
    if (str == null || str.isEmpty()) {
      buf.writeShort(0);
    } else {
      byte[] bytes = str.getBytes();
      buf.writeShort(bytes.length);
      buf.writeBytes(bytes);
    }
  }

  /** Serialized size of a string under this encoding: 2-byte prefix + encoded byte count. */
  public static int getStringSerializedSizeWithTwoByteEncoding(String str) {
    return (str == null || str.isEmpty() ? 0 : (str.getBytes().length)) + Short.BYTES;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TopicMetadata.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TopicMetadata.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import java.util.HashSet;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import io.netty.buffer.ByteBuf;
/**
 * Topic metadata exchanged between brokers and clients: topic name, write/read broker sets, and
 * the storage handler name plus its configuration.
 *
 * Wire layout: [topicName][short writeBrokerCount][writeBrokers...][storageHandlerName]
 * [short propertyCount][key/value pairs...]; protocolVersion &gt;= 3 appends
 * [short readBrokerCount][readBrokers...].
 *
 * NOTE(review): readFields() adds into the pre-initialized broker sets without clearing them,
 * so deserializing into a reused instance accumulates brokers — confirm instances are
 * single-use.
 */
public class TopicMetadata implements Packet {
  private String topicName;
  private Set<Broker> writeBrokers = new HashSet<>();
  private Set<Broker> readBrokers = new HashSet<>();
  private String storageHandlerName;
  private Properties storageHandlerConfig = new Properties();

  public TopicMetadata() {
  }

  public TopicMetadata(String topicName,
                       String storageHandlerName,
                       Properties storageHandlerConfig) {
    this.topicName = topicName;
    this.storageHandlerName = storageHandlerName;
    this.storageHandlerConfig = storageHandlerConfig;
  }

  public TopicMetadata(TopicConfig config) {
    this.topicName = config.getTopic();
    this.storageHandlerName = config.getStorageHandlerName();
    this.storageHandlerConfig = config.getStorageHandlerConfig();
  }

  public TopicMetadata(String topicName,
                       Set<Broker> writeBrokers,
                       Set<Broker> readBrokers,
                       String storageHandlerName,
                       Properties storageHandlerConfig) {
    this.topicName = topicName;
    this.writeBrokers = writeBrokers;
    this.readBrokers = readBrokers;
    this.storageHandlerName = storageHandlerName;
    this.storageHandlerConfig = storageHandlerConfig;
  }

  public String getTopicName() {
    return topicName;
  }

  public void setTopicName(String topicName) {
    this.topicName = topicName;
  }

  @Override
  public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
    topicName = ProtocolUtils.readStringWithTwoByteEncoding(buf);
    short brokerCount = buf.readShort();
    for (int i = 0; i < brokerCount; i++) {
      Broker broker = new Broker();
      broker.readFields(buf, protocolVersion);
      writeBrokers.add(broker);
    }
    storageHandlerName = ProtocolUtils.readStringWithTwoByteEncoding(buf);
    short numProperties = buf.readShort();
    for (int i = 0; i < numProperties; i++) {
      // key then value, each two-byte length-prefixed
      storageHandlerConfig.setProperty(ProtocolUtils.readStringWithTwoByteEncoding(buf),
          ProtocolUtils.readStringWithTwoByteEncoding(buf));
    }
    if (protocolVersion >= 3) {
      // get read brokers
      brokerCount = buf.readShort();
      for (int i = 0; i < brokerCount; i++) {
        Broker broker = new Broker();
        broker.readFields(buf, protocolVersion);
        readBrokers.add(broker);
      }
    }
  }

  @Override
  public void write(ByteBuf buf, short protocolVersion) {
    ProtocolUtils.writeStringWithTwoByteEncoding(buf, topicName);
    buf.writeShort((short) writeBrokers.size());
    for (Broker broker : writeBrokers) {
      broker.write(buf, protocolVersion);
    }
    ProtocolUtils.writeStringWithTwoByteEncoding(buf, storageHandlerName);
    buf.writeShort((short) storageHandlerConfig.size());
    for (Entry<Object, Object> entry : storageHandlerConfig.entrySet()) {
      ProtocolUtils.writeStringWithTwoByteEncoding(buf, entry.getKey().toString());
      ProtocolUtils.writeStringWithTwoByteEncoding(buf, entry.getValue().toString());
    }
    if (protocolVersion >= 3) {
      // send read brokers
      buf.writeShort((short) readBrokers.size());
      for (Broker broker : readBrokers) {
        broker.write(buf, protocolVersion);
      }
    }
  }

  public Set<Broker> getWriteBrokers() {
    return writeBrokers;
  }

  public Set<Broker> getReadBrokers() {
    return readBrokers;
  }

  public String getStorageHandlerName() {
    return storageHandlerName;
  }

  public void setStorageHandlerName(String storageHandlerName) {
    this.storageHandlerName = storageHandlerName;
  }

  public Properties getStorageHandlerConfig() {
    return storageHandlerConfig;
  }

  @Override
  public int getSize(short protocolVersion) {
    // Mirrors write(): name + broker-count prefix + brokers + handler name + property-count
    // prefix + key/value pairs, plus the read-broker section from version 3 on.
    int s = ProtocolUtils.getStringSerializedSizeWithTwoByteEncoding(topicName) + Short.BYTES
        + writeBrokers.stream().mapToInt(b -> b.getSize(protocolVersion)).sum()
        + ProtocolUtils.getStringSerializedSizeWithTwoByteEncoding(storageHandlerName) + Short.BYTES
        + storageHandlerConfig.entrySet().stream().mapToInt(
            e -> ProtocolUtils.getStringSerializedSizeWithTwoByteEncoding(e.getKey().toString())
                + ProtocolUtils.getStringSerializedSizeWithTwoByteEncoding(e.getValue().toString()))
            .sum();
    if (protocolVersion >= 3) {
      s += Short.BYTES + readBrokers.stream().mapToInt(b -> b.getSize(protocolVersion)).sum();
    }
    return s;
  }

  @Override
  public String toString() {
    return "TopicMetadata [topicName=" + topicName + ", writeBrokers=" + writeBrokers
        + ", readBrokers=" + readBrokers + ", storageType=" + storageHandlerName
        + ", storageProperties=" + storageHandlerConfig + "]";
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/BatchData.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/BatchData.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import io.netty.buffer.ByteBuf;
/**
 * Carrier for a batch payload: a 4-byte length prefix followed (optionally) by the raw bytes.
 * The payload may instead be shipped via an opaque sendFileRef (zero-copy path), in which case
 * only the length is written here.
 */
public class BatchData implements Packet {
  private ByteBuf dataAsBuf;
  private int length;
  // Opaque handle used by the sendFile transmission path; not serialized by this class.
  private Object sendFileRef;

  public BatchData() {
  }

  public BatchData(int length, ByteBuf dataBuf) {
    this.length = length;
    this.dataAsBuf = dataBuf;
  }

  public BatchData(int length, Object sendFileRef) {
    this.length = length;
    this.sendFileRef = sendFileRef;
  }

  @Override
  public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
    length = buf.readInt();
    if (length > 0) {
      // Retain the source buffer because readSlice() shares its memory; the consumer of
      // dataAsBuf becomes responsible for releasing it.
      buf.retain();
      dataAsBuf = buf.readSlice(length);
    }
  }

  @Override
  public void write(ByteBuf buf, short protocolVersion) {
    buf.writeInt(length);
    if (dataAsBuf != null) {
      // this is an optimization hack to enable sendFile calls
      // if dataAsBuf is not null then send it using that else just write the length
      // and use other workarounds for writing the payload
      dataAsBuf.resetReaderIndex();
      buf.writeBytes(dataAsBuf);
    }
  }

  @Override
  public int getSize(short protocolVersion) {
    return Integer.BYTES + (length > 0 ? length : 0);
  }

  public int getLength() {
    return length;
  }

  public void setLength(int length) {
    this.length = length;
  }

  public ByteBuf getDataAsBuf() {
    return dataAsBuf;
  }

  public void setDataAsBuf(ByteBuf dataAsBuf) {
    this.dataAsBuf = dataAsBuf;
  }

  public Object getSendFileRef() {
    return sendFileRef;
  }

  public void setSendFileRef(Object sendFileRef) {
    this.sendFileRef = sendFileRef;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TopicConfig.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TopicConfig.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.util.Objects;
import java.util.Properties;
import com.google.gson.annotations.SerializedName;
/**
 * Per-topic configuration: buffering/batching parameters, the storage handler and its
 * properties, plus scheduler-side traffic bookkeeping.
 *
 * Fix: batch-size conversions previously used int arithmetic ({@code batchSizeMB * 1024 * 1024}),
 * overflowing for batchSizeMB &gt;= 2048; all conversions now use long arithmetic.
 */
public class TopicConfig implements Comparable<TopicConfig> {
  private long topicOrder;
  /**
   * Slot size
   */
  private int bufferSize;
  private int ringBufferSize;
  private int batchMilliSeconds = 60_000;
  // Legacy MB-granularity knob; -1 means "unset, use batchSizeBytes instead".
  @Deprecated
  private int batchSizeMB = -1;
  private long batchSizeBytes = 100 * 1024 * 1024;
  private int outputParallelism = 2;
  private int maxDispatchCount = 100;
  private String topic;
  private int tickFrequencyMillis = 1000;
  private boolean enableBucketing2Processor = true;
  private boolean enableServerHeaderValidation = false;
  private int clusteringMultiplier = 3;
  @SerializedName(value = "storageHandlerName", alternate = "outputHandler")
  private String storageHandlerName;
  @SerializedName(value = "storageHandlerConfig", alternate = "outputHandlerConfig")
  private Properties storageHandlerConfig = new Properties();
  // volatile: updated by traffic monitors while read by the scheduler thread
  private volatile double inputTrafficMB = 0.0;

  public TopicConfig() {
  }

  /** Copy constructor. Note: storageHandlerConfig is shared by reference, not deep-copied. */
  public TopicConfig(TopicConfig config) {
    this.topicOrder = config.topicOrder;
    this.topic = config.topic;
    this.storageHandlerName = config.storageHandlerName;
    this.bufferSize = config.bufferSize;
    this.outputParallelism = config.outputParallelism;
    this.maxDispatchCount = config.maxDispatchCount;
    this.tickFrequencyMillis = config.tickFrequencyMillis;
    this.batchMilliSeconds = config.batchMilliSeconds;
    this.storageHandlerConfig = config.storageHandlerConfig;
    this.ringBufferSize = config.ringBufferSize;
    this.batchSizeMB = config.batchSizeMB;
    this.batchSizeBytes = config.batchSizeBytes;
    this.enableBucketing2Processor = config.enableBucketing2Processor;
    this.enableServerHeaderValidation = config.enableServerHeaderValidation;
    this.clusteringMultiplier = config.clusteringMultiplier;
  }

  public TopicConfig(int topicOrder,
                     int bufferSize,
                     int ringBufferSize,
                     String topic,
                     int batchSizeMB,
                     double inputTrafficMB,
                     int clusteringMultiplier) {
    super();
    this.topicOrder = topicOrder;
    this.bufferSize = bufferSize;
    this.ringBufferSize = ringBufferSize;
    this.topic = topic;
    this.inputTrafficMB = inputTrafficMB;
    this.batchSizeMB = batchSizeMB;
    this.clusteringMultiplier = clusteringMultiplier;
  }

  public TopicConfig(String topic, String storageHandler) {
    this.topic = topic;
    this.storageHandlerName = storageHandler;
  }

  /** Orders configs by their topicOrder sequence number. */
  @Override
  public int compareTo(TopicConfig o) {
    return Long.compare(topicOrder, o.getTopicOrder());
  }

  public long getTopicOrder() {
    return topicOrder;
  }

  public void setTopicOrder(long topicOrder) {
    this.topicOrder = topicOrder;
  }

  /**
   * @return the bufferSize
   */
  public int getBufferSize() {
    return bufferSize;
  }

  /**
   * @param bufferSize the bufferSize to set
   */
  public void setBufferSize(int bufferSize) {
    this.bufferSize = bufferSize;
  }

  /**
   * @return the ringBufferSize
   */
  public int getRingBufferSize() {
    return ringBufferSize;
  }

  /**
   * @param ringBufferSize the ringBufferSize to set
   */
  public void setRingBufferSize(int ringBufferSize) {
    this.ringBufferSize = ringBufferSize;
  }

  /**
   * @return the batchMilliSeconds
   */
  public int getBatchMilliSeconds() {
    return batchMilliSeconds;
  }

  /**
   * @param batchMilliSeconds the batchMilliSeconds to set
   */
  public void setBatchMilliSeconds(int batchMilliSeconds) {
    this.batchMilliSeconds = batchMilliSeconds;
  }

  /**
   * @param batchSizeMB the batchSizeMB to set (stored as bytes; long math avoids int overflow)
   */
  public void setBatchSizeMB(int batchSizeMB) {
    this.batchSizeBytes = batchSizeMB * 1024L * 1024L;
  }

  public void setBatchSizeKB(int batchSizeKB) {
    this.batchSizeBytes = batchSizeKB * 1024L;
  }

  public void setBatchSizeBytes(long batchSizeBytes) {
    this.batchSizeBytes = batchSizeBytes;
  }

  /** Batch size in bytes; the deprecated MB field takes precedence when explicitly set. */
  public long getBatchSizeBytes() {
    if (batchSizeMB != -1) return batchSizeMB * 1024L * 1024L;
    return batchSizeBytes;
  }

  public int getMaxDispatchCount() {
    return maxDispatchCount;
  }

  public void setMaxDispatchCount(int maxDispatchCount) {
    this.maxDispatchCount = maxDispatchCount;
  }

  /**
   * @return the topic
   */
  public String getTopic() {
    return topic;
  }

  /**
   * @param topic the topic to set
   */
  public void setTopic(String topic) {
    this.topic = topic;
  }

  public Properties getStorageHandlerConfig() {
    return storageHandlerConfig;
  }

  public void setStorageHandlerConfig(Properties storageHandlerConfig) {
    this.storageHandlerConfig = storageHandlerConfig;
  }

  public String getStorageHandlerName() {
    return storageHandlerName;
  }

  public void setStorageHandlerName(String storageHandlerName) {
    this.storageHandlerName = storageHandlerName;
  }

  /**
   * @return the outputParallelism
   */
  public int getOutputParallelism() {
    return outputParallelism;
  }

  /**
   * @param outputParallelism the outputParallelism to set
   */
  public void setOutputParallelism(int outputParallelism) {
    this.outputParallelism = outputParallelism;
  }

  /**
   * @return the inputTrafficMB
   */
  public double getInputTrafficMB() {
    return inputTrafficMB;
  }

  /**
   * @param inputTrafficMB the inputTrafficMB to set
   */
  public void setInputTrafficMB(double inputTrafficMB) {
    this.inputTrafficMB = inputTrafficMB;
  }

  public int getTickFrequencyMillis() {
    return tickFrequencyMillis;
  }

  public void setTickFrequencyMillis(int tickFrequencyMillis) {
    this.tickFrequencyMillis = tickFrequencyMillis;
  }

  public boolean isEnableBucketing2Processor() {
    return enableBucketing2Processor;
  }

  public void setEnableBucketing2Processor(boolean enableBucketing2Processor) {
    this.enableBucketing2Processor = enableBucketing2Processor;
  }

  public boolean isEnableServerHeaderValidation() {
    return enableServerHeaderValidation;
  }

  public void setEnableServerHeaderValidation(boolean enableServerHeaderValidation) {
    this.enableServerHeaderValidation = enableServerHeaderValidation;
  }

  public int getClusteringMultiplier() {
    return clusteringMultiplier;
  }

  public void setClusteringMultiplier(int clusteringMultiplier) {
    this.clusteringMultiplier = clusteringMultiplier;
  }

  /**
   * Identity is the topic name only (matches hashCode). NOTE(review): throws NPE when either
   * side's topic is null — confirm topic is always set before configs enter collections.
   */
  @Override
  public boolean equals(Object obj) {
    if (obj instanceof TopicConfig) {
      return ((TopicConfig) obj).getTopic().equals(topic);
    }
    return false;
  }

  /** Field-by-field change detection (unlike equals(), which only compares topic names). */
  public boolean isDifferentFrom(Object o) {
    if (this == o) {
      return false;
    }
    if (o == null || getClass() != o.getClass()) {
      return true;
    }
    TopicConfig that = (TopicConfig) o;
    if (bufferSize != that.bufferSize) {
      return true;
    }
    if (ringBufferSize != that.ringBufferSize) {
      return true;
    }
    if (batchMilliSeconds != that.batchMilliSeconds) {
      return true;
    }
    if (batchSizeMB != that.batchSizeMB) {
      return true;
    }
    if (batchSizeBytes != that.batchSizeBytes) {
      return true;
    }
    if (outputParallelism != that.outputParallelism) {
      return true;
    }
    if (maxDispatchCount != that.maxDispatchCount) {
      return true;
    }
    if (tickFrequencyMillis != that.tickFrequencyMillis) {
      return true;
    }
    if (enableBucketing2Processor != that.enableBucketing2Processor) {
      return true;
    }
    if (enableServerHeaderValidation != that.enableServerHeaderValidation) {
      return true;
    }
    if (Double.compare(that.inputTrafficMB, inputTrafficMB) != 0) {
      return true;
    }
    if (!Objects.equals(storageHandlerConfig, that.storageHandlerConfig)) {
      return true;
    }
    return !Objects.equals(storageHandlerName, that.storageHandlerName);
  }

  @Override
  public int hashCode() {
    return topic.hashCode();
  }

  /*
   * (non-Javadoc)
   *
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {
    return "TopicConfig [bufferSize=" + bufferSize + ", ringBufferSize=" + ringBufferSize
        + ", batchMilliSeconds=" + batchMilliSeconds + ", batchByteSize=" + batchSizeBytes
        + ", outputParallelism=" + outputParallelism + ", topic=" + topic + ", storageHandlerConfig="
        + storageHandlerConfig + ", storageHandler=" + storageHandlerName + "]";
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ResponseCodes.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ResponseCodes.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
/**
 * HTTP-style status codes used in MemQ protocol responses. Values mirror the familiar
 * HTTP semantics but are carried as shorts in the binary protocol.
 */
public class ResponseCodes {
  public static final short OK = 200;
  public static final short UNAUTHORIZED = 401;
  public static final short SERVICE_UNAVAILABLE = 503;
  public static final short NOT_FOUND = 404;
  public static final short INTERNAL_SERVER_ERROR = 500;
  public static final short BAD_REQUEST = 400;
  // Upstream/proxied request failed (cf. HTTP 502 Bad Gateway).
  public static final short REQUEST_FAILED = 502;
  // Client should retry against another broker.
  public static final short REDIRECT = 302;
  // Request succeeded but there is no payload to return.
  public static final short NO_DATA = 204;

  // Constants holder; not instantiable.
  private ResponseCodes() {
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ReadResponsePacket.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ReadResponsePacket.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import io.netty.buffer.ByteBuf;
/**
 * Response payload for a read request; a thin wrapper that delegates all serialization
 * to the contained {@link BatchData}.
 */
public class ReadResponsePacket implements Packet {
  private BatchData batchData;

  public ReadResponsePacket() {
  }

  public ReadResponsePacket(BatchData batchData) {
    this.batchData = batchData;
  }

  @Override
  public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
    BatchData decoded = new BatchData();
    decoded.readFields(buf, protocolVersion);
    batchData = decoded;
  }

  @Override
  public void write(ByteBuf buf, short protocolVersion) {
    batchData.write(buf, protocolVersion);
  }

  @Override
  public int getSize(short protocolVersion) {
    return batchData.getSize(protocolVersion);
  }

  public BatchData getBatchData() {
    return batchData;
  }

  @Override
  public String toString() {
    return "ReadResponsePacket [batchData=" + batchData + "]";
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/MemqConstants.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/MemqConstants.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
/** Shared protocol-level constants. */
public class MemqConstants {
  // CRC32 value used when no checksum has been computed for a payload.
  public static final int DEFAULT_CRC32_CHECKSUM = 0;

  // Constants holder; not instantiable (consistent with ResponseCodes).
  private MemqConstants() {
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/Packet.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/Packet.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import io.netty.buffer.ByteBuf;
/**
 * A unit of the MemQ wire protocol that knows how to serialize itself to and from a ByteBuf
 * for a given protocol version.
 */
public interface Packet {

  /** Populates this packet's fields from the buffer. */
  void readFields(ByteBuf buf, short protocolVersion) throws IOException;

  /** Serializes this packet's fields into the buffer. */
  void write(ByteBuf buf, short protocolVersion);

  /** Serialized size in bytes for the given protocol version. */
  int getSize(short protocolVersion);

  /** Optional hook for writing a framing header; no-op by default. */
  default void writeHeader(ByteBuf headerBuf, short protocolVersion) {
  }

  /** Releases any retained resources; no-op by default. */
  default void release() throws IOException {
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/Broker.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/Broker.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import io.netty.buffer.ByteBuf;
public class Broker implements Packet, Comparable<Broker> {
public static enum BrokerType {
WRITE,
READ,
READ_WRITE
}
private String brokerIP;
private short brokerPort;
private String instanceType;
private String locality;
private Set<TopicAssignment> assignedTopics = new HashSet<>();
private int totalNetworkCapacity;
private BrokerType brokerType = BrokerType.WRITE;
public Broker() {
}
public Broker(Broker broker) {
if (broker == null) {
throw new IllegalArgumentException("broker cannot be null");
}
if (broker.brokerIP == null) {
throw new IllegalArgumentException("brokerIP cannot be null");
}
this.brokerIP = broker.brokerIP;
this.brokerPort = broker.brokerPort;
this.instanceType = broker.instanceType;
this.locality = broker.locality;
this.assignedTopics = broker.assignedTopics;
this.totalNetworkCapacity = broker.totalNetworkCapacity;
this.brokerType = broker.brokerType;
}
public Broker(String brokerIP,
short brokerPort,
String instanceType,
String locality,
BrokerType brokerType,
Set<TopicAssignment> assignedTopics) {
if (brokerIP == null) {
throw new IllegalArgumentException("brokerIP cannot be null");
}
this.brokerIP = brokerIP;
this.brokerPort = brokerPort;
this.instanceType = instanceType;
this.locality = locality;
this.assignedTopics = assignedTopics;
this.brokerType = brokerType;
}
private int getUsedNetworkCapacity() {
return assignedTopics.stream().mapToInt(t -> (int) t.getInputTrafficMB()).sum();
}
public int getAvailableCapacity() {
return totalNetworkCapacity - getUsedNetworkCapacity();
}
public int getTotalNetworkCapacity() {
return totalNetworkCapacity;
}
public void setTotalNetworkCapacity(int totalNetworkCapacity) {
this.totalNetworkCapacity = totalNetworkCapacity;
}
public String getBrokerIP() {
return brokerIP;
}
public void setBrokerIP(String brokerIP) {
this.brokerIP = brokerIP;
}
public short getBrokerPort() {
return brokerPort;
}
public void setBrokerPort(short brokerPort) {
this.brokerPort = brokerPort;
}
public String getInstanceType() {
return instanceType;
}
public void setInstanceType(String instanceType) {
this.instanceType = instanceType;
}
public String getLocality() {
return locality;
}
public void setLocality(String locality) {
this.locality = locality;
}
public Set<TopicAssignment> getAssignedTopics() {
return assignedTopics;
}
public void setAssignedTopics(Set<TopicAssignment> assignedTopics) {
this.assignedTopics = assignedTopics;
}
@Override
public int hashCode() {
return brokerIP.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj instanceof Broker) {
return ((Broker) obj).getBrokerIP().equals(brokerIP) && ((Broker) obj).getBrokerPort() == brokerPort;
}
return false;
}
@Override
public int compareTo(Broker o) {
return brokerIP.compareTo(o.getBrokerIP());
}
public BrokerType getBrokerType() {
return brokerType;
}
public void setBrokerType(BrokerType brokerType) {
this.brokerType = brokerType;
}
/**
 * Human-readable dump of all broker fields, including the derived
 * available-capacity figure. Output text is unchanged.
 */
@Override
public String toString() {
  StringBuilder sb = new StringBuilder("Broker [");
  sb.append("brokerIP=").append(brokerIP);
  sb.append(", brokerPort=").append(brokerPort);
  sb.append(", instanceType=").append(instanceType);
  sb.append(", locality=").append(locality);
  sb.append(", totalNetworkCapacity=").append(totalNetworkCapacity);
  sb.append(", availableNetworkCapacity=").append(getAvailableCapacity());
  sb.append(", brokerType=").append(brokerType);
  sb.append(']');
  return sb.toString();
}
/**
 * Deserializes this broker from the wire. Field order (locality, brokerIP,
 * brokerPort) must stay in sync with {@code write} and {@code getSize}.
 * Only these three fields travel on the wire; capacity and topic
 * assignments are broker-local state.
 */
@Override
public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
  locality = ProtocolUtils.readStringWithTwoByteEncoding(buf);
  brokerIP = ProtocolUtils.readStringWithTwoByteEncoding(buf);
  brokerPort = buf.readShort();
}

/**
 * Serializes this broker to the wire; see {@code readFields} for the
 * field-order contract.
 */
@Override
public void write(ByteBuf buf, short protocolVersion) {
  ProtocolUtils.writeStringWithTwoByteEncoding(buf, locality);
  ProtocolUtils.writeStringWithTwoByteEncoding(buf, brokerIP);
  buf.writeShort(brokerPort);
}

/**
 * @return serialized size in bytes of exactly the fields emitted by
 *         {@code write}
 */
@Override
public int getSize(short protocolVersion) {
  return ProtocolUtils.getStringSerializedSizeWithTwoByteEncoding(locality)
      + ProtocolUtils.getStringSerializedSizeWithTwoByteEncoding(brokerIP) + Short.BYTES;
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/RequestType.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/RequestType.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import java.util.function.Supplier;
import io.netty.buffer.ByteBuf;
/**
* MemQ request type information. <br>
* <br>
*
* NOTE: ENUM ORIDINAL VALUEs used for protocol encoding in protocol. DO NOT
* CHANGE THE ORDERING OF THE ENUM ENTRIES AS THAT WILL CAUSE PROTOCOL
* CORRUPTION.
*/
public enum RequestType {
  // WARNING: reordering of request type will break protocol, ordinal values
  // are used for request type
  WRITE(() -> new WriteRequestPacket(), () -> new WriteResponsePacket()),
  TOPIC_METADATA(() -> new TopicMetadataRequestPacket(),
      () -> new TopicMetadataResponsePacket()),
  READ(() -> new ReadRequestPacket(), () -> new ReadResponsePacket());

  // Factories for the request/response payload implementation of this type
  public Supplier<Packet> requestImplementationSupplier;
  public Supplier<Packet> responseImplementationSupplier;

  private RequestType(Supplier<Packet> requestImplementationSupplier,
                      Supplier<Packet> responseImplementationSupplier) {
    this.requestImplementationSupplier = requestImplementationSupplier;
    this.responseImplementationSupplier = responseImplementationSupplier;
  }

  public static final short PROTOCOL_VERSION = 3;

  /**
   * Reads one byte from the buffer and maps it to a RequestType via its
   * ordinal value.
   *
   * @param inBuffer buffer positioned at the request-type byte
   * @return the decoded request type
   * @throws IOException if the byte is outside the valid ordinal range
   */
  public static RequestType extractPacketType(ByteBuf inBuffer) throws IOException {
    int requestTypeCode = (int) inBuffer.readByte();
    RequestType[] values = RequestType.values();
    // Fixed: readByte() is signed, so a corrupted byte >= 0x80 arrives as a
    // negative code; reject it here instead of throwing
    // ArrayIndexOutOfBoundsException on the array lookup below.
    if (requestTypeCode < 0 || requestTypeCode > values.length - 1) {
      throw new IOException("Invalid request type:" + requestTypeCode);
    }
    return values[requestTypeCode];
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ResponsePacket.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/ResponsePacket.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import io.netty.buffer.ByteBuf;
/**
 * Envelope for every response sent from broker to client. Carries the frame
 * header (length prefix, protocol version, client request id, request type),
 * a response code, an optional error message and — only when the response
 * code is OK — the request-type-specific response payload.
 */
public class ResponsePacket extends TransportPacket {

  protected short responseCode;
  protected String errorMessage;
  // Response payload; populated only when responseCode == ResponseCodes.OK
  protected Packet packet;

  public ResponsePacket() {
  }

  /** Error response: code plus human-readable message, no payload. */
  public ResponsePacket(short protocolVersion,
                        long clientRequestId,
                        RequestType requestType,
                        short responseCode,
                        String errorMessage) {
    super(protocolVersion, clientRequestId, requestType);
    this.responseCode = responseCode;
    this.errorMessage = errorMessage;
  }

  /** Success response: code plus payload, no error message. */
  public ResponsePacket(short protocolVersion,
                        long clientRequestId,
                        RequestType requestType,
                        short responseCode,
                        Packet payload) {
    super(protocolVersion, clientRequestId, requestType);
    this.responseCode = responseCode;
    this.packet = payload;
  }

  /** Fully specified response: code, message and payload. */
  public ResponsePacket(short protocolVersion,
                        long clientRequestId,
                        RequestType requestType,
                        short responseCode,
                        String errorMessage,
                        Packet payload) {
    super(protocolVersion, clientRequestId, requestType);
    this.responseCode = responseCode;
    this.errorMessage = errorMessage;
    this.packet = payload;
  }

  /**
   * Decodes a response frame. Field order must mirror {@code write}. The
   * payload is decoded only for OK responses, matching the write side.
   */
  @Override
  public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
    // frame length prefix; value is discarded, framing handled upstream
    buf.readInt();
    this.protocolVersion = buf.readShort();
    clientRequestId = buf.readLong();
    requestType = RequestType.extractPacketType(buf);
    responseCode = buf.readShort();
    errorMessage = ProtocolUtils.readStringWithTwoByteEncoding(buf);
    if (responseCode == ResponseCodes.OK) {
      packet = requestType.responseImplementationSupplier.get();
      packet.readFields(buf, protocolVersion);
    }
  }

  /**
   * Encodes this response. The leading int is the frame length EXCLUDING the
   * length field itself, hence the {@code - Integer.BYTES}.
   */
  @Override
  public void write(ByteBuf buf, short protocolVersion) {
    buf.writeInt(getSize(protocolVersion) - Integer.BYTES);
    buf.writeShort(protocolVersion);
    buf.writeLong(clientRequestId);
    // request type travels as its ordinal — see RequestType ordering warning
    buf.writeByte(requestType.ordinal());
    buf.writeShort(responseCode);
    ProtocolUtils.writeStringWithTwoByteEncoding(buf, errorMessage);
    if (packet != null) {
      packet.write(buf, protocolVersion);
    }
  }

  /** Total frame size in bytes, INCLUDING the 4-byte length prefix. */
  @Override
  public int getSize(short protocolVersion) {
    return Integer.BYTES + Short.BYTES + Long.BYTES + Byte.BYTES + Short.BYTES
        + ProtocolUtils.getStringSerializedSizeWithTwoByteEncoding(errorMessage)
        + (packet != null ? packet.getSize(protocolVersion) : 0);
  }

  /** Releases the payload (if any) before releasing base resources. */
  @Override
  public void release() throws IOException {
    if (packet != null) {
      packet.release();
    }
    super.release();
  }

  public Packet getPacket() {
    return packet;
  }

  public short getResponseCode() {
    return responseCode;
  }

  public void setResponseCode(short responseCode) {
    this.responseCode = responseCode;
  }

  public String getErrorMessage() {
    return errorMessage;
  }

  public void setErrorMessage(String errorMessage) {
    this.errorMessage = errorMessage;
  }

  @Override
  public String toString() {
    return "ResponsePacket{" +
        "responseCode=" + responseCode +
        ", errorMessage='" + errorMessage + '\'' +
        ", packet=" + packet +
        ", protocolVersion=" + protocolVersion +
        ", clientRequestId=" + clientRequestId +
        ", requestType=" + requestType +
        '}';
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TopicAssignment.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TopicAssignment.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
/**
 * A topic configuration enriched with assignment metadata: when the topic
 * was assigned to a broker and the input traffic (MB) the assignment
 * accounts for. The traffic accessors are overridden so the assignment's
 * own figure shadows the one inherited from TopicConfig.
 */
public class TopicAssignment extends TopicConfig {

  // -1 marks "never assigned"
  private long assignmentTimestamp = -1;
  private double assignmentInputTrafficMB;

  public TopicAssignment() {
    super();
  }

  /** Assignment stamped with the current wall-clock time. */
  public TopicAssignment(TopicConfig topicConfig, double assignmentInputTrafficMB) {
    super(topicConfig);
    this.assignmentInputTrafficMB = assignmentInputTrafficMB;
    this.assignmentTimestamp = System.currentTimeMillis();
  }

  /** Assignment with an explicitly supplied timestamp. */
  public TopicAssignment(TopicConfig topicConfig, double assignmentInputTrafficMB, long assignmentTimestamp) {
    super(topicConfig);
    this.assignmentInputTrafficMB = assignmentInputTrafficMB;
    this.assignmentTimestamp = assignmentTimestamp;
  }

  public long getAssignmentTimestamp() {
    return this.assignmentTimestamp;
  }

  public void setAssignmentTimestamp(long assignmentTimestamp) {
    this.assignmentTimestamp = assignmentTimestamp;
  }

  /** Returns the assignment-specific traffic, not the inherited config value. */
  @Override
  public double getInputTrafficMB() {
    return this.assignmentInputTrafficMB;
  }

  @Override
  public void setInputTrafficMB(double inputTrafficMB) {
    this.assignmentInputTrafficMB = inputTrafficMB;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TopicMetadataResponsePacket.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/TopicMetadataResponsePacket.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import io.netty.buffer.ByteBuf;
/**
 * Response payload for a TOPIC_METADATA request; a thin wrapper that
 * delegates all (de)serialization to the contained {@code TopicMetadata}.
 */
public class TopicMetadataResponsePacket implements Packet {

  private TopicMetadata metadata;

  public TopicMetadataResponsePacket() {
  }

  public TopicMetadataResponsePacket(TopicMetadata metadata) {
    this.metadata = metadata;
  }

  @Override
  public void readFields(ByteBuf buf, short protocolVersion) throws IOException {
    TopicMetadata decoded = new TopicMetadata();
    decoded.readFields(buf, protocolVersion);
    this.metadata = decoded;
  }

  @Override
  public void write(ByteBuf buf, short protocolVersion) {
    this.metadata.write(buf, protocolVersion);
  }

  @Override
  public int getSize(short protocolVersion) {
    return this.metadata.getSize(protocolVersion);
  }

  public TopicMetadata getMetadata() {
    return this.metadata;
  }

  @Override
  public String toString() {
    return "TopicMetadataResponsePacket [metadata=" + metadata + "]";
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/commons/protocol/WriteRequestPacket.java | memq-commons/src/main/java/com/pinterest/memq/commons/protocol/WriteRequestPacket.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.protocol;
import java.io.IOException;
import java.nio.charset.Charset;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
/**
 * Wire format of a produce (write) request: ack-disable flag, topic name,
 * optional payload checksum and an opaque payload buffer.
 *
 * NOTE(review): the protocol-version guards around the checksum field are
 * inconsistent — getSize()/getHeaderSize() count it for protocolVersion >= 1,
 * readFields() reads it only for >= 2, and writeHeader() writes it
 * unconditionally. Confirm the intended minimum version before changing any
 * of them; behavior is preserved here as-is.
 */
public class WriteRequestPacket implements Packet {

  protected boolean disableAcks;
  // UTF-8 encoded topic name bytes
  protected byte[] topicName;
  protected boolean checksumExists = false;
  protected int checksum;
  protected int dataLength;
  // Payload; on the read path this aliases the inbound buffer (no copy)
  protected ByteBuf data;

  public WriteRequestPacket() {
  }

  /**
   * @param disableAcks    true if the client does not expect an ack
   * @param topicName      UTF-8 encoded topic name bytes
   * @param checksumExists whether the checksum field is populated
   * @param checksum       payload checksum (meaningful only if checksumExists)
   * @param payload        payload buffer; its readableBytes() becomes dataLength
   */
  public WriteRequestPacket(boolean disableAcks,
                            byte[] topicName,
                            boolean checksumExists,
                            int checksum,
                            ByteBuf payload) {
    this.disableAcks = disableAcks;
    this.topicName = topicName;
    this.checksumExists = checksumExists;
    this.checksum = checksum;
    setData(payload); // also captures payload.readableBytes() as dataLength
  }

  /** Total serialized size (header plus payload) in bytes. */
  @Override
  public int getSize(short protocolVersion) {
    return Byte.BYTES + Short.BYTES + topicName.length + Integer.BYTES + dataLength
        + (protocolVersion >= 1 ? Integer.BYTES : 0);
  }

  /**
   * Size of the header alone (everything except the payload bytes).
   *
   * NOTE(review): uses String.length() (char count), which undercounts for
   * non-ASCII topic names whose UTF-8 encoding is longer — confirm topic
   * names are restricted to ASCII.
   */
  public static int getHeaderSize(short protocolVersion, String topicName) {
    return Byte.BYTES + Short.BYTES + topicName.length() + Integer.BYTES
        + (protocolVersion >= 1 ? Integer.BYTES : 0);
  }

  /** Decodes a write request; field order mirrors {@code write}. */
  @Override
  public void readFields(ByteBuf inBuffer, short protocolVersion) {
    disableAcks = inBuffer.readBoolean();
    short topicNameLength = inBuffer.readShort();
    topicName = new byte[topicNameLength];
    inBuffer.readBytes(topicName);
    if (protocolVersion >= 2) {
      checksum = inBuffer.readInt();
      checksumExists = true;
    }
    dataLength = inBuffer.readInt();
    // alias the rest of the inbound buffer as the payload instead of copying
    data = inBuffer;
    if (data.readableBytes() != dataLength) {
      // Fixed diagnostic spacing ("1024vs512" -> "1024 vs 512"); the mismatch
      // is still only logged, not rejected, to preserve existing behavior.
      System.out.println("Invalid length: " + data.readableBytes() + " vs " + dataLength);
    }
  }

  @Override
  public void write(ByteBuf buf, short protocolVersion) {
    writeHeader(buf, protocolVersion);
    buf.writeBytes(data); // payload
  }

  @Override
  public void writeHeader(ByteBuf headerBuf, short protocolVersion) {
    headerBuf.writeBoolean(disableAcks); // if ack all is enabled or not
    headerBuf.writeShort((short) topicName.length); // topic name length
    headerBuf.writeBytes(topicName); // topic name
    headerBuf.writeInt(checksum); // written unconditionally — see class note
    headerBuf.writeInt(dataLength); // payload length
  }

  public boolean isDisableAcks() {
    return disableAcks;
  }

  public void setDisableAcks(boolean disableAcks) {
    this.disableAcks = disableAcks;
  }

  /** @return topic name decoded from its stored UTF-8 bytes */
  public String getTopicName() {
    return new String(topicName, Charset.forName("utf-8"));
  }

  public void setTopicName(String topicName) {
    this.topicName = topicName.getBytes(Charset.forName("utf-8"));
  }

  public int getDataLength() {
    return dataLength;
  }

  public ByteBuf getData() {
    return data;
  }

  /** Stores the buffer and records its readable byte count as dataLength. */
  public void setData(ByteBuf data) {
    this.dataLength = data.readableBytes();
    this.data = data;
  }

  /** Copies the byte array into a fresh pooled buffer. */
  public void setData(byte[] data) {
    this.dataLength = data.length;
    this.data = PooledByteBufAllocator.DEFAULT.buffer(data.length);
    this.data.writeBytes(data);
  }

  public int getChecksum() {
    return checksum;
  }

  public void setChecksum(int checksum) {
    this.checksum = checksum;
  }

  public boolean isChecksumExists() {
    return checksumExists;
  }

  public void setChecksumExists(boolean checksumExists) {
    this.checksumExists = checksumExists;
  }

  @Override
  public void release() throws IOException {
    // delegates to the interface default; kept for explicitness
    Packet.super.release();
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/core/commons/Message.java | memq-commons/src/main/java/com/pinterest/memq/core/commons/Message.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.commons;
import com.google.common.annotations.VisibleForTesting;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelHandlerContext;
import io.netty.util.Recycler;
/**
 * Pooled message holder passed through the broker pipeline. Instances are
 * obtained from a Netty {@code Recycler} via {@link #newInstance} and must
 * be returned with {@link #recycle()} when done. The test-only constructor
 * allocates its own buffer and bypasses the recycler — {@code recycle()}
 * must NOT be called on such instances (their handle is null).
 */
public class Message {

  // Payload buffer for this message
  private ByteBuf buf;
  private volatile long clientRequestId = -1L;
  private volatile long serverRequestId = -1L;
  // Channel context used to respond to the originating client
  private volatile ChannelHandlerContext pipelineReference;
  private volatile short clientProtocolVersion;
  // Recycler handle; null only for instances from the test constructor
  private final Recycler.Handle<Message> handle;

  private static final Recycler<Message> RECYCLER = new Recycler<Message>() {
    @Override
    protected Message newObject(Handle<Message> handle) {
      return new Message(handle);
    }
  };

  private Message(Recycler.Handle<Message> handle) {
    this.handle = handle;
  }

  /**
   * Obtains a (possibly recycled) instance from the pool and populates all
   * fields from the arguments.
   */
  public static Message newInstance(ByteBuf buf,
                                    long clientRequestId,
                                    long serverRequestId,
                                    ChannelHandlerContext pipelineReference,
                                    short clientProtocolVersion){
    Message message = RECYCLER.get();
    message.buf = buf;
    message.clientRequestId = clientRequestId;
    message.serverRequestId = serverRequestId;
    message.pipelineReference = pipelineReference;
    message.clientProtocolVersion = clientProtocolVersion;
    return message;
  }

  /**
   * Clears all state and returns this instance to the recycler; do not use
   * the object afterwards.
   *
   * NOTE(review): request ids are reset to 0 here although the field
   * initializers use -1 — confirm no consumer distinguishes the two values.
   */
  public void recycle() {
    buf = null;
    clientRequestId = 0;
    serverRequestId = 0;
    pipelineReference = null;
    clientProtocolVersion = 0;
    handle.recycle(this);
  }

  // public Message(ByteBuf buf,
  // long clientRequestId,
  // long serverRequestId,
  // ChannelHandlerContext pipelineReference,
  // short clientProtocolVersion) {
  // this.buf = buf;
  // this.clientRequestId = clientRequestId;
  // this.serverRequestId = serverRequestId;
  // this.pipelineReference = pipelineReference;
  // this.clientProtocolVersion = clientProtocolVersion;
  // }
  //
  /**
   * Test-only: allocates a standalone (direct or heap) pooled buffer and
   * bypasses the recycler entirely.
   */
  @VisibleForTesting
  public Message(int capacity, boolean isDirect) {
    handle = null;
    if (!isDirect) {
      buf = PooledByteBufAllocator.DEFAULT.buffer(capacity, capacity);
    } else {
      buf = PooledByteBufAllocator.DEFAULT.directBuffer(capacity, capacity);
    }
  }

  /**
   * @return the buf
   */
  public ByteBuf getBuf() {
    return buf;
  }

  public int readIndex() {
    return buf.readerIndex();
  }

  public int writeIndex() {
    return buf.writerIndex();
  }

  /**
   * Write the byte array to ByteBuffer and flip it for reading
   *
   * To be called only once
   *
   * @param buf
   */
  public void put(byte[] buf) {
    this.buf.writeBytes(buf);
  }

  /**
   * @return the clientRequestId
   */
  public long getClientRequestId() {
    return clientRequestId;
  }

  /**
   * @param clientRequestId the clientRequestId to set
   */
  public void setClientRequestId(long clientRequestId) {
    this.clientRequestId = clientRequestId;
  }

  /**
   * @return the serverRequestId
   */
  public long getServerRequestId() {
    return serverRequestId;
  }

  /**
   * @param serverRequestId the serverRequestId to set
   */
  public void setServerRequestId(long serverRequestId) {
    this.serverRequestId = serverRequestId;
  }

  @Override
  public String toString() {
    return serverRequestId + "_" + clientRequestId;
  }

  /**
   * Clears the buffer and channel state but keeps the instance out of the
   * recycler (unlike {@link #recycle()}); request ids are left untouched.
   */
  public void reset() {
    buf.clear();
    pipelineReference = null;
    clientProtocolVersion = 0;
  }

  public ChannelHandlerContext getPipelineReference() {
    return pipelineReference;
  }

  public void setPipelineReference(ChannelHandlerContext pipelineReference) {
    this.pipelineReference = pipelineReference;
  }

  public short getClientProtocolVersion() {
    return clientProtocolVersion;
  }

  public void setClientProtocolVersion(short clientProtocolVersion) {
    this.clientProtocolVersion = clientProtocolVersion;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/core/commons/MemqProcessingThreadFactory.java | memq-commons/src/main/java/com/pinterest/memq/core/commons/MemqProcessingThreadFactory.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.commons;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * ThreadFactory producing daemon threads named
 * {@code <threadBaseName><N>}, where N starts at 1 and increments once per
 * created thread.
 */
public final class MemqProcessingThreadFactory implements ThreadFactory {

  private final String threadBaseName;
  private AtomicInteger counter = new AtomicInteger();

  public MemqProcessingThreadFactory(String threadBaseName) {
    this.threadBaseName = threadBaseName;
  }

  @Override
  public Thread newThread(Runnable r) {
    Thread worker = new Thread(r);
    worker.setDaemon(true);
    worker.setName(threadBaseName + counter.incrementAndGet());
    return worker;
  }
}
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/core/commons/MessageBufferInputStream.java | memq-commons/src/main/java/com/pinterest/memq/core/commons/MessageBufferInputStream.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.commons;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import com.codahale.metrics.Counter;
import io.netty.buffer.ByteBuf;
/**
* Converts a list of Messages into an Inputstream so bytes of the messages can
* we read in sequence i.e. concatenated together.
*/
public class MessageBufferInputStream extends InputStream {
private List<Message> list;
private int bufferIdx;
private int markBufferIdx = -1;
private int markBufferPosition = -1;
private int readLimit = -1;
private int bytesRead;
private ByteBuf buf;
private int readLimitCheck = 0;
public MessageBufferInputStream(List<Message> list, Counter streamResetCounter) {
this.list = list;
this.buf = list.get(bufferIdx).getBuf();
this.buf.resetReaderIndex();
}
@Override
public int read() throws IOException {
if (bufferIdx > list.size() - 1) {
return -1;
}
if (buf == null) {
buf = list.get(bufferIdx).getBuf();
buf.resetReaderIndex();
}
if (buf.readableBytes() > 0) {
bytesRead++;
// map from negative to positive (0-255) range before returning
return buf.readByte() & 0xff;
} else {
buf = null;
bufferIdx++;
return read();
}
}
@Override
public synchronized void mark(int readlimit) {
throw new UnsupportedOperationException();
}
@Override
public synchronized void reset() throws IOException {
throw new UnsupportedOperationException();
}
public void resetToBeginnging() {
this.bufferIdx = 0;
this.bytesRead = 0;
this.readLimitCheck = 0;
this.readLimit = -1;
this.markBufferIdx = -1;
this.markBufferPosition = -1;
this.buf = list.get(bufferIdx).getBuf();
this.buf.resetReaderIndex();
}
@Override
public boolean markSupported() {
return true;
}
public int getBytesRead() {
return bytesRead;
}
@Override
public void close() throws IOException {
}
public void printDiagnosticsInfo() {
System.err.println(toString());
}
/*
* (non-Javadoc)
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "MessageBufferInputStream [list=" + list + ", bufferIdx=" + bufferIdx
+ ", markBufferIdx=" + markBufferIdx + ", markBufferPosition=" + markBufferPosition
+ ", readLimit=" + readLimit + ", bytesRead=" + bytesRead + ", buf=" + buf
+ ", readLimitCheck=" + readLimitCheck + "]";
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/core/utils/MemqUtils.java | memq-commons/src/main/java/com/pinterest/memq/core/utils/MemqUtils.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.utils;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.Inet4Address;
import java.net.UnknownHostException;
import java.nio.charset.Charset;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.UnrecoverableKeyException;
import java.security.cert.CertificateException;
import java.util.Base64;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.TrustManagerFactory;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;
import com.pinterest.memq.commons.config.SSLConfig;
/**
 * Miscellaneous helpers: host IP discovery, message-id hashing, ETag
 * re-encoding, and SSL key/trust manager construction from an SSLConfig.
 */
public class MemqUtils {

  // NOTE this is only designed for IPv4 only, if we ever switch to IPv6 this
  // section will need to be updated along with the tests and encoding
  public static byte[] HOST_IPV4_ADDRESS = new byte[4];
  public static Charset CHARSET = Charset.forName("utf-8");

  static {
    try {
      HOST_IPV4_ADDRESS = Inet4Address.getLocalHost().getAddress();
    } catch (Exception e) {
      // TODO: Add a metric to track failures here
      // on failure the field keeps its all-zero default (0.0.0.0)
      e.printStackTrace();
    }
  }

  /**
   * Renders a 4-byte address as dotted-quad text; returns the literal
   * string "null" when the input array is null, and null when the array
   * cannot be resolved to an address.
   */
  public static String getStringFromByteAddress(byte[] address) {
    if (address != null) {
      try {
        return Inet4Address.getByAddress(address).getHostAddress();
      } catch (UnknownHostException e) {
        e.printStackTrace();
        return null;
      }
    } else {
      return "null";
    }
  }

  private MemqUtils() {
  }

  /**
   * Folds byteArray into the running XOR hash. When messageIdHash is null
   * the input array itself is returned as the initial hash; otherwise
   * messageIdHash is mutated IN PLACE and returned.
   *
   * Assumes byteArray.length <= messageIdHash.length — TODO confirm callers
   * always pass equal-length digests.
   */
  public static byte[] calculateMessageIdHash(byte[] messageIdHash, byte[] byteArray) {
    if (messageIdHash == null) {
      return byteArray;
    } else {
      for (int i = 0; i < byteArray.length; i++) {
        messageIdHash[i] = (byte) (messageIdHash[i] ^ byteArray[i]);
      }
      return messageIdHash;
    }
  }

  /**
   * Re-encodes a hex ETag (e.g. an S3 MD5 ETag) as Base64.
   *
   * @throws DecoderException if eTag is not valid hex
   */
  public static String etagToBase64(String eTag) throws DecoderException {
    byte[] decodedHex = Hex.decodeHex(eTag.toCharArray());
    String encodedHexB64 = Base64.getEncoder().encodeToString(decodedHex);
    return encodedHexB64;
  }

  /**
   * Builds a TrustManagerFactory from the truststore referenced by the
   * SSLConfig.
   *
   * @throws Exception if the truststore file does not exist or cannot be
   *                   loaded
   */
  public static TrustManagerFactory extractTMPFromSSLConfig(SSLConfig sslConfig) throws KeyStoreException,
                                                                                 Exception,
                                                                                 IOException,
                                                                                 NoSuchAlgorithmException,
                                                                                 CertificateException,
                                                                                 FileNotFoundException {
    KeyStore ts = KeyStore.getInstance(sslConfig.getTruststoreType());
    File trustStoreFile = new File(sslConfig.getTruststorePath());
    if (!trustStoreFile.exists()) {
      throw new Exception("Missing truststore");
    }
    ts.load(new FileInputStream(trustStoreFile), sslConfig.getTruststorePassword().toCharArray());
    TrustManagerFactory tmf = TrustManagerFactory
        .getInstance(TrustManagerFactory.getDefaultAlgorithm());
    tmf.init(ts);
    return tmf;
  }

  /**
   * Builds a KeyManagerFactory from the keystore referenced by the
   * SSLConfig.
   *
   * @throws Exception if the keystore file does not exist or cannot be
   *                   loaded
   */
  public static KeyManagerFactory extractKMFFromSSLConfig(SSLConfig sslConfig) throws KeyStoreException,
                                                                               Exception,
                                                                               IOException,
                                                                               NoSuchAlgorithmException,
                                                                               CertificateException,
                                                                               FileNotFoundException,
                                                                               UnrecoverableKeyException {
    KeyStore ks = KeyStore.getInstance(sslConfig.getKeystoreType());
    File keystoreFile = new File(sslConfig.getKeystorePath());
    if (!keystoreFile.exists()) {
      throw new Exception("Missing keystore");
    }
    ks.load(new FileInputStream(keystoreFile), sslConfig.getKeystorePassword().toCharArray());
    KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
    kmf.init(ks, sslConfig.getKeystorePassword().toCharArray());
    return kmf;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/core/utils/DaemonThreadFactory.java | memq-commons/src/main/java/com/pinterest/memq/core/utils/DaemonThreadFactory.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.utils;
import java.util.concurrent.ThreadFactory;
/**
 * ThreadFactory that marks every created thread as a daemon so it cannot
 * keep the JVM alive. The factory is stateless and therefore safe to share
 * via {@link #INSTANCE}.
 */
public class DaemonThreadFactory implements ThreadFactory {

  // Fixed: made final so the shared singleton cannot be reassigned.
  public static final DaemonThreadFactory INSTANCE = new DaemonThreadFactory();

  @Override
  public Thread newThread(Runnable r) {
    Thread th = new Thread(r);
    th.setDaemon(true);
    return th;
  }
}
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-commons/src/main/java/com/pinterest/memq/core/utils/MiscUtils.java | memq-commons/src/main/java/com/pinterest/memq/core/utils/MiscUtils.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.core.utils;
import com.pinterest.memq.commons.CloseableIterator;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.SlidingTimeWindowArrayReservoir;
import com.codahale.metrics.Timer;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
public class MiscUtils {
public static String getHostname() {
String hostName;
try {
hostName = InetAddress.getLocalHost().getHostName();
int firstDotPos = hostName.indexOf('.');
if (firstDotPos > 0) {
hostName = hostName.substring(0, firstDotPos);
}
} catch (Exception e) {
// fall back to env var.
hostName = System.getenv("HOSTNAME");
}
return hostName;
}
public static String getIP() throws UnknownHostException {
return InetAddress.getLocalHost().getHostAddress();
}
public static Timer oneMinuteWindowTimer(MetricRegistry registry, String name) {
return registry.timer(name, () -> new com.codahale.metrics.Timer(
new SlidingTimeWindowArrayReservoir(1, TimeUnit.MINUTES)));
}
/**
* Copied from:
* https://github.com/srotya/sidewinder/blob/development/core/src/main/java/com/srotya/sidewinder/core/utils/MiscUtils.java
*
* @param file
* @return
* @throws IOException
*/
public static boolean delete(File file) throws IOException {
if (file.isDirectory()) {
// directory is empty, then delete it
if (file.list().length == 0) {
return file.delete();
} else {
// list all the directory contents
String files[] = file.list();
boolean result = false;
for (String temp : files) {
// construct the file structure
File fileDelete = new File(file, temp);
// recursive delete
result = delete(fileDelete);
if (!result) {
return false;
}
}
// check the directory again, if empty then delete it
if (file.list().length == 0) {
file.delete();
}
return result;
}
} else {
// if file, then delete it
return file.delete();
}
}
public static void printAllLines(InputStream stream) throws IOException {
BufferedReader br = new BufferedReader(new InputStreamReader(stream));
String line = null;
while ((line = br.readLine()) != null) {
System.out.println(line);
}
}
@SuppressWarnings("unchecked")
public static <T> CloseableIterator<T> emptyCloseableIterator() {
return (CloseableIterator<T>) EmptyCloseableIterator.EMPTY_CLOSEABLE_ITERATOR;
}
private static class EmptyCloseableIterator<T> implements CloseableIterator<T> {
static EmptyCloseableIterator<Object> EMPTY_CLOSEABLE_ITERATOR = new EmptyCloseableIterator<>();
@Override
public void close() throws IOException {}
// Copied from Collections.emptyIterator()
public boolean hasNext() { return false; }
public T next() { throw new NoSuchElementException(); }
public void remove() { throw new IllegalStateException(); }
@Override
public void forEachRemaining(Consumer<? super T> action) {
Objects.requireNonNull(action);
}
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/commons/storage/TestStorageHandler.java | memq-client/src/test/java/com/pinterest/memq/commons/storage/TestStorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.List;
import java.util.Map.Entry;
import java.util.UUID;
import org.apache.commons.compress.utils.IOUtils;
import org.junit.Test;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.commons.MessageBufferInputStream;
import io.netty.buffer.ByteBufInputStream;
/**
 * Tests for {@code StorageHandler} batch-header serialization: header sizing,
 * header+payload concatenation, and index-entry offsets.
 */
public class TestStorageHandler {

  /**
   * The serialized batch header must be exactly 4 bytes (header length) +
   * 4 bytes (message count) + 12 bytes per message index entry.
   */
  @Test
  public void testBatchHeadersAsInputStream() throws IOException {
    String uuid = UUID.randomUUID().toString();
    Message m1 = new Message(1024, false);
    m1.put(uuid.getBytes("utf-8"));
    Message m2 = new Message(512, false);
    m2.put("abcdefgh".getBytes("utf-8"));
    Message m3 = new Message(128, false);
    m3.put("123456789".getBytes("utf-8"));
    List<Message> messages = Arrays.asList(m1, m2, m3);
    InputStream stream = new ByteBufInputStream(StorageHandler.getBatchHeadersAsByteArray(messages));
    // Compare read() to -1 as an int. The previous code cast the result to
    // byte first, which would have reported EOF early on any legitimate
    // 0xFF byte in the stream.
    int byteCount = 0;
    while (stream.read() != -1) {
      byteCount++;
    }
    assertEquals(4 + 4 + 12 * 3, byteCount);
  }

  /**
   * Concatenates the batch header with the raw message payloads and verifies
   * that an index entry points at the correct payload bytes.
   */
  @Test
  public void testBatchWritesWithStreamConcat() throws Exception {
    String uuid = UUID.randomUUID().toString();
    Message m1 = new Message(1024, false);
    m1.put(uuid.getBytes("utf-8"));
    Message m2 = new Message(512, false);
    m2.put("abcdefgh".getBytes("utf-8"));
    Message m3 = new Message(128, false);
    m3.put("123456789".getBytes("utf-8"));
    List<Message> messages = Arrays.asList(m1, m2, m3);
    InputStream stream = new ByteBufInputStream(StorageHandler.getBatchHeadersAsByteArray(messages));
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    long length = IOUtils.copy(stream, os);
    assertEquals(4 + 4 + 12 * 3, length);
    // Append the payload bytes after the header, as a reader would see them.
    IOUtils.copy(new MessageBufferInputStream(messages, null), os);
    os.close();
    byte[] output = os.toByteArray();
    assertEquals((4 + 4 + 12 * 3) + (uuid.length() + 8 + 9), output.length);
    // Index 2 (zero-based) is the THIRD message, "123456789"; the old local
    // name "entryForSecondMessage" was misleading.
    Entry<Integer, Integer> entryForThirdMessage = getIndexEntriesFor(2, output);
    ByteBuffer buf = ByteBuffer.wrap(output);
    buf.position(entryForThirdMessage.getKey());
    byte[] ary = new byte[9];
    buf.get(ary);
    assertArrayEquals("123456789".getBytes("utf-8"), ary);
  }

  /**
   * Reverse-engineers the (offset, size) index entry of the n-th (zero-based)
   * message from a serialized header+payload buffer.
   *
   * @return the entry for message n, or null if n is negative
   */
  private Entry<Integer, Integer> getIndexEntriesFor(int n, byte[] output) throws IOException {
    // try-with-resources: the old code leaked the DataInputStream.
    try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(output))) {
      dis.readInt(); // header length
      dis.readInt(); // message count
      for (int idx = 0; idx <= n; idx++) {
        dis.readInt(); // message index number
        int offset = dis.readInt();
        int size = dis.readInt();
        if (idx == n) {
          return new AbstractMap.SimpleEntry<>(offset, size);
        }
      }
      return null;
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/commons/storage/s3express/TestS3ExpressHelper.java | memq-client/src/test/java/com/pinterest/memq/commons/storage/s3express/TestS3ExpressHelper.java | package com.pinterest.memq.commons.storage.s3express;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
 * Unit tests for {@code S3ExpressHelper} bucket-name validation, region
 * extraction, and endpoint URL generation. S3 Express bucket names carry an
 * availability-zone suffix, e.g. {@code "testbucket--use1-az5--x-s3"}.
 */
public class TestS3ExpressHelper {

  /** A well-formed S3 Express bucket name reused across the tests. */
  private static final String AZ_BUCKET = "testbucket--use1-az5--x-s3";

  @Test
  public void testGenerateBucketUrl() throws Exception {
    assertEquals(
        "https://testbucket--use1-az5--x-s3.s3express-use1-az5.us-east-1.amazonaws.com/",
        S3ExpressHelper.generateBucketUrl(AZ_BUCKET));
  }

  @Test
  public void testValidateS3ExpressBucketName() throws Exception {
    // A well-formed name must pass validation without throwing.
    S3ExpressHelper.validateS3ExpressBucketName(AZ_BUCKET);
  }

  @Test (expected = S3ExpressHelper.S3ExpressParsingException.class)
  public void testValidateS3ExpressBucketNameInvalid() throws Exception {
    // Plain S3-style names lack the AZ suffix and must be rejected.
    S3ExpressHelper.validateS3ExpressBucketName("test-bucket");
  }

  @Test (expected = S3ExpressHelper.S3ExpressParsingException.class)
  public void getRegionFromBucketInvalid() throws Exception {
    // An unrecognized region code must raise a parsing exception.
    S3ExpressHelper.getRegionFromBucket("testbucket--unknownRegion-az5--x-s3");
  }

  @Test
  public void testGetRegionFromBucket() throws Exception {
    // "use1" expands to the canonical region name.
    assertEquals("us-east-1", S3ExpressHelper.getRegionFromBucket(AZ_BUCKET));
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/commons/storage/s3express/keygenerator/TestDateHourKeyGenerator.java | memq-client/src/test/java/com/pinterest/memq/commons/storage/s3express/keygenerator/TestDateHourKeyGenerator.java | package com.pinterest.memq.commons.storage.s3express.keygenerator;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/** Sanity check for {@code DateHourKeyGenerator}'s date-hour stamp format. */
public class TestDateHourKeyGenerator {

  @Test
  public void testGetCurrentDateHr() {
    String stamp = DateHourKeyGenerator.getCurrentDateHr();
    // Six digits followed by "-" and a two-digit hour, e.g. "240131-07".
    assertTrue(stamp.matches("\\d{2}\\d{2}\\d{2}-\\d{2}"));
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/commons/storage/s3/TestCustomS3Async2OutputHandler.java | memq-client/src/test/java/com/pinterest/memq/commons/storage/s3/TestCustomS3Async2OutputHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
import org.junit.Test;
import com.codahale.metrics.MetricRegistry;
// Exercises CustomS3Async2StorageHandler.anyUploadResultOrTimeout(), which is
// expected to complete with the first finished upload, or fail with a
// TimeoutException once the supplied deadline elapses.
// NOTE(review): this test is wall-clock sensitive (sleeps vs. a 1000ms
// deadline) and may flake on a heavily loaded machine.
public class TestCustomS3Async2OutputHandler {
@Test
public void testAnyUploadResultOrTimeout() throws Exception {
CustomS3Async2StorageHandler handler = new CustomS3Async2StorageHandler();
Properties props = new Properties();
props.setProperty("bucket", "test");
props.setProperty(KafkaNotificationSink.NOTIFICATION_SERVERSET, "src/test/resources/test.serverset");
props.setProperty(KafkaNotificationSink.NOTIFICATION_TOPIC, "test");
System.setProperty("aws.region", "us-east-1");
handler.initWriter(props, "test", new MetricRegistry());
// Round 1: five fake uploads completing after 200ms, 400ms, ..., 1000ms.
List<CompletableFuture<CustomS3Async2StorageHandler.UploadResult>> tasks = new ArrayList<>();
for(int i = 1; i <= 5; i++) {
final int j = i;
tasks.add(CompletableFuture.supplyAsync(() -> {
try {
Thread.sleep(200 * j);
} catch (Exception e) {
// interruption is irrelevant to this fake upload; fall through
}
return new CustomS3Async2StorageHandler.UploadResult("task-" + j, 200, null, 0, j);
}));
}
try {
// With a 1000ms budget the fastest task (task-1, ~200ms) should win.
CustomS3Async2StorageHandler.UploadResult r = handler.anyUploadResultOrTimeout(tasks, Duration.ofMillis(1000)).get();
assertEquals(r.getKey(), "task-1");
} catch (Exception e) {
fail("Should not fail: " + e);
}
tasks.clear();
// Round 2: every task sleeps at least 1200ms, so none can finish inside the
// 1000ms deadline and the combined future must time out.
for(int i = 1; i <= 5; i++) {
final int j = i;
tasks.add(CompletableFuture.supplyAsync(() -> {
try {
Thread.sleep(200 * j + 1000);
} catch (Exception e) {
}
return new CustomS3Async2StorageHandler.UploadResult("task-" + j, 200, null, 0, j);
}));
}
try {
CustomS3Async2StorageHandler.UploadResult r = handler.anyUploadResultOrTimeout(tasks, Duration.ofMillis(1000)).get();
fail("Should timeout");
} catch (ExecutionException ee) {
System.out.println(ee);
// get() wraps the TimeoutException in an ExecutionException.
assertTrue(ee.getCause() instanceof TimeoutException);
} catch (Exception e) {
fail("Should throw timeout exception");
}
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/commons/storage/fs/TestFileSystemStorageHandler.java | memq-client/src/test/java/com/pinterest/memq/commons/storage/fs/TestFileSystemStorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.fs;
import static org.junit.Assert.assertEquals;
import java.io.DataInputStream;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ThreadLocalRandom;
import io.netty.util.ResourceLeakDetector;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import com.codahale.metrics.MetricRegistry;
import com.google.common.collect.ImmutableList;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqLogMessageIterator;
import com.pinterest.memq.client.commons.TestUtils;
import com.pinterest.memq.client.commons.serde.ByteArrayDeserializer;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.protocol.BatchData;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.utils.MiscUtils;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.PooledByteBufAllocator;
// Integration-style tests for FileSystemStorageHandler: writes message batches
// to a temporary directory and reads one back through the notification +
// MemqLogMessageIterator path. Netty's paranoid leak detector is enabled to
// surface any un-released ByteBufs.
public class TestFileSystemStorageHandler {
@BeforeClass
public static void setup() {
// Flag every leaked buffer, not just a sampled subset.
ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID);
}
private MetricRegistry registry = new MetricRegistry();
// Temp dirs live under target/ so a clean build wipes them.
@Rule
public TemporaryFolder folder = new TemporaryFolder(new File("target"));
// Smoke test: writer initializes and closes against a fresh directory.
@Test
public void testInit() throws Exception {
FileSystemStorageHandler handler = new FileSystemStorageHandler();
Properties outputHandlerConfig = new Properties();
outputHandlerConfig.setProperty(FileSystemStorageHandler.STORAGE_DIRS,
folder.newFolder().getAbsolutePath());
System.out.println(folder.newFolder().getAbsolutePath());
String topic = "testTopic";
handler.initWriter(outputHandlerConfig, topic, registry);
handler.closeWriter();
}
// Writes 1000 batches and prints rough throughput numbers; primarily a
// does-not-crash / leak check rather than an assertion-based test.
@Test
public void testSpeculativeWrite() throws Exception {
for (int k = 0; k < 1; k++) {
StorageHandler handler = new FileSystemStorageHandler();
Properties outputHandlerConfig = new Properties();
outputHandlerConfig.setProperty(FileSystemStorageHandler.STORAGE_DIRS,
folder.newFolder().getAbsolutePath() + "/" + k);
String topic = "testTopic";
handler.initWriter(outputHandlerConfig, topic, registry);
long time = System.currentTimeMillis();
long bytes = 0;
List<Message> messageList = generateSampleMessages(false);
for (int i = 0; i < 1000; i++) {
// Vary the request id so each write looks like a distinct client request.
messageList.get(0).setClientRequestId(ThreadLocalRandom.current().nextLong());
int sizeInBytes = batchSizeInBytes(messageList);
handler.writeOutput(sizeInBytes, 0, messageList);
bytes += sizeInBytes;
}
// Release the pooled buffers so the paranoid leak detector stays quiet.
messageList.forEach(m -> m.getBuf().release());
time = (System.currentTimeMillis() - time);
System.out.println(time + "ms " + (bytes / 1024 / 1024 / 1024) + "GB "
+ (bytes * 1000 / 1024 / 1024 / (time)) + "MB/s");
handler.closeWriter();
}
}
// Full round trip: write one 100-message batch, then fetch it back via a
// synthetic notification and iterate all MemqLogMessages out of it.
@Test
public void testfetchBatchStreamForNotificationBuf() throws Exception {
StorageHandler handler = new FileSystemStorageHandler();
Properties outputHandlerConfig = new Properties();
String absolutePath = folder.newFolder().getAbsolutePath();
outputHandlerConfig.setProperty(FileSystemStorageHandler.STORAGE_DIRS, absolutePath);
String topic = "testTopic1";
handler.initWriter(outputHandlerConfig, topic, registry);
byte[] msg = TestUtils.createMessage("hello world", (base, k) -> base.getBytes(), 100, true,
Compression.NONE, null, false);
ByteBuf buffer = PooledByteBufAllocator.DEFAULT.buffer();
buffer.writeBytes(msg);
List<Message> messages = ImmutableList.of(Message.newInstance(buffer, 1, 1, null, (short) 3));
handler.writeOutput(msg.length, 0, messages);
Properties properties = new Properties();
handler.initReader(properties, registry);
// Build the notification the reader would normally receive; the path
// mirrors the writer's layout: <dir>/<topic>/<hostname>/<id>_<id>_<attempt>.
JsonObject notification = new JsonObject();
notification.addProperty(FileSystemStorageHandler.TOPIC, topic);
String path = absolutePath + "/" + topic + "/" + MiscUtils.getHostname() + "/1_1_0";
notification.addProperty(FileSystemStorageHandler.PATH, path);
BatchData buf = handler.fetchBatchStreamForNotificationBuf(notification);
DataInputStream stream = new DataInputStream(new ByteBufInputStream(buf.getDataAsBuf(), true));
// Internal fields the iterator expects to find on every notification.
JsonObject obj = new JsonObject();
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_TOPIC, topic);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE, new File(path).length());
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_ID, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_READ_TIMESTAMP,
System.currentTimeMillis());
MemqLogMessageIterator<byte[], byte[]> iterator = new MemqLogMessageIterator<>("test", "test",
stream, obj, new ByteArrayDeserializer(), new ByteArrayDeserializer(), registry, false,
null);
// All 100 messages written above must come back out.
int c = 0;
while (iterator.hasNext()) {
iterator.next();
c++;
}
assertEquals(100, c);
iterator.close();
}
// Sum of the written bytes (writerIndex) across all buffers in the batch.
public static int batchSizeInBytes(List<Message> batch) {
return batch.stream().mapToInt(b -> b.getBuf().writerIndex()).sum();
}
// Shared 100KB payload used to fill sample messages.
public static final byte[] buf = new byte[1024 * 100];
// Builds 10 messages (or 1-100 when random=true), each holding one copy of
// the 100KB payload in a 1MB pooled buffer.
private List<Message> generateSampleMessages(boolean random) {
List<Message> messages = new ArrayList<>();
for (int i = 0; i < 1 + (random ? ThreadLocalRandom.current().nextInt(100) : 9); i++) {
Message m = new Message(1024 * 1024, true);
m.setClientRequestId(ThreadLocalRandom.current().nextLong());
m.put(buf);
messages.add(m);
}
return messages;
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/commons/ZstdTest.java | memq-client/src/test/java/com/pinterest/memq/client/commons/ZstdTest.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import static org.junit.Assert.assertTrue;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Random;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.output.TeeOutputStream;
import com.github.luben.zstd.ZstdInputStream;
import com.github.luben.zstd.ZstdOutputStream;
// Used to verify zstd version compatibility. The @Test annotations are
// intentionally commented out: these are manual checks that read/write local
// files ("test-original" / "test-compressed") in the working directory.
public class ZstdTest {

  //@Test
  public void write() throws Exception {
    byte[] bytes = new byte[2 * 1024 * 1024];
    new Random().nextBytes(bytes);
    // Tee the same random payload into an uncompressed file and a
    // zstd-compressed file. try-with-resources guarantees both sinks are
    // flushed and closed (the previous version leaked all three streams).
    try (OutputStream originalFile = new FileOutputStream("test-original");
         OutputStream compressionStream = new ZstdOutputStream(new FileOutputStream("test-compressed"));
         TeeOutputStream tos = new TeeOutputStream(originalFile, compressionStream)) {
      tos.write(bytes);
    }
  }

  //@Test
  public void read() throws Exception {
    // Decompress "test-compressed" and verify it matches the uncompressed
    // payload written by write(); streams are closed via try-with-resources.
    try (InputStream originalFile = new FileInputStream("test-original");
         InputStream decompressionStream = new ZstdInputStream(new FileInputStream("test-compressed"))) {
      assertTrue(IOUtils.contentEquals(originalFile, decompressionStream));
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/commons/TestUtils.java | memq-client/src/test/java/com/pinterest/memq/client/commons/TestUtils.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Semaphore;
import java.util.function.BiFunction;
import org.apache.commons.compress.utils.IOUtils;
import com.codahale.metrics.MetricRegistry;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.serde.ByteArrayDeserializer;
import com.pinterest.memq.client.producer.MemqProducer;
import com.pinterest.memq.client.producer.netty.MemqNettyRequest;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.MessageId;
// Test fixtures for building MemQ batches in memory: serializes a batch
// header (length + count + 12-byte index entries), followed by the raw
// message payloads, and hands back iterators over the result.
public class TestUtils {
// Builds only the batch header for the given payloads as a stream:
// 4-byte header length, 4-byte count, then (index, offset, size) per message.
public static InputStream getBatchHeadersAsInputStream(final List<ByteBuffer> messages) throws IOException {
ByteBuffer header = ByteBuffer.allocate(Integer.BYTES * 2 + // header length
messages.size() * Integer.BYTES * 3 // index bytes 12 bytes per index entry
);
header.putInt(header.capacity() - Integer.BYTES);// header length
writeMessageIndex(messages, header);
return new ByteArrayInputStream(header.array());
}
// Appends the message index to the header buffer. Offsets are absolute:
// the first payload starts right after the header (header.limit()).
public static void writeMessageIndex(final List<ByteBuffer> messages, ByteBuffer header) {
// build message index
header.putInt(messages.size());
int offset = header.limit();
for (int i = 0; i < messages.size(); i++) {
ByteBuffer message = messages.get(i);
int size = message.limit();
header.putInt(i);//
header.putInt(offset);
header.putInt(size);
offset += size;
}
}
// Produces a complete serialized batch: 'msgs' messages, each containing
// 'logMessageCount' log messages, concatenated after the batch header.
// Message-id hashes are appended to 'messageIdHashes' when provided.
public static byte[] getMemqBatchData(String baseLogMessage,
BiFunction<String, Integer, byte[]> getLogMessageBytes,
int logMessageCount,
int msgs,
boolean enableMessageId,
Compression compression,
List<byte[]> messageIdHashes,
boolean enableTestHeaders) throws Exception {
List<ByteBuffer> bufList = new ArrayList<>();
for (int l = 0; l < msgs; l++) {
byte[] rawData = createMessage(baseLogMessage, getLogMessageBytes, logMessageCount,
enableMessageId, compression, messageIdHashes, enableTestHeaders);
bufList.add(ByteBuffer.wrap(rawData));
}
ByteArrayOutputStream os = new ByteArrayOutputStream();
InputStream buf = getBatchHeadersAsInputStream(bufList);
IOUtils.copy(buf, os);
// Payloads follow the header in index order.
for (ByteBuffer byteBuffer : bufList) {
os.write(byteBuffer.array());
}
return os.toByteArray();
}
// Serializes one MemQ message containing 'logMessageCount' log entries via a
// real MemqNettyRequest, optionally with message ids and a "test" header.
// Returns the raw payload bytes; the request's buffer is released before
// returning so no pooled memory leaks.
public static byte[] createMessage(String baseLogMessage,
BiFunction<String, Integer, byte[]> getLogMessageBytes,
int logMessageCount,
boolean enableMessageId,
Compression compression,
List<byte[]> messageIdHashes,
boolean enableTestHeaders) throws IOException {
Semaphore maxRequestLock = new Semaphore(1);
MemqNettyRequest task = new MemqNettyRequest("xyz", 1L, compression,
maxRequestLock, true, 1024 * 1024, 100, null, null, 10_000, false);
for (int k = 0; k < logMessageCount; k++) {
byte[] bytes = getLogMessageBytes.apply(baseLogMessage, k);
MessageId id = null;
if (enableMessageId) {
id = new SimpleMessageId(k);
}
byte[] headerBytes = null;
if (enableTestHeaders) {
Map<String, byte[]> headers = new HashMap<>();
headers.put("test", "value".getBytes());
headerBytes = MemqProducer.serializeHeadersToByteArray(headers);
}
MemqProducer.writeMemqLogMessage(id, headerBytes, null, bytes, task,
System.currentTimeMillis());
}
// Seal the request and flush its output stream before reading bytes out.
task.markReady();
task.getOutputStream().close();
if (messageIdHashes != null) {
messageIdHashes.add(task.getMessageIdHash());
}
byte[] rawData = task.getPayloadAsByteArrays();
task.getBuffer().release();
return rawData;
}
// Convenience: builds a batch WITHOUT message ids and wraps it in an
// iterator primed with the internal notification fields it requires.
public static MemqLogMessageIterator<byte[], byte[]> getTestDataIterator(String baseLogMessage,
BiFunction<String, Integer, byte[]> getLogMessageBytes,
int logMessageCount,
int msgs,
Compression compression,
boolean enableHeaders) throws Exception {
byte[] rawData = getMemqBatchData(baseLogMessage, getLogMessageBytes, logMessageCount, msgs,
false, compression, null, enableHeaders);
ByteArrayInputStream in = new ByteArrayInputStream(rawData);
DataInputStream stream = new DataInputStream(in);
JsonObject obj = new JsonObject();
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_TOPIC, "test");
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE, rawData.length);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_ID, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_READ_TIMESTAMP,
System.currentTimeMillis());
return new MemqLogMessageIterator<>("test", "test", stream, obj, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), new MetricRegistry(), false, null);
}
// Same as getTestDataIterator but WITH message ids enabled, exercising the
// full field set of the serialized log messages.
public static MemqLogMessageIterator<byte[], byte[]> getTestDataIteratorWithAllFields(String baseLogMessage,
BiFunction<String, Integer, byte[]> getLogMessageBytes,
int logMessageCount,
int msgs,
Compression compression,
boolean enableHeaders) throws Exception {
byte[] rawData = getMemqBatchData(baseLogMessage, getLogMessageBytes, logMessageCount, msgs,
true, compression, null, enableHeaders);
ByteArrayInputStream in = new ByteArrayInputStream(rawData);
DataInputStream stream = new DataInputStream(in);
JsonObject obj = new JsonObject();
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_TOPIC, "test");
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE, rawData.length);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_ID, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_READ_TIMESTAMP,
System.currentTimeMillis());
return new MemqLogMessageIterator<>("test", "test", stream, obj, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), new MetricRegistry(), false, null);
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/commons/audit/TestKafkaBackedAuditor.java | memq-client/src/test/java/com/pinterest/memq/client/commons/audit/TestKafkaBackedAuditor.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons.audit;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.nio.ByteBuffer;
import java.time.Duration;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.LongStream;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.junit.ClassRule;
import org.junit.Test;
import com.google.common.io.Files;
import com.pinterest.memq.core.utils.MemqUtils;
import com.salesforce.kafka.test.junit4.SharedKafkaTestResource;
import com.salesforce.kafka.test.listeners.PlainListener;
// End-to-end test of KafkaBackedAuditor against an embedded Kafka broker:
// writes 100 audit events, then consumes them back and decodes the binary
// key/value layout field by field.
public class TestKafkaBackedAuditor {
@ClassRule
public static final SharedKafkaTestResource sharedKafkaTestResource = new SharedKafkaTestResource()
.withBrokers(1).registerListener(new PlainListener().onPorts(9092));
@Test
public void testAudit() throws Exception {
String kafkaConnectString = "localhost:9092";
String pathname = "target/testserverset1";
Properties adminProps = new Properties();
adminProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConnectString);
adminProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
ByteArrayDeserializer.class.getCanonicalName());
adminProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
ByteArrayDeserializer.class.getCanonicalName());
adminProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "test12");
adminProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
AdminClient admin = AdminClient.create(adminProps);
String auditTopic = "testaudittopic";
admin.createTopics(Arrays.asList(new NewTopic(auditTopic, 3, (short) 1)));
admin.close();
// The auditor locates brokers through a serverset file on disk.
Files.write(kafkaConnectString.getBytes(), new File(pathname));
Auditor kafkaAuditor = new KafkaBackedAuditor();
Properties props = new Properties();
props.setProperty("serverset", pathname);
props.setProperty("topic", auditTopic);
kafkaAuditor.init(props);
// Emit 100 audit events sharing one epoch; the loop index is the event id.
long epoch = System.currentTimeMillis();
for (int i = 0; i < 100; i++) {
kafkaAuditor.auditMessage("test".getBytes(), "test1".getBytes(), MemqUtils.HOST_IPV4_ADDRESS,
epoch, i, new byte[16], 10, false, "producer");
}
kafkaAuditor.close();
KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(adminProps);
consumer.subscribe(Arrays.asList(auditTopic));
Set<Long> idSet = new HashSet<>();
// Decode each record. The value layout decoded below is:
// short len + cluster ("test"), short len + topic ("test1"),
// short len + 4-byte host address, epoch long, id long,
// short len + 16-byte message-id hash.
// The 20-byte key is: 4-byte host address, epoch long, id long.
for (ConsumerRecord<byte[], byte[]> consumerRecord : consumer.poll(Duration.ofMillis(10000))) {
ByteBuffer buf = ByteBuffer.wrap(consumerRecord.value());
assertEquals(20, consumerRecord.key().length);
ByteBuffer keyBuf = ByteBuffer.wrap(consumerRecord.key());
assertEquals(4, buf.getShort());
byte[] tmp = new byte[4];
buf.get(tmp);
assertEquals("test", new String(tmp));
assertEquals(5, buf.getShort());
tmp = new byte[5];
buf.get(tmp);
assertEquals("test1", new String(tmp));
assertEquals(4, buf.getShort());
tmp = new byte[4];
buf.get(tmp);
byte[] keyTmp = new byte[4];
keyBuf.get(keyTmp);
// Host address must match between key and value.
assertArrayEquals(tmp, keyTmp);
assertEquals(epoch, buf.getLong());
long count = buf.getLong();
idSet.add(count);
assertEquals(epoch, keyBuf.getLong());
assertEquals(count, keyBuf.getLong());
assertEquals(16, buf.getShort());
tmp = new byte[16];
buf.get(tmp);
}
// Every one of the 100 ids (0..99) must arrive exactly once.
assertEquals(100L, idSet.size());
assertTrue(idSet.containsAll(LongStream.range(0, 100).boxed().collect(Collectors.toList())));
consumer.close();
}
// getProducer() must cache producers per name: same name -> same instance,
// different name/config -> a distinct instance.
@Test
public void testSingleton() throws Exception {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
KafkaProducer<byte[], byte[]> p1 = KafkaBackedAuditor.getProducer("test", props);
KafkaProducer<byte[], byte[]> p2 = KafkaBackedAuditor.getProducer("test", props);
assertSame(p1, p2);
Properties props1 = new Properties();
props1.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9093");
KafkaProducer<byte[], byte[]> p3 = KafkaBackedAuditor.getProducer("test1", props1);
assertNotSame(p1, p3);
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/commons2/TestMemqCommonClient.java | memq-client/src/test/java/com/pinterest/memq/client/commons2/TestMemqCommonClient.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2;
import static org.junit.Assert.*;
import com.google.common.collect.ImmutableSet;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.TopicAssignment;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.TopicMetadata;
import com.pinterest.memq.commons.protocol.TopicMetadataRequestPacket;
import com.pinterest.memq.commons.protocol.TopicMetadataResponsePacket;
import io.netty.channel.ChannelHandlerContext;
import org.junit.Before;
import org.junit.Test;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
public class TestMemqCommonClient {
private static final ScheduledExecutorService scheduler = new ScheduledThreadPoolExecutor(1);
private static final String LOCALHOST_STRING = "127.0.0.1";
private int port = -1;
private Endpoint commonEndpoint;
@Before
public void generateRandomPort() {
  // Before each test, pick a fresh random port in [20000, 30000) that is
  // guaranteed to differ from the port used by the previous test, then
  // rebuild the shared unresolved endpoint pointing at it.
  int candidate;
  do {
    candidate = ThreadLocalRandom.current().nextInt(20000, 30000);
  } while (candidate == port);
  port = candidate;
  commonEndpoint = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), "test");
}
@Test
public void testInitialize() throws Exception {
  // Initializing a client with an empty endpoint list must be rejected
  // with the exact "No endpoints available" error message.
  MemqCommonClient emptyClient = new MemqCommonClient("test", null, new Properties());
  try {
    emptyClient.initialize(Collections.emptyList());
    fail("should not initialize with empty list");
  } catch (Exception expected) {
    assertEquals("No endpoints available", expected.getMessage());
  }
  emptyClient.close();
}
@Test
public void testSendRequestPacketAndReturnResponseFuture() throws Exception {
// Exercises sendRequestPacketAndReturnResponseFuture across three states:
// (1) before initialize() -> IllegalStateException,
// (2) initialized but nothing listening on the port -> ExecutionException
//     wrapping ConnectException,
// (3) mock server running -> OK response round-trips.
MemqCommonClient client = new MemqCommonClient("test", null, new Properties());
// not initialized
try {
client.sendRequestPacketAndReturnResponseFuture(null, "test", 10000);
fail("should fail since not initialized");
} catch (IllegalStateException ise) {
// good
} catch (Exception e) {
fail("failed: " + e);
}
// no connection
client.initialize(Collections.singletonList(commonEndpoint));
RequestPacket request = new RequestPacket(RequestType.PROTOCOL_VERSION, 1, RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
try {
client.sendRequestPacketAndReturnResponseFuture(request, "test", 10000);
fail("should fail since non connection");
} catch (ExecutionException ee) {
assertTrue(ee.getCause() instanceof ConnectException);
} catch (Exception e) {
fail("failed: " + e);
}
// Register a TOPIC_METADATA handler that advertises this test's port as the
// single write broker, so the client can complete a metadata round trip.
Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
TopicConfig topicConfig = new TopicConfig("test", "dev");
TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
Set<Broker>
brokers =
Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a", "n/a", BrokerType.WRITE,
Collections.singleton(topicAssignment)));
ResponsePacket
resp =
new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
req.getRequestType(),
ResponseCodes.OK, new TopicMetadataResponsePacket(
new TopicMetadata(mdPkt.getTopic(), brokers, ImmutableSet.of(), "dev", new Properties())));
ctx.writeAndFlush(resp);
});
MockMemqServer mockServer = new MockMemqServer(port, map);
mockServer.start();
try {
// Re-initialize now that the server is up, then expect an OK response.
client.initialize(Collections.singletonList(commonEndpoint));
Future<ResponsePacket> respFuture = client.sendRequestPacketAndReturnResponseFuture(request, "test", 10000);
ResponsePacket resp = respFuture.get();
assertEquals(ResponseCodes.OK, resp.getResponseCode());
} catch (Exception e) {
fail("failed: " + e);
}
client.close();
mockServer.stop();
}
@Test
public void testSendRequestPacketAndReturnResponseFutureFailAfterTimeout() throws Exception {
  // The mock server only comes up after 1200 ms and then sleeps 2000 ms
  // before replying, so the 3000 ms request timeout must expire first and
  // surface as an ExecutionException wrapping TimeoutException.
  MemqCommonClient client = new MemqCommonClient("test", null, new Properties());
  client.initialize(Collections.singletonList(commonEndpoint));
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
  map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
    try {
      // Delay the reply well past the client's request timeout.
      Thread.sleep(2000);
    } catch (InterruptedException e) {
      // Restore the interrupt flag rather than swallowing the interruption.
      Thread.currentThread().interrupt();
    }
    TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
    TopicConfig topicConfig = new TopicConfig("test", "dev");
    TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
    Set<Broker> brokers =
        Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a", "n/a",
            BrokerType.WRITE, Collections.singleton(topicAssignment)));
    ResponsePacket resp =
        new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
            req.getRequestType(), ResponseCodes.OK,
            new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
                ImmutableSet.of(), "dev", new Properties())));
    ctx.writeAndFlush(resp);
  });
  MockMemqServer mockServer = new MockMemqServer(port, map);
  // NOTE(review): fail() here runs on the scheduler thread, so JUnit cannot
  // report it directly; a start failure would instead show up as the request
  // below failing with a different cause.
  scheduler.schedule(() -> {
    try {
      mockServer.start();
    } catch (Exception e) {
      fail(e.getMessage());
    }
  }, 1200, TimeUnit.MILLISECONDS);
  RequestPacket request = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  try {
    // The response value is irrelevant; get() is expected to throw.
    client.sendRequestPacketAndReturnResponseFuture(request, "test", 3000).get();
    fail("should throw timeout exception");
  } catch (ExecutionException ee) {
    assertTrue(ee.getCause() instanceof TimeoutException);
  }
  client.close();
  mockServer.stop();
}
@Test
public void testSendRequestPacketAndReturnResponseFutureFailAfterRetry() throws Exception {
// The server is scheduled to start only after 5000 ms while the request
// allows 5000 ms, so the client's connect retries are expected to exhaust
// first and the future fails with a ConnectException cause.
MemqCommonClient client = new MemqCommonClient("test", null, new Properties());
client.initialize(Collections.singletonList(commonEndpoint));
Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
TopicConfig topicConfig = new TopicConfig("test", "dev");
TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
Set<Broker>
brokers =
Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a", "n/a", BrokerType.WRITE,
Collections.singleton(topicAssignment)));
ResponsePacket
resp =
new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
req.getRequestType(),
ResponseCodes.OK, new TopicMetadataResponsePacket(
new TopicMetadata(mdPkt.getTopic(), brokers, ImmutableSet.of(), "dev", new Properties())));
ctx.writeAndFlush(resp);
});
MockMemqServer mockServer = new MockMemqServer(port, map);
// Delay server startup past the point where the client gives up connecting.
scheduler.schedule(() -> {
try {
mockServer.start();
} catch (Exception e) {
fail(e.getMessage());
}
}, 5000, TimeUnit.MILLISECONDS);
RequestPacket request = new RequestPacket(RequestType.PROTOCOL_VERSION, 1, RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
try {
Future<ResponsePacket> respFuture = client.sendRequestPacketAndReturnResponseFuture(request, "test", 5000);
respFuture.get();
fail("should fail since connection is dropped");
} catch (ExecutionException ee) {
// Retries exhausted before the server existed -> connect failure.
assertTrue(ee.getCause() instanceof ConnectException);
} catch (Exception e) {
fail("failed: " + e);
}
client.close();
mockServer.stop();
}
@Test
public void testGetLocalityEndpoints() throws Exception {
  // Only endpoints whose locality matches the client's own ("test")
  // should survive the filter; the two other localities are dropped.
  MemqCommonClient client = new MemqCommonClient("test", null, new Properties());
  Endpoint matching = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, 9092), "test");
  Endpoint otherLocalityA = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, 9093), "test2");
  Endpoint otherLocalityB = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, 9094), "test3");
  List<Endpoint> filtered =
      client.getLocalityEndpoints(Arrays.asList(matching, otherLocalityA, otherLocalityB));
  assertEquals(1, filtered.size());
  assertEquals("test", filtered.get(0).getLocality());
  client.close();
}
@Test
public void testGetTopicMetadata() throws Exception {
// Happy path for getTopicMetadata: the mock server advertises one write
// broker (this test's port) with storage handler "dev"; the client must
// surface exactly that in the returned TopicMetadata.
Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
TopicConfig topicConfig = new TopicConfig("test", "dev");
TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
Set<Broker>
brokers =
Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a", "n/a", BrokerType.WRITE,
Collections.singleton(topicAssignment)));
ResponsePacket
resp =
new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
req.getRequestType(),
ResponseCodes.OK, new TopicMetadataResponsePacket(
new TopicMetadata(mdPkt.getTopic(), brokers, ImmutableSet.of(), "dev", new Properties())));
ctx.writeAndFlush(resp);
});
MockMemqServer mockServer = new MockMemqServer(port, map);
mockServer.start();
MemqCommonClient client = new MemqCommonClient("test", null, new Properties());
client.initialize(Collections.singletonList(commonEndpoint));
TopicMetadata md = client.getTopicMetadata("test", 3000);
assertEquals(1, md.getWriteBrokers().size());
assertEquals("dev", md.getStorageHandlerName());
client.close();
mockServer.stop();
}
@Test
public void testReconnect() throws Exception {
// The handler returns one more broker on each metadata request (count+1
// brokers on the count-th call). After the first call yields 1 broker,
// reconnect() must re-fetch metadata and expose 2 distinct endpoints.
Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
AtomicInteger count = new AtomicInteger(0);
map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
TopicConfig topicConfig = new TopicConfig("test", "dev");
TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
Set<Broker> brokers = new HashSet<>();
int currentCount = count.getAndIncrement();
// Call N advertises N+1 brokers at 127.0.0.1, 127.0.0.2, ...
for (int i = 0; i <= currentCount; i++) {
brokers.add(new Broker("127.0.0." + (i + 1), (short) port, "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
}
ResponsePacket
resp =
new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
req.getRequestType(),
ResponseCodes.OK, new TopicMetadataResponsePacket(
new TopicMetadata(mdPkt.getTopic(), brokers, ImmutableSet.of(), "dev", new Properties())));
ctx.writeAndFlush(resp);
});
MockMemqServer mockServer = new MockMemqServer(port, map);
mockServer.start();
MemqCommonClient client = new MemqCommonClient("test", null, new Properties());
client.initialize(Collections.singletonList(commonEndpoint));
TopicMetadata md = client.getTopicMetadata("test", 3000);
assertEquals(1, md.getWriteBrokers().size());
assertEquals("dev", md.getStorageHandlerName());
// Second metadata fetch (via reconnect) should now see two brokers.
client.reconnect("test", false);
List<Endpoint> endpoints = client.getEndpointsToTry();
assertEquals(2, endpoints.size());
assertNotEquals(endpoints.get(0), endpoints.get(1));
client.close();
mockServer.stop();
}
@Test
public void testDeprioritizeAndRemoveDeadEndpointAfterTwoFailures() throws Exception {
// Verifies the two-strike policy of deprioritizeDeadEndpoint: the first
// failure demotes the endpoint to the back of the try-order, the second
// removes it from consideration entirely.
Properties networkProps = new Properties();
networkProps.setProperty(MemqCommonClient.CONFIG_NUM_WRITE_ENDPOINTS, "2");
MemqCommonClient client = new MemqCommonClient("test", null, networkProps);
Endpoint dead = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), "test");
Endpoint e2 = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port + 1), "test");
Endpoint e3 = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port + 2), "test");
client.initialize(Arrays.asList(dead, e2, e3));
// First deprioritization: moved to end, still present
client.deprioritizeDeadEndpoint(dead, "topic");
List<Endpoint> orderAfterFirst = client.getEndpointsToTry();
assertTrue(orderAfterFirst.contains(dead));
assertFalse(orderAfterFirst.get(0).equals(dead));
// Second deprioritization: removed from consideration
client.deprioritizeDeadEndpoint(dead, "topic");
List<Endpoint> orderAfterSecond = client.getEndpointsToTry();
assertFalse(orderAfterSecond.contains(dead));
assertEquals(2, orderAfterSecond.size());
client.close();
}
@Test
public void testStickyWriteEndpointsAndRoundRobinRotation() throws Exception {
// With CONFIG_NUM_WRITE_ENDPOINTS=2 and three live servers, the client
// should settle on exactly two "sticky" write endpoints, keep them stable
// across many sends, and round-robin the first-try position between them.
Properties networkProps = new Properties();
networkProps.setProperty(MemqCommonClient.CONFIG_NUM_WRITE_ENDPOINTS, "2");
MemqCommonClient client = new MemqCommonClient("test", null, networkProps);
Endpoint e1 = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), "test");
Endpoint e2 = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port + 1), "test");
Endpoint e3 = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port + 2), "test");
client.initialize(Arrays.asList(e1, e2, e3));
// Start servers on all three endpoints (all alive)
Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
TopicConfig topicConfig = new TopicConfig("test", "dev");
TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
Set<Broker> brokers = new HashSet<>();
brokers.add(new Broker(LOCALHOST_STRING, (short) port, "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
brokers.add(new Broker(LOCALHOST_STRING, (short) (port + 1), "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
brokers.add(new Broker(LOCALHOST_STRING, (short) (port + 2), "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
req.getRequestType(), ResponseCodes.OK,
new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
ImmutableSet.of(), "dev", new Properties())));
ctx.writeAndFlush(resp);
});
MockMemqServer s1 = new MockMemqServer(port, map);
MockMemqServer s2 = new MockMemqServer(port + 1, map);
MockMemqServer s3 = new MockMemqServer(port + 2, map);
s1.start();
s2.start();
s3.start();
// Send a few requests to register two working write endpoints
RequestPacket request = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
for (int i = 0; i < 4; i++) {
client.sendRequestPacketAndReturnResponseFuture(request, "test", 3000).get();
}
List<Endpoint> sticky = new ArrayList<>(client.currentWriteEndpoints());
assertEquals(2, sticky.size());
// Ensure they remain sticky across additional sends
for (int i = 0; i < 10; i++) {
client.sendRequestPacketAndReturnResponseFuture(request, "test", 3000).get();
List<Endpoint> current = client.currentWriteEndpoints();
// Compare as sets: membership must be stable even if order rotates.
assertEquals(new HashSet<>(sticky), new HashSet<>(current));
}
// Verify round-robin rotation among sticky endpoints
List<Endpoint> first = client.getEndpointsToTry();
assertTrue(first.get(0).equals(sticky.get(0)) || first.get(0).equals(sticky.get(1)));
List<Endpoint> second = client.getEndpointsToTry();
// The first position should rotate to the other sticky endpoint
assertNotEquals(first.get(0), second.get(0));
assertTrue(new HashSet<>(Arrays.asList(first.get(0), second.get(0))).containsAll(sticky));
client.close();
s1.stop();
s2.stop();
s3.stop();
}
@Test
public void testWriteEndpointsSelectionRandomizedAcrossRuns() throws Exception {
// Probabilistic: across multiple runs, the chosen sticky endpoints should vary at least once
// NOTE(review): with 3 choose 2 = 3 possible pairs and 8 independent runs,
// the chance of all runs picking the same pair is (1/3)^7 if selection is
// uniform — tiny but nonzero, so this test is theoretically flaky.
int runs = 8;
Set<Set<Integer>> selections = new HashSet<>();
for (int r = 0; r < runs; r++) {
// Each run uses its own disjoint port triple so servers never collide.
int base = port + 10 + (r * 10);
Properties networkProps = new Properties();
networkProps.setProperty(MemqCommonClient.CONFIG_NUM_WRITE_ENDPOINTS, "2");
MemqCommonClient client = new MemqCommonClient("test", null, networkProps);
Endpoint e1 = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, base), "test");
Endpoint e2 = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, base + 1), "test");
Endpoint e3 = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, base + 2), "test");
client.initialize(Arrays.asList(e1, e2, e3));
Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
TopicConfig topicConfig = new TopicConfig("test", "dev");
TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
Set<Broker> brokers = new HashSet<>();
brokers.add(new Broker(LOCALHOST_STRING, (short) base, "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
brokers.add(new Broker(LOCALHOST_STRING, (short) (base + 1), "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
brokers.add(new Broker(LOCALHOST_STRING, (short) (base + 2), "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
req.getRequestType(), ResponseCodes.OK,
new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
ImmutableSet.of(), "dev", new Properties())));
ctx.writeAndFlush(resp);
});
MockMemqServer s1 = new MockMemqServer(base, map);
MockMemqServer s2 = new MockMemqServer(base + 1, map);
MockMemqServer s3 = new MockMemqServer(base + 2, map);
s1.start();
s2.start();
s3.start();
RequestPacket request = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
for (int i = 0; i < 3; i++) {
client.sendRequestPacketAndReturnResponseFuture(request, "test", 3000).get();
}
List<Endpoint> sticky = client.currentWriteEndpoints();
// Record the chosen pair as a set of ports, so order doesn't matter.
selections.add(new HashSet<>(Arrays.asList(sticky.get(0).getAddress().getPort(), sticky.get(1).getAddress().getPort())));
client.close();
s1.stop();
s2.stop();
s3.stop();
}
// Expect at least two distinct selections across runs to indicate random choice
assertTrue("Expected at least two distinct sticky endpoint selections across runs", selections.size() >= 2);
}
@Test
public void testSendFailureRefreshesWriteEndpoints() throws Exception {
// Use real MemqCommonClient and force first attempt to target a dead endpoint by rotating write endpoints
Properties networkProps = new Properties();
networkProps.setProperty(MemqCommonClient.CONFIG_NUM_WRITE_ENDPOINTS, "2");
MemqCommonClient client = new MemqCommonClient("test", null, networkProps);
// Prepare endpoints: include a dead endpoint and a live endpoint
Endpoint dead = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), "test");
Endpoint alive = new Endpoint(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port + 1), "test");
client.initialize(Arrays.asList(dead, alive));
// Start server on the alive endpoint
Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
TopicConfig topicConfig = new TopicConfig("test", "dev");
TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
// Metadata only advertises the alive broker (port + 1).
Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) (port + 1), "n/a",
"n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
req.getRequestType(), ResponseCodes.OK,
new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
ImmutableSet.of(), "dev", new Properties())));
ctx.writeAndFlush(resp);
});
MockMemqServer server = new MockMemqServer(port + 1, map);
server.start();
// Rotate write endpoints so the first endpoint to try is the dead one
// NOTE(review): rotation is bounded to 5 extra calls; if the dead endpoint
// never reaches the front, the assert below fails the test explicitly.
List<Endpoint> endpointsToTry = client.getEndpointsToTry();
int attempts = 0;
while (endpointsToTry.get(0).getAddress().getPort() != port && attempts++ < 5) {
endpointsToTry = client.getEndpointsToTry();
}
assertEquals(port, endpointsToTry.get(0).getAddress().getPort());
// First attempt should hit the dead endpoint (connect failure) → triggers refreshWriteEndpoints; next attempt hits alive and succeeds
RequestPacket request = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
ResponsePacket resp = client.sendRequestPacketAndReturnResponseFuture(request, "test", 5000).get();
assertEquals(ResponseCodes.OK, resp.getResponseCode());
// Verify write endpoints were refreshed to prefer the alive endpoint now
List<Endpoint> writeEndpoints = client.currentWriteEndpoints();
assertEquals(alive.getAddress(), writeEndpoints.get(0).getAddress());
client.close();
server.stop();
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/commons2/MockMemqServer.java | memq-client/src/test/java/com/pinterest/memq/client/commons2/MockMemqServer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.ChannelPromise;
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.handler.traffic.GlobalTrafficShapingHandler;
import io.netty.util.concurrent.GlobalEventExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteOrder;
import java.util.Map;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.function.BiConsumer;
// Minimal Netty-based memq server for tests: frames incoming bytes, decodes a
// RequestPacket, and dispatches it to a per-RequestType handler supplied by
// the test; unhandled request types get a BAD_REQUEST response.
public class MockMemqServer {
private static final Logger logger = LoggerFactory.getLogger(MockMemqServer.class);
private final ServerBootstrap bootstrap;
// Set by start(); null until then, so stop()/isRunning() guard on it.
private ChannelFuture channelFuture;
private final ByteBufAllocator allocator;
// Tracks every accepted child channel so stop() can close them all.
private final ChannelGroup allChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
private final int port;
public MockMemqServer(int port, Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> responseMap, boolean useDirect) {
this(port, responseMap, useDirect, false, -1, -1);
}
// Full constructor. readLimit/checkInterval are passed to the traffic
// shaper and only matter when attachTrafficShapingHandler is true.
public MockMemqServer(int port, Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> responseMap, boolean useDirect,
boolean attachTrafficShapingHandler, long readLimit, long checkInterval) {
this.port = port;
// NOTE(review): the useDirect parameter is never used — the allocator is
// always created with preferDirect=false regardless of what callers pass.
allocator = new PooledByteBufAllocator(false,3, 0, 8192, 3);
bootstrap = new ServerBootstrap();
// NOTE(review): these event loop groups are never shutdownGracefully()'d,
// not even in stop(), so each server instance leaks its worker threads
// for the remainder of the test JVM's life.
bootstrap.group(new NioEventLoopGroup(1, new ThreadFactoryBuilder().setNameFormat("boss").build()), new NioEventLoopGroup(new ThreadFactoryBuilder().setNameFormat("worker").build()));
bootstrap.channel(NioServerSocketChannel.class);
bootstrap.localAddress(port);
bootstrap.childOption(ChannelOption.ALLOCATOR, allocator);
// NOTE(review): the scheduler and traffic shaper are created even when
// attachTrafficShapingHandler is false, and neither is ever shut down /
// released — another per-instance thread leak.
ScheduledThreadPoolExecutor tmpScheduler = new ScheduledThreadPoolExecutor(1);
tmpScheduler.setRemoveOnCancelPolicy(true);
GlobalTrafficShapingHandler trafficShapingHandler = new GlobalTrafficShapingHandler(tmpScheduler, 0, readLimit, checkInterval);
bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline pipeline = ch.pipeline();
// Big-endian 4-byte length prefix framing; the length field itself is
// retained in the frame (initialBytesToStrip = 0). Max frame 4 MiB.
pipeline.addLast(new LengthFieldBasedFrameDecoder(
ByteOrder.BIG_ENDIAN,
4 * 1024 * 1024,
0,
Integer.BYTES,
0,
0,
false));
if (attachTrafficShapingHandler) {
pipeline.addLast(trafficShapingHandler);
}
pipeline.addLast(new MockResponseHandler());
pipeline.addLast(new MockRequestHandler(responseMap));
}
});
}
public MockMemqServer(int port, Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> responseMap) {
this(port, responseMap, true, false, -1, -1);
}
// Binds the configured port and blocks until the bind completes.
public ChannelFuture start() throws Exception {
channelFuture = bootstrap.bind().sync();
return channelFuture;
}
// Closes all accepted channels and the listening socket. Safe to call
// before start() (no-op). Does not stop the event loop groups (see above).
public void stop() throws Exception {
if (channelFuture == null) {
return;
}
allChannels.close();
channelFuture.channel().close();
if (channelFuture.channel().parent() != null) {
channelFuture.channel().parent().close();
}
}
public boolean isRunning() {
return channelFuture != null && channelFuture.channel().isActive();
}
// Decodes each framed request and routes it to the test-supplied handler
// for its RequestType, or answers BAD_REQUEST when no handler is mapped.
private class MockRequestHandler extends ChannelInboundHandlerAdapter {
private final Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> responseMap;
public MockRequestHandler(Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> responseMap) {
this.responseMap = responseMap;
}
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
// Register the new connection so stop() can close it.
allChannels.add(ctx.channel());
super.channelActive(ctx);
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
ByteBuf inBuffer = (ByteBuf) msg;
RequestPacket requestPacket = new RequestPacket();
requestPacket.readFields(inBuffer, (short) 0);
// Frame decoder hands us a retained buffer; release it after decoding.
inBuffer.release();
BiConsumer<ChannelHandlerContext, RequestPacket> consumer = responseMap.get(requestPacket.getRequestType());
if (consumer != null) {
consumer.accept(ctx, requestPacket);
} else {
ResponsePacket resp = new ResponsePacket(requestPacket.getProtocolVersion(),
requestPacket.getClientRequestId(), requestPacket.getRequestType(), ResponseCodes.BAD_REQUEST, "No handler for request type");
ctx.writeAndFlush(resp);
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
// Deliberately lenient for tests: log and keep the channel open.
logger.warn("Exception caught on inbound connection: {}", cause.getMessage());
}
}
// Serializes outbound ResponsePacket objects to length-prefixed bytes; on
// encode failure it substitutes an INTERNAL_SERVER_ERROR response instead.
private class MockResponseHandler extends ChannelOutboundHandlerAdapter {
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
throws Exception {
if (msg instanceof ResponsePacket) {
ResponsePacket response = (ResponsePacket) msg;
int responseSize;
try {
responseSize = response.getSize(response.getProtocolVersion());
} catch (Exception e) {
logger.error("Failed to encode response packet: ", e);
// Replace the unencodable response with an error response so the
// client still receives a well-formed packet.
response = new ResponsePacket(
response.getProtocolVersion(),
response.getClientRequestId(),
response.getRequestType(),
ResponseCodes.INTERNAL_SERVER_ERROR,
"Failed to encode response packet " + e.getMessage()
);
responseSize = response.getSize(response.getProtocolVersion());
}
// Fixed-capacity buffer sized exactly to the serialized packet.
ByteBuf buffer = allocator.buffer(responseSize, responseSize);
response.write(buffer, response.getProtocolVersion());
super.write(ctx, buffer, promise);
} else {
super.write(ctx, msg, promise);
}
promise.addListener((f) -> {
if(!f.isSuccess()) {
logger.error("Failed to respond", f.cause());
}
});
}
}
public int getPort() {
return port;
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/commons2/network/TestNetworkClient.java | memq-client/src/test/java/com/pinterest/memq/client/commons2/network/TestNetworkClient.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.commons2.network;
import static org.junit.Assert.*;
import com.google.common.collect.ImmutableSet;
import com.pinterest.memq.client.commons2.MockMemqServer;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.TopicAssignment;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.TopicMetadata;
import com.pinterest.memq.commons.protocol.TopicMetadataRequestPacket;
import com.pinterest.memq.commons.protocol.TopicMetadataResponsePacket;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.util.ResourceLeakDetector;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
// Integration-style tests for NetworkClient against a local MockMemqServer.
public class TestNetworkClient {
  // Shared single-threaded scheduler used to delay mock-server startup in the retry tests.
  private static final ScheduledExecutorService scheduler = new ScheduledThreadPoolExecutor(1);
  // Loopback address used by every mock server in this suite.
  private static final String LOCALHOST_STRING = "127.0.0.1";
  // Port for the current test; re-randomized before each test (see generateRandomPort()).
  private int port = -1;
@BeforeClass
public static void setup() {
  // PARANOID leak detection makes Netty flag any ByteBuf leaks produced by these tests.
  ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID);
}
/**
 * Picks a fresh random port in [10000, 20000) that differs from the one used by
 * the previous test, so consecutive tests never bind the same listening port.
 */
@Before
public void generateRandomPort() {
  int candidate;
  do {
    candidate = ThreadLocalRandom.current().nextInt(10000, 20000);
  } while (candidate == port);
  port = candidate;
}
/**
 * Happy path: a TOPIC_METADATA request sent to a responsive mock server completes
 * with an OK response whose request type matches the request.
 */
@Test
public void testSendSimple() throws Exception {
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
  map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
    // Reply with metadata advertising this mock server as the only WRITE broker.
    TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
    TopicConfig topicConfig = new TopicConfig("test", "dev");
    TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
    Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a",
        "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
    ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
        req.getRequestType(), ResponseCodes.OK,
        new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
            ImmutableSet.of(), "dev", new Properties())));
    ctx.writeAndFlush(resp);
  });
  MockMemqServer mockServer = new MockMemqServer(port, map);
  mockServer.start();
  RequestPacket mdPkt = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  NetworkClient client = new NetworkClient();
  Future<ResponsePacket> pktFuture = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt);
  ResponsePacket resp = pktFuture.get();
  assertEquals(ResponseCodes.OK, resp.getResponseCode());
  assertEquals(RequestType.TOPIC_METADATA, resp.getRequestType());
  mockServer.stop();
}
/**
 * A server that never answers must cause the send future to fail with a
 * TimeoutException (wrapped in an ExecutionException) once the per-request
 * timeout of 1000 ms elapses.
 */
@Test
public void testSendTimeout() throws Exception {
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
  map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
    // do nothing (simulating hanging server)
  });
  MockMemqServer mockServer = new MockMemqServer(port, map);
  mockServer.start();
  NetworkClient client = new NetworkClient();
  try {
    RequestPacket mdPkt1 = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
        RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
    Future<ResponsePacket> pktFuture = client.send(
        InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt1,
        Duration.ofMillis(1000));
    try {
      pktFuture.get();
      fail("should throw timeout exception");
    } catch (ExecutionException ee) {
      assertTrue(ee.getCause() instanceof TimeoutException);
    } catch (Exception e) {
      fail("failed: " + e);
    }
  } finally {
    // Fix: the original never stopped the mock server or closed the client,
    // leaking the port and connection into subsequent tests.
    client.close();
    mockServer.stop();
  }
}
/**
 * After the server drops the first two requests by closing the connection, the
 * client must transparently reconnect and serve a third request successfully.
 */
@Test
public void testSendReconnectAfterConnectionClose() throws Exception {
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
  map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
    // only third request will go through, others will be dropped
    if (req.getClientRequestId() != 3) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        e.printStackTrace();
      }
      ctx.close();
      return;
    }
    TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
    TopicConfig topicConfig = new TopicConfig("test", "dev");
    TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
    Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a",
        "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
    ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
        req.getRequestType(), ResponseCodes.OK,
        new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
            ImmutableSet.of(), "dev", new Properties())));
    ctx.writeAndFlush(resp);
  });
  MockMemqServer mockServer = new MockMemqServer(port, map);
  mockServer.start();
  NetworkClient client = new NetworkClient();
  RequestPacket mdPkt1 = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  RequestPacket mdPkt2 = new RequestPacket(RequestType.PROTOCOL_VERSION, 2,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  RequestPacket mdPkt3 = new RequestPacket(RequestType.PROTOCOL_VERSION, 3,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  Future<ResponsePacket> pktFuture1 = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt1);
  Future<ResponsePacket> pktFuture2 = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt2);
  // Requests 1 and 2 must both fail with an IO-related cause once the server closes.
  try {
    pktFuture1.get();
    fail("should throw connection closed related IOException");
  } catch (ExecutionException ee) {
    assertTrue(ee.getCause() instanceof IOException); // exception will be either connect reset by
                                                      // peer or closed connection
  } catch (Exception e) {
    fail("failed: " + e);
  }
  try {
    pktFuture2.get();
    fail("should throw connection closed related IOException");
  } catch (ExecutionException ee) {
    assertTrue(ee.getCause() instanceof IOException);
  } catch (Exception e) {
    fail("failed: " + e);
  }
  // Give the server-side 1s sleep + close time to finish before the third request.
  Thread.sleep(1500);
  Future<ResponsePacket> pktFuture3 = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt3);
  ResponsePacket resp3 = pktFuture3.get();
  assertEquals(ResponseCodes.OK, resp3.getResponseCode());
  assertEquals(RequestType.TOPIC_METADATA, resp3.getRequestType());
  mockServer.stop();
}
/**
 * Exercises dropped-request handling. Request 1 fails because the server closes
 * the connection; request 2 is silently dropped and must hit the 3000 ms
 * irresponsive timeout; request 3 succeeds after a 500 ms server-side delay.
 */
@Test
public void testSendDrop() throws Exception {
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
  AtomicInteger count = new AtomicInteger(0);
  map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
    int currentCount = count.getAndIncrement();
    if (currentCount == 0) {
      // First request: close the connection without responding.
      ctx.close();
      return;
    } else if (currentCount == 1) {
      // Second request: drop it silently.
      return;
    }
    // Third and later requests: respond after a short delay.
    try {
      Thread.sleep(500);
    } catch (Exception e) {
      // no-op
    }
    TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
    TopicConfig topicConfig = new TopicConfig("test", "dev");
    TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
    Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a",
        "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
    ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
        req.getRequestType(), ResponseCodes.OK,
        new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
            ImmutableSet.of(), "dev", new Properties())));
    ctx.writeAndFlush(resp);
  });
  MockMemqServer mockServer = new MockMemqServer(port, map);
  mockServer.start();
  RequestPacket mdPkt = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  Properties props = new Properties();
  props.setProperty(NetworkClient.CONFIG_IRRESPONSIVE_TIMEOUT_MS, "3000");
  NetworkClient client = new NetworkClient(props);
  // server closed connection
  Future<ResponsePacket> pktFuture = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt);
  try {
    Thread.sleep(500);
    // Closed connection should have already cleared the inflight map.
    assertEquals(0, client.getInflightRequestCount());
    pktFuture.get(1000, TimeUnit.MILLISECONDS);
    fail("should throw closed connection exception");
  } catch (ExecutionException e) {
    assertTrue(e.getCause() instanceof ClosedConnectionException);
  } catch (Exception e) {
    fail("failed: " + e);
  }
  // Server taking a long time to respond (dropping requests)
  mdPkt.setClientRequestId(2);
  Future<ResponsePacket> pktFuture1 = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt);
  try {
    Thread.sleep(500);
    assertEquals(1, client.getInflightRequestCount());
    pktFuture1.get(4500, TimeUnit.MILLISECONDS);
    fail("should throw timeout exception");
  } catch (ExecutionException e) {
    // The irresponsive timeout must also evict the request from the inflight map.
    assertEquals(0, client.getInflightRequestCount());
    assertTrue(e.getCause() instanceof TimeoutException);
  } catch (Exception e) {
    fail("failed: " + e);
  }
  // happy path
  mdPkt.setClientRequestId(3);
  Future<ResponsePacket> pktFuture2 = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt);
  Thread.sleep(100);
  assertEquals(1, client.getInflightRequestCount());
  ResponsePacket resp2 = pktFuture2.get();
  assertEquals(0, client.getInflightRequestCount());
  assertEquals(ResponseCodes.OK, resp2.getResponseCode());
  assertEquals(RequestType.TOPIC_METADATA, resp2.getRequestType());
  mockServer.stop();
}
/**
 * Three concurrent requests (each delayed 500 ms server-side) must all be
 * inflight simultaneously and each must receive its own matching response.
 */
@Test
public void testSendMultipleRequests() throws Exception {
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
  map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
    try {
      Thread.sleep(500);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
    TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
    TopicConfig topicConfig = new TopicConfig("test", "dev");
    TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
    Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a",
        "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
    ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
        req.getRequestType(), ResponseCodes.OK,
        new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
            ImmutableSet.of(), "dev", new Properties())));
    ctx.writeAndFlush(resp);
  });
  MockMemqServer mockServer = new MockMemqServer(port, map);
  mockServer.start();
  // each request will hang for 500 ms before getting a response back
  RequestPacket mdPkt = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  RequestPacket mdPkt2 = new RequestPacket(RequestType.PROTOCOL_VERSION, 2,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  RequestPacket mdPkt3 = new RequestPacket(RequestType.PROTOCOL_VERSION, 3,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  NetworkClient client = new NetworkClient();
  Future<ResponsePacket> pktFuture = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt);
  Future<ResponsePacket> pktFuture2 = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt2);
  Future<ResponsePacket> pktFuture3 = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt3);
  Thread.sleep(200);
  // All three requests are still waiting on the 500 ms server delay at this point.
  assertEquals(3, client.getInflightRequestCount());
  ResponsePacket resp = pktFuture.get();
  ResponsePacket resp2 = pktFuture2.get();
  ResponsePacket resp3 = pktFuture3.get();
  // Each response must carry the client request id of its originating request.
  assertEquals(ResponseCodes.OK, resp.getResponseCode());
  assertEquals(mdPkt.getClientRequestId(), resp.getClientRequestId());
  assertEquals(RequestType.TOPIC_METADATA, resp.getRequestType());
  assertEquals(ResponseCodes.OK, resp2.getResponseCode());
  assertEquals(mdPkt2.getClientRequestId(), resp2.getClientRequestId());
  assertEquals(RequestType.TOPIC_METADATA, resp2.getRequestType());
  assertEquals(ResponseCodes.OK, resp3.getResponseCode());
  assertEquals(mdPkt3.getClientRequestId(), resp3.getClientRequestId());
  assertEquals(RequestType.TOPIC_METADATA, resp3.getRequestType());
  mockServer.stop();
}
/**
 * Requests to two distinct endpoints (two mock servers on adjacent ports) must
 * both complete successfully from the same client instance.
 */
@Test
public void testSendDifferentServer() throws Exception {
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
  map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
    try {
      Thread.sleep(500);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
    TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
    TopicConfig topicConfig = new TopicConfig("test", "dev");
    TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
    Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a",
        "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
    ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
        req.getRequestType(), ResponseCodes.OK,
        new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
            ImmutableSet.of(), "dev", new Properties())));
    ctx.writeAndFlush(resp);
  });
  // Both servers share the same handler map; they listen on port and port + 1.
  MockMemqServer mockServer = new MockMemqServer(port, map);
  MockMemqServer mockServer2 = new MockMemqServer(port + 1, map);
  mockServer.start();
  mockServer2.start();
  RequestPacket mdPkt = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  RequestPacket mdPkt2 = new RequestPacket(RequestType.PROTOCOL_VERSION, 2,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  NetworkClient client = new NetworkClient();
  Future<ResponsePacket> pktFuture = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt);
  Thread.sleep(200);
  // Second request targets the second server while the first is still inflight.
  Future<ResponsePacket> pktFuture2 = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port + 1), mdPkt2);
  try {
    pktFuture.get();
  } catch (ExecutionException ee) {
    fail("should not throw exception");
  } catch (Exception e) {
    fail("failed: " + e);
  }
  ResponsePacket resp2 = pktFuture2.get();
  assertEquals(ResponseCodes.OK, resp2.getResponseCode());
  assertEquals(mdPkt2.getClientRequestId(), resp2.getClientRequestId());
  mockServer.stop();
  mockServer2.stop();
}
/**
 * With no server listening on the port, acquireChannel must fail with an
 * ExecutionException whose cause is a ConnectException.
 */
@Test
public void testAcquireChannelNoConnection() throws Exception {
  NetworkClient networkClient = new NetworkClient();
  boolean connectFailed = false;
  try {
    networkClient.acquireChannel(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port));
  } catch (ExecutionException ee) {
    connectFailed = true;
    Throwable cause = ee.getCause();
    assertNotNull(cause);
    assertTrue(cause instanceof ConnectException);
  } catch (Exception e) {
    fail(e.getMessage());
  }
  if (!connectFailed) {
    fail("should throw connect exception");
  }
}
/**
 * Acquiring a channel against a running server must succeed without throwing.
 */
@Test
public void testAcquireChannelSimple() throws Exception {
  NetworkClient client = new NetworkClient();
  MockMemqServer server = new MockMemqServer(port, Collections.emptyMap());
  server.start();
  try {
    // Fix: the original stored the result in an unused local; success simply
    // means acquireChannel did not throw.
    client.acquireChannel(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port));
  } catch (Exception e) {
    fail("failed: " + e);
  } finally {
    // Fix: fail() throws AssertionError (an Error), which previously skipped
    // server.stop() and leaked the listener into later tests.
    server.stop();
    client.close();
  }
}
/**
 * The client must keep retrying the connection until the server comes up: the
 * server is started ~800 ms after the first attempt, beyond the 500 ms connect
 * timeout but within the 300 ms-interval retry budget.
 */
@Test
public void testAcquireChannelRetries() throws Exception {
  Properties props = new Properties();
  props.setProperty(NetworkClient.CONFIG_CONNECT_TIMEOUT_MS, "500");
  props.setProperty(NetworkClient.CONFIG_INITIAL_RETRY_INTERVAL_MS, "300");
  NetworkClient client = new NetworkClient(props);
  MockMemqServer server = new MockMemqServer(port, Collections.emptyMap());
  Future<?> delayedStart = scheduler.schedule(() -> {
    try {
      server.start();
    } catch (Exception e) {
      // Fix: the original called fail() here, but this runs on the scheduler
      // thread so the AssertionError was silently swallowed by the executor.
      // Rethrowing makes the failure visible via delayedStart (and the connect
      // attempt below will fail anyway).
      throw new RuntimeException(e);
    }
  }, 800, TimeUnit.MILLISECONDS);
  try {
    client.acquireChannel(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port));
  } catch (Exception e) {
    fail(e.getMessage());
  } finally {
    // Fix: guarantee cleanup even when the assertion above fails.
    delayedStart.cancel(false);
    server.stop();
  }
}
/**
 * If the server only comes up after the client has exhausted its connect
 * retries, acquireChannel must fail with a ConnectException cause.
 */
@Test
public void testAcquireChannelFailAfterRetries() throws Exception {
  Properties props = new Properties();
  props.setProperty(NetworkClient.CONFIG_CONNECT_TIMEOUT_MS, "500");
  props.setProperty(NetworkClient.CONFIG_INITIAL_RETRY_INTERVAL_MS, "300");
  NetworkClient client = new NetworkClient(props);
  MockMemqServer server = new MockMemqServer(port, Collections.emptyMap());
  // Schedule the server to start well after the retry budget is exhausted.
  Future<?> delayedStart = scheduler.schedule(() -> {
    try {
      server.start();
    } catch (Exception e) {
      // Runs on the scheduler thread; fail() here cannot fail the test, so
      // surface the problem through the task's future instead.
      throw new RuntimeException(e);
    }
  }, 3000, TimeUnit.MILLISECONDS);
  try {
    client.acquireChannel(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port));
    fail("should throw connect exception");
  } catch (ExecutionException ee) {
    assertTrue(ee.getCause() instanceof ConnectException);
  } catch (Exception e) {
    fail("failed: " + e);
  } finally {
    // Fix: the original could call server.stop() *before* the scheduled start
    // fired, leaking a listener that started after cleanup. Cancel the pending
    // start first, then stop whatever did start.
    delayedStart.cancel(false);
    server.stop();
  }
}
/**
 * close() must tear down the connect future and fail any in-flight send with a
 * closed-connection or client-closed exception.
 */
@Test
public void testStartAndClose() throws Exception {
  NetworkClient client = new NetworkClient();
  MockMemqServer server = new MockMemqServer(port, Collections.emptyMap());
  server.start();
  try {
    client.acquireChannel(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port));
  } catch (Exception e) {
    fail(e.getMessage());
  }
  client.close();
  Thread.sleep(100); // wait some time for the connection to close
  assertNull(client.getConnectFuture());
  RequestPacket mdPkt = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  // Fresh client: close it immediately after sending to race close() against the send.
  client = new NetworkClient();
  Future<ResponsePacket> pktFuture = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt);
  client.close();
  try {
    pktFuture.get();
    fail("should throw closed connection exception");
  } catch (ExecutionException ee) {
    // Depending on how far the send progressed before close(), either cause is valid.
    assertTrue(ee.getCause() instanceof ClosedConnectionException || ee.getCause() instanceof ClientClosedException);
  } catch (Exception e) {
    fail("failed: " + e);
  }
  server.stop();
}
/**
 * reset() must abort the in-flight request with a ClosedConnectionException
 * (well before the 5000 ms irresponsive timeout) while leaving the client
 * usable for subsequent requests.
 */
@Test
public void testReset() throws Exception {
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
  AtomicInteger count = new AtomicInteger(0);
  map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
    int currentCount = count.getAndIncrement();
    // first request will hang
    if (currentCount == 0) {
      return;
    }
    TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
    TopicConfig topicConfig = new TopicConfig("test", "dev");
    TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
    Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a",
        "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
    ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
        req.getRequestType(), ResponseCodes.OK,
        new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
            ImmutableSet.of(), "dev", new Properties())));
    ctx.writeAndFlush(resp);
  });
  MockMemqServer mockServer = new MockMemqServer(port, map);
  mockServer.start();
  RequestPacket mdPkt = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  Properties props = new Properties();
  props.setProperty(NetworkClient.CONFIG_IRRESPONSIVE_TIMEOUT_MS, "5000");
  NetworkClient client = new NetworkClient(props);
  // server no response
  Future<ResponsePacket> pktFuture = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt);
  try {
    Thread.sleep(1000);
    client.reset();
    // reset() should fail the hung request immediately, not after the 5s timeout.
    pktFuture.get(2000, TimeUnit.MILLISECONDS);
    fail("should throw closed connection exception");
  } catch (ExecutionException e) {
    assertTrue(e.getCause() instanceof ClosedConnectionException);
  } catch (Exception e) {
    fail("failed: " + e);
  }
  // happy path
  mdPkt.setClientRequestId(2);
  Future<ResponsePacket> pktFuture2 = client
      .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt);
  ResponsePacket resp2 = pktFuture2.get();
  assertEquals(ResponseCodes.OK, resp2.getResponseCode());
  assertEquals(RequestType.TOPIC_METADATA, resp2.getRequestType());
  mockServer.stop();
}
/**
 * Two acquireChannel calls for the same endpoint must be backed by the same
 * active channel, demonstrating per-endpoint connection pooling.
 */
@Test
public void testChannelPoolingReusesConnectionForSameEndpoint() throws Exception {
  MockMemqServer server = new MockMemqServer(port, Collections.emptyMap());
  server.start();
  NetworkClient client = new NetworkClient();
  ChannelFuture cf1 = client.acquireChannel(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port));
  // Acquire again for the same endpoint; should reuse the same channel
  ChannelFuture cf2 = client.acquireChannel(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port));
  assertSame(cf1.channel(), cf2.channel());
  assertTrue(cf1.channel().isActive());
  client.close();
  server.stop();
}
/**
 * Three concurrent requests to one endpoint must be multiplexed over a single
 * pooled channel: pool size stays 1 while all three are inflight, and every
 * request still completes with OK.
 */
@Test
public void testMultiplexingUsesSingleChannelForConcurrentRequests() throws Exception {
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
  map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
    // Delay each response so all three requests overlap in flight.
    try {
      Thread.sleep(300);
    } catch (InterruptedException e) {
      // no-op
    }
    TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
    TopicConfig topicConfig = new TopicConfig("test", "dev");
    TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
    Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a",
        "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
    ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
        req.getRequestType(), ResponseCodes.OK,
        new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
            ImmutableSet.of(), "dev", new Properties())));
    ctx.writeAndFlush(resp);
  });
  MockMemqServer server = new MockMemqServer(port, map);
  server.start();
  NetworkClient client = new NetworkClient();
  RequestPacket r1 = new RequestPacket(RequestType.PROTOCOL_VERSION, 1,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  RequestPacket r2 = new RequestPacket(RequestType.PROTOCOL_VERSION, 2,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  RequestPacket r3 = new RequestPacket(RequestType.PROTOCOL_VERSION, 3,
      RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
  Future<ResponsePacket> f1 = client.send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), r1);
  Future<ResponsePacket> f2 = client.send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), r2);
  Future<ResponsePacket> f3 = client.send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), r3);
  // Give a moment for all three to be inflight
  Thread.sleep(100);
  // Only one channel should back the same endpoint; acquiring should return the same active channel
  ChannelFuture cf = client.acquireChannel(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port));
  assertEquals(1, client.getChannelPool().size());
  assertTrue(cf.channel().isActive());
  assertEquals(3, client.getInflightRequestCount());
  ResponsePacket rp1 = f1.get();
  ResponsePacket rp2 = f2.get();
  ResponsePacket rp3 = f3.get();
  assertEquals(ResponseCodes.OK, rp1.getResponseCode());
  assertEquals(ResponseCodes.OK, rp2.getResponseCode());
  assertEquals(ResponseCodes.OK, rp3.getResponseCode());
  assertEquals(0, client.getInflightRequestCount());
  client.close();
  server.stop();
}
// @Test
// load test
/**
 * Disabled (the @Test annotation is commented out): manual load test that
 * pushes 10M TOPIC_METADATA requests through a single client and verifies
 * every response on a consumer thread. Intended to be run by hand only —
 * far too slow for CI.
 */
public void testLoadedRequest() throws Exception {
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
  // Pre-build the response payload once so the server does minimal per-request work.
  TopicConfig topicConfig = new TopicConfig("test", "dev");
  TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
  Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) port, "n/a",
      "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
  TopicMetadataResponsePacket respPkt = new TopicMetadataResponsePacket(
      new TopicMetadata("test", brokers, ImmutableSet.of(), "dev", new Properties()));
  map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
    ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
        req.getRequestType(), ResponseCodes.OK, respPkt);
    ctx.writeAndFlush(resp);
  });
  MockMemqServer mockServer = new MockMemqServer(port, map);
  mockServer.start();
  ExecutorService es = Executors.newSingleThreadExecutor();
  NetworkClient client = new NetworkClient();
  long startMs = System.currentTimeMillis();
  // Bounded queue throttles the producer loop to at most 100 outstanding futures.
  BlockingQueue<Future<ResponsePacket>> q = new ArrayBlockingQueue<>(100);
  AtomicBoolean done = new AtomicBoolean(false);
  int NUM_OF_REQUESTS = 10_000_000;
  AtomicInteger count = new AtomicInteger(0);
  // Consumer thread: drains the queue and validates each response.
  Future<?> task = es.submit(() -> {
    while (!done.get() || !q.isEmpty()) {
      try {
        ResponsePacket resp = q.take().get();
        assertEquals(ResponseCodes.OK, resp.getResponseCode());
        assertEquals(RequestType.TOPIC_METADATA, resp.getRequestType());
        if (count.incrementAndGet() % 100_000 == 0) {
          System.out.println("" + count.get() + "/" + NUM_OF_REQUESTS + " , elapsed: "
              + (System.currentTimeMillis() - startMs));
        }
      } catch (Exception e) {
        fail("fail: " + e);
      }
    }
  });
  for (int i = 0; i < NUM_OF_REQUESTS; i++) {
    RequestPacket mdPkt = new RequestPacket(RequestType.PROTOCOL_VERSION, i,
        RequestType.TOPIC_METADATA, new TopicMetadataRequestPacket("test"));
    Future<ResponsePacket> pktFuture = client
        .send(InetSocketAddress.createUnresolved(LOCALHOST_STRING, port), mdPkt);
    q.put(pktFuture);
  }
  done.set(true);
  task.get();
  assertEquals(10_000_000, count.get());
  mockServer.stop();
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/producer2/TestMemqProducerMultipleEndpoints.java | memq-client/src/test/java/com/pinterest/memq/client/producer2/TestMemqProducerMultipleEndpoints.java | package com.pinterest.memq.client.producer2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import com.pinterest.memq.client.commons.MemqMessageHeader;
import com.pinterest.memq.client.commons.serde.ByteArraySerializer;
import com.pinterest.memq.client.commons2.Endpoint;
import com.pinterest.memq.client.commons2.MemqCommonClient;
import com.pinterest.memq.client.commons2.MockMemqServer;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.TopicAssignment;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.TopicMetadata;
import com.pinterest.memq.commons.protocol.TopicMetadataRequestPacket;
import com.pinterest.memq.commons.protocol.TopicMetadataResponsePacket;
import com.pinterest.memq.commons.protocol.WriteRequestPacket;
import com.pinterest.memq.commons.protocol.WriteResponsePacket;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import com.google.common.collect.ImmutableSet;
import io.netty.channel.ChannelHandlerContext;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
@RunWith(Parameterized.class)
public class TestMemqProducerMultipleEndpoints extends TestMemqProducerBase {
/**
 * Builds the cross product of write-endpoint counts {1,2,3}, broker counts
 * {1,2,3} and dead-broker counts {0,1,2}, keeping only combinations where at
 * least one broker stays alive (dead < total).
 */
@Parameterized.Parameters(name = "{index}: numWriteEndpoints={0}, numBrokers={1}, numDeadBrokers={2}")
public static Collection<Integer[]> parameters() {
  List<Integer[]> combinations = new ArrayList<>();
  for (int endpoints : new int[] {1, 2, 3}) {
    for (int brokers : new int[] {1, 2, 3}) {
      for (int dead : new int[] {0, 1, 2}) {
        if (dead < brokers) {
          combinations.add(new Integer[] {endpoints, brokers, dead});
        }
      }
    }
  }
  return combinations;
}
// Number of write endpoints the producer is configured to spread writes across.
private final int numWriteEndpoints;
// Total number of mock brokers for this parameter combination.
private final int numBrokers;
// How many of those brokers are "dead" (presumably never started — the
// dead-broker test is defined further down; confirm there).
private final int numDeadBrokers;
/**
 * Parameterized-runner constructor; receives one combination from parameters().
 */
public TestMemqProducerMultipleEndpoints(int numWriteEndpoints, int numBrokers, int numDeadBrokers) {
  this.numWriteEndpoints = numWriteEndpoints;
  this.numBrokers = numBrokers;
  this.numDeadBrokers = numDeadBrokers;
}
/**
 * With all brokers alive, 100 writes must all succeed, be distributed to
 * exactly min(numWriteEndpoints, numBrokers) distinct brokers, and be roughly
 * balanced (within 5% skew) across the brokers that received traffic.
 * Skipped (early return) for combinations that include dead brokers.
 */
@Test
public void testMultipleBrokerWrites() throws Exception {
  if (numDeadBrokers > 0) {
    System.out.println("Skipping testMultipleBrokerWrites with dead brokers");
    return;
  }
  System.out.println("numWriteEndpoints: " + numWriteEndpoints + ", numBrokers: " + numBrokers + ", numDeadBrokers: " + numDeadBrokers);
  // Per-broker write counters, indexed the same way as the mock servers below.
  AtomicInteger[] writeCounts = new AtomicInteger[numBrokers];
  for (int i = 0; i < numBrokers; i++) {
    writeCounts[i] = new AtomicInteger();
  }
  TopicConfig topicConfig = new TopicConfig("test", "dev");
  TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
  // Return all brokers in metadata so client discovers all brokers
  Set<Broker> brokers = new HashSet<>();
  for (int i = 0; i < numBrokers; i++) {
    brokers.add(new Broker(LOCALHOST_STRING, (short) (port + i), "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
  }
  BiConsumer<ChannelHandlerContext, RequestPacket> topicMetadataHandler = (ctx, req) -> {
    TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
    ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
        req.getRequestType(), ResponseCodes.OK,
        new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
            ImmutableSet.of(), "dev", new Properties())));
    ctx.writeAndFlush(resp);
  };
  // One handler map per broker so each server counts its own writes.
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>>[] maps = new HashMap[numBrokers];
  for (int i = 0; i < numBrokers; i++) {
    final int idx = i;
    BiConsumer<ChannelHandlerContext, RequestPacket> writeHandler = (ctx, req) -> {
      writeCounts[idx].getAndIncrement();
      ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
          req.getRequestType(), ResponseCodes.OK, new WriteResponsePacket());
      ctx.writeAndFlush(resp);
    };
    maps[i] = new HashMap<>();
    maps[i].put(RequestType.TOPIC_METADATA, topicMetadataHandler);
    maps[i].put(RequestType.WRITE, writeHandler);
  }
  MockMemqServer[] mockServers = new MockMemqServer[numBrokers];
  for (int i = 0; i < numBrokers; i++) {
    mockServers[i] = new MockMemqServer(port + i, maps[i]);
    mockServers[i].start();
  }
  Properties networkProperties = new Properties();
  networkProperties.setProperty(MemqCommonClient.CONFIG_NUM_WRITE_ENDPOINTS, String.valueOf(numWriteEndpoints));
  // Size the payload so each write() fills exactly one dispatched request.
  int payloadSize =
      RequestPacket.getHeaderSize() +
      RequestPacket.getHeaderSize() +
      WriteRequestPacket.getHeaderSize(RequestType.PROTOCOL_VERSION, "test") +
      MemqMessageHeader.getHeaderLength() +
      RawRecord.newInstance(null, null, null, "test1".getBytes(), 0).calculateEncodedLogMessageLength();
  MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
  builder.cluster("prototype").topic("test")
      .bootstrapServers(LOCALHOST_STRING + ":" + port) // Start with just first server for bootstrap
      .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
      .maxPayloadBytes(payloadSize)
      .maxInflightRequests(100)
      .networkProperties(networkProperties);
  MemqProducer<byte[], byte[]> producer = builder.build();
  // Perform multiple writes to trigger round-robin behavior
  List<Future<MemqWriteResult>> results = new ArrayList<>();
  int numWrites = 100;
  for (int i = 0; i < numWrites; i++) {
    Future<MemqWriteResult> r = producer.write(null, "test1".getBytes());
    results.add(r);
  }
  producer.flush();
  int successCount = 0;
  for (Future<MemqWriteResult> r : results) {
    try {
      r.get();
      successCount++;
    } catch (Exception e) {
      System.out.println("TestTwoBrokers exception: " + e);
      e.printStackTrace();
      fail("Should not throw exception");
    }
  }
  assertEquals("Success count should be numWrites", numWrites, successCount);
  producer.close();
  // Verify that writes went to both servers
  int totalWrites = 0;
  for (int i = 0; i < numBrokers; i++) {
    totalWrites += writeCounts[i].get();
    System.out.println("Server " + i + " writes: " + writeCounts[i].get());
  }
  System.out.println("Total writes: " + totalWrites);
  assertEquals("Total writes should be numWrites", numWrites, totalWrites);
  // Verify that the number of distinct brokers that received writes matches numWriteEndpoints (capped at total brokers)
  int brokersWithWrites = 0;
  for (int i = 0; i < numBrokers; i++) {
    brokersWithWrites += writeCounts[i].get() > 0 ? 1 : 0;
  }
  int expectedBrokersWithWrites = Math.min(numWriteEndpoints, numBrokers);
  assertEquals("Unexpected number of brokers received writes",
      expectedBrokersWithWrites, brokersWithWrites);
  // If more than one broker received writes, verify approximate balance across them
  if (expectedBrokersWithWrites > 1) {
    List<Integer> activeBrokerWrites = new ArrayList<>();
    for (int i = 0; i < numBrokers; i++) {
      if (writeCounts[i].get() > 0) activeBrokerWrites.add(writeCounts[i].get());
    }
    // Defensive: ensure we are comparing only among active brokers
    assertEquals("Active broker count mismatch",
        expectedBrokersWithWrites, activeBrokerWrites.size());
    int minWrites = Collections.min(activeBrokerWrites);
    int maxWrites = Collections.max(activeBrokerWrites);
    int avgPerBroker = numWrites / expectedBrokersWithWrites;
    int allowedSkew = Math.max(1, (int) Math.ceil(avgPerBroker * 0.05)); // allow 5% skew or at least 1
    assertTrue(
        "Writes not approximately balanced across active brokers: min=" + minWrites +
        ", max=" + maxWrites + ", allowedSkew=" + allowedSkew,
        (maxWrites - minWrites) <= allowedSkew);
  }
  for (int i = 0; i < numBrokers; i++) {
    mockServers[i].stop();
  }
}
@Test
public void testMultipleBrokerWritesWithDeadBrokers() throws Exception {
  // This scenario is only meaningful when at least one broker is scheduled to die.
  if (numDeadBrokers == 0) {
    System.out.println("Skipping testMultipleBrokerWritesWithDeadBrokers with no dead brokers");
    return;
  }
  System.out.println("numWriteEndpoints: " + numWriteEndpoints + ", numBrokers: " + numBrokers + ", numDeadBrokers: " + numDeadBrokers);
  // Per-broker write counters, indexed the same way as mockServers below.
  AtomicInteger[] writeCounts = new AtomicInteger[numBrokers];
  for (int i = 0; i < numBrokers; i++) {
    writeCounts[i] = new AtomicInteger();
  }
  TopicConfig topicConfig = new TopicConfig("test", "dev");
  TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
  // Return all brokers in metadata so client discovers all brokers
  Set<Broker> brokers = new HashSet<>();
  for (int i = 0; i < numBrokers; i++) {
    brokers.add(new Broker(LOCALHOST_STRING, (short) (port + i), "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
  }
  BiConsumer<ChannelHandlerContext, RequestPacket> topicMetadataHandler = (ctx, req) -> {
    TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
    ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
        req.getRequestType(), ResponseCodes.OK,
        new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
            ImmutableSet.of(), "dev", new Properties())));
    ctx.writeAndFlush(resp);
  };
  // One handler map per broker so each mock server counts only its own writes.
  Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>>[] maps = new HashMap[numBrokers];
  for (int i = 0; i < numBrokers; i++) {
    final int idx = i;
    BiConsumer<ChannelHandlerContext, RequestPacket> writeHandler = (ctx, req) -> {
      writeCounts[idx].getAndIncrement();
      ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
          req.getRequestType(), ResponseCodes.OK, new WriteResponsePacket());
      ctx.writeAndFlush(resp);
    };
    maps[i] = new HashMap<>();
    maps[i].put(RequestType.TOPIC_METADATA, topicMetadataHandler);
    maps[i].put(RequestType.WRITE, writeHandler);
  }
  MockMemqServer[] mockServers = new MockMemqServer[numBrokers];
  Map<Integer, MockMemqServer> portToServerMap = new HashMap<>();
  for (int i = 0; i < numBrokers; i++) {
    mockServers[i] = new MockMemqServer(port + i, maps[i]);
    mockServers[i].start();
    portToServerMap.put(mockServers[i].getPort(), mockServers[i]);
  }
  Properties networkProperties = new Properties();
  networkProperties.setProperty(MemqCommonClient.CONFIG_NUM_WRITE_ENDPOINTS, String.valueOf(numWriteEndpoints));
  // Size the payload so that each request holds exactly one record.
  int payloadSize =
      RequestPacket.getHeaderSize() +
      RequestPacket.getHeaderSize() +
      WriteRequestPacket.getHeaderSize(RequestType.PROTOCOL_VERSION, "test") +
      MemqMessageHeader.getHeaderLength() +
      RawRecord.newInstance(null, null, null, "test1".getBytes(), 0).calculateEncodedLogMessageLength();
  MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
  builder.cluster("prototype").topic("test")
      .bootstrapServers(LOCALHOST_STRING + ":" + port) // Start with just first server for bootstrap
      .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
      .maxPayloadBytes(payloadSize)
      .maxInflightRequests(1000)
      .networkProperties(networkProperties);
  MemqProducer<byte[], byte[]> producer = builder.build();
  // Perform multiple writes to trigger round-robin behavior
  List<Future<MemqWriteResult>> results = new ArrayList<>();
  List<Integer> writePorts = new ArrayList<>();
  Set<Integer> portsToKill = new HashSet<>();
  // Single shared scheduler for the delayed broker kills; the original code
  // leaked one non-daemon ScheduledThreadPoolExecutor per killed port.
  ScheduledThreadPoolExecutor killExecutor = new ScheduledThreadPoolExecutor(numDeadBrokers);
  boolean killScheduled = false;
  int numWrites = 200;
  for (int i = 0; i < numWrites; i++) {
    Future<MemqWriteResult> r = producer.write(null, "test1".getBytes());
    results.add(r);
    System.out.println("Written " + i + " records");
    Thread.sleep(10);
    if (writePorts.size() < numDeadBrokers) {
      // Keep collecting distinct write ports until we have enough candidates to kill.
      for (Endpoint e : producer.getWriteEndpoints()) {
        if (!writePorts.contains(e.getAddress().getPort())) {
          writePorts.add(e.getAddress().getPort());
        }
      }
    } else if (!killScheduled) {
      // kill up to numDeadBrokers write ports, scheduled exactly once
      for (int j = 0; j < numDeadBrokers; j++) {
        portsToKill.add(writePorts.get(j));
      }
      for (int portToKill : portsToKill) {
        // portToKill (not "port") avoids shadowing the base-class port field
        killExecutor.schedule(() -> {
          try {
            System.out.println("Killing server on port " + portToKill);
            portToServerMap.get(portToKill).stop();
          } catch (Exception e) {
            fail("Failed to stop server on port " + portToKill);
          }
        }, 100, TimeUnit.MILLISECONDS);
      }
      killScheduled = true;
    }
  }
  producer.flush();
  int successCount = 0;
  for (Future<MemqWriteResult> r : results) {
    try {
      r.get();
      successCount++;
    } catch (Exception e) {
      System.out.println("TestMultipleBrokerWritesWithDeadBrokers exception: " + e);
      e.printStackTrace();
      fail("Should not throw exception");
    }
  }
  assertEquals("Success count should be numWrites", numWrites, successCount);
  producer.close();
  // Verify that writes went to both servers
  int totalWrites = 0;
  for (int i = 0; i < numBrokers; i++) {
    totalWrites += writeCounts[i].get();
    System.out.println("Server " + i + " writes: " + writeCounts[i].get());
  }
  System.out.println("Total writes: " + totalWrites);
  assertEquals("Total writes should be numWrites", numWrites, totalWrites);
  // Verify that the number of distinct brokers that received writes matches numWriteEndpoints (capped at total brokers)
  int brokersWithWrites = 0;
  for (int i = 0; i < numBrokers; i++) {
    brokersWithWrites += writeCounts[i].get() > 0 ? 1 : 0;
  }
  int minNumBrokersWithWrites = Math.min(numWriteEndpoints, numBrokers);
  assertTrue("Unexpected number of brokers received writes",
      minNumBrokersWithWrites <= brokersWithWrites);
  // Release the kill scheduler's worker threads (already-run tasks are unaffected).
  killExecutor.shutdown();
  for (int i = 0; i < numBrokers; i++) {
    mockServers[i].stop();
  }
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/producer2/TestMemqProducerBase.java | memq-client/src/test/java/com/pinterest/memq/client/producer2/TestMemqProducerBase.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer2;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import org.junit.Before;
import org.junit.BeforeClass;
import com.google.common.collect.ImmutableSet;
import com.pinterest.memq.client.commons2.MockMemqServer;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.TopicAssignment;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.TopicMetadata;
import com.pinterest.memq.commons.protocol.TopicMetadataRequestPacket;
import com.pinterest.memq.commons.protocol.TopicMetadataResponsePacket;
import com.pinterest.memq.commons.protocol.WriteResponsePacket;
import io.netty.channel.ChannelHandlerContext;
import io.netty.util.ResourceLeakDetector;
/**
 * Shared fixture for producer2 tests: manages a per-test random port and
 * builds mock MemQ brokers that answer TOPIC_METADATA and WRITE requests.
 */
public class TestMemqProducerBase {

  protected static final String LOCALHOST_STRING = "127.0.0.1";

  // Port used by the mock broker of the current test; re-randomized before
  // each test to avoid bind conflicts between consecutive tests.
  protected short port = -1;

  @BeforeClass
  public static void setup() {
    // Surface any ByteBuf leaks in the client code paths under test.
    ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID);
  }

  @Before
  public void generateRandomPort() {
    // Roll until the candidate differs from the current port. The original
    // loop compared against a -1 sentinel, so it only re-rolled on the very
    // first call and would have reset port to -1 on instance reuse; this
    // do-while is correct in every case.
    short newPort;
    do {
      newPort = (short) ThreadLocalRandom.current().nextInt(20000, 30000);
    } while (newPort == port);
    port = newPort;
  }

  /**
   * Creates a mock broker on {@link #port} that serves topic metadata for
   * topic "test" and acknowledges every WRITE request with OK, incrementing
   * {@code writeCount} once per write.
   */
  protected MockMemqServer newSimpleTestServer(AtomicInteger writeCount) {
    Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
    setupSimpleTestServerTopicMetadataHandler(map);
    map.put(RequestType.WRITE, (ctx, req) -> {
      writeCount.getAndIncrement();
      ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
          req.getRequestType(), ResponseCodes.OK, new WriteResponsePacket());
      ctx.writeAndFlush(resp);
    });
    return new MockMemqServer(port, map);
  }

  /**
   * Creates a mock broker on {@link #port} with inbound traffic shaping
   * enabled ({@code readLimit} bytes per check window, 200 ms check interval)
   * that acknowledges every WRITE request with OK.
   */
  protected MockMemqServer newTrafficShapingTestServer(int readLimit) {
    Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
    setupSimpleTestServerTopicMetadataHandler(map, port);
    map.put(RequestType.WRITE, (ctx, req) -> {
      ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(),
          req.getClientRequestId(), req.getRequestType(), ResponseCodes.OK, new WriteResponsePacket());
      ctx.writeAndFlush(resp);
    });
    return new MockMemqServer(port, map, false, true, readLimit, 200);
  }

  /** Convenience overload that registers the metadata handler for this test's {@link #port}. */
  protected void setupSimpleTestServerTopicMetadataHandler(Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map) {
    setupSimpleTestServerTopicMetadataHandler(map, this.port);
  }

  /**
   * Registers a TOPIC_METADATA handler that advertises a single WRITE broker
   * at {@code LOCALHOST_STRING:port} owning the "test" topic in the "dev"
   * locality.
   */
  protected static void setupSimpleTestServerTopicMetadataHandler(Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map, short port) {
    map.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
      TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
      TopicConfig topicConfig = new TopicConfig("test", "dev");
      TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
      Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, port, "n/a", "n/a",
          BrokerType.WRITE, Collections.singleton(topicAssignment)));
      ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
          req.getRequestType(), ResponseCodes.OK,
          new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
              ImmutableSet.of(), "dev", new Properties())));
      ctx.writeAndFlush(resp);
    });
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/producer2/TestMemqProducerMemory.java | memq-client/src/test/java/com/pinterest/memq/client/producer2/TestMemqProducerMemory.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer2;
import com.codahale.metrics.MetricRegistry;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.serde.ByteArraySerializer;
import com.pinterest.memq.client.commons2.MemoryAllocationException;
import com.pinterest.memq.client.commons2.MockMemqServer;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.WriteResponsePacket;
import io.netty.channel.ChannelHandlerContext;
import org.junit.Ignore;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Future;
import java.util.function.BiConsumer;
import java.util.logging.Level;
import java.util.logging.Logger;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
 * Tests exercising the producer's inflight-memory accounting (semaphore
 * permits) under a traffic-shaped (throttled) mock broker.
 */
public class TestMemqProducerMemory extends TestMemqProducerBase {

  private static final Logger logger = Logger.getLogger(TestMemqProducerMemory.class.getName());

  // Fixed port shared by every traffic-shaping server in this class. It is
  // deliberately NOT the randomized base-class port: the manual test below
  // talks to a broker launched separately via main(), and both processes must
  // agree on this value. Keep the three former "20000" literals in sync here.
  private static final short FIXED_PORT = 20000;

  /**
   * This test simulates a local write scenario where the producer writes 2 messages to a local broker which has
   * TrafficShapingHandler enabled with a low read limit. This should result in the first write succeeding quickly, while
   * the second write blocks until the first write is acknowledged by the broker.
   *
   * The second write blocks because we configured the producer to create one request per message, so the second write
   * must try to create a new Request. Upon attempting to create a new request, it would have realized that the first write
   * request is still in-flight, and that it cannot create a new request until the first one is acknowledged because the max
   * inflight requests memory bytes is set to the size of one request. Therefore, the producer can only accommodate one
   * inflight request at a time.
   *
   * We set the maxBlockMs config to a high enough value so that there is sufficient time for the broker to acknowledge the first write
   * before the second write times out. The second write should block for a considerable time, but eventually succeed with a much higher ack latency.
   *
   * @throws Exception
   */
  @Test
  public void testLocalBlockedWrites() throws Exception {
    int messageValueBytes = 8192; // 8 kB
    int maxPayloadBytes = messageValueBytes + 4096; // 1 message each request due to some additional header overhead
    int maxInflightRequests = 999999999; // effectively unlimited for this test
    int maxInflightRequestsMemoryBytes = maxPayloadBytes; // it can only accommodate one inflight request
    MockMemqServer mockMemqServer = getMockMemqServerWithTrafficShaping(10, 1000); // it should be able to accept 1 write at a time
    mockMemqServer.start();
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
    Properties networkProperties = new Properties();
    // Derived from messageValueBytes instead of repeating the 8192 literal.
    byte[] sampleValue = new byte[messageValueBytes];
    builder
        .cluster("prototype")
        .topic("test")
        .bootstrapServers(LOCALHOST_STRING + ":" + FIXED_PORT)
        .keySerializer(new ByteArraySerializer())
        .valueSerializer(new ByteArraySerializer())
        .maxPayloadBytes(maxPayloadBytes)
        .compression(Compression.NONE)
        .maxInflightRequests(maxInflightRequests)
        .maxBlockMs(5000) // enough time for broker to ack the first write
        .maxInflightRequestsMemoryBytes(maxInflightRequestsMemoryBytes)
        .sendRequestTimeout(60 * 1000) // we shouldn't hit this
        .networkProperties(networkProperties)
        .metricRegistry(new MetricRegistry());
    MemqProducer<byte[], byte[]> producer = builder.build();
    // r0 write should succeed almost immediately
    Future<MemqWriteResult> r0 = producer.write(
        null,
        sampleValue
    );
    assertTrue(producer.getInflightMemoryAvailablePermits() < maxPayloadBytes); // we should not have space for another request
    // r1 write should block until r0's acknowledgement is received
    long startTime = System.currentTimeMillis();
    Future<MemqWriteResult> r1 = producer.write(
        null,
        sampleValue
    );
    long r1WriteElapsedTime = System.currentTimeMillis() - startTime;
    logger.log(Level.INFO, "Elapsed time for r1 write: " + r1WriteElapsedTime + " ms");
    assertTrue(r1WriteElapsedTime > 1000); // r1 should have blocked for a considerable time but eventually succeed
    try {
      MemqWriteResult r0WriteResult = r0.get();
      logger.log(Level.INFO,"r0 ack latency: " + r0WriteResult.getAckLatency() + " ms");
      MemqWriteResult r1WriteResult = r1.get();
      logger.log(Level.INFO,"r1 ack latency: " + r1WriteResult.getAckLatency() + " ms");
      assertTrue(r1WriteResult.getAckLatency() > r0WriteResult.getAckLatency()); // r1 should have a larger ack latency than r0
      assertTrue(r0WriteResult.getAckLatency() < r1WriteElapsedTime && (double) r0WriteResult.getAckLatency() / r1WriteElapsedTime > 0.9); // r1 should have blocked for around the same amount of time it took for r0's ack
    } catch (Exception e) {
      fail("Both writes should have succeeded");
    }
    producer.close();
    mockMemqServer.stop();
  }

  /**
   * This test simulates a scenario where the producer is trying to write
   * to the mock server which has a TrafficShapingHandler attached, resulting in
   * the server throttling the writes (inbound reads in the Netty channel). It requires the MockMemqServer
   * to be running first via the main() method of this class.
   *
   * This test is designed to be run manually after launching the MockMemqServer in the main() method of the class.
   * This is so that there are 2 separate processes running the broker and the test producer
   * itself, ensuring that memory allocation failures on the producer do not interfere with memory used by the broker's
   * processing data path.
   *
   * More specifically, the producer wants to write 100 messages, each of size 8 kB, but some writes will fail
   * due to insufficient inflight memory permits. Despite running into insufficient memory permits, we expect the producer to
   * eventually succeed in writing 100 messages, as the permits are eventually released upon broker acknowledgement of previous requests
   * in order to accommodate new writes and request creation / allocation. We assert that we indeed run into MemoryAllocationExceptions,
   * but that we still manage to write 100 messages successfully by the end.
   *
   * In other words, this test simulates a backpressure scenario resulting from broker-side congestion control throttling,
   * eventually resulting in some blocked writes due to insufficient memory permits.
   *
   * @throws Exception
   */
  @Test
  @Ignore("Run this test manually")
  public void testDirectMemoryAllocationFailureOnWrite() throws Exception {
    int maxDirectMemoryBytes = 1024 * 1024; // 1 MB
    int messageValueBytes = 8192; // 8 kB
    int maxPayloadBytes = messageValueBytes * 5; // 4 messages each request due to some additional header overhead
    int maxInflightRequests = 999999999; // effectively unlimited for this test
    int maxInflightRequestsMemoryBytes = maxDirectMemoryBytes / 2; // 512 kB so that it runs into semaphore exhaustion before direct memory exhaustion
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
    Properties networkProperties = new Properties();
    // Derived from messageValueBytes instead of repeating the 8192 literal.
    byte[] sampleValue = new byte[messageValueBytes];
    builder
        .cluster("prototype")
        .topic("test")
        .bootstrapServers(LOCALHOST_STRING + ":" + FIXED_PORT)
        .keySerializer(new ByteArraySerializer())
        .valueSerializer(new ByteArraySerializer())
        .maxPayloadBytes(maxPayloadBytes)
        .compression(Compression.NONE)
        .maxInflightRequests(maxInflightRequests)
        .maxBlockMs(5)
        .maxInflightRequestsMemoryBytes(maxInflightRequestsMemoryBytes)
        .sendRequestTimeout(60 * 1000)
        .networkProperties(networkProperties)
        .metricRegistry(new MetricRegistry());
    MemqProducer<byte[], byte[]> producer = builder.build();
    int numMessagesWritten = 0;
    List<Future<MemqWriteResult>> futures = new ArrayList<>();
    int memoryAllocationNumOccurrences = 0;
    int numMessagesToWrite = 100; // we will try to write 100 messages
    while (numMessagesWritten < numMessagesToWrite) {
      try {
        Future<MemqWriteResult> r = producer.write(
            null,
            sampleValue
        );
        futures.add(r);
        numMessagesWritten++;
      } catch (MemoryAllocationException e) {
        logger.log(Level.INFO, "Direct memory allocation failed as expected: " + e.getMessage());
        memoryAllocationNumOccurrences++;
        continue; // this is expected, we will write until we hit
      } catch (IOException e) {
        fail("Unexpected IOException: " + e.getMessage());
      }
      logger.log(Level.INFO, "Wrote message #" + numMessagesWritten);
    }
    assertTrue(memoryAllocationNumOccurrences > 0); // we expect at least one memory allocation failure to ensure a valid test scenario
    assertEquals(numMessagesToWrite, futures.size()); // we should have successfully written exactly as many messages as we tried to write
    List<Integer> ackLatencies = new ArrayList<>();
    for (Future<MemqWriteResult> future : futures) {
      try {
        MemqWriteResult result = future.get();
        ackLatencies.add(result.getAckLatency());
        logger.log(Level.INFO, "Write succeeded for message with client request ID: " + result.getClientRequestId());
        logger.log(Level.INFO, "Write succeeded with ack latency: " + result.getAckLatency());
      } catch (Exception e) {
        fail("Future.get() exception is unexpected; all writes should have been eventually successful");
      }
    }
    // ack latencies should generally be trending up, larger values indicate that the mock server is under congestion
    // control throttling. Check test logs to see the trend, output by the log statement below
    logger.log(Level.INFO, "Ack latencies: " + ackLatencies);
    assertTrue(ackLatencies.get(ackLatencies.size() - 1) > ackLatencies.get(0)); // last ack latency should be greater than the first one
    assertTrue(ackLatencies.get(ackLatencies.size() - 1) / ackLatencies.get(0) > 20); // last ack latency should be at least 20x the first one
    producer.close();
  }

  // Run this first before running the testDirectMemoryAllocationFailureOnWrite
  public static void main(String[] args) throws Exception {
    int readLimit = 1024 * 512; // this is specifically tuned to 512 kB so that testDirectMemoryAllocationFailureOnWrite will hit MemoryAllocationException but the server can still eventually handle the writes
    MockMemqServer mockServer = getMockMemqServerWithTrafficShaping(readLimit, 10);
    mockServer.start();
  }

  /**
   * Builds a traffic-shaped mock broker on {@link #FIXED_PORT} that
   * acknowledges every WRITE with OK.
   *
   * @param readLimit     inbound read throttle in bytes
   * @param checkInterval traffic-shaping check interval in milliseconds
   */
  private static MockMemqServer getMockMemqServerWithTrafficShaping(int readLimit, int checkInterval) {
    Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
    short port = FIXED_PORT;
    setupSimpleTestServerTopicMetadataHandler(map, port);
    map.put(RequestType.WRITE, (ctx, req) -> {
      ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(),
          req.getClientRequestId(), req.getRequestType(), ResponseCodes.OK, new WriteResponsePacket());
      ctx.writeAndFlush(resp);
    });
    MockMemqServer mockServer = new MockMemqServer(port, map, false, true, readLimit, checkInterval);
    return mockServer;
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/producer2/TestMemqProducer.java | memq-client/src/test/java/com/pinterest/memq/client/producer2/TestMemqProducer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqMessageHeader;
import com.pinterest.memq.client.commons.serde.ByteArraySerializer;
import com.pinterest.memq.client.commons2.MemoryAllocationException;
import com.pinterest.memq.client.commons2.MemqCommonClient;
import com.pinterest.memq.client.commons2.MockMemqServer;
import com.pinterest.memq.client.commons2.retry.UniformRetryStrategy;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.commons.protocol.Broker;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.pinterest.memq.commons.protocol.TopicAssignment;
import com.pinterest.memq.commons.protocol.TopicConfig;
import com.pinterest.memq.commons.protocol.TopicMetadata;
import com.pinterest.memq.commons.protocol.TopicMetadataRequestPacket;
import com.pinterest.memq.commons.protocol.TopicMetadataResponsePacket;
import com.pinterest.memq.commons.protocol.WriteRequestPacket;
import com.pinterest.memq.commons.protocol.WriteResponsePacket;
import com.pinterest.memq.commons.protocol.Broker.BrokerType;
import com.codahale.metrics.MetricRegistry;
import com.google.common.collect.ImmutableSet;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelHandlerContext;
import org.junit.Ignore;
import org.junit.Test;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
public class TestMemqProducer extends TestMemqProducerBase {
@Test
public void testMemoizeProducers() throws Exception {
  AtomicInteger writeCount = new AtomicInteger();
  MockMemqServer mockServer = newSimpleTestServer(writeCount);
  mockServer.start();
  Properties networkProperties = new Properties();
  MemqProducer.Builder<byte[], byte[]> builderTmpl = new MemqProducer.Builder<>();
  builderTmpl.cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
      .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
      .networkProperties(networkProperties);
  // Identical memoized builders must hand back the same producer instance.
  MemqProducer.Builder<byte[], byte[]> builder1 = new MemqProducer.Builder<>(builderTmpl);
  MemqProducer<byte[], byte[]> producer1 = builder1.memoize().build();
  MemqProducer<byte[], byte[]> producer2 = builder1.memoize().build();
  assertEquals(producer1, producer2);
  // Closing evicts the cached instance, so the next build creates a new one.
  producer1.close();
  MemqProducer<byte[], byte[]> producer3 = builder1.memoize().build();
  assertNotEquals(producer1, producer3);
  // A different topic keys a different cache entry.
  MemqProducer.Builder<byte[], byte[]> builder2 = new MemqProducer.Builder<>(builderTmpl);
  builder2.topic("test2");
  MemqProducer<byte[], byte[]> producer4 = builder2.memoize().build();
  assertNotEquals(producer2, producer4);
  // Close the remaining producers; the original test leaked producer3 and
  // producer4 (producer2 is the same instance as producer1, already closed).
  producer3.close();
  producer4.close();
  mockServer.stop();
}
@Test
public void testProducerInitializationFailure() throws Exception {
  // Point the producer at a bootstrap endpoint with no live server behind it;
  // building must fail and must leave the injected client closed.
  Properties netProps = new Properties();
  MemqCommonClient injectedClient = new MemqCommonClient("n/a", null, netProps);
  MemqProducer.Builder<byte[], byte[]> producerBuilder = new MemqProducer.Builder<>();
  producerBuilder.cluster("prototype")
      .topic("test")
      .bootstrapServers(LOCALHOST_STRING + ":" + port)
      .keySerializer(new ByteArraySerializer())
      .valueSerializer(new ByteArraySerializer())
      .networkProperties(netProps)
      .injectClient(injectedClient);
  try {
    producerBuilder.build();
    fail("Should fail since server is offline");
  } catch (Exception expected) {
    // Initialization failure must clean up the client it was handed.
    assertTrue(injectedClient.isClosed());
  }
}
@Test
public void testTooLargePayload() throws Exception {
  AtomicInteger writeCount = new AtomicInteger();
  MockMemqServer mockServer = newSimpleTestServer(writeCount);
  mockServer.start();
  Properties networkProperties = new Properties();
  MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
  // Cap the payload one byte short of what the 32-byte test message needs so
  // the write must be rejected.
  builder.cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
      .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
      .maxPayloadBytes(MemqMessageHeader.getHeaderLength() + RawRecord
          .newInstance(null, null, null,
              new byte["test message that has 32 bytes 1".length() - 1], 0)
          .calculateEncodedLogMessageLength())
      .networkProperties(networkProperties);
  MemqProducer<byte[], byte[]> producer = builder.build();
  Future<MemqWriteResult> r = producer.write(null, "test message that has 32 bytes 1".getBytes());
  producer.flush();
  // Oversized writes are dropped: no future is returned.
  assertNull(r);
  producer.close();
  // All permits should be back after close. Note: JUnit's assertEquals takes
  // (expected, actual); the original calls had the arguments reversed, which
  // produced misleading failure messages.
  assertEquals(30, producer.getRequestCountAvailablePermits());
  assertEquals(32 * 1024 * 1024, producer.getInflightMemoryAvailablePermits());
  mockServer.stop();
}
@Test
public void testSimpleWrite() throws Exception {
  AtomicInteger writeCount = new AtomicInteger();
  MockMemqServer mockServer = newSimpleTestServer(writeCount);
  mockServer.start();
  Properties networkProperties = new Properties();
  MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
  builder.cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
      .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
      .networkProperties(networkProperties);
  MemqProducer<byte[], byte[]> producer = builder.build();
  Future<MemqWriteResult> r = producer.write(null, "test".getBytes());
  producer.flush();
  r.get();
  producer.close();
  // A single write produces exactly one WRITE request at the broker.
  assertEquals(1, writeCount.get());
  // All permits should be back after close. Note: JUnit's assertEquals takes
  // (expected, actual); the original calls had the arguments reversed.
  assertEquals(30, producer.getRequestCountAvailablePermits());
  assertEquals(32 * 1024 * 1024, producer.getInflightMemoryAvailablePermits());
  mockServer.stop();
}
@Test
public void testSequentialWrites() throws Exception {
  AtomicInteger writeCount = new AtomicInteger();
  MockMemqServer mockServer = newSimpleTestServer(writeCount);
  mockServer.start();
  Properties networkProperties = new Properties();
  MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
  builder.cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
      .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
      .networkProperties(networkProperties);
  MemqProducer<byte[], byte[]> producer = builder.build();
  Future<MemqWriteResult> r0 = producer.write(null, "test1".getBytes());
  Future<MemqWriteResult> r1 = producer.write(null, "test2".getBytes());
  Future<MemqWriteResult> r2 = producer.write(null, "test3".getBytes());
  producer.flush();
  // All three writes are batched into the same request, hence the same future.
  assertEquals(r0, r1);
  assertEquals(r1, r2);
  r0.get();
  producer.close();
  // One batched request means the broker saw exactly one WRITE.
  assertEquals(1, writeCount.get());
  // All permits should be back after close. Note: JUnit's assertEquals takes
  // (expected, actual); the original calls had the arguments reversed.
  assertEquals(30, producer.getRequestCountAvailablePermits());
  assertEquals(32 * 1024 * 1024, producer.getInflightMemoryAvailablePermits());
  mockServer.stop();
}
@Test
public void testConcurrentWrites() throws Exception {
    // Writes issued concurrently from three threads must still coalesce into a
    // single in-flight request (all futures identical, one broker write).
    AtomicInteger writeCount = new AtomicInteger(0);
    MockMemqServer mockServer = newSimpleTestServer(writeCount);
    mockServer.start();
    Properties networkProperties = new Properties();
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
    builder.cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
        .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
        .networkProperties(networkProperties);
    MemqProducer<byte[], byte[]> producer = builder.build();
    ExecutorService es = Executors.newFixedThreadPool(3);
    try {
        Future<?>[] results = new Future[3];
        Future<?>[] tasks = new Future[3];
        for (int i = 0; i < 3; i++) {
            final int idx = i;
            tasks[idx] = es.submit(() -> {
                try {
                    results[idx] = producer.write(null, ("test" + idx).getBytes());
                } catch (Exception e) {
                    fail("Should not fail: " + e);
                }
            });
        }
        // Wait for all writer threads to finish before inspecting results.
        for (Future<?> f : tasks) {
            f.get();
        }
        assertNotNull(results[0]);
        assertEquals(results[0], results[2]);
        assertEquals(results[0], results[1]);
        producer.flush();
        for (Future<?> f : results) {
            f.get();
        }
    } finally {
        // Fix: the original leaked the thread pool; always shut it down.
        es.shutdown();
    }
    producer.close();
    assertEquals(1, writeCount.get());
    // Fixed JUnit argument order: expected value first, actual second.
    assertEquals(30, producer.getRequestCountAvailablePermits());
    assertEquals(32 * 1024 * 1024, producer.getInflightMemoryAvailablePermits());
    mockServer.stop();
}
@Test
public void testMultipleDispatchedRequests() throws Exception {
    // maxPayloadBytes is sized to fit exactly one 32-byte record, so every write
    // forces its own request: three distinct futures and three broker writes.
    AtomicInteger writeCount = new AtomicInteger(0);
    MockMemqServer mockServer = newSimpleTestServer(writeCount);
    mockServer.start();
    Properties networkProperties = new Properties();
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
    builder.cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
        .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
        .maxPayloadBytes(RequestPacket.getHeaderSize() + WriteRequestPacket.getHeaderSize(RequestType.PROTOCOL_VERSION, "test") +
            MemqMessageHeader.getHeaderLength() + RawRecord
                .newInstance(null, null, null, new byte["test message that has 32 bytes 1".length()], 0)
                .calculateEncodedLogMessageLength())
        .compression(Compression.NONE).networkProperties(networkProperties);
    MemqProducer<byte[], byte[]> producer = builder.build();
    Future<MemqWriteResult> r0 = producer.write(null,
        "test message that has 32 bytes 1".getBytes());
    Future<MemqWriteResult> r1 = producer.write(null,
        "test message that has 32 bytes 2".getBytes());
    Future<MemqWriteResult> r2 = producer.write(null,
        "test message that has 32 bytes 3".getBytes());
    producer.flush();
    // Each record filled a request, so no two futures may be shared.
    assertNotEquals(r0, r1);
    assertNotEquals(r1, r2);
    assertNotEquals(r2, r0);
    r0.get();
    r1.get();
    r2.get();
    producer.close();
    assertEquals(3, writeCount.get());
    // Fixed JUnit argument order: expected value first, actual second.
    assertEquals(30, producer.getRequestCountAvailablePermits());
    assertEquals(32 * 1024 * 1024, producer.getInflightMemoryAvailablePermits());
    mockServer.stop();
}
@Test
@Ignore("Ignore due to flaky github action test. It should pass locally")
public void testMultipleDispatchedRequestsClose() throws Exception {
    // After close(): new writes must be rejected with an IOException, and any
    // still-pending future must complete exceptionally.
    AtomicInteger writeCount = new AtomicInteger(0);
    MockMemqServer mockServer = newSimpleTestServer(writeCount);
    mockServer.start();
    Properties networkProperties = new Properties();
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
    builder.cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
        .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
        .maxPayloadBytes(RequestPacket.getHeaderSize() + WriteRequestPacket.getHeaderSize(RequestType.PROTOCOL_VERSION, "test") +
            MemqMessageHeader.getHeaderLength() + RawRecord
                .newInstance(null, null, null, new byte["test message that has 32 bytes 1".length()], 0)
                .calculateEncodedLogMessageLength())
        .compression(Compression.NONE).networkProperties(networkProperties);
    MemqProducer<byte[], byte[]> producer = builder.build();
    Future<MemqWriteResult> r0 = producer.write(null,
        "test message that has 32 bytes 1".getBytes());
    producer.close();
    try {
        // Unused local removed: only the thrown exception matters here.
        producer.write(null, "test message that has 32 bytes 2".getBytes());
        fail("Should throw exception");
    } catch (Exception e) {
        assertTrue(e instanceof IOException);
        assertEquals("Cannot write to topic test when client is closed", e.getMessage());
    }
    try {
        r0.get();
        // Fix: the original silently passed when get() succeeded; a pending
        // write must fail once the client is closed.
        fail("Pending write should complete exceptionally after close");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof IllegalStateException);
        assertEquals("Cannot send since client is closed", e.getCause().getMessage());
    }
    // Fixed JUnit argument order: expected value first, actual second.
    assertEquals(30, producer.getRequestCountAvailablePermits());
    assertEquals(32 * 1024 * 1024, producer.getInflightMemoryAvailablePermits());
    mockServer.stop();
}
@Test
public void testRedirect() throws Exception {
    // The mock broker REDIRECTs every even-numbered request and everything from
    // request 4 onward. The first two writes each need one retry and then
    // succeed; the third exhausts its retries and must fail.
    AtomicInteger writeCount = new AtomicInteger(0);
    AtomicInteger redirectCount = new AtomicInteger(0);
    Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
    setupSimpleTestServerTopicMetadataHandler(map);
    map.put(RequestType.WRITE, (ctx, req) -> {
        ResponsePacket resp;
        int currentCount = writeCount.getAndIncrement();
        if (currentCount % 2 == 0 || currentCount >= 4) { // redirect first request
            redirectCount.getAndIncrement();
            resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
                req.getRequestType(), ResponseCodes.REDIRECT, new WriteResponsePacket());
        } else {
            resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
                req.getRequestType(), ResponseCodes.OK, new WriteResponsePacket());
        }
        ctx.writeAndFlush(resp);
    });
    MockMemqServer mockServer = new MockMemqServer(port, map);
    mockServer.start();
    Properties networkProperties = new Properties();
    MemqProducer<byte[], byte[]> producer = new MemqProducer.Builder<byte[], byte[]>()
        .cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
        .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
        .maxPayloadBytes(RequestPacket.getHeaderSize() + WriteRequestPacket.getHeaderSize(RequestType.PROTOCOL_VERSION, "test") +
            MemqMessageHeader.getHeaderLength() + RawRecord
                .newInstance(null, null, null, new byte["test message that has 32 bytes 1".length()], 0)
                .calculateEncodedLogMessageLength())
        .compression(Compression.NONE).networkProperties(networkProperties).build();
    Future<MemqWriteResult> r0 = producer.write(null,
        "test message that has 32 bytes 1".getBytes());
    r0.get();
    Future<MemqWriteResult> r1 = producer.write(null,
        "test message that has 32 bytes 2".getBytes());
    r1.get();
    Future<MemqWriteResult> r2 = producer.write(null,
        "test message that has 32 bytes 3".getBytes());
    producer.flush();
    try {
        r2.get();
        fail("Should fail since more than 1 redirection");
    } catch (ExecutionException ee) {
        assertEquals("Write request failed after multiple attempts", ee.getCause().getMessage());
    } catch (Exception e) {
        fail("should throw execution exception");
    }
    producer.close();
    // r0: redirect + ok, r1: redirect + ok, r2: three redirects then give up.
    assertEquals(2 + 2 + 3, writeCount.get());
    assertEquals(1 + 1 + 3, redirectCount.get());
    // Fixed JUnit argument order: expected value first, actual second.
    assertEquals(30, producer.getRequestCountAvailablePermits());
    mockServer.stop();
}
@Test
public void testRedirectToDifferentServer() throws Exception {
    // Server 1 answers metadata requests with a broker list that points at
    // server 2 (port + 1) and REDIRECTs any write. The producer should fetch
    // metadata from server 1 once, then send all writes to server 2.
    Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map1 = new HashMap<>();
    AtomicInteger metadataCount1 = new AtomicInteger();
    map1.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
        metadataCount1.incrementAndGet();
        TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
        TopicConfig topicConfig = new TopicConfig("test", "dev");
        TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
        // Advertise server 2 (port + 1) as the write broker.
        Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) (port + 1),
            "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
        ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
            req.getRequestType(), ResponseCodes.OK,
            new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
                ImmutableSet.of(), "dev", new Properties())));
        ctx.writeAndFlush(resp);
    });
    AtomicInteger writeCount1 = new AtomicInteger();
    map1.put(RequestType.WRITE, (ctx, req) -> {
        writeCount1.getAndIncrement();
        ctx.writeAndFlush(new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
            req.getRequestType(), ResponseCodes.REDIRECT, new WriteResponsePacket()));
    });
    Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map2 = new HashMap<>();
    map2.put(RequestType.TOPIC_METADATA, (ctx, req) -> {
        TopicMetadataRequestPacket mdPkt = (TopicMetadataRequestPacket) req.getPayload();
        TopicConfig topicConfig = new TopicConfig("test", "dev");
        TopicAssignment topicAssignment = new TopicAssignment(topicConfig, 100.0);
        Set<Broker> brokers = Collections.singleton(new Broker(LOCALHOST_STRING, (short) (port + 1),
            "n/a", "n/a", BrokerType.WRITE, Collections.singleton(topicAssignment)));
        ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
            req.getRequestType(), ResponseCodes.OK,
            new TopicMetadataResponsePacket(new TopicMetadata(mdPkt.getTopic(), brokers,
                ImmutableSet.of(), "dev", new Properties())));
        ctx.writeAndFlush(resp);
    });
    AtomicInteger writeCount2 = new AtomicInteger();
    map2.put(RequestType.WRITE, (ctx, req) -> {
        writeCount2.getAndIncrement();
        ctx.writeAndFlush(new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
            req.getRequestType(), ResponseCodes.OK, new WriteResponsePacket()));
    });
    MockMemqServer mockserver1 = new MockMemqServer(port, map1);
    mockserver1.start();
    MockMemqServer mockserver2 = new MockMemqServer(port + 1, map2);
    mockserver2.start();
    Properties networkProperties = new Properties();
    MemqProducer<byte[], byte[]> producer = new MemqProducer.Builder<byte[], byte[]>()
        .cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
        .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
        .maxPayloadBytes(RequestPacket.getHeaderSize() + WriteRequestPacket.getHeaderSize(RequestType.PROTOCOL_VERSION, "test") +
            MemqMessageHeader.getHeaderLength() + RawRecord
                .newInstance(null, null, null, new byte["test message that has 32 bytes 1".length()], 0)
                .calculateEncodedLogMessageLength())
        .compression(Compression.NONE).networkProperties(networkProperties).build();
    Future<MemqWriteResult> r0 = producer.write(null,
        "test message that has 32 bytes 1".getBytes());
    Future<MemqWriteResult> r1 = producer.write(null,
        "test message that has 32 bytes 2".getBytes());
    Future<MemqWriteResult> r2 = producer.write(null,
        "test message that has 32 bytes 3".getBytes());
    producer.flush();
    try {
        r0.get();
        r1.get();
        r2.get();
    } catch (Exception e) {
        fail("should all pass");
    }
    producer.close();
    // Metadata came from server 1 exactly once; all writes landed on server 2.
    assertEquals(1, metadataCount1.get());
    assertEquals(0, writeCount1.get());
    assertEquals(3, writeCount2.get());
    // Fixed JUnit argument order: expected value first, actual second.
    assertEquals(30, producer.getRequestCountAvailablePermits());
    assertEquals(32 * 1024 * 1024, producer.getInflightMemoryAvailablePermits());
    mockserver1.stop();
    mockserver2.stop();
}
@Test(timeout=30000)
@Ignore("This test is disabled for now due to stalling when run on GitHub actions. It should pass locally.")
public void testDisconnectionRetryAndTimeout() throws Exception {
    // The mock broker drops the connection for requests 1-3 and from request 5
    // onward. Writes r0 and r1 should eventually succeed via retries; r2 keeps
    // hitting disconnects and must time out.
    AtomicInteger writeCount = new AtomicInteger(0);
    Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
    setupSimpleTestServerTopicMetadataHandler(map);
    map.put(RequestType.WRITE, (ctx, req) -> {
        int current = writeCount.getAndIncrement();
        if ((current >= 1 && current < 4) || current >= 5) {
            // Simulate a broker disconnect instead of responding.
            ctx.close();
            return;
        }
        ResponsePacket resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
            req.getRequestType(), ResponseCodes.OK, new WriteResponsePacket());
        ctx.writeAndFlush(resp);
    });
    MockMemqServer mockServer = new MockMemqServer(port, map);
    mockServer.start();
    Properties networkProperties = new Properties();
    MemqProducer<byte[], byte[]> producer = new MemqProducer.Builder<byte[], byte[]>()
        .cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
        .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
        .retryStrategy(new UniformRetryStrategy())
        .maxPayloadBytes(RequestPacket.getHeaderSize() + WriteRequestPacket.getHeaderSize(RequestType.PROTOCOL_VERSION, "test") +
            MemqMessageHeader.getHeaderLength() + RawRecord
                .newInstance(null, null, null, new byte["test message that has 32 bytes 1".length()], 0)
                .calculateEncodedLogMessageLength())
        .compression(Compression.NONE).networkProperties(networkProperties).build();
    Future<MemqWriteResult> r0 = producer.write(null,
        "test message that has 32 bytes 1".getBytes());
    r0.get();
    Future<MemqWriteResult> r1 = producer.write(null,
        "test message that has 32 bytes 2".getBytes());
    assertNotEquals(r0, r1);
    r1.get();
    Future<MemqWriteResult> r2 = producer.write(null,
        "test message that has 32 bytes 3".getBytes());
    producer.flush();
    assertNotEquals(r1, r2);
    assertNotEquals(r2, r0);
    try {
        r2.get();
        fail("Should timeout");
    } catch (ExecutionException ee) {
        assertTrue(ee.getCause() instanceof TimeoutException);
    } catch (Exception e) {
        fail("Should throw timeout exception wrapped in execution exception");
    }
    // Fixed JUnit argument order: expected value first, actual second.
    assertEquals(30, producer.getRequestCountAvailablePermits());
    assertEquals(32 * 1024 * 1024, producer.getInflightMemoryAvailablePermits());
    producer.close();
    mockServer.stop();
}
@Test
@Ignore("This test is disabled due to flakiness on GitHub actions")
public void testAlignedMemoryLoad() throws Exception {
    // Load test: 1M writes of exactly-payload-sized records while the broker
    // occasionally redirects (every 1000th request) or disconnects (every
    // 10000th). Verifies the success counter matches the number of distinct
    // request futures and that all permits are released at the end.
    AtomicInteger requestCount = new AtomicInteger(0);
    AtomicInteger redirectCount = new AtomicInteger(0);
    AtomicInteger closeCount = new AtomicInteger(0);
    AtomicInteger successCount = new AtomicInteger(0);
    Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
    setupSimpleTestServerTopicMetadataHandler(map);
    map.put(RequestType.WRITE, (ctx, req) -> {
        ResponsePacket resp;
        int current = requestCount.getAndIncrement();
        if (current % 10000 == 9999) {
            closeCount.getAndIncrement();
            ctx.close();
            return;
        } else if (current % 1000 == 999) {
            redirectCount.getAndIncrement();
            resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
                req.getRequestType(), ResponseCodes.REDIRECT, new WriteResponsePacket());
        } else {
            successCount.getAndIncrement();
            resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
                req.getRequestType(), ResponseCodes.OK, new WriteResponsePacket());
        }
        ctx.writeAndFlush(resp);
    });
    MockMemqServer mockServer = new MockMemqServer(port, map);
    mockServer.start();
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
    Properties networkProperties = new Properties();
    byte[] sampleValue = new byte[1024];
    // Payload sized so each request holds exactly one encoded record ("aligned").
    builder.cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
        .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
        .maxPayloadBytes(RequestPacket.getHeaderSize() + WriteRequestPacket.getHeaderSize(RequestType.PROTOCOL_VERSION, "test") +
            MemqMessageHeader.getHeaderLength() + RawRecord
                .newInstance(null, null, null, sampleValue, 0).calculateEncodedLogMessageLength())
        .compression(Compression.NONE).networkProperties(networkProperties)
        .metricRegistry(new MetricRegistry());
    MemqProducer<byte[], byte[]> producer = builder.build();
    ExecutorService es = Executors.newSingleThreadExecutor(r -> {
        Thread t = new Thread(r);
        t.setName("test");
        return t;
    });
    int NUM_OF_WRITES = 1_000_000;
    AtomicBoolean done = new AtomicBoolean(false);
    final BlockingQueue<Future<MemqWriteResult>> results = new ArrayBlockingQueue<>(10);
    Set<Future<MemqWriteResult>> resultSet = new HashSet<>();
    AtomicInteger resultCount = new AtomicInteger(0);
    // Consumer thread drains completed futures so the bounded queue never
    // blocks the writer for long.
    Future<?> consumptionTask = es.submit(() -> {
        while (!results.isEmpty() || !done.get()) {
            try {
                results.take().get();
            } catch (Exception e) {
                System.err.println(e);
            }
        }
        if (!done.get()) {
            fail("Not done yet");
        } else if (!results.isEmpty()) {
            fail("results are not empty");
        }
    });
    for (int i = 0; i < NUM_OF_WRITES; i++) {
        if (i % 1_000 == 0) {
            System.out.println("[" + i + "/" + NUM_OF_WRITES + "]" + " inflight requests: "
                + results.size() + " netty off-heap usage (KB): "
                + PooledByteBufAllocator.DEFAULT.metric().usedDirectMemory() / 1024
                + " netty heap usage (KB): "
                + PooledByteBufAllocator.DEFAULT.metric().usedHeapMemory() / 1024);
        }
        byte[] value = new byte[sampleValue.length];
        ThreadLocalRandom.current().nextBytes(value);
        Future<MemqWriteResult> r = producer.write(null, value);
        // Only track each distinct request future once.
        if (resultSet.add(r)) {
            resultCount.incrementAndGet();
            results.put(r);
        }
    }
    producer.flush();
    done.set(true);
    consumptionTask.get();
    // Fix: the original leaked the single-thread executor; shut it down.
    es.shutdown();
    producer.close();
    // Fixed JUnit argument order: expected value first, actual second.
    assertEquals(resultCount.get(),
        producer.getMetricRegistry().getCounters().get("requests.success.count").getCount());
    assertEquals(resultSet.size(), resultCount.get());
    assertEquals(30, producer.getRequestCountAvailablePermits());
    assertEquals(32 * 1024 * 1024, producer.getInflightMemoryAvailablePermits());
    mockServer.stop();
}
@Test
public void testMemoryAllocationException() throws Exception {
    // With a 64 KB in-flight memory cap, a throttled (100 B/s) broker, and a
    // near-zero maxBlockMs, repeated writes must eventually fail to reserve
    // memory and surface a MemoryAllocationException.
    MockMemqServer memqServer = newTrafficShapingTestServer(100);
    memqServer.start();
    Properties networkProperties = new Properties();
    MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
    int payloadSize = 256;
    builder.cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
        .keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
        .networkProperties(networkProperties)
        .maxInflightRequestsMemoryBytes(64 * 1024) // 64 KB
        .maxBlockMs(1)
        .lingerMs(Integer.MAX_VALUE) // let size threshold take effect
        .maxPayloadBytes(payloadSize)
        .maxInflightRequests(Integer.MAX_VALUE); // avoid hitting request count limit
    MemqProducer<byte[], byte[]> producer = builder.build();
    // Fix: the original placed producer.close()/memqServer.stop() after fail(),
    // making them unreachable, and skipped cleanup entirely on the expected
    // early return; try/finally guarantees cleanup on every path.
    try {
        int recordsWritten = 0;
        while (recordsWritten < 1500) {
            try {
                // Return value intentionally ignored; only the exception matters.
                producer.write(null, "test message that has 32 bytes 1".getBytes());
            } catch (MemoryAllocationException e) {
                // Expected exception due to memory allocation failure
                System.out.println("Caught expected MemoryAllocationException after writing " + recordsWritten + " records");
                return;
            } catch (Exception e) {
                fail("Should throw MemoryAllocationException, but got: " + e);
            }
            recordsWritten++;
        }
        fail("Should throw exception since memory allocation should fail");
    } finally {
        producer.close();
        memqServer.stop();
    }
}
@Test
@Ignore("This test is disabled due to flakiness on GitHub actions")
public void testNonAlignedMemoryLoad() throws Exception {
AtomicInteger requestCount = new AtomicInteger(0);
AtomicInteger redirectCount = new AtomicInteger(0);
AtomicInteger closeCount = new AtomicInteger(0);
AtomicInteger successCount = new AtomicInteger(0);
Map<RequestType, BiConsumer<ChannelHandlerContext, RequestPacket>> map = new HashMap<>();
setupSimpleTestServerTopicMetadataHandler(map);
map.put(RequestType.WRITE, (ctx, req) -> {
ResponsePacket resp;
int current = requestCount.getAndIncrement();
if (current % 1000 == 999) {
closeCount.getAndIncrement();
ctx.close();
return;
} else if (current % 100 == 99) {
redirectCount.getAndIncrement();
resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
req.getRequestType(), ResponseCodes.REDIRECT, new WriteResponsePacket());
} else {
successCount.getAndIncrement();
resp = new ResponsePacket(req.getProtocolVersion(), req.getClientRequestId(),
req.getRequestType(), ResponseCodes.OK, new WriteResponsePacket());
}
ctx.writeAndFlush(resp);
});
MockMemqServer mockServer = new MockMemqServer(port, map);
mockServer.start();
MemqProducer.Builder<byte[], byte[]> builder = new MemqProducer.Builder<>();
Properties networkProperties = new Properties();
byte[] sampleValue = new byte[4 * 1024];
builder.cluster("prototype").topic("test").bootstrapServers(LOCALHOST_STRING + ":" + port)
.keySerializer(new ByteArraySerializer()).valueSerializer(new ByteArraySerializer())
.maxPayloadBytes(4 * 1024 * 1024).compression(Compression.NONE).maxInflightRequestsMemoryBytes(64 * 1024 * 1024)
.networkProperties(networkProperties).metricRegistry(new MetricRegistry());
MemqProducer<byte[], byte[]> producer = builder.build();
ExecutorService es = Executors.newSingleThreadExecutor(r -> {
Thread t = new Thread(r);
t.setName("test");
return t;
});
int NUM_OF_WRITES = 1_000_000;
AtomicBoolean done = new AtomicBoolean(false);
final BlockingQueue<Future<MemqWriteResult>> results = new ArrayBlockingQueue<>(10);
Set<Future<MemqWriteResult>> resultSet = new HashSet<>();
AtomicInteger resultCount = new AtomicInteger(0);
Future<?> consumptionTask = es.submit(() -> {
while (!results.isEmpty() || !done.get()) {
try {
results.take().get();
} catch (Exception e) {
throw new RuntimeException(e.getCause());
}
}
if (!done.get()) {
fail("Not done yet");
} else if (!results.isEmpty()) {
fail("results are not empty");
}
});
for (int i = 0; i < NUM_OF_WRITES; i++) {
if (i % 10_000 == 0) {
System.out.println("[" + i + "/" + NUM_OF_WRITES + "]" + " inflight requests: "
+ results.size() + ", netty off-heap usage (KB): "
+ PooledByteBufAllocator.DEFAULT.metric().usedDirectMemory() / 1024
+ ", netty heap usage (KB): "
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | true |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/producer/TestMemqProducer.java | memq-client/src/test/java/com/pinterest/memq/client/producer/TestMemqProducer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.InvocationTargetException;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BiFunction;
import java.util.zip.CRC32;
import org.junit.Test;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqMessageHeader;
import com.pinterest.memq.client.commons.TestUtils;
import com.pinterest.memq.client.producer.netty.MemqNettyRequest;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.MessageId;
/**
 * Unit tests for {@code MemqProducer} message encoding and for protocol
 * compatibility between the producer-side encoder and the consumer-side
 * iterator. These tests exercise the wire format directly (header fields,
 * CRC, compression id, per-message framing).
 */
public class TestMemqProducer {

  /**
   * Encodes 100 log messages into a {@code MemqNettyRequest} payload and then
   * decodes the raw bytes by hand, field by field, verifying the binary layout:
   * header length, version, extra-header length, CRC, compression id, message
   * count, payload length, and finally each framed message body.
   */
  @Test
  public void testProducerMessageEncoding() throws IOException, InstantiationException,
                                            IllegalAccessException, IllegalArgumentException,
                                            InvocationTargetException, NoSuchMethodException,
                                            SecurityException {
    Semaphore maxRequestLock = new Semaphore(1);
    MemqNettyRequest task = new MemqNettyRequest("xyz", 1L, Compression.GZIP,
        maxRequestLock, true, 1024 * 1024, 100, null, null, 10_000, false);
    int count = 100;
    OutputStream os = task.getOutputStream();
    // Build a test value by doubling the seed string twice ("xyzabcs" x4).
    String data = "xyzabcs";
    String ary = data;
    for (int i = 0; i < 2; i++) {
      ary += ary;
    }
    for (int k = 0; k < count; k++) {
      byte[] bytes = (ary + "i:" + k).getBytes();
      MemqProducer.writeMemqLogMessage(null, null, null, bytes, task, System.currentTimeMillis());
    }
    task.markReady();
    os.close();
    byte[] buf = task.getPayloadAsByteArrays();
    ByteBuffer wrap = ByteBuffer.wrap(buf);
    System.out.println("Header length:" + MemqMessageHeader.getHeaderLength());
    // --- fixed header ---
    assertEquals("Header length didn't match in payload", MemqMessageHeader.getHeaderLength(),
        wrap.getShort());
    assertEquals(task.getVersion(), wrap.getShort());
    short extraHeaderLength = wrap.getShort();
    assertEquals(21, extraHeaderLength);
    // skip testing extra content
    wrap.position(wrap.position() + extraHeaderLength);
    int crc = wrap.getInt();
    byte compression = wrap.get();
    // Compression id 1 corresponds to the GZIP setting used above.
    assertEquals(1, compression);
    int messageCount = wrap.getInt();
    assertEquals(count, messageCount);
    int lengthOfPayload = wrap.getInt();
    // --- payload: compute CRC over the remaining bytes, then copy them out ---
    ByteBuffer payload = wrap.slice();
    CRC32 crcCalc = new CRC32();
    crcCalc.update(payload);
    payload.rewind();
    byte[] outputPayload = new byte[lengthOfPayload];
    for (int i = 0; i < lengthOfPayload; i++) {
      outputPayload[i] = payload.get();
    }
    // Wrap the payload in the matching decompression stream before decoding.
    InputStream stream = new ByteArrayInputStream(outputPayload);
    for (Compression comp : Compression.values()) {
      if (comp.id == compression) {
        stream = comp.getCompressStream(stream);
        break;
      }
    }
    DataInputStream dis = new DataInputStream(stream);
    // --- per-message framing: internal fields, header length, value length, value ---
    for (int k = 0; k < count; k++) {
      short internalFieldsLength = dis.readShort();
      assertEquals(11, internalFieldsLength);
      dis.read(new byte[internalFieldsLength]);
      int headerLength = dis.readInt();
      assertEquals(0, headerLength);
      int length = dis.readInt();
      byte[] b = new byte[length];
      dis.readFully(b);
      // CRC read from the header must match the CRC computed over the payload.
      assertEquals(crc, (int) crcCalc.getValue());
      assertArrayEquals((ary + "i:" + k).getBytes(), b);
    }
  }

  /**
   * Round-trips 200k messages through the test-data iterator for every
   * supported compression codec and checks key, message id, headers,
   * write timestamp, and value on the consumer side. Also verifies that
   * {@code hasNext()} is idempotent (repeated calls do not advance).
   */
  @Test
  public void testProduceConsumeProtocolCompatibility() throws Exception {
    // write data using producer
    int count = 200000;
    for (Compression c : Compression.values()) {
      String data = "xyza33245245234534bcs";
      String ary = data;
      for (int i = 0; i < 2; i++) {
        ary += ary;
      }
      BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> (base + k).getBytes();
      String baseLogMessage = ary + "i:";
      long ts = System.currentTimeMillis();
      Iterator<MemqLogMessage<byte[], byte[]>> iterator = TestUtils
          .getTestDataIteratorWithAllFields(baseLogMessage, getLogMessageBytes, count, 1, c, false);
      for (int i = 0; i < 5001; i++) {
        assertTrue(iterator.hasNext()); // assert idempotence
      }
      long ts1 = System.currentTimeMillis();
      int z = 0;
      while (iterator.hasNext()) {
        MemqLogMessage<byte[], byte[]> next = iterator.next();
        assertNull(next.getKey());
        // Message id encodes the sequence number in its first 8 bytes.
        assertEquals(z, ByteBuffer.wrap(next.getMessageId().toByteArray()).getLong());
        assertNull(next.getHeaders());
        assertTrue(ts <= (Long) next.getWriteTimestamp());
        assertEquals("z:" + z, ary + "i:" + z, new String(next.getValue())); // assert value equals
        z++;
      }
      ts1 = System.currentTimeMillis() - ts1;
      System.out.println("Time to process:" + ts1 + "ms for:" + z);
      assertEquals(count, z);
      assertFalse(iterator.hasNext());
    }
  }

  /**
   * Micro-benchmark-style smoke test: adds 100M 16-byte message ids to a
   * request to exercise {@code addMessageId} throughput, then releases the
   * underlying buffer. Has no assertions; failure mode is an exception/OOM.
   */
  @Test
  public void testMessageIdHashPerf() throws IOException {
    Semaphore maxRequestLock = new Semaphore(1);
    MemqNettyRequest req = new MemqNettyRequest("xyz", 1L, Compression.GZIP,
        maxRequestLock, true, 1024 * 1024, 100, null, null, 10_000, false);
    long nextLong = ThreadLocalRandom.current().nextLong();
    // Takes 1.68s for 100M 16byte messageIds
    for (int i = 0; i < 100_000_000; i++) {
      req.addMessageId(new MessageId(ByteBuffer.allocate(16).putLong(nextLong).putLong(i).array()));
    }
    req.getBuffer().release();
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/producer/netty/TestMemqNettyProducer.java | memq-client/src/test/java/com/pinterest/memq/client/producer/netty/TestMemqNettyProducer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.producer.netty;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.net.InetSocketAddress;
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeoutException;
import java.util.zip.CRC32;
import org.junit.Before;
import org.junit.Test;
import com.codahale.metrics.MetricRegistry;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqLogMessageIterator;
import com.pinterest.memq.client.commons.MemqMessageHeader;
import com.pinterest.memq.client.commons.serde.ByteArrayDeserializer;
import com.pinterest.memq.client.commons.serde.ByteArraySerializer;
import com.pinterest.memq.client.producer.MemqWriteResult;
import com.pinterest.memq.client.producer.TaskRequest;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.protocol.RequestType;
import io.netty.buffer.ByteBuf;
import reactor.netty.DisposableServer;
import reactor.netty.tcp.TcpServer;
public class TestMemqNettyProducer {
private static final String LOCALHOST_STRING = "127.0.0.1";
// Randomized per-test port; assigned by generateRandomPort() before each test.
private int port = -1;
// Unresolved localhost address at `port`, shared by the tests in this class.
private InetSocketAddress commonAddress;

/**
 * Picks a fresh random port in [20000, 30000) before each test and rebuilds
 * {@link #commonAddress}. The loop guarantees the new port differs from the
 * one used by the previous test, avoiding bind conflicts between runs.
 */
@Before
public void generateRandomPort() {
  int newPort = -1;
  while (port == newPort) {
    newPort = ThreadLocalRandom.current().nextInt(20000, 30000);
  }
  port = newPort;
  commonAddress = InetSocketAddress.createUnresolved(LOCALHOST_STRING, port);
}
@Test
public void testSimpleWrite() throws Exception {
    // Mock TCP server that consumes inbound bytes but never responds, forcing
    // the producer's ack-timeout path on every dispatched request.
    DisposableServer mockServer = TcpServer.create()
        .bindAddress(() -> commonAddress)
        .handle(((in, out) -> {
            in.receive().subscribe();
            return out.neverComplete();
        }))
        .bindNow();
    MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster", commonAddress,
        "testTopic", 2, 1024 * 1024, Compression.NONE, false, 100, 30000, "local", 1000, null,
        null);
    assertNotNull(producer);
    // Fix: use assertFalse(x) instead of the assertTrue(!x) anti-idiom.
    assertFalse(producer.getEs().isShutdown());
    producer.setKeySerializer(new ByteArraySerializer());
    producer.setValueSerializer(new ByteArraySerializer());
    // 100 small writes should all batch into one pending request, so the
    // de-duplicating set ends up holding a single future.
    Set<Future<MemqWriteResult>> futures = new HashSet<>();
    for (int i = 0; i < 100; i++) {
        Future<MemqWriteResult> future = producer.writeToTopic(null,
            UUID.randomUUID().toString().getBytes());
        futures.add(future);
    }
    assertEquals("There should be 1 request in the request map", 1,
        producer.getRequestMap().size());
    assertEquals("Since there is 1 pending request there should only be 1 future", 1,
        futures.size());
    TaskRequest request = producer.getRequestMap().entrySet().iterator().next().getValue();
    assertTrue(request instanceof MemqNettyRequest);
    MemqNettyRequest nettyRequest = (MemqNettyRequest) request;
    producer.finalizeRequest();
    assertTrue("Request should be ready", nettyRequest.isReady());
    Thread.sleep(100);
    try {
        futures.iterator().next().get();
        fail("Must throw ack timeout");
    } catch (ExecutionException ee) {
        System.out.println(ee.getCause());
        assertTrue(ee.getCause() instanceof TimeoutException);
    } catch (Exception e) {
        // Fix: include the unexpected exception in the failure message instead
        // of a bare fail().
        fail("Expected ExecutionException wrapping TimeoutException but got: " + e);
    }
    assertEquals(0, producer.getRequestMap().size());
    producer.close();
    mockServer.dispose();
}
@Test
public void testWriteProtocolCompatibility() throws Exception {
CompletableFuture<ByteBuf> bufFuture = new CompletableFuture<>();
DisposableServer mockServer = TcpServer.create()
.bindAddress(() -> commonAddress)
.handle((in, out) -> {
in.receive().aggregate().retain().subscribe(bufFuture::complete);
return out.neverComplete();
})
.bindNow();
String topicName = "testTopic";
MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster", commonAddress,
topicName, 2, 1024 * 1024, Compression.NONE, true, 100, 30000, "local", 10000, null, null);
producer.setKeySerializer(new ByteArraySerializer());
producer.setValueSerializer(new ByteArraySerializer());
Set<Future<MemqWriteResult>> futures = new HashSet<>();
int totalBytesWritten = 0;
for (int i = 0; i < 50; i++) {
byte[] bytes = UUID.randomUUID().toString().getBytes();
totalBytesWritten += bytes.length + 13 + 4 + 4;
Future<MemqWriteResult> future = producer.writeToTopic(null, bytes);
futures.add(future);
}
Future<MemqWriteResult> response = producer.writeToTopic(null, new byte[1024 * 1024]);
assertNull(response);
TaskRequest request = producer.getRequestMap().entrySet().iterator().next().getValue();
assertTrue(request instanceof MemqNettyRequest);
MemqNettyRequest nettyRequest = (MemqNettyRequest) request;
nettyRequest.setDebugEnabled();
producer.finalizeRequest();
assertTrue("Request should be ready", nettyRequest.isReady());
Thread.sleep(300);
producer.close();
// verify data written
ByteBuf buf = bufFuture.get();
assertNotNull(buf);
// attempt to read data
buf.readInt();
assertEquals(RequestType.PROTOCOL_VERSION, buf.readShort());
assertEquals(request.getId(), buf.readLong());
assertEquals(0, buf.readByte());
assertEquals(true, buf.readBoolean());
assertEquals(topicName.length(), buf.readShort());
byte[] tpn = new byte[topicName.length()];
buf.readBytes(tpn);
assertEquals(topicName, new String(tpn));
int checksum = buf.readInt();
int payloadLength = buf.readInt();
assertEquals(totalBytesWritten + MemqMessageHeader.getHeaderLength(), payloadLength);
byte[] payload = new byte[payloadLength];
buf.readBytes(payload);
buf.release();
CRC32 crc = new CRC32();
crc.update(payload);
assertEquals(checksum, (int) crc.getValue());
JsonObject obj = new JsonObject();
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_TOPIC, "test");
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE, payload.length);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_ID, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_READ_TIMESTAMP,
System.currentTimeMillis());
MemqLogMessageIterator<byte[], byte[]> itr = new MemqLogMessageIterator<>("test", "test",
new DataInputStream(new ByteArrayInputStream(payload)), obj, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), new MetricRegistry(), true, null);
int c = 0;
while (itr.hasNext()) {
itr.next();
c++;
}
assertEquals(50, c);
mockServer.dispose();
}
@Test
public void testMemoryUsage() throws Exception {
DisposableServer mockServer = TcpServer.create()
.bindAddress(() -> commonAddress)
.handle((in, out) -> {
in.receive().subscribe();
return out.neverComplete();
})
.bindNow();
String topicName = "testTopic";
MemqNettyProducer<byte[], byte[]> producer = new MemqNettyProducer<>("testcluster", commonAddress,
topicName, 10, 1024 * 1024 * 4, Compression.NONE, true, 100, 30000, "local", 1000, null,
null);
producer.setDebug();
producer.setKeySerializer(new ByteArraySerializer());
producer.setValueSerializer(new ByteArraySerializer());
int batchBytes = 0;
long ts = System.currentTimeMillis();
byte[] bytes = UUID.randomUUID().toString().getBytes();
for (int i = 0; i < 10_000_000; i++) {
producer.writeToTopic(null, bytes);
batchBytes += bytes.length;
if ((System.currentTimeMillis() - ts) >= 1000) {
System.out.println(batchBytes / 1024 / 1024 + "MB/s\t" + i);
batchBytes = 0;
ts = System.currentTimeMillis();
}
}
producer.close();
mockServer.dispose();
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/consumer/TestData.java | memq-client/src/test/java/com/pinterest/memq/client/consumer/TestData.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.util.Base64;
import java.util.function.BiFunction;
import org.junit.Test;
import com.codahale.metrics.MetricRegistry;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqLogMessageIterator;
import com.pinterest.memq.client.commons.TestUtils;
import com.pinterest.memq.client.commons.serde.ByteArrayDeserializer;
import com.pinterest.memq.commons.MemqLogMessage;
public class TestData {
@Test
public void testCompressionNone() throws Exception {
BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
byte[] memqBatchData = TestUtils.getMemqBatchData("test1231231", getLogMessageBytes, 2, 5, true,
Compression.NONE, null, true);
byte[] decode = Base64.getDecoder().decode(Base64.getEncoder().encodeToString(memqBatchData));
DataInputStream dataInputStream = new DataInputStream(new ByteArrayInputStream(decode));
JsonObject currNotificationObj = new JsonObject();
currNotificationObj.addProperty(MemqLogMessage.INTERNAL_FIELD_TOPIC, "test");
currNotificationObj.addProperty(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE, decode.length);
MemqLogMessageIterator<byte[], byte[]> memqLogMessageIterator = new MemqLogMessageIterator<byte[], byte[]>(
"test", "client0", dataInputStream, currNotificationObj, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), new MetricRegistry(), false, null);
while (memqLogMessageIterator.hasNext()) {
memqLogMessageIterator.next();
}
memqLogMessageIterator.close();
}
@Test
public void testCompressionZstd() throws Exception {
BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
byte[] memqBatchData = TestUtils.getMemqBatchData("test1231231", getLogMessageBytes, 2, 5, true,
Compression.ZSTD, null, true);
byte[] decode = Base64.getDecoder().decode(Base64.getEncoder().encodeToString(memqBatchData));
DataInputStream dataInputStream = new DataInputStream(new ByteArrayInputStream(decode));
JsonObject currNotificationObj = new JsonObject();
currNotificationObj.addProperty(MemqLogMessage.INTERNAL_FIELD_TOPIC, "test");
currNotificationObj.addProperty(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE, decode.length);
MemqLogMessageIterator<byte[], byte[]> memqLogMessageIterator = new MemqLogMessageIterator<byte[], byte[]>(
"test", "client0", dataInputStream, currNotificationObj, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), new MetricRegistry(), false, null);
while (memqLogMessageIterator.hasNext()) {
memqLogMessageIterator.next();
}
memqLogMessageIterator.close();
}
@Test
public void testCompressionGzip() throws Exception {
BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
byte[] memqBatchData = TestUtils.getMemqBatchData("test1231231", getLogMessageBytes, 2, 5, true,
Compression.GZIP, null, true);
byte[] decode = Base64.getDecoder().decode(Base64.getEncoder().encodeToString(memqBatchData));
DataInputStream dataInputStream = new DataInputStream(new ByteArrayInputStream(decode));
JsonObject currNotificationObj = new JsonObject();
currNotificationObj.addProperty(MemqLogMessage.INTERNAL_FIELD_TOPIC, "test");
currNotificationObj.addProperty(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE, decode.length);
MemqLogMessageIterator<byte[], byte[]> memqLogMessageIterator = new MemqLogMessageIterator<byte[], byte[]>(
"test", "client0", dataInputStream, currNotificationObj, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), new MetricRegistry(), false, null);
while (memqLogMessageIterator.hasNext()) {
memqLogMessageIterator.next();
}
memqLogMessageIterator.close();
}
// @Test
// public void testCompressionLz4() throws Exception {
// BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
// byte[] memqBatchData = TestUtils.getMemqBatchData("test1231231", getLogMessageBytes, 2, 5, true,
// Compression.LZ4, null, true);
//
// byte[] decode = Base64.getDecoder().decode(Base64.getEncoder().encodeToString(memqBatchData));
// DataInputStream dataInputStream = new DataInputStream(new ByteArrayInputStream(decode));
// JsonObject currNotificationObj = new JsonObject();
// currNotificationObj.addProperty(MemqLogMessage.INTERNAL_FIELD_TOPIC, "test");
// currNotificationObj.addProperty(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE, decode.length);
// MemqLogMessageIterator<byte[], byte[]> memqLogMessageIterator = new MemqLogMessageIterator<byte[], byte[]>(
// "test", "client0", dataInputStream, currNotificationObj, new ByteArrayDeserializer(),
// new ByteArrayDeserializer(), new MetricRegistry(), false, null);
// while (memqLogMessageIterator.hasNext()) {
// memqLogMessageIterator.next();
// }
// memqLogMessageIterator.close();
// }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/consumer/TestConsumerIntegration.java | memq-client/src/test/java/com/pinterest/memq/client/consumer/TestConsumerIntegration.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.function.BiFunction;
import org.apache.commons.lang3.mutable.MutableInt;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.ConsumerConfigs;
import com.pinterest.memq.client.commons.TestUtils;
import com.pinterest.memq.client.commons.audit.KafkaBackedAuditor;
import com.pinterest.memq.commons.BatchHeader;
import com.pinterest.memq.commons.BatchHeader.IndexEntry;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.WriteFailedException;
import com.pinterest.memq.core.commons.Message;
import com.salesforce.kafka.test.junit4.SharedKafkaTestResource;
import com.salesforce.kafka.test.listeners.PlainListener;
public class TestConsumerIntegration {
private static final String TOPIC = "topic";
public static final String KEY = "key";
public static final String BUCKET = "bucket";
public static final String SIZE = "objectSize";
public static final String HEADER_SIZE = "headerSize";
private static final String NUMBER_OF_MESSAGES_IN_BATCH = "numBatchMessages";
@ClassRule
public static final SharedKafkaTestResource sharedKafkaTestResource = new SharedKafkaTestResource()
.withBrokers(1).registerListener(new PlainListener().onPorts(9092));
@Before
public void before() {
String kafkaConnectString = "localhost:9092";
Properties adminProps = new Properties();
adminProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConnectString);
adminProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
ByteArrayDeserializer.class.getCanonicalName());
adminProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
ByteArrayDeserializer.class.getCanonicalName());
adminProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "test12");
adminProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
AdminClient admin = AdminClient.create(adminProps);
String notificationTopic = "notify_topic_1";
admin.createTopics(Arrays.asList(new NewTopic(notificationTopic, 1, (short) 1)));
admin.createTopics(Arrays.asList(new NewTopic("auditTopic", 1, (short) 1)));
admin.close();
}
@Test
public void testSeekAndRead() throws Exception {
Properties props = new Properties();
props.setProperty(ConsumerConfigs.CLUSTER, "test");
Properties notificationProps = new Properties();
notificationProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
notificationProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "test_1");
notificationProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
notificationProps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
StringSerializer.class.getCanonicalName());
notificationProps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
StringSerializer.class.getCanonicalName());
String notificationTopic = "notify_topic_1";
notificationProps.setProperty(KafkaNotificationSource.NOTIFICATION_TOPIC_NAME_KEY,
notificationTopic);
props.put(ConsumerConfigs.NOTIFICATION_SOURCE_PROPS_KEY, notificationProps);
String topic = "topic1";
BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
List<byte[]> hashes = new ArrayList<>();
final byte[] memqBatchData = TestUtils.getMemqBatchData("test1231231", getLogMessageBytes, 1000,
10, true, Compression.GZIP, hashes, true);
Gson gson = new Gson();
KafkaProducer<String, String> notificationProducer = new KafkaProducer<>(notificationProps);
JsonObject payload = new JsonObject();
payload.addProperty(BUCKET, "local1");
payload.addProperty(KEY, "local1");
payload.addProperty(SIZE, memqBatchData.length);
payload.addProperty(TOPIC, topic);
payload.addProperty(HEADER_SIZE, 1024);
payload.addProperty(NUMBER_OF_MESSAGES_IN_BATCH, 1000);
notificationProducer
.send(new ProducerRecord<String, String>(notificationTopic, gson.toJson(payload)));
notificationProducer
.send(new ProducerRecord<String, String>(notificationTopic, gson.toJson(payload)));
notificationProducer.close();
System.setProperty("aws.region", "us-east-1"); // set region for S3 storage handler
MemqConsumer<byte[], byte[]> consumer = getConsumer(props, memqBatchData);
consumer.subscribe(Lists.newArrayList(topic));
consumer.assign(Lists.newArrayList(0));
consumer.seek(ImmutableMap.of(0, 1L));
MutableInt reads = new MutableInt();
Iterator<MemqLogMessage<byte[], byte[]>> poll = consumer.poll(Duration.ofSeconds(10), reads);
assertEquals(1, (int) reads.getValue());
int count = 0;
while (poll.hasNext()) {
poll.next();
count++;
}
assertEquals(10000, count);
consumer.close();
Files.write("localhost:9092".getBytes(), new File("target/testconsumeraudit"));
props.setProperty("auditor.bootstrap.servers", "localhost:9092");
props.setProperty("auditor.class", KafkaBackedAuditor.class.getCanonicalName());
props.setProperty("auditor.topic", "auditTopic");
props.setProperty("auditor.enabled", "true");
notificationProps.setProperty(KafkaNotificationSource.NOTIFICATION_TOPIC_NAME_KEY,
notificationTopic);
props.put(ConsumerConfigs.NOTIFICATION_SOURCE_PROPS_KEY, notificationProps);
consumer = getConsumer(props, memqBatchData);
consumer.subscribe(Lists.newArrayList(topic));
reads = new MutableInt();
poll = consumer.poll(Duration.ofSeconds(10), reads);
assertEquals(2, (int) reads.getValue());
count = 0;
while (poll.hasNext()) {
poll.next();
count++;
}
// 2(notification events)x10(Messages)x1000(Message per event)=20000 messages
assertEquals(20000, count);
// this section of the code validates whether or not auditing is working on the
// consumer
Properties auditConfig = new Properties();
auditConfig.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
auditConfig.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
ByteArrayDeserializer.class.getCanonicalName());
auditConfig.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
ByteArrayDeserializer.class.getCanonicalName());
auditConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "auditConfig_1");
auditConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
KafkaConsumer<byte[], byte[]> auditConsumer = new KafkaConsumer<>(auditConfig);
auditConsumer.subscribe(Arrays.asList("auditTopic"));
ConsumerRecords<byte[], byte[]> pollAudit = auditConsumer.poll(Duration.ofSeconds(10));
Iterator<ConsumerRecord<byte[], byte[]>> itr = pollAudit.iterator();
int auditCount = 0;
List<byte[]> outputHashes = new ArrayList<>();
while (itr.hasNext()) {
ConsumerRecord<byte[], byte[]> next = itr.next();
ByteBuffer wrap = ByteBuffer.wrap(next.value());
wrap.position(wrap.limit() - 13);
byte[] hash = new byte[8];
wrap.get(hash);
outputHashes.add(hash);
auditCount++;
}
auditConsumer.close();
// validate that audit event count is correct
// there were 2 notification events sent, each notification event / Batch has 10
// Messages and each message has 1000 LogMessages
// Audit event is generated for every Message so there should 2x10 = 20 audit
// events
assertEquals(20, auditCount);
// Now we validate audit event hashes to ensure that messageId hashed for all
// 10000x2 events is correct and all events generated were received
for (int i = 0; i < outputHashes.size(); i++) {
assertArrayEquals(hashes.get(i % hashes.size()), outputHashes.get(i));
}
}
private MemqConsumer<byte[], byte[]> getConsumer(Properties props,
final byte[] memqBatchData) throws Exception {
StorageHandler input = new StorageHandler() {
@Override
public DataInputStream fetchMessageAtIndex(JsonObject objectNotification,
IndexEntry index) throws IOException {
return null;
}
@Override
public BatchHeader fetchHeaderForBatch(JsonObject nextNotificationToProcess) throws IOException {
return null;
}
@Override
public InputStream fetchBatchStreamForNotification(JsonObject nextNotificationToProcess) {
return new ByteArrayInputStream(memqBatchData);
}
@Override
public void writeOutput(int sizeInBytes,
int checksum,
List<Message> messages) throws WriteFailedException {
}
@Override
public String getReadUrl() {
return null;
}
};
return new MemqConsumer<byte[], byte[]>(props, input);
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/test/java/com/pinterest/memq/client/consumer/TestMemqConsumer.java | memq-client/src/test/java/com/pinterest/memq/client/consumer/TestMemqConsumer.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.client.consumer;
import static com.pinterest.memq.client.commons.ConsumerConfigs.DRY_RUN_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.KEY_DESERIALIZER_CLASS_KEY;
import static com.pinterest.memq.client.commons.ConsumerConfigs.VALUE_DESERIALIZER_CLASS_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiFunction;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.utils.ByteBufferOutputStream;
import org.junit.Test;
import com.codahale.metrics.MetricRegistry;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons.Compression;
import com.pinterest.memq.client.commons.MemqLogMessageIterator;
import com.pinterest.memq.client.commons.TestUtils;
import com.pinterest.memq.client.commons.serde.ByteArrayDeserializer;
import com.pinterest.memq.commons.BatchHeader;
import com.pinterest.memq.commons.BatchHeader.IndexEntry;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.WriteFailedException;
import com.pinterest.memq.core.commons.Message;
public class TestMemqConsumer {
@Test(expected = RuntimeException.class)
public void testIteratorRemove() throws IOException {
Iterator<MemqLogMessage<String, String>> iterator = new MemqLogMessageIterator<>("test", "test",
new DataInputStream(new ByteArrayInputStream(new byte[0])), null, null, null, null, true,
null);
iterator.remove(); // assert exception
}
@Test
public void testIteratorHasNext() throws Exception {
int count = 5000;
BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
Iterator<MemqLogMessage<byte[], byte[]>> iterator = TestUtils.getTestDataIterator("hello world",
getLogMessageBytes, count, 1, Compression.NONE, true);
for (int i = 0; i < 2 * count; i++) {
assertTrue(iterator.hasNext()); // assert idempotence before calling any next()
}
for (int i = 0; i < count; i++) {
assertTrue(iterator.hasNext()); // assert iterator.hasNext() returns true when i < count
iterator.next();
}
for (int i = 0; i < 2 * count; i++) {
assertFalse("Index:" + i, iterator.hasNext()); // assert hasNext() idempotence after iterator
// finishes
}
}
@Test
public void testIteratorNextContentEquals() throws Exception {
int count = 5000;
BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
Iterator<MemqLogMessage<byte[], byte[]>> iterator = TestUtils.getTestDataIterator("hello world",
getLogMessageBytes, count, 1, Compression.NONE, false);
for (int i = 0; i < count; i++) {
assertEquals("hello world", new String(iterator.next().getValue()));
}
}
@Test(expected = RuntimeException.class)
public void testIteratorNextExceptionHandling() throws Exception {
int count = 5000;
BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
Iterator<MemqLogMessage<byte[], byte[]>> iterator = TestUtils.getTestDataIterator("hello world",
getLogMessageBytes, count, 1, Compression.NONE, true);
for (int i = 0; i < count; i++) {
iterator.next();
}
iterator.next(); // should throw exception
}
@Test
public void testIteratorNextPerf() throws Exception {
int count = 5_00_000;
StringBuilder payload = new StringBuilder();
for (int i = 0; i < 10; i++) {
payload.append(UUID.randomUUID().toString());
}
System.out.println("Measuring perf");
int msgs = 1;
buildAndRunMemQRecordBench(count, payload, msgs);
buildAndRunKafkaRecordBench(count, payload, msgs);
}
@Test
public void testIteratorErrorHandling() throws Exception {
Properties mcProps = new Properties();
mcProps.put(KEY_DESERIALIZER_CLASS_KEY, ByteArrayDeserializer.class.getName());
mcProps.put(VALUE_DESERIALIZER_CLASS_KEY, ByteArrayDeserializer.class.getName());
mcProps.put(DRY_RUN_KEY, "true");
final AtomicInteger condition = new AtomicInteger();
BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
byte[] memqBatchData = TestUtils.getMemqBatchData("hello world", getLogMessageBytes, 100, 1,
false, Compression.NONE, null, false);
MemqInput input = new MemqInput() {
@Override
public InputStream fetchBatchStreamForNotification(JsonObject nextNotificationToProcess) throws IOException {
if (condition.getAndIncrement() == 0) {
throw new IOException("xyz");
}
try {
return new ByteArrayInputStream(memqBatchData);
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
@Override
public DataInputStream fetchMessageAtIndex(JsonObject objectNotification,
IndexEntry index) throws IOException {
return null;
}
@Override
public BatchHeader fetchHeaderForBatch(JsonObject nextNotificationToProcess) throws IOException {
return null;
}
@Override
public void initReader(Properties properties, MetricRegistry registry) throws Exception {
}
};
MemqConsumer<byte[], byte[]> mc = new MemqConsumer<>(mcProps, input);
mc.subscribe(Arrays.asList("test"));
JsonObject obj = new JsonObject();
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_TOPIC, "test");
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE, memqBatchData.length);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_ID, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_READ_TIMESTAMP,
System.currentTimeMillis());
mc.getNotificationQueue().add(obj);
Iterator<MemqLogMessage<byte[], byte[]>> itr = mc.poll(Duration.ofSeconds(3));
if (itr.hasNext()) {
itr.next();
}
mc.close();
}
private void buildAndRunMemQRecordBench(int count,
StringBuilder payload,
int msgs) throws Exception, IOException {
BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
byte[] rawData = TestUtils.getMemqBatchData(payload.toString(), getLogMessageBytes, count, msgs,
false, Compression.ZSTD, null, false);
int c = 0;
long ns = System.nanoTime();
for (int i = 0; i < 2; i++) {
ByteArrayInputStream in = new ByteArrayInputStream(rawData);
DataInputStream stream = new DataInputStream(in);
JsonObject obj = new JsonObject();
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_TOPIC, "test");
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_OBJECT_SIZE, rawData.length);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_ID, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET, 1);
obj.addProperty(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_READ_TIMESTAMP,
System.currentTimeMillis());
MemqLogMessageIterator<byte[], byte[]> iterator = new MemqLogMessageIterator<>("test", "test",
stream, obj, new ByteArrayDeserializer(), new ByteArrayDeserializer(),
new MetricRegistry(), false, null);
for (int p = 0; p < count * msgs; p++) {
iterator.next();
c++;
}
}
ns = System.nanoTime() - ns;
System.out.println("Memq:" + ns / 1000 / 1000 + "ms to execute:" + c);
}
private void buildAndRunKafkaRecordBench(int count, StringBuilder payload, int msgs) {
int c;
long ns;
ByteBufferOutputStream out = new ByteBufferOutputStream(1024 * 1024);
MemoryRecordsBuilder builder = new MemoryRecordsBuilder(out, RecordBatch.MAGIC_VALUE_V2,
CompressionType.ZSTD, TimestampType.CREATE_TIME, 0L, System.currentTimeMillis(),
System.currentTimeMillis(), (short) 0, 0, false, false,
(int) (System.currentTimeMillis() / 1000), 100);
byte[] bytes = payload.toString().getBytes();
for (int i = 0; i < count * msgs; i++) {
builder.append(System.currentTimeMillis(), null, bytes);
}
builder.close();
ns = System.nanoTime();
MemoryRecords records = builder.build();
c = 0;
for (int i = 0; i < 2; i++) {
for (Record record : records.records()) {
record.value();
c++;
}
}
ns = System.nanoTime() - ns;
System.out.println("Kafka:" + ns / 1000 / 1000 + "ms to execute:" + c);
}
@Test(expected = NoTopicsSubscribedException.class)
public void testNoTopicsSubscribedPoll() throws Exception {
Properties mcProps = new Properties();
mcProps.put(DRY_RUN_KEY, "true");
MemqInput input = new MemqInput() {
@Override
public InputStream fetchBatchStreamForNotification(JsonObject nextNotificationToProcess) {
BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
try {
return new ByteArrayInputStream(TestUtils.getMemqBatchData("hello world",
getLogMessageBytes, 100, 1, false, Compression.NONE, null, false));
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
@Override
public DataInputStream fetchMessageAtIndex(JsonObject objectNotification,
IndexEntry index) throws IOException {
return null;
}
@Override
public BatchHeader fetchHeaderForBatch(JsonObject nextNotificationToProcess) throws IOException {
return null;
}
@Override
public void initReader(Properties properties, MetricRegistry registry) throws Exception {
}
};
MemqConsumer<byte[], byte[]> mc = new MemqConsumer<byte[], byte[]>(mcProps, input);
mc.poll(Duration.ofSeconds(3)); // should throw NoTopicsSubscribedException
mc.close();
}
@Test(expected = UnsupportedOperationException.class)
public void testMultiTopicSubscribeException() throws Exception {
  Properties consumerProps = new Properties();
  consumerProps.put(DRY_RUN_KEY, "true");
  // Minimal storage stub: serves a single in-memory batch on the read path.
  MemqInput storage = new MemqInput() {
    @Override
    public InputStream fetchBatchStreamForNotification(JsonObject nextNotificationToProcess) {
      BiFunction<String, Integer, byte[]> logMessageBytes = (base, idx) -> base.getBytes();
      try {
        byte[] batch = TestUtils.getMemqBatchData("hello world", logMessageBytes, 100, 1, false,
            Compression.NONE, null, false);
        return new ByteArrayInputStream(batch);
      } catch (Exception e) {
        e.printStackTrace();
        return null;
      }
    }

    @Override
    public DataInputStream fetchMessageAtIndex(JsonObject objectNotification,
                                               IndexEntry index) throws IOException {
      return null;
    }

    @Override
    public BatchHeader fetchHeaderForBatch(JsonObject nextNotificationToProcess) throws IOException {
      return null;
    }

    @Override
    public void initReader(Properties properties, MetricRegistry registry) throws Exception {
    }
  };
  MemqConsumer<byte[], byte[]> consumer = new MemqConsumer<>(consumerProps, storage);
  consumer.subscribe(Collections.singleton("test_topic"));
  // A second subscribe to a different topic is rejected with
  // UnsupportedOperationException (expected by the @Test annotation).
  consumer.subscribe(Collections.singleton("not_this_topic"));
  consumer.close();
}
@Test
public void testSubscribeAndUnsubscribe() throws Exception {
  MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  Properties mcProps = new Properties();
  mcProps.put(KEY_DESERIALIZER_CLASS_KEY, ByteArrayDeserializer.class.getName());
  mcProps.put(VALUE_DESERIALIZER_CLASS_KEY, ByteArrayDeserializer.class.getName());
  mcProps.put(DRY_RUN_KEY, "true");
  // Minimal storage stub: serves a single in-memory batch on the read path.
  MemqInput input = new MemqInput() {
    @Override
    public InputStream fetchBatchStreamForNotification(JsonObject nextNotificationToProcess) {
      BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> base.getBytes();
      try {
        return new ByteArrayInputStream(TestUtils.getMemqBatchData("hello world",
            getLogMessageBytes, 100, 1, false, Compression.NONE, null, false));
      } catch (Exception e) {
        e.printStackTrace();
      }
      return null;
    }

    @Override
    public DataInputStream fetchMessageAtIndex(JsonObject objectNotification,
                                               IndexEntry index) throws IOException {
      return null;
    }

    @Override
    public BatchHeader fetchHeaderForBatch(JsonObject nextNotificationToProcess) throws IOException {
      return null;
    }
  };
  MemqConsumer<byte[], byte[]> mc = new MemqConsumer<byte[], byte[]>(mcProps, input);
  KafkaNotificationSource notificationSource = new KafkaNotificationSource(consumer);
  notificationSource.setParentConsumer(mc);
  mc.setNotificationSource(notificationSource);
  mc.subscribe(Collections.singleton("test_topic"));
  consumer.assign(Collections.singletonList(new TopicPartition("test_notification_topic", 0)));
  Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
  beginningOffsets.put(new TopicPartition("test_notification_topic", 0), 0L);
  consumer.updateBeginningOffsets(beginningOffsets);
  int numNotifications = 10;
  addAlternatingNotifications(consumer, numNotifications, 0);
  assertEquals(0, mc.getNotificationQueue().size());
  mc.poll(Duration.ofSeconds(3));
  assertEquals(0, mc.getNotificationQueue().size());
  addAlternatingNotifications(consumer, numNotifications, numNotifications);
  assertEquals(0, mc.getNotificationQueue().size());
  mc.poll(Duration.ofSeconds(3));
  assertEquals(0, mc.getNotificationQueue().size());
  // Drop the subscription, then subscribe again: the consumer should resume
  // from where the previous successful poll left off.
  mc.unsubscribe();
  mc.subscribe(Collections.singleton("test_topic")); // subscribe again
  addAlternatingNotifications(consumer, numNotifications, 2 * numNotifications);
  addAlternatingNotifications(consumer, numNotifications, 3 * numNotifications);
  mc.poll(Duration.ofSeconds(3));
  assertEquals(0, mc.getNotificationQueue().size());
  // Re-subscribing to the same topic is a no-op.
  mc.subscribe(Collections.singleton("test_topic")); // shouldn't do anything
  addAlternatingNotifications(consumer, numNotifications, 4 * numNotifications);
  mc.poll(Duration.ofSeconds(3));
  assertEquals(0, mc.getNotificationQueue().size());
  mc.close();
}

/**
 * Adds {@code count} notification records to the mock notification topic,
 * alternating between "test_topic" (even indices) and "not_this_topic" (odd
 * indices). Offsets start at {@code offsetBase}.
 */
private void addAlternatingNotifications(MockConsumer<String, String> consumer,
                                         int count,
                                         int offsetBase) {
  for (int i = 0; i < count; i++) {
    JsonObject notification = new JsonObject();
    if (i % 2 == 0) {
      notification.addProperty("topic", "test_topic");
    } else {
      notification.addProperty("topic", "not_this_topic");
    }
    consumer.addRecord(new ConsumerRecord<>("test_notification_topic", 0, offsetBase + i, "key",
        new Gson().toJson(notification)));
  }
}
@Test
public void testSkipToLastLogMessage() throws Exception {
  int count = 10241;
  String seed = "xyza33245245234534bcs";
  // Double the payload twice -> four copies of the seed string.
  String grown = seed;
  for (int doubling = 0; doubling < 2; doubling++) {
    grown += grown;
  }
  BiFunction<String, Integer, byte[]> getLogMessageBytes = (base, k) -> (base + k).getBytes();
  String baseLogMessage = grown + "i:";
  MemqLogMessageIterator<byte[], byte[]> iterator = (MemqLogMessageIterator<byte[], byte[]>) TestUtils
      .getTestDataIterator(baseLogMessage, getLogMessageBytes, count, 5, Compression.GZIP, false);
  // hasNext() must be idempotent: repeated calls consume nothing.
  for (int call = 0; call < 501; call++) {
    assertTrue(iterator.hasNext()); // assert idempotence
  }
  iterator.skipToLastLogMessage();
  int i = 0;
  try {
    while (iterator.hasNext()) {
      iterator.next();
      i++;
    }
  } catch (Exception e) {
    e.printStackTrace();
    fail("Failed at:" + i + " " + e.getMessage());
  }
  assertEquals("Skip to last MUST yield only 1 message", 1, i);
}
// Test-only base implementation of StorageHandler. It stubs out the
// write-side methods so individual tests only need to override the
// read-side methods they actually exercise.
public abstract class MemqInput implements StorageHandler {
  @Override
  public String getReadUrl() {
    // No meaningful read URL for an in-memory test handler.
    return null;
  }

  @Override
  public void writeOutput(int sizeInBytes,
                          int checksum,
                          List<Message> messages) throws WriteFailedException {
    // Writes are discarded; tests feed data through the fetch* overrides.
  }
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/StorageHandler.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/StorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Properties;
import com.codahale.metrics.MetricRegistry;
import com.google.gson.JsonObject;
import com.pinterest.memq.client.commons2.DataNotFoundException;
import com.pinterest.memq.commons.BatchHeader;
import com.pinterest.memq.commons.BatchHeader.IndexEntry;
import com.pinterest.memq.commons.protocol.BatchData;
import com.pinterest.memq.core.commons.Message;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
/**
 * Pluggable storage backend for MemQ batches. Implementations provide the
 * write path (persisting a batch of messages) and, optionally, the read path
 * (fetching batches or individual messages back from a notification).
 * Read-side methods default to {@link UnsupportedOperationException} so
 * write-only handlers need not implement them.
 */
public interface StorageHandler {

  /** Notification JSON key holding the size in bytes of the stored object. */
  String SIZE = "objectSize";

  /**
   * Initialize write-side resources for the given topic. Default is a no-op.
   *
   * @param outputHandlerConfig handler configuration
   * @param topic topic this writer serves
   * @param registry metrics registry for writer metrics
   * @throws Exception if initialization fails
   */
  default void initWriter(Properties outputHandlerConfig,
                          String topic,
                          MetricRegistry registry) throws Exception {
  }

  /**
   * Reconfigure the storage handler with new configs
   *
   * @param outputHandlerConfig the new configs
   * @return true if configuration is accepted, otherwise false
   */
  default boolean reconfigure(Properties outputHandlerConfig) {
    return true;
  }

  /**
   * Persist one batch of messages.
   *
   * @param sizeInBytes total payload size of the batch
   * @param checksum checksum of the batch contents
   * @param messages messages to persist
   * @throws WriteFailedException if the batch could not be persisted
   */
  void writeOutput(int sizeInBytes,
                   int checksum,
                   List<Message> messages) throws WriteFailedException;

  /** Returns a human-readable description of where reads are served from. */
  String getReadUrl();

  /**
   * Builds the batch header as a pooled direct buffer. Layout: header length
   * (int), then the message index written by
   * {@link #writeMessageIndex(List, ByteBuf, int)}. The caller owns the
   * returned buffer and must release it.
   */
  static ByteBuf getBatchHeadersAsByteArray(final List<Message> messages) {
    // 2 ints of fixed header plus a 12-byte (3 int) index entry per message
    int length = Integer.BYTES * 2 + // header length
        messages.size() * Integer.BYTES * 3;
    ByteBuf header = PooledByteBufAllocator.DEFAULT.directBuffer(length, length);
    header.writeInt(header.capacity() - Integer.BYTES);// header length
    writeMessageIndex(messages, header, length);
    return header;
  }

  /**
   * Writes the message index: message count followed by one
   * (index, byte offset, size) triplet per message. Payloads are laid out
   * immediately after the header, so the first offset equals the header
   * capacity.
   */
  static void writeMessageIndex(final List<Message> messages, ByteBuf header, int capacity) {
    header.writeInt(messages.size());
    int offset = capacity;
    for (int i = 0; i < messages.size(); i++) {
      Message message = messages.get(i);
      int size = message.getBuf().readableBytes();
      header.writeInt(i);
      header.writeInt(offset);
      header.writeInt(size);
      offset += size;
    }
  }

  /** Release write-side resources. Default is a no-op. */
  default void closeWriter() {
  }

  /** Initialize read-side resources. Default is a no-op. */
  default void initReader(Properties properties, MetricRegistry registry) throws Exception {
  }

  /** Fetch the full batch referenced by the notification as a stream. */
  default InputStream fetchBatchStreamForNotification(JsonObject nextNotificationToProcess) throws IOException,
                                                                                            DataNotFoundException {
    throw new UnsupportedOperationException();
  }

  /** Fetch the full batch referenced by the notification as buffered data. */
  default BatchData fetchBatchStreamForNotificationBuf(JsonObject nextNotificationToProcess) throws IOException,
                                                                                             DataNotFoundException {
    throw new UnsupportedOperationException();
  }

  /** Fetch only the header of the batch referenced by the notification. */
  default BatchHeader fetchHeaderForBatch(JsonObject nextNotificationToProcess) throws IOException,
                                                                                DataNotFoundException {
    throw new UnsupportedOperationException();
  }

  /** Fetch only the batch header, as buffered data. */
  default BatchData fetchHeaderForBatchBuf(JsonObject nextNotificationToProcess) throws IOException,
                                                                                 DataNotFoundException {
    throw new UnsupportedOperationException();
  }

  /** Fetch a single message from the batch at the given index entry. */
  default DataInputStream fetchMessageAtIndex(JsonObject objectNotification,
                                              IndexEntry index) throws IOException,
                                                                DataNotFoundException {
    throw new UnsupportedOperationException();
  }

  /** Reads the stored object's size from the notification payload. */
  default int getBatchSizeFromNotification(JsonObject notification) {
    return notification.get(SIZE).getAsInt();
  }

  /** Release read-side resources. Default is a no-op. */
  default void closeReader() {
  }

  /** Fetch a single message at the given index entry, as buffered data. */
  default BatchData fetchMessageAtIndexBuf(JsonObject objectNotification,
                                           IndexEntry index) throws IOException, DataNotFoundException {
    throw new UnsupportedOperationException();
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/WriteFailedException.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/WriteFailedException.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage;
/**
 * Thrown when a {@code StorageHandler} fails to persist a batch of messages.
 */
public class WriteFailedException extends Exception {

  private static final long serialVersionUID = 1L;

  /**
   * @param msg description of the failure
   */
  public WriteFailedException(String msg) {
    super(msg);
  }

  /**
   * @param e underlying cause of the failure
   */
  public WriteFailedException(Exception e) {
    super(e);
  }

  /**
   * Creates an exception carrying both a contextual message and the
   * underlying cause, so callers don't have to choose between the two.
   *
   * @param msg description of the failure
   * @param cause underlying cause of the failure
   */
  public WriteFailedException(String msg, Throwable cause) {
    super(msg, cause);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/ReadBrokerStorageHandler.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/ReadBrokerStorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage;
import com.pinterest.memq.client.commons.CommonConfigs;
import com.pinterest.memq.client.commons.ConsumerConfigs;
import com.pinterest.memq.client.commons2.DataNotFoundException;
import com.pinterest.memq.client.commons2.Endpoint;
import com.pinterest.memq.client.commons2.MemqCommonClient;
import com.pinterest.memq.client.commons2.TopicNotFoundException;
import com.pinterest.memq.client.commons2.network.NetworkClient;
import com.pinterest.memq.commons.BatchHeader;
import com.pinterest.memq.commons.protocol.BatchData;
import com.pinterest.memq.commons.protocol.ReadRequestPacket;
import com.pinterest.memq.commons.protocol.ReadResponsePacket;
import com.pinterest.memq.commons.protocol.RequestPacket;
import com.pinterest.memq.commons.protocol.RequestType;
import com.pinterest.memq.commons.protocol.ResponseCodes;
import com.pinterest.memq.commons.protocol.ResponsePacket;
import com.codahale.metrics.MetricRegistry;
import com.google.gson.JsonObject;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Base class for storage handlers that can serve reads either locally or by
 * proxying through MemQ read brokers. When local reads are disabled
 * ("read.local.enabled" = false), a {@link MemqCommonClient} is created and
 * connected to the topic's read brokers; {@code readBatch} then issues READ
 * requests over that client, following REDIRECT responses up to
 * {@code maxReadAttempts} times.
 */
public abstract class ReadBrokerStorageHandler implements StorageHandler {

  // Static: one logger per class, not per handler instance.
  private static final Logger logger = Logger
      .getLogger(ReadBrokerStorageHandler.class.getName());
  private long connectTimeout = 500;
  private int maxReadAttempts = 3;
  private boolean localRead;
  private MemqCommonClient client;

  @Override
  public void initReader(Properties properties, MetricRegistry registry) throws Exception {
    StorageHandler.super.initReader(properties, registry);
    localRead = Boolean.parseBoolean(properties.getProperty("read.local.enabled", "true"));
    if (!isLocalRead()) {
      String topic = properties.getProperty(ConsumerConfigs.TOPIC_INTERNAL_PROP);
      String bootstrapServers = properties.getProperty(ConsumerConfigs.BOOTSTRAP_SERVERS);
      properties.setProperty(NetworkClient.CONFIG_CONNECT_TIMEOUT_MS, "300000");
      List<Endpoint> endpoints = MemqCommonClient
          .getEndpointsFromBootstrapServerString(bootstrapServers);
      // Attempt to use locality provided by the configuring reader, if locality is
      // missing then the underlying network client will ignore it. Using locality can
      // prevent X-AZ network costs when read brokers are used.
      setClient(new MemqCommonClient(properties.getProperty(CommonConfigs.CLIENT_LOCALITY, ""),
          null, properties));
      getClient().initialize(endpoints);
      // use topic metadata to find the read brokers for this topic and then reconnect
      getClient().reconnect(topic, true);
    }
  }

  protected long getConnectTimeout() {
    return connectTimeout;
  }

  protected void setConnectTimeout(long connectTimeout) {
    this.connectTimeout = connectTimeout;
  }

  protected int getMaxReadAttempts() {
    return maxReadAttempts;
  }

  protected void setMaxReadAttempts(int maxReadAttempts) {
    this.maxReadAttempts = maxReadAttempts;
  }

  protected boolean isLocalRead() {
    return localRead;
  }

  protected void setLocalRead(boolean localRead) {
    this.localRead = localRead;
  }

  protected void setClient(MemqCommonClient client) {
    this.client = client;
  }

  protected MemqCommonClient getClient() {
    return client;
  }

  /** Fetches only the batch header (no payload) for the notification. */
  protected BatchData readBatchHeader(String topic, JsonObject notification) throws Exception {
    return readBatch(topic, notification, true,
        new BatchHeader.IndexEntry(ReadRequestPacket.DISABLE_READ_AT_INDEX,
            ReadRequestPacket.DISABLE_READ_AT_INDEX),
        connectTimeout, 0);
  }

  /** Fetches the full batch for the notification. */
  protected BatchData readBatch(String topic, JsonObject notification) throws Exception {
    return readBatch(topic, notification, false,
        new BatchHeader.IndexEntry(ReadRequestPacket.DISABLE_READ_AT_INDEX,
            ReadRequestPacket.DISABLE_READ_AT_INDEX),
        connectTimeout, 0);
  }

  /** Fetches a single message from the batch at the given index entry. */
  protected BatchData readBatchAtIndex(String topic,
                                       JsonObject notification,
                                       BatchHeader.IndexEntry entry) throws Exception {
    return readBatch(topic, notification, false, entry, connectTimeout, 0);
  }

  /**
   * Issues a READ request for the notification and returns the batch data.
   * On REDIRECT it reconnects and retries recursively until the response is
   * terminal or {@code attempts} exceeds {@code maxReadAttempts}.
   *
   * @throws DataNotFoundException on a NO_DATA response
   * @throws TopicNotFoundException on a NOT_FOUND response
   * @throws Exception on retry exhaustion or any other error response
   */
  protected BatchData readBatch(String topic,
                                JsonObject notification,
                                boolean readHeaderOnly,
                                BatchHeader.IndexEntry entry,
                                long timeoutMillis,
                                int attempts) throws Exception {
    Future<ResponsePacket> response = getClient().sendRequestPacketAndReturnResponseFuture(
        new RequestPacket(RequestType.PROTOCOL_VERSION, ThreadLocalRandom.current().nextLong(),
            RequestType.READ, new ReadRequestPacket(topic, notification, readHeaderOnly, entry)),
        topic,
        timeoutMillis);
    ResponsePacket responsePacket = response.get(timeoutMillis, TimeUnit.MILLISECONDS);
    if (responsePacket.getResponseCode() == ResponseCodes.OK) {
      return ((ReadResponsePacket) responsePacket.getPacket()).getBatchData();
    } else if (responsePacket.getResponseCode() == ResponseCodes.NO_DATA) {
      throw new DataNotFoundException("Request failed");
    } else if (responsePacket.getResponseCode() == ResponseCodes.NOT_FOUND) {
      throw new TopicNotFoundException("Topic " + topic + " not found");
    } else if (responsePacket.getResponseCode() == ResponseCodes.REDIRECT) {
      if (attempts > maxReadAttempts) {
        throw new Exception("Retries exhausted for reading: " + notification);
      }
      // Broker moved: refresh topic metadata, reconnect and retry.
      getClient().reconnect(topic, true);
      return readBatch(topic, notification, readHeaderOnly, entry, timeoutMillis, attempts + 1);
    } else {
      throw new ExecutionException("Request failed, code:" + responsePacket.getResponseCode()
          + " error:" + responsePacket.getErrorMessage(), null);
    }
  }

  @Override
  public void closeReader() {
    StorageHandler.super.closeReader();
    if (client != null) {
      try {
        client.close();
      } catch (IOException e) {
        logger.log(Level.SEVERE, "Failed to close client", e);
      }
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/NoOpStorageHandler.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/NoOpStorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage;
import com.pinterest.memq.core.commons.Message;
import com.codahale.metrics.MetricRegistry;
import java.util.List;
import java.util.Properties;
/**
 * Storage handler that silently discards every batch. Useful for testing the
 * write pipeline without any actual persistence.
 */
@StorageHandlerName(name = "noop")
public class NoOpStorageHandler implements StorageHandler {

  /** Nothing to set up for the no-op handler. */
  @Override
  public void initWriter(Properties outputHandlerConfig,
                         String topic,
                         MetricRegistry registry) throws Exception {
    // intentionally empty
  }

  /** Discards the batch without touching any storage. */
  @Override
  public void writeOutput(int sizeInBytes,
                          int checksum,
                          List<Message> messages) throws WriteFailedException {
    // intentionally empty
  }

  /** There is no backing store to read from. */
  @Override
  public String getReadUrl() {
    return "noop";
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/StorageHandlerName.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/StorageHandlerName.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
// Marks a class as a StorageHandler implementation and assigns the alias it
// is registered under; scanned at runtime via reflection (annotation must be
// retained at RUNTIME for that to work).
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.TYPE })
public @interface StorageHandlerName {
  // Primary alias the handler is registered under.
  String name();

  // Optional legacy alias kept for backward compatibility; empty means none.
  String previousName() default "";
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/DelayedDevNullStorageHandler.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/DelayedDevNullStorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
import com.codahale.metrics.MetricRegistry;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.commons.MessageBufferInputStream;
@StorageHandlerName(name = "delayeddevnull")
public class DelayedDevNullStorageHandler implements StorageHandler {
private int maxDelay;
private ThreadLocalRandom rand;
private int minDelay;
private static AtomicLong counter = new AtomicLong();
private static AtomicLong byteCounter = new AtomicLong();
private static AtomicLong inputStreamCounter = new AtomicLong();
@Override
public void initWriter(Properties outputHandlerConfig,
String topic,
MetricRegistry registry) throws Exception {
minDelay = Integer.parseInt(outputHandlerConfig.getProperty("delay.min.millis", "100"));
maxDelay = Integer.parseInt(outputHandlerConfig.getProperty("delay.max.millis", "2000"));
rand = ThreadLocalRandom.current();
}
@Override
public void writeOutput(int sizeInBytes,
int checksum,
List<Message> messages) throws WriteFailedException {
try {
Thread.sleep(rand.nextInt(minDelay, maxDelay));
counter.accumulateAndGet(messages.size(), (v1, v2) -> v1 + v2);
byteCounter.accumulateAndGet(sizeInBytes, (v1, v2) -> v1 + v2);
MessageBufferInputStream is = new MessageBufferInputStream(messages, null);
while (is.read() != -1) {
inputStreamCounter.incrementAndGet();
}
is.close();
} catch (InterruptedException | IOException e) {
// ignore errors
}
}
public static long getCounter() {
return counter.get();
}
public static long getByteCounter() {
return byteCounter.get();
}
public static long getInputStreamCounter() {
return inputStreamCounter.get();
}
public static void reset() {
counter.set(0);
byteCounter.set(0);
inputStreamCounter.set(0);
}
@Override
public String getReadUrl() {
return "delayeddevnull";
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/DevNullStorageHandler.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/DevNullStorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage;
import com.pinterest.memq.commons.storage.s3.KafkaNotificationSink;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.utils.MiscUtils;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.google.gson.JsonObject;
import io.netty.buffer.ByteBuf;
import java.io.File;
import java.nio.channels.FileChannel;
import java.nio.file.StandardOpenOption;
import java.util.List;
import java.util.Properties;
/**
 * Storage handler that streams each batch into /dev/null and optionally
 * publishes a notification to Kafka afterwards.
 */
@StorageHandlerName(name = "devnull")
public class DevNullStorageHandler implements StorageHandler {

  private KafkaNotificationSink notificationSink;
  private boolean disableNotifications;
  private Timer notificationPublishingTimer;

  // Reads "disableNotifications" (default true). When notifications are
  // enabled, wires up the Kafka sink and a publish-latency timer.
  @Override
  public void initWriter(Properties outputHandlerConfig, String topic, MetricRegistry registry)
      throws Exception {
    String disabled = outputHandlerConfig.getProperty("disableNotifications", "true");
    this.disableNotifications = Boolean.parseBoolean(disabled);
    if (disableNotifications) {
      return;
    }
    this.notificationSink = new KafkaNotificationSink();
    this.notificationSink.init(outputHandlerConfig);
    this.notificationPublishingTimer =
        MiscUtils.oneMinuteWindowTimer(registry, "output.notification.publish.latency");
  }

  // Writes the batch header followed by every message payload to /dev/null,
  // then publishes a notification if enabled. The header buffer is always
  // released, even on failure.
  @Override
  public void writeOutput(int sizeInBytes, int checksum, List<Message> messages)
      throws WriteFailedException {
    ByteBuf headerBuf = StorageHandler.getBatchHeadersAsByteArray(messages);
    File devNull = new File("/dev/null");
    try (FileChannel channel = FileChannel.open(devNull.toPath(), StandardOpenOption.WRITE)) {
      headerBuf.readBytes(channel, headerBuf.readableBytes());
      for (Message message : messages) {
        ByteBuf body = message.getBuf();
        body.readBytes(channel, body.readableBytes());
      }
      if (!disableNotifications) {
        JsonObject payload = new JsonObject();
        payload.addProperty("type", "devnull");
        Timer.Context publishTime = notificationPublishingTimer.time();
        notificationSink.notify(payload, 0);
        publishTime.stop();
      }
    } catch (Exception e) {
      throw new WriteFailedException(e);
    } finally {
      headerBuf.release();
    }
  }

  @Override
  public String getReadUrl() {
    return "devnull";
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/StorageHandlerTable.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/StorageHandlerTable.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.logging.Logger;
import org.reflections.Reflections;
/**
 * Registry mapping storage-handler aliases (declared via
 * {@link StorageHandlerName}) to their implementing classes. Implementations
 * are discovered by classpath scanning when this class is loaded.
 *
 * Ideal inherited from:
 * https://github.com/srotya/sidewinder/blob/development/core/src/main/java/com/srotya/sidewinder/core/functions/FunctionTable.java
 *
 */
public abstract class StorageHandlerTable {

  private static final Logger logger = Logger.getLogger(StorageHandlerTable.class.getName());
  private static Map<String, Class<? extends StorageHandler>> handlerMap = new HashMap<>();

  static {
    findAndRegisterOutputHandlers(StorageHandlerTable.class.getPackage().getName());
  }

  /**
   * Scans the given package for classes annotated with
   * {@link StorageHandlerName} and registers each under its primary alias and
   * (if present) its legacy alias.
   */
  public static void findAndRegisterOutputHandlers(String packageName) {
    Reflections reflections = new Reflections(packageName.trim());
    Set<Class<?>> annotatedClasses = reflections.getTypesAnnotatedWith(StorageHandlerName.class);
    for (Class<?> annotatedClass : annotatedClasses) {
      StorageHandlerName plugin = annotatedClass.getAnnotation(StorageHandlerName.class);
      if (plugin == null) {
        // Shouldn't happen since we queried by annotation; log the class so
        // the message is actionable instead of printing "null".
        logger.severe("Plugin info null for class:" + annotatedClass.getName());
        continue;
      }
      registerStorageHandlerClassWithAlias(annotatedClass, plugin.name());
      registerStorageHandlerClassWithAlias(annotatedClass, plugin.previousName());
    }
  }

  /**
   * Registers the class under the alias. Empty aliases are skipped (handlers
   * with no previousName pass "" here); a duplicate alias is a fatal
   * configuration error and terminates the process.
   */
  @SuppressWarnings("unchecked")
  private static void registerStorageHandlerClassWithAlias(Class<?> annotatedClass, String alias) {
    if (alias == null || alias.isEmpty()) {
      logger.warning("Ignoring empty storage handler alias for:" + annotatedClass.getName());
      return;
    }
    if (handlerMap.containsKey(alias)) {
      logger.severe(
          "Output plugin alias '" + alias + "' already exists, " + annotatedClass.getName());
      System.exit(-1);
    }
    handlerMap.put(alias, (Class<? extends StorageHandler>) annotatedClass);
    logger
        .info("Registered output handler(" + annotatedClass.getName() + ") with alias:" + alias);
  }

  /**
   * Looks up the handler class registered under the given alias.
   *
   * @return the handler class, or null if no handler uses that alias
   */
  @SuppressWarnings("unchecked")
  public static Class<StorageHandler> getClass(String name) {
    return (Class<StorageHandler>) handlerMap.get(name);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/SysoutStorageHandler.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/SysoutStorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage;
import java.util.List;
import java.util.Properties;
import com.codahale.metrics.MetricRegistry;
import com.pinterest.memq.core.commons.Message;
import io.netty.buffer.ByteBuf;
@StorageHandlerName(name = "sysout")
public class SysoutStorageHandler implements StorageHandler {
@Override
public void initWriter(Properties outputHandlerConfig,
String topic,
MetricRegistry registry) throws Exception {
}
@Override
public void writeOutput(int sizeInBytes,
int checksum,
List<Message> messages) throws WriteFailedException {
for (Message message : messages) {
System.out.println(new String(readToByteArray(message.getBuf())));
}
}
public static byte[] readToByteArray(ByteBuf buf) {
byte[] ary = new byte[buf.readableBytes()];
for (int i = 0; i < buf.readableBytes(); i++) {
ary[i] = buf.readByte();
}
return ary;
}
@Override
public String getReadUrl() {
return "System.in";
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/SessionTokenManager.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/SessionTokenManager.java | /**
* Copyright 2024 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3express;
import java.io.ByteArrayInputStream;
import java.net.URI;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import io.netty.channel.ChannelOption;
import reactor.netty.http.client.HttpClient;
import reactor.netty.resources.ConnectionProvider;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider;
import software.amazon.awssdk.auth.signer.AwsS3V4Signer;
import software.amazon.awssdk.auth.signer.params.AwsS3V4SignerParams;
import software.amazon.awssdk.http.SdkHttpFullRequest;
import software.amazon.awssdk.http.SdkHttpMethod;
import software.amazon.awssdk.regions.Region;
/**
 * Manages S3 Express session credentials.
 *
 * <p>Each bucket has a deque of credentials. A scheduled task adds fresh
 * credentials periodically while the old ones are still valid, so readers
 * always see a usable token at the head of the deque.
 */
public class SessionTokenManager {

  /** Process-wide singleton; see {@link #getInstance()}. */
  private static final SessionTokenManager mgr = new SessionTokenManager();
  private static final Logger logger = Logger.getLogger(SessionTokenManager.class.getName());
  private static final String S3_EXPRESS = "s3express";
  private static final String CREDENTIAL_PROVIDER_THREAD_NAME = "IamCredentialUpdater";
  // Defaults for the reactor-netty connection pool used for CreateSession calls.
  private static final int DEFAULT_MAX_CONNECTIONS_SECOND = 10;
  private static final int DEFAULT_MAX_IDLE_TIME_SECOND = 20;
  private static final int DEFAULT_MAX_LIFE_TIME_SECOND = 60;
  private static final int DEFAULT_PENDING_ACQUIRE_TIMEOUT_SECOND = 60;
  private static final int DEFAULT_EVICT_IN_BACKGROUND_SECOND = 120;
  // Poll interval while a caller waits for a bucket's first credentials.
  private static final int FETCH_CREDENTIALS_INTERVAL_MS = 100;
  private static final int SOCKET_SEND_BUFFER_BYTES = 4 * 1024 * 1024;
  // Keep at most two credentials per bucket: the current one and its refresh.
  private static final int MAX_CREDS_PER_BUCKET = 2;

  private String credentialProviderType = "instance";
  private Map<String, ConcurrentLinkedDeque<SessionCreds>> bucketCredentialMap = new ConcurrentHashMap<>();
  // Single daemon thread drives the periodic per-bucket credential refresh.
  private ScheduledExecutorService es = Executors.newScheduledThreadPool(1, new ThreadFactory() {
    @Override
    public Thread newThread(Runnable r) {
      Thread th = new Thread(r);
      th.setDaemon(true);
      return th;
    }
  });
  private HttpClient secureClient;

  public SessionTokenManager() {
    this(new Properties());
  }

  public SessionTokenManager(Properties props) {
    ConnectionProvider connectionProvider = getConnectionProvider(props);
    secureClient = HttpClient.create(connectionProvider)
        .option(ChannelOption.SO_SNDBUF, SOCKET_SEND_BUFFER_BYTES)
        .option(ChannelOption.SO_LINGER, 0).secure();
  }

  public static SessionTokenManager getInstance() {
    return mgr;
  }

  /**
   * Get the connection provider based on the properties.
   * @param props pool sizing/lifetime overrides; defaults apply when absent
   * @return ConnectionProvider
   */
  private static ConnectionProvider getConnectionProvider(Properties props) {
    int maxConnections = Integer.parseInt(props.getProperty("maxConnections",
        String.valueOf(DEFAULT_MAX_CONNECTIONS_SECOND)));
    int maxIdleTime = Integer.parseInt(props.getProperty("maxIdleTime",
        String.valueOf(DEFAULT_MAX_IDLE_TIME_SECOND)));
    int maxLifeTime = Integer.parseInt(props.getProperty("maxLifeTime",
        String.valueOf(DEFAULT_MAX_LIFE_TIME_SECOND)));
    int pendingAcquireTimeout = Integer.parseInt(props.getProperty("pendingAcquireTimeout",
        String.valueOf(DEFAULT_PENDING_ACQUIRE_TIMEOUT_SECOND)));
    int evictInBackground = Integer.parseInt(props.getProperty("evictInBackground",
        String.valueOf(DEFAULT_EVICT_IN_BACKGROUND_SECOND)));
    return ConnectionProvider.builder(S3_EXPRESS)
        .maxConnections(maxConnections)
        .maxIdleTime(Duration.ofSeconds(maxIdleTime))
        .maxLifeTime(Duration.ofSeconds(maxLifeTime))
        .pendingAcquireTimeout(Duration.ofSeconds(pendingAcquireTimeout))
        .evictInBackground(Duration.ofSeconds(evictInBackground))
        .build();
  }

  /**
   * Returns valid session credentials for the given bucket, starting the
   * periodic refresher for that bucket on first use.
   *
   * <p>Each bucket has a deque of credentials; the refresher adds new
   * credentials every few minutes while the old one is still valid and trims
   * the deque to {@code MAX_CREDS_PER_BUCKET}.
   *
   * @param bucketName bucket to fetch credentials for
   * @return SessionCreds session credentials
   * @throws InterruptedException if interrupted while waiting for the first fetch
   */
  public SessionCreds getCredentials(final String bucketName) throws InterruptedException {
    ConcurrentLinkedDeque<SessionCreds> concurrentLinkedDeque = bucketCredentialMap.get(bucketName);
    if (concurrentLinkedDeque == null) {
      // Double-checked under the map lock so only one refresher task is
      // scheduled per bucket.
      synchronized (bucketCredentialMap) {
        concurrentLinkedDeque = bucketCredentialMap.get(bucketName);
        if (concurrentLinkedDeque == null) {
          concurrentLinkedDeque = new ConcurrentLinkedDeque<>();
          // start the scheduled task for credential refresh
          final ConcurrentLinkedDeque<SessionCreds> concurrentLinkedDequeRef = concurrentLinkedDeque;
          es.scheduleAtFixedRate(() -> {
            try {
              SessionCreds fetchCredentials = fetchCredentials(bucketName);
              concurrentLinkedDequeRef.add(fetchCredentials);
              if (concurrentLinkedDequeRef.size() == MAX_CREDS_PER_BUCKET) {
                // purge existing credentials
                concurrentLinkedDequeRef.poll();
              }
            } catch (Exception e) {
              // Log with stack trace through the logging pipeline instead of
              // printing to stderr; the task keeps running and retries on the
              // next tick.
              logger.log(java.util.logging.Level.SEVERE,
                  "Failed to refresh S3 Express session credentials for bucket " + bucketName, e);
            }
          }, 0, 4, TimeUnit.MINUTES);
          bucketCredentialMap.put(bucketName, concurrentLinkedDeque);
        }
      }
    }
    // Block until the refresher has produced the first credentials.
    while (concurrentLinkedDeque.isEmpty()) {
      Thread.sleep(FETCH_CREDENTIALS_INTERVAL_MS);
    }
    return concurrentLinkedDeque.peek();
  }

  /**
   * Set the credential provider type ("default" or "instance").
   * @param credentialProviderType provider type
   */
  public void setCredentialProviderType(String credentialProviderType) {
    this.credentialProviderType = credentialProviderType;
  }

  /**
   * Get the credential provider based on the type.
   * Default credential provider is mainly used when testing.
   * Instance credential provider is mainly used when the service is deployed to EC2 instances.
   * @return AwsCredentialsProvider
   * @throws IllegalArgumentException if the configured type is unknown
   */
  public AwsCredentialsProvider getAwsCredentialsProvider() throws IllegalArgumentException {
    if (credentialProviderType.equals("default")) {
      return DefaultCredentialsProvider
          .builder().asyncCredentialUpdateEnabled(true).build();
    } else if (credentialProviderType.equals("instance")) {
      return InstanceProfileCredentialsProvider
          .builder().asyncCredentialUpdateEnabled(true).asyncThreadName(CREDENTIAL_PROVIDER_THREAD_NAME)
          .build();
    } else {
      throw new IllegalArgumentException("Unsupported credential provider type: " + credentialProviderType);
    }
  }

  /**
   * Fetch the session credentials for the given bucket by signing and sending
   * an S3 Express CreateSession request.
   * @param bucketName bucket to create a session for
   * @return SessionCreds
   * @throws Exception on signing, transport, or parse failure
   */
  protected SessionCreds fetchCredentials(String bucketName) throws Exception {
    SdkHttpFullRequest createSessionRequest = generateCreateSessionRequest(bucketName);
    AwsCredentialsProvider credentialsProvider = getAwsCredentialsProvider();
    Region region = Region.of(S3ExpressHelper.getRegionFromBucket(bucketName));
    SdkHttpFullRequest signedCreateSessionRequest = signRequest(
        createSessionRequest, credentialsProvider, region);
    Map<String, List<String>> signedCreateSessionRequestHeaders = signedCreateSessionRequest.headers();
    String awsResponse = secureClient.headers(headers -> {
      for (Entry<String, List<String>> entry : signedCreateSessionRequestHeaders.entrySet()) {
        headers.set(entry.getKey(), entry.getValue().get(0));
      }
    }).get().uri(createSessionRequest.getUri()).responseSingle((response, bytes) -> bytes.asString()).block();
    logger.fine("AWS Credential Response: " + awsResponse);
    return generateSessionCreds(awsResponse);
  }

  /**
   * Sign the request with the provided credentials.
   * @param req request to sign
   * @param credentialProvider source of AWS credentials
   * @param region signing region
   * @return SdkHttpFullRequest signed request
   */
  public static SdkHttpFullRequest signRequest(SdkHttpFullRequest req, AwsCredentialsProvider credentialProvider, Region region) {
    AwsS3V4Signer signer = AwsS3V4Signer.create();
    return signer.sign(req,
        AwsS3V4SignerParams.builder().awsCredentials(credentialProvider.resolveCredentials())
            .signingName(S3_EXPRESS).signingRegion(region).build());
  }

  /**
   * Generate the create session request
   * @param bucketName bucket to create a session for
   * @return SdkHttpFullRequest create session request
   * @throws Exception if the bucket URL cannot be generated
   */
  private static SdkHttpFullRequest generateCreateSessionRequest(String bucketName) throws Exception {
    return SdkHttpFullRequest.builder()
        .appendHeader("x-amz-create-session-mode", "ReadWrite")
        .appendRawQueryParameter("session", "").method(SdkHttpMethod.GET)
        .uri(URI.create(S3ExpressHelper.generateBucketUrl(bucketName)))
        .build();
  }

  /**
   * Generate the session credentials from the AWS CreateSession XML response.
   * @param awsResponse raw XML response body
   * @return SessionCreds session credentials
   * @throws Exception on parse failure or unexpected document shape
   */
  private static SessionCreds generateSessionCreds(String awsResponse) throws Exception {
    DocumentBuilderFactory builderFactory = DocumentBuilderFactory.newInstance();
    // Harden against XXE: a well-formed CreateSession response never contains
    // a DOCTYPE, so reject doctype declarations outright.
    builderFactory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
    DocumentBuilder builder = builderFactory.newDocumentBuilder();
    // Decode with an explicit charset rather than the platform default.
    Document xmlDocument = builder.parse(
        new ByteArrayInputStream(awsResponse.getBytes(java.nio.charset.StandardCharsets.UTF_8)));
    XPath xPath = XPathFactory.newInstance().newXPath();
    String expression = "/CreateSessionResult/Credentials/SessionToken";
    String token = ((Node) (xPath.compile(expression).evaluate(xmlDocument, XPathConstants.NODE)))
        .getTextContent();
    expression = "/CreateSessionResult/Credentials/SecretAccessKey";
    String secret = ((Node) (xPath.compile(expression).evaluate(xmlDocument, XPathConstants.NODE)))
        .getTextContent();
    expression = "/CreateSessionResult/Credentials/AccessKeyId";
    String key = ((Node) (xPath.compile(expression).evaluate(xmlDocument, XPathConstants.NODE)))
        .getTextContent();
    return new SessionCreds(key, secret, token);
  }

  /**
   * Main method to fetch the session credentials for the given bucket. Used for testing.
   * @param args bucket name as the first argument
   * @throws Exception on any failure
   */
  public static void main(String[] args) throws Exception {
    SessionTokenManager token = new SessionTokenManager();
    String bucketName = args[0];
    token.fetchCredentials(bucketName);
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/S3ExpressAsyncStorageHandler.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/S3ExpressAsyncStorageHandler.java | /**
* Copyright 2024 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3express;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.time.Duration;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import javax.naming.ConfigurationException;
import com.pinterest.memq.commons.storage.s3express.keygenerator.DateHourKeyGenerator;
import com.pinterest.memq.commons.storage.s3express.keygenerator.S3ExpressObjectKeyGenerator;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscription;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;
import com.google.gson.JsonObject;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.protocol.BatchData;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.StorageHandlerName;
import com.pinterest.memq.commons.storage.WriteFailedException;
import com.pinterest.memq.commons.storage.s3.AbstractS3StorageHandler;
import com.pinterest.memq.commons.storage.s3.KafkaNotificationSink;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import com.pinterest.memq.core.utils.MemqUtils;
import com.pinterest.memq.core.utils.MiscUtils;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelOption;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.util.ReferenceCounted;
import reactor.core.publisher.Mono;
import reactor.netty.http.client.HttpClient;
import reactor.netty.http.client.HttpClientResponse;
import reactor.netty.resources.ConnectionProvider;
import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.signer.AwsS3V4Signer;
import software.amazon.awssdk.auth.signer.params.AwsS3V4SignerParams;
import software.amazon.awssdk.http.SdkHttpFullRequest;
import software.amazon.awssdk.http.SdkHttpMethod;
import software.amazon.awssdk.regions.Region;
/**
* S3 Express storage handler for MemQ, manages both writes and reads
*/
@StorageHandlerName(name = "s3express")
public class S3ExpressAsyncStorageHandler extends AbstractS3StorageHandler {
private static final int HIGH_LATENCY_THRESHOLD = 5;
private static final int ERROR_CODE = 500;
private static final int SUCCESS_CODE = 200;
private static final String SLASH = "/";
private static final String CONTENT_LENGTH = "Content-Length";
private static final String CONTENT_MD5 = "Content-MD5";
private static final String E_TAG = "ETag";
private static final String BUCKET = "bucket";
private static final String REGION = "region";
private static final String DEFAULT_REGION = "us-east-1";
private static final String DEFAULT_RETRY_TIMEOUT_MILLIS = "5000";
private static final String DEFAULT_RETRY_COUNT = "2";
private static final String DEFAULT_RETRY_COUNT_500S = "3";
private static final int LAST_ATTEMPT_TIMEOUT = 60_000;
private Logger logger = Logger.getLogger(S3ExpressAsyncStorageHandler.class.getName());
private String path;
private String bucket;
private KafkaNotificationSink notificationSink;
private String topic;
@SuppressWarnings("unused")
private boolean dryrun;
private boolean disableNotifications;
private boolean enableMD5;
private volatile int maxAttempts;
private volatile int maxS3Attempts;
private volatile int retryTimeoutMillis;
private HttpClient secureClient;
private MetricRegistry registry;
private ExecutorService requestExecutor;
private ScheduledExecutorService executionTimer;
private Timer s3PutLatencyTimer;
private Timer s3PutInternalLatencyTimer;
private Timer notificationPublishingTimer;
private Counter s3RetryCounters;
private Counter s3RequestCounter;
private Counter notificationFailureCounter;
private Counter timeoutExceptionCounter;
private String baseConnStr = null;
private S3ExpressObjectKeyGenerator keyGenerator;
static {
// Set the DNS cache TTL to 1 second to avoid stale DNS entries
java.security.Security.setProperty("networkaddress.cache.ttl", "1");
}
public S3ExpressAsyncStorageHandler() {
}
protected void initializeWriterRegistry(MetricRegistry registry) {
this.registry = registry;
this.s3RequestCounter = registry.counter(
"output.s3express.requests");
this.s3RetryCounters = registry.counter(
"output.s3express.retries");
this.timeoutExceptionCounter = registry.counter(
"output.timeout.exceptions");
this.notificationFailureCounter = registry.counter(
"output.notification.fail");
this.notificationPublishingTimer = MiscUtils.oneMinuteWindowTimer(registry,
"output.notification.publish.latency");
this.s3PutLatencyTimer = MiscUtils.oneMinuteWindowTimer(registry,
"output.s3express.putobjectlatency");
this.s3PutInternalLatencyTimer = MiscUtils.oneMinuteWindowTimer(registry,
"output.s3express.internalPutobjectlatency");
}
protected void loadOutputHandlerConfigs(Properties outputHandlerConfig, String topic) throws Exception {
this.dryrun = Boolean.parseBoolean(outputHandlerConfig.getProperty("dryrun", "false"));
this.disableNotifications = Boolean
.parseBoolean(outputHandlerConfig.getProperty("disableNotifications", "false"));
if (!disableNotifications) {
this.notificationSink = new KafkaNotificationSink();
this.notificationSink.init(outputHandlerConfig);
}
this.region = Region.of(outputHandlerConfig.getProperty(REGION, DEFAULT_REGION).toLowerCase());
this.bucket = outputHandlerConfig.getProperty(BUCKET);
if (bucket == null) {
throw new ConfigurationException("Missing S3 bucket name");
}
this.enableMD5 = Boolean.parseBoolean(outputHandlerConfig.getProperty("enableMD5", "false"));
if (!enableMD5) {
logger.warning("MD5 hashes for uploads have been disabled");
}
this.retryTimeoutMillis = Integer
.parseInt(outputHandlerConfig.getProperty("retryTimeoutMillis", DEFAULT_RETRY_TIMEOUT_MILLIS));
this.maxAttempts = Integer.parseInt(outputHandlerConfig.getProperty("retryCount", DEFAULT_RETRY_COUNT)) + 1;
this.maxS3Attempts = Integer.parseInt(outputHandlerConfig.getProperty("retryCount500s", DEFAULT_RETRY_COUNT_500S)) + 1;
this.path = outputHandlerConfig.getProperty("path", topic);
this.keyGenerator = new DateHourKeyGenerator(path);
}
@Override
public void initWriter(Properties outputHandlerConfig,
String topic,
MetricRegistry registry) throws Exception {
this.topic = topic;
initializeWriterRegistry(registry);
loadOutputHandlerConfigs(outputHandlerConfig, topic);
this.logger = Logger.getLogger(S3ExpressAsyncStorageHandler.class.getName() + "-" + topic);
this.requestExecutor = Executors.newCachedThreadPool(new DaemonThreadFactory());
this.executionTimer = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory());
ConnectionProvider provider = ConnectionProvider.builder("s3express")
.maxConnections(10)
.maxIdleTime(Duration.ofSeconds(20))
.maxLifeTime(Duration.ofSeconds(60))
.pendingAcquireTimeout(Duration.ofSeconds(60))
.evictInBackground(Duration.ofSeconds(120))
.build();
this.secureClient = HttpClient.create(provider).option(ChannelOption.SO_SNDBUF, 4 * 1024 * 1024)
.option(ChannelOption.SO_LINGER, 0).secure();
logger.fine("Session Credentials: " + SessionTokenManager.getInstance().fetchCredentials(bucket));
baseConnStr = S3ExpressHelper.generateBucketUrl(bucket);
}
@Override
public boolean reconfigure(Properties outputHandlerConfig) {
int newRetryTimeoutMillis = Integer.parseInt(
outputHandlerConfig.getProperty("retryTimeoutMillis", DEFAULT_RETRY_TIMEOUT_MILLIS));
if (newRetryTimeoutMillis != retryTimeoutMillis) {
retryTimeoutMillis = newRetryTimeoutMillis;
}
int newMaxAttempts = Integer.parseInt(
outputHandlerConfig.getProperty("retryCount", DEFAULT_RETRY_COUNT)) + 1;
if (newMaxAttempts != maxAttempts) {
maxAttempts = newMaxAttempts;
}
return true;
}
@Override
public void initReader(Properties properties, MetricRegistry registry) throws Exception {
super.initReader(properties, registry);
this.bucket = properties.getProperty(BUCKET);
if (bucket == null) {
throw new ConfigurationException("Missing S3 bucket name");
}
baseConnStr = S3ExpressHelper.generateBucketUrl(bucket);
}
@Override
public void writeOutput(int objectSize,
int checksum,
final List<Message> messages) throws WriteFailedException {
Context timer = s3PutLatencyTimer.time();
ByteBuf batchHeader = StorageHandler.getBatchHeadersAsByteArray(messages);
final List<ByteBuf> messageBuffers = messageToBufferList(messages);
try {
final int currentMaxAttempts = maxAttempts;
final int currentRetryTimeoutMs = retryTimeoutMillis;
int contentLength = batchHeader.writerIndex() + objectSize;
String contentMD5 = null;
UploadResult result = null;
boolean hasSucceeded = false;
int attempt = 0;
Message firstMessage = messages.get(0);
// map used for cancellation
Map<String, Future<UploadResult>> futureMap = new HashMap<>();
Map<String, CompletableFuture<UploadResult>> taskMap = new HashMap<>();
final Publisher<ByteBuf> bodyPublisher = getBodyPublisher(messageBuffers, batchHeader);
while (attempt < currentMaxAttempts) {
final int timeout = attempt == currentMaxAttempts - 1 ? LAST_ATTEMPT_TIMEOUT
: currentRetryTimeoutMs;
final int k = attempt;
final String key = getKeyGenerator().generateObjectKey(
firstMessage.getClientRequestId(),
firstMessage.getServerRequestId(),
k
);
CompletableFuture<UploadResult> task = new CompletableFuture<>();
Callable<UploadResult> uploadAttempt = () -> {
try {
UploadResult ur = attemptUpload(bodyPublisher, objectSize, checksum, contentLength,
contentMD5, key, k, 0);
task.complete(ur);
return ur;
} catch (Exception e) {
task.completeExceptionally(e);
throw e;
}
};
Future<UploadResult> future = requestExecutor.submit(uploadAttempt);
futureMap.put(key, future);
taskMap.put(key, task);
CompletableFuture<UploadResult> resultFuture = anyUploadResultOrTimeout(taskMap.values(),
Duration.ofMillis(timeout));
try {
result = resultFuture.get();
// start tracking response codes from s3
registry.counter("output.s3express.responseCode." + result.getResponseCode()).inc();
if (result.getResponseCode() == SUCCESS_CODE) {
hasSucceeded = true;
break;
} else {
// remove the task so that it doesn't short circuit the next iteration
taskMap.remove(result.getKey());
logger.severe("Request failed reason:" + result + " attempt:" + result.getAttempt());
if (result.getResponseCode() >= 500 && result.getResponseCode() < 600) {
// retry 500s without increasing attempts
s3RetryCounters.inc();
if (s3RetryCounters.getCount() >= maxS3Attempts) {
logger.severe(String.format("Retried %d times for key %s, still getting 5XX, giving up",
maxS3Attempts, key));
break;
}
continue;
}
}
} catch (ExecutionException ee) {
if (ee.getCause() instanceof TimeoutException) {
timeoutExceptionCounter.inc();
} else {
logger.log(Level.SEVERE, "Request failed", ee);
}
} catch (Exception e) {
logger.log(Level.SEVERE, "Request failed", e);
}
attempt++;
s3RetryCounters.inc();
}
// best effort cancel all outstanding uploads, no matter what the result is
for (Map.Entry<String, Future<UploadResult>> entry : futureMap.entrySet()) {
if (result != null && entry.getKey().equals(result.getKey())) {
continue;
}
entry.getValue().cancel(true);
}
if (result == null) {
throw new WriteFailedException("All upload attempts failed");
} else if (!hasSucceeded) {
throw new WriteFailedException(
"Upload failed due to error out: s3express://" + bucket + "/" + result.getKey());
}
if (!disableNotifications) {
Context publishTime = notificationPublishingTimer.time();
JsonObject payload = buildPayload(topic, bucket, objectSize, messages.size(),
batchHeader.capacity(), result.getKey(), result.getAttempt());
if (contentMD5 != null) {
payload.addProperty(CONTENT_MD5, contentMD5);
}
try {
notificationSink.notify(payload, 0);
} catch (Exception e) {
notificationFailureCounter.inc();
throw e;
} finally {
publishTime.stop();
}
}
long latencySec = TimeUnit.NANOSECONDS.toSeconds(timer.stop());
if (latencySec > HIGH_LATENCY_THRESHOLD) {
final String s3path = "s3express://" + bucket + SLASH + result.getKey();
logger.info("Uploaded " + s3path + " latency(" + latencySec + ")s, successful on attempt "
+ result.getAttempt() + ", total tasks: " + futureMap.size());
}
} catch (Exception e) {
timer.stop();
throw new WriteFailedException(e);
} finally {
messageBuffers.forEach(ReferenceCounted::release);
batchHeader.release();
}
}
private UploadResult attemptUpload(final Publisher<ByteBuf> bodyPublisher,
int sizeInBytes,
int checksum,
int contentLength,
String contentMD5,
final String key,
final int count,
int timeout) throws URISyntaxException, InterruptedException {
Context internalLatency = s3PutInternalLatencyTimer.time();
SessionTokenManager instance = SessionTokenManager.getInstance();
SessionCreds credentials = instance.getCredentials(bucket);
AwsS3V4Signer signer = AwsS3V4Signer.create();
SdkHttpFullRequest req = SdkHttpFullRequest.builder().method(SdkHttpMethod.PUT)
.appendHeader("x-amz-s3session-token", credentials.token)
.appendHeader(CONTENT_LENGTH, String.valueOf(contentLength))
.uri(URI.create(baseConnStr + key))
.build();
final SdkHttpFullRequest req1 = signer.sign(req,
AwsS3V4SignerParams.builder().awsCredentials(new AwsCredentials() {
@Override
public String secretAccessKey() {
return credentials.secret;
}
@Override
public String accessKeyId() {
return credentials.key;
}
}).signingName("s3express").signingRegion(region).build());
s3RequestCounter.inc();
Mono<HttpClientResponse> responseFuture = secureClient.headers(headers -> {
for (Entry<String, List<String>> entry : req1.headers().entrySet()) {
headers.set(entry.getKey(), entry.getValue());
}
}).put().uri(req.getUri()).send(bodyPublisher).response();
HttpClientResponse response = responseFuture.block();
HttpResponseStatus status = response.status();
int responseCode = status.code();
HttpHeaders responseHeaders = response.responseHeaders();
if (responseCode != SUCCESS_CODE) {
logger.severe(responseCode + " reason:" + status.reasonPhrase() + "\t" + responseHeaders
+ " index:" + count + " url:" + req.getUri());
}
if (contentMD5 != null && responseCode == SUCCESS_CODE) {
try {
String eTagHex = responseHeaders.get(E_TAG);
String etagToBase64 = MemqUtils.etagToBase64(eTagHex.replace("\"", ""));
if (!contentMD5.equals(etagToBase64)) {
logger.severe("Request failed due to etag mismatch url:" + req.getUri());
responseCode = ERROR_CODE;
}
} catch (Exception e) {
logger.log(Level.SEVERE, "Unable to parse the returned etag", e);
}
}
return new UploadResult(key, responseCode, responseHeaders, internalLatency.stop(), count);
}
public static class UploadResult {
private final String key;
private final int responseCode;
private final HttpHeaders httpResponseHeaders;
private final long time;
private final int attempt;
public UploadResult(String key,
int responseCode,
HttpHeaders responseHeaders,
long time,
int attempt) {
this.key = key;
this.responseCode = responseCode;
this.httpResponseHeaders = responseHeaders;
this.time = time;
this.attempt = attempt;
}
public int getResponseCode() {
return responseCode;
}
public HttpHeaders getHttpResponseHeaders() {
return httpResponseHeaders;
}
public String getKey() {
return key;
}
public long getTime() {
return time;
}
public int getAttempt() {
return attempt;
}
@Override
public String toString() {
return "UploadResult [key=" + key + ", responseCode=" + responseCode
+ ", httpResponseHeaders=" + httpResponseHeaders + "]";
}
}
public static List<ByteBuf> messageToBufferList(List<Message> messages) {
return messages.stream().map(m -> m.getBuf().retainedDuplicate()).collect(Collectors.toList());
}
public static CompositeByteBuf messageAndHeaderToCompositeBuffer(final List<ByteBuf> messageByteBufs,
ByteBuf batchHeaders) {
CompositeByteBuf byteBuf = PooledByteBufAllocator.DEFAULT.compositeBuffer();
byteBuf.addComponent(true, batchHeaders.retainedDuplicate());
byteBuf.addComponents(true,
messageByteBufs.stream().map(ByteBuf::retainedDuplicate).collect(Collectors.toList()));
return byteBuf;
}
public static Publisher<ByteBuf> getBodyPublisher(
final List<ByteBuf> messageByteBufs, ByteBuf batchHeaders) {
return s -> s.onSubscribe(new Subscription() {
@Override
public void request(long n) {
CompositeByteBuf byteBuf = messageAndHeaderToCompositeBuffer(messageByteBufs, batchHeaders);
s.onNext(byteBuf);
s.onComplete();
}
@Override
public void cancel() {
}
});
}
public CompletableFuture<UploadResult> anyUploadResultOrTimeout(Collection<CompletableFuture<UploadResult>> tasks,
Duration duration) {
final CompletableFuture<UploadResult> promise = new CompletableFuture<>();
executionTimer.schedule(() -> {
final TimeoutException ex = new TimeoutException(
"Timeout after " + duration.toMillis() + " milliseconds");
return promise.completeExceptionally(ex);
}, duration.toMillis(), TimeUnit.MILLISECONDS);
CompletableFuture<UploadResult> anyUploadResultFuture = CompletableFuture
.anyOf(tasks.toArray(new CompletableFuture[0])).thenApply(o -> (UploadResult) o);
return anyUploadResultFuture.applyToEither(promise, Function.identity());
}
public void closeWriter() {
notificationSink.close();
}
protected KafkaNotificationSink getNotificationSink() {
return notificationSink;
}
@Override
public String getReadUrl() {
return notificationSink.getReadUrl();
}
@Override
public Logger getLogger() {
return logger;
}
private SdkHttpFullRequest generateGetObjectRequest(SessionCreds creds, String objectKey) {
return SdkHttpFullRequest.builder().method(SdkHttpMethod.GET)
.appendHeader("x-amz-s3session-token", creds.token)
.uri(URI.create(baseConnStr + objectKey))
.build();
}
private SdkHttpFullRequest signRequest(SessionCreds creds, SdkHttpFullRequest request) {
AwsS3V4Signer signer = AwsS3V4Signer.create();
return signer.sign(request,
AwsS3V4SignerParams.builder().awsCredentials(new AwsCredentials() {
@Override
public String secretAccessKey() {
return creds.secret;
}
@Override
public String accessKeyId() {
return creds.key;
}
}).signingName("s3express").signingRegion(region).build());
}
private SessionCreds getCredentials(String bucket) throws IOException {
SessionTokenManager instance = SessionTokenManager.getInstance();
SessionCreds credentials;
try {
credentials = instance.getCredentials(bucket);
} catch (InterruptedException e) {
throw new IOException(e);
}
return credentials;
}
public void setKeyGenerator(S3ExpressObjectKeyGenerator keyGenerator) {
this.keyGenerator = keyGenerator;
}
public S3ExpressObjectKeyGenerator getKeyGenerator() {
return keyGenerator;
}
/**
 * Translates a consumer notification into a signed GET request for the batch
 * object it references. Logs the bucket/key plus the notification's partition
 * id/offset at FINE, and the object size at FINEST.
 */
private SdkHttpFullRequest generateFetchRequest(JsonObject nextNotificationToProcess) throws IOException {
    String bucketName = nextNotificationToProcess.get(BUCKET).getAsString();
    String objectKey = nextNotificationToProcess.get(KEY).getAsString();
    int objectSize = nextNotificationToProcess.get(SIZE).getAsInt();
    logger.fine("Updating bucket and key: " + bucketName + "/" + objectKey + " {"
        + nextNotificationToProcess.get(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_ID)
            .getAsNumber()
        + ", "
        + nextNotificationToProcess.get(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET)
            .getAsNumber()
        + "}");
    logger.finest("Object size: " + objectSize);
    // Sign with per-bucket session credentials and hand back the ready-to-send request.
    SessionCreds creds = getCredentials(bucketName);
    return signRequest(creds, generateGetObjectRequest(creds, objectKey));
}
/**
 * Streams the batch object referenced by the notification. The wall-clock
 * fetch time is always logged at FINE, even when the GET fails.
 */
@Override
public InputStream fetchBatchStreamForNotification(JsonObject nextNotificationToProcess) throws IOException {
    final SdkHttpFullRequest signedRequest = generateFetchRequest(nextNotificationToProcess);
    final long start = System.currentTimeMillis();
    try {
        return httpClient.tryObjectGet(signedRequest);
    } finally {
        getLogger().fine("Fetch Time:" + (System.currentTimeMillis() - start));
    }
}
/**
 * Buffered variant of the fetch: returns the batch as a BatchData wrapper
 * sized by the object size advertised in the notification.
 */
@Override
public BatchData fetchBatchStreamForNotificationBuf(JsonObject nextNotificationToProcess) throws IOException {
    final int advertisedSize = nextNotificationToProcess.get(SIZE).getAsInt();
    final SdkHttpFullRequest signedRequest = generateFetchRequest(nextNotificationToProcess);
    return new BatchData(advertisedSize, httpClient.tryObjectGetAsBuffer(signedRequest));
}
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/S3ExpressHelper.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/S3ExpressHelper.java | package com.pinterest.memq.commons.storage.s3express;
import java.util.HashMap;
import java.util.Map;
/**
 * Utility helpers for S3 Express (directory bucket) names: validation,
 * region extraction and endpoint URL construction.
 */
public class S3ExpressHelper {

    /** Thrown when a bucket name does not follow the s3express naming scheme. */
    public static class S3ExpressParsingException extends Exception {
        public S3ExpressParsingException(String message) {
            super(message);
        }
    }

    /**
     * Map from region code to AWS region name
     * The region code is the second part of the bucket name, e.g. "use1" in "s3express--use1--us-east-1--x-s3"
     * The region name is the AWS region name, e.g. "us-east-1"
     */
    public static final Map<String, String> awsRegionMap = new HashMap<>();
    static {
        // Populated in a static initializer instead of the original double-brace
        // idiom, which created an anonymous HashMap subclass (classloader/equals
        // pitfalls). The map stays mutable, matching the original field contract.
        awsRegionMap.put("use1", "us-east-1");
    }

    /**
     * Validate the bucket name is a valid s3express bucket name
     * https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html#bucketnamingrules-directorybucket
     * @param bucketName the bucket name to validate
     * @throws S3ExpressParsingException if the name does not match the expected pattern
     */
    public static void validateS3ExpressBucketName(String bucketName) throws S3ExpressParsingException {
        if (!bucketName.matches(".*--.*-.*--x-s3")) {
            throw new S3ExpressParsingException("Invalid s3express bucket name: " + bucketName);
        }
    }

    /**
     * Generate the bucket URL from the bucket name
     * @param bucketName the s3express bucket name
     * @return the bucket URL (with trailing slash)
     * @throws S3ExpressParsingException if the name is invalid or the region code is unknown
     */
    public static String generateBucketUrl(String bucketName) throws S3ExpressParsingException {
        validateS3ExpressBucketName(bucketName);
        String region = getRegionFromBucket(bucketName);
        // Second "--"-delimited component is the availability-zone id, e.g. "use1-az1".
        String azName = bucketName.split("--")[1];
        return String.format("https://%s.s3express-%s.%s.amazonaws.com/", bucketName, azName, region);
    }

    /**
     * Get the region name from the bucket name
     * @param bucketName the s3express bucket name
     * @return the AWS region name
     * @throws S3ExpressParsingException if the name is invalid or the region code is unknown
     */
    public static String getRegionFromBucket(String bucketName) throws S3ExpressParsingException {
        String regionCode;
        validateS3ExpressBucketName(bucketName);
        regionCode = bucketName.split("--")[1].split("-")[0];
        if (!awsRegionMap.containsKey(regionCode)) {
            throw new S3ExpressParsingException(
                String.format("Unknown region code %s from bucket name %s", regionCode, bucketName));
        }
        return awsRegionMap.get(regionCode);
    }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/SessionCreds.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/SessionCreds.java | /**
* Copyright 2024 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3express;
/**
* Hold AWS Session Credentials
*/
/**
 * Hold AWS Session Credentials (access key id, secret key, session token) for
 * S3 Express session-based authentication.
 *
 * Mutable bean: both a no-arg constructor with setters and an all-args
 * constructor are provided.
 */
public class SessionCreds {
    public String key;
    public String secret;
    public String token;

    public SessionCreds() {
    }

    public SessionCreds(String key, String secret, String token) {
        this.key = key;
        this.secret = secret;
        this.token = token;
    }

    public void setKey(String key) {
        this.key = key;
    }

    public void setSecret(String secret) {
        this.secret = secret;
    }

    public void setToken(String token) {
        this.token = token;
    }

    public String getKey() {
        return key;
    }

    public String getSecret() {
        return secret;
    }

    public String getToken() {
        return token;
    }

    @Override
    public String toString() {
        // SECURITY FIX: the original toString emitted the secret key and session
        // token verbatim, leaking credentials into any log line that prints this
        // object. Only the non-sensitive access key id is included now.
        return "SessionCreds [key=" + key + ", secret=<redacted>, token=<redacted>]";
    }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/keygenerator/S3ExpressObjectKeyGenerator.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/keygenerator/S3ExpressObjectKeyGenerator.java | package com.pinterest.memq.commons.storage.s3express.keygenerator;
/**
 * Base class for strategies that compute the S3 Express object key under which
 * an uploaded message batch is stored. Implementations combine the configured
 * base path with the batch's request identifiers and attempt number.
 */
public abstract class S3ExpressObjectKeyGenerator {
// Path separator placed between key components.
protected static final String SLASH = "/";
// Separator between the id fields within the final key segment.
protected static final String SEPARATOR = "_";
// Base path prefixed to every generated key (presumably the topic name —
// confirm against the handler that constructs this generator).
protected String path;
public S3ExpressObjectKeyGenerator(String path) {
this.path = path;
}
/**
 * Generate the S3Express object key
 * @param firstMessageClientRequestId client-assigned request id of the batch's first message
 * @param firstMessageServerRequestId server-assigned request id of the batch's first message
 * @param attempt upload attempt number, kept in the key so retries produce unique keys
 * @return the object key to upload the batch under
 */
public abstract String generateObjectKey(long firstMessageClientRequestId,
long firstMessageServerRequestId,
int attempt);
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/keygenerator/DateHourKeyGenerator.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3express/keygenerator/DateHourKeyGenerator.java | package com.pinterest.memq.commons.storage.s3express.keygenerator;
import java.text.SimpleDateFormat;
/**
* Generate S3 object key with date and hour as prefix
* The key format is: yyMMdd-HH/{path}/{firstMessageClientRequestId}_{firstMessageServerRequestId}_{attempt}
* For example: 240101-01/test_topic/123_456_1
*
* The date-hour prefix can help the cleaning job to clean up the old data.
* Until 2024/11/05, AWS S3Express does not support object lifecycle policy.
* We need to clean up the old data manually or via scripts.
* With this setup, we can easily clean up the old data by deleting the hourly prefix.
*/
/**
 * Key generator that prefixes every object key with the current date and hour:
 * {yyMMdd-HH}/{path}/{firstMessageClientRequestId}_{firstMessageServerRequestId}_{attempt}
 * e.g. 240101-01/test_topic/123_456_1
 *
 * The hourly prefix exists so cleanup tooling can delete expired data by
 * prefix: as of 2024/11/05 S3 Express has no lifecycle policies, so old
 * objects must be removed manually or by scripts, and an hourly prefix makes
 * that a single bulk delete.
 */
public class DateHourKeyGenerator extends S3ExpressObjectKeyGenerator {

  private static final String DATE_HOUR_PATTERN = "yyMMdd-HH";

  public DateHourKeyGenerator(String path) {
    super(path);
  }

  @Override
  public String generateObjectKey(long firstMessageClientRequestId,
                                  long firstMessageServerRequestId,
                                  int attempt) {
    // Same layout as always: dateHour/path/clientId_serverId_attempt
    return getCurrentDateHr() + SLASH + path + SLASH
        + firstMessageClientRequestId + SEPARATOR
        + firstMessageServerRequestId + SEPARATOR
        + attempt;
  }

  /** Current wall-clock date-hour in the JVM default timezone. */
  protected static String getCurrentDateHr() {
    return new SimpleDateFormat(DATE_HOUR_PATTERN).format(new java.util.Date());
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/MemqS3DNSResolver.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/MemqS3DNSResolver.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.logging.Logger;
import org.apache.http.conn.DnsResolver;
public class MemqS3DNSResolver implements DnsResolver {
private static final Logger logger = Logger.getLogger(MemqS3DNSResolver.class.getCanonicalName());
@Override
public InetAddress[] resolve(String host) throws UnknownHostException {
InetAddress[] address = InetAddress.getAllByName(host);
logger.fine(() -> "Host:" + host + " address:" + Arrays.toString(address));
return address;
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/S3Exception.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/S3Exception.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3;
import java.io.IOException;
/**
 * Base type for S3-related I/O failures that carries the HTTP status code
 * which triggered the error. The nested subclasses map well-known S3 response
 * codes; RetriableException marks codes that are safe to retry.
 */
public abstract class S3Exception extends IOException {

  private static final long serialVersionUID = 1L;

  /** HTTP status code associated with this failure (0 when unset). */
  private int errorCode;

  public S3Exception() {
  }

  public S3Exception(int errorCode) {
    super("S3 Exception (code: " + errorCode + ")");
    this.errorCode = errorCode;
  }

  public S3Exception(String message) {
    super(message);
  }

  public S3Exception(int errorCode, String message) {
    super("S3 Exception (code: " + errorCode + ", message: " + message + ")");
    this.errorCode = errorCode;
  }

  public int getErrorCode() {
    return errorCode;
  }

  public void setErrorCode(int errorCode) {
    this.errorCode = errorCode;
  }

  /** Marker for status codes worth retrying (5xx family). */
  public static class RetriableException extends S3Exception {
    private static final long serialVersionUID = 1L;

    public RetriableException(int errorCode) {
      super(errorCode);
    }
  }

  /** HTTP 404 — object or bucket does not exist. */
  public static class NotFoundException extends S3Exception {
    private static final long serialVersionUID = 1L;

    public NotFoundException() {
      super(404);
    }
  }

  /** HTTP 403 — access denied. */
  public static class ForbiddenException extends S3Exception {
    private static final long serialVersionUID = 1L;

    public ForbiddenException() {
      super(403);
    }
  }

  /** HTTP 500 — transient server-side failure; retriable. */
  public static class InternalServerErrorException extends RetriableException {
    private static final long serialVersionUID = 1L;

    public InternalServerErrorException() {
      super(500);
    }
  }

  /** HTTP 503 — service unavailable / throttled; retriable. */
  public static class ServiceUnavailableException extends RetriableException {
    private static final long serialVersionUID = 1L;

    public ServiceUnavailableException() {
      super(503);
    }
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/KafkaNotificationSink.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/KafkaNotificationSink.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.List;
import java.util.Properties;
import java.util.logging.Logger;
import javax.naming.ConfigurationException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
/**
 * Publishes MemQ batch-upload notifications to a Kafka topic so consumers can
 * discover newly written batch objects.
 *
 * init() and notify() are synchronized, so a single instance may be shared by
 * multiple writer threads.
 */
public class KafkaNotificationSink {

  private static final Logger logger = Logger.getLogger(KafkaNotificationSink.class.getCanonicalName());
  public static final String NOTIFICATION_SERVERSET = "notificationServerset";
  public static final String NOTIFICATION_TOPIC = "notificationTopic";

  private KafkaProducer<String, String> producer;
  private final Gson gson = new Gson();
  private String notificationTopic;
  // Retained so reinitializeSink() can rebuild the producer after a failure.
  private Properties props;
  private String bootstrapServers;

  /**
   * Builds the Kafka producer from the supplied properties. Any recognized
   * ProducerConfig key present in {@code props} overrides the defaults
   * (acks=-1, retries=3).
   *
   * @throws ConfigurationException if the notification topic or serverset is missing
   */
  public synchronized KafkaNotificationSink init(Properties props) throws Exception {
    this.props = props;
    bootstrapServers = getBootstrapServers(props, 10);
    Properties producerProps = new Properties();
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    producerProps.put(ProducerConfig.ACKS_CONFIG, "-1");
    producerProps.put(ProducerConfig.RETRIES_CONFIG, "3");
    // Pass through any producer setting explicitly provided in the handler config.
    ProducerConfig.configNames().forEach((s) -> {
      if (props.containsKey(s)) {
        producerProps.put(s, props.getProperty(s));
      }
    });
    if (!props.containsKey(NOTIFICATION_TOPIC)) {
      throw new ConfigurationException("Missing notification topic name");
    }
    notificationTopic = props.get(NOTIFICATION_TOPIC).toString();
    // (removed a stray duplicate `props.get(NOTIFICATION_TOPIC).toString();`
    // statement whose result was discarded)
    if (producer == null) {
      producer = new KafkaProducer<>(producerProps);
      logger.info("Initialized notification sink:" + notificationTopic + " on:" + bootstrapServers);
    }
    return this;
  }

  /** Tears down the current producer and rebuilds it from the saved properties. */
  public void reinitializeSink() throws Exception {
    logger.warning("Notification sink reset triggered");
    producer.close();
    producer = null;
    init(props);
  }

  /**
   * Resolves the bootstrap server list from a serverset file (one host per
   * line), capped at {@code limit} hosts.
   */
  protected String getBootstrapServers(Properties props, int limit) throws ConfigurationException,
                                                                    IOException {
    if (!props.containsKey(NOTIFICATION_SERVERSET)) {
      throw new ConfigurationException("Missing serverset configuration for notification sink");
    }
    String notificationServerset = props.get(NOTIFICATION_SERVERSET).toString();
    List<String> lines = Files.readAllLines(new File(notificationServerset).toPath());
    lines = lines.subList(0, Math.min(limit, lines.size()));
    return String.join(",", lines);
  }

  /**
   * Synchronously publishes the payload, rebuilding the producer and retrying
   * up to two more times on failure.
   *
   * @param payload    notification body, serialized to JSON
   * @param retryCount how many retries have already been attempted (pass 0 initially)
   */
  public synchronized void notify(JsonObject payload,
                                  int retryCount) throws Exception {
    try {
      producer
          .send(new ProducerRecord<String, String>(notificationTopic, null, gson.toJson(payload)))
          .get();
    } catch (Exception e) {
      // Producer may be in a bad state (e.g. stale metadata); rebuild before retrying.
      reinitializeSink();
      if (retryCount < 2) {
        // BUGFIX: was `notify(payload, retryCount++)`, which passes the OLD value
        // (post-increment), so the retry budget never advanced and persistent
        // failures recursed at the same count.
        notify(payload, retryCount + 1);
      } else {
        throw e;
      }
    }
  }

  /** Kafka bootstrap servers that consumers should read notifications from. */
  public String getReadUrl() {
    return bootstrapServers;
  }

  public void close() {
    producer.close();
  }
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
pinterest/memq | https://github.com/pinterest/memq/blob/1c4d02b96895be0a380d7f55d4f47e1f056f811e/memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/CustomS3AsyncStorageHandler.java | memq-client/src/main/java/com/pinterest/memq/commons/storage/s3/CustomS3AsyncStorageHandler.java | /**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3;
import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.ProtocolException;
import java.net.URL;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.naming.ConfigurationException;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.compress.utils.IOUtils;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.codahale.metrics.Timer.Context;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.StorageHandlerName;
import com.pinterest.memq.commons.storage.WriteFailedException;
import com.pinterest.memq.core.commons.Message;
import com.pinterest.memq.core.commons.MessageBufferInputStream;
import com.pinterest.memq.core.utils.DaemonThreadFactory;
import com.pinterest.memq.core.utils.MemqUtils;
import com.pinterest.memq.core.utils.MiscUtils;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectRequest.Builder;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.PresignedPutObjectRequest;
import software.amazon.awssdk.services.s3.presigner.model.PutObjectPresignRequest;
@StorageHandlerName(name = "customs3aync")
public class CustomS3AsyncStorageHandler extends AbstractS3StorageHandler {
private static final int HIGH_LATENCY_THRESHOLD = 5;
private static final int NANOSECONDS_TO_SECONDS = 1000_000_000;
private static final int ERROR_CODE = 500;
private static final int SUCCESS_CODE = 200;
private static final int FIRST_INDEX = 0;
private static final String SLASH = "/";
private static final String CONTENT_LENGTH = "Content-Length";
private static final String APPLICATION_OCTET_STREAM = "application/octet-stream";
private static final String CONTENT_MD5 = "Content-MD5";
private static final String CONTENT_TYPE = "Content-Type";
private static final String E_TAG = "ETag";
private static final String S3_REQUEST_ID = "x-amz-request-id";
private static final String S3_EXTENDED_REQUEST_ID = "x-amz-id-2";
private static final String SEPARATOR = "_";
private static final int LAST_ATTEMPT_TIMEOUT = 60_000;
static {
java.security.Security.setProperty("networkaddress.cache.ttl", "1");
}
private static final String HOSTNAME = MiscUtils.getHostname();
private static final Gson GSON = new Gson();
private Logger logger = Logger.getLogger(CustomS3AsyncStorageHandler.class.getName());
private String path;
private String bucket;
private Counter streamResetCounter;
private KafkaNotificationSink notificationSink;
private String topic;
@SuppressWarnings("unused")
private boolean dryrun;
private boolean disableNotifications;
private Timer s3PutLatencyTimer;
private boolean enableHashing;
private ExecutorService requestExecutor;
private int maxAttempts;
private int retryTimeoutMillis;
private Counter s3RetryCounters;
private Timer s3PutInternalLatencyTimer;
private Timer streamCopyTimer;
private S3Presigner signer;
private MetricRegistry registry;
private Counter notificationFailureCounter;
private boolean enableMD5;
private Counter s3RequestCounter;
private ExecutorService asyncSlowUploadHandlingExecutor;
public CustomS3AsyncStorageHandler() {
}
@Override
public void initWriter(Properties outputHandlerConfig,
String topic,
MetricRegistry registry) throws Exception {
this.logger = Logger.getLogger(CustomS3AsyncStorageHandler.class.getName() + "-" + topic);
this.topic = topic;
this.registry = registry;
this.dryrun = Boolean.parseBoolean(outputHandlerConfig.getProperty("dryrun", "false"));
this.disableNotifications = Boolean
.parseBoolean(outputHandlerConfig.getProperty("disableNotifications", "true"));
if (!disableNotifications) {
this.notificationSink = new KafkaNotificationSink();
this.notificationSink.init(outputHandlerConfig);
}
this.s3RequestCounter = registry.counter("output.s3.requests");
this.streamResetCounter = registry.counter("output.s3.streamReset");
this.notificationFailureCounter = registry.counter("output.notification.fail");
this.s3PutLatencyTimer = MiscUtils.oneMinuteWindowTimer(registry, "output.s3.putobjectlatency");
this.s3PutInternalLatencyTimer = MiscUtils.oneMinuteWindowTimer(registry,
"output.s3.internalPutobjectlatency");
this.streamCopyTimer = MiscUtils.oneMinuteWindowTimer(registry, "output.s3.streamCopyTime");
this.bucket = outputHandlerConfig.getProperty("bucket");
if (bucket == null) {
throw new ConfigurationException("Missing S3 bucket name");
}
this.enableMD5 = Boolean.parseBoolean(outputHandlerConfig.getProperty("enableMD5", "true"));
if (!enableMD5) {
logger.warning("MD5 hashes for uploads have been disabled");
}
this.enableHashing = Boolean
.parseBoolean(outputHandlerConfig.getProperty("enableHashing", "true"));
if (!enableHashing) {
logger.warning("Hashing has been disabled for object uploads");
}
this.path = outputHandlerConfig.getProperty("path", topic);
this.requestExecutor = Executors.newCachedThreadPool(new DaemonThreadFactory());
this.asyncSlowUploadHandlingExecutor = Executors.newCachedThreadPool(new DaemonThreadFactory());
this.s3RetryCounters = registry.counter("output.s3.retries");
this.retryTimeoutMillis = Integer
.parseInt(outputHandlerConfig.getProperty("retryTimeoutMillis", "5000"));
this.maxAttempts = Integer.parseInt(outputHandlerConfig.getProperty("retryCount", "2")) + 1;
signer = S3Presigner.builder()
.credentialsProvider(InstanceProfileCredentialsProvider.builder()
.asyncCredentialUpdateEnabled(true).asyncThreadName("IamCredentialUpdater").build())
.build();
}
@Override
public void writeOutput(int objectSize,
int checksum,
final List<Message> messages) throws WriteFailedException {
Context timer = s3PutLatencyTimer.time();
ByteBuf ref = null;
try {
String key = null;
ByteBuf header = StorageHandler.getBatchHeadersAsByteArray(messages);
ref = header;
String contentMD5 = null;
UploadResult result = null;
boolean hasSucceeded = false;
int i = 0;
// for s3 tracking
List<Future<UploadResult>> taskFutures = new ArrayList<>();
while (!hasSucceeded && i < maxAttempts) {
final int k = i;
key = createKey(messages, i).toString();
String tmpKey = key;
Future<UploadResult> taskFuture = requestExecutor.submit(() -> {
return attemptUpload(header.duplicate(), objectSize, checksum, contentMD5, messages,
tmpKey, k);
});
taskFutures.add(taskFuture);
try {
if (i < maxAttempts - 1) {
result = taskFuture.get(retryTimeoutMillis + i * 2000, TimeUnit.MILLISECONDS);
} else {
// if this is the last attempt then don't timeout
result = taskFuture.get(LAST_ATTEMPT_TIMEOUT, TimeUnit.MILLISECONDS);
}
// start tracking response codes from s3
registry.counter("output.s3.responseCode." + result.getResponseCode()).inc();
if (result.getResponseCode() == SUCCESS_CODE) {
hasSucceeded = true;
} else {
logger.severe("Request failed reason:" + result + " key:" + key);
if (result.getResponseCode() == ERROR_CODE) {
continue;
}
}
break;
} catch (Exception e) {
e.printStackTrace();
// TODO: commented out for latency tracking
// resultFuture.cancel(true);
s3RetryCounters.inc();
}
i++;
}
if (!hasSucceeded) {
throw new WriteFailedException("Upload failed due to error out: " + key);
}
if (!disableNotifications) {
JsonObject payload = buildPayload(topic, bucket, objectSize, messages.size(),
header.capacity(), key, i);
try {
notificationSink.notify(payload, 0);
} catch (Exception e) {
notificationFailureCounter.inc();
throw e;
}
}
long latency = timer.stop() / NANOSECONDS_TO_SECONDS;
if (latency > HIGH_LATENCY_THRESHOLD) {
final String s3path = "s3://" + bucket + SLASH + key;
logger.info("Uploaded " + s3path + " latency(" + latency + ")s");
asyncSlowUploadHandlingExecutor.submit(() -> {
Map<String, Object> message = new HashMap<>();
message.put("s3path", s3path);
message.put("latencySeconds", latency);
List<Map<String, Object>> attempts = new ArrayList<>();
for (int taskId = 0; taskId < taskFutures.size(); taskId++) {
Map<String, Object> attempt = new HashMap<>();
try {
UploadResult res = taskFutures.get(taskId).get();
if (res.getResponseCode() == 200) {
attempt.put("requestId", res.getHttpResponseHeaders().get(S3_REQUEST_ID));
attempt.put("extendedRequestId",
res.getHttpResponseHeaders().get(S3_EXTENDED_REQUEST_ID));
} else {
attempt.put("responseCode", res.getResponseCode());
}
attempt.put("timeMillis", res.getTime() / 1_000_000);
} catch (Exception e) {
attempt.put("exception", e.getMessage());
}
attempts.add(attempt);
}
message.put("attempts", attempts);
logger.fine(() -> GSON.toJson(message));
});
}
} catch (Exception e) {
timer.stop();
throw new WriteFailedException(e);
} finally {
if (ref != null) {
ref.release();
}
}
}
private UploadResult attemptUpload(ByteBuf header,
int sizeInBytes,
int checksum,
String contentMD5,
final List<Message> messages,
final String key,
int count) throws IOException, ProtocolException {
header.resetReaderIndex();
Context internalLatency = s3PutInternalLatencyTimer.time();
try {
Builder putRequestBuilder = PutObjectRequest.builder().bucket(bucket).key(key);
if (contentMD5 != null) {
putRequestBuilder.contentMD5(contentMD5);
}
int length = header.capacity() + sizeInBytes;
putRequestBuilder.contentLength((long) length);
MessageBufferInputStream input = new MessageBufferInputStream(messages, streamResetCounter);
PresignedPutObjectRequest presignPutObject = signer.presignPutObject(
PutObjectPresignRequest.builder().putObjectRequest(putRequestBuilder.build())
.signatureDuration(Duration.ofSeconds(2000)).build());
URL presignedUrl = presignPutObject.url();
HttpURLConnection connection = (HttpURLConnection) presignedUrl.openConnection();
connection.setDoOutput(true);
connection.setRequestProperty(CONTENT_TYPE, APPLICATION_OCTET_STREAM);
if (contentMD5 != null) {
connection.setRequestProperty(CONTENT_MD5, contentMD5);
}
connection.setRequestProperty(CONTENT_LENGTH, String.valueOf(length));
connection.setRequestMethod("PUT");
Context streamCopyLatency = streamCopyTimer.time();
OutputStream outputStream = connection.getOutputStream();
IOUtils.copy(new ByteBufInputStream(header), outputStream);
IOUtils.copy(input, outputStream);
outputStream.flush();
outputStream.close();
streamCopyLatency.stop();
s3RequestCounter.inc();
int responseCode = connection.getResponseCode();
if (responseCode != SUCCESS_CODE) {
logger.severe(responseCode + " reason:" + connection.getResponseMessage() + "\t"
+ connection.getHeaderFields() + " index:" + count + " url:" + presignedUrl);
}
if (contentMD5 != null && responseCode == SUCCESS_CODE) {
try {
String eTagHex = connection.getHeaderFields().get(E_TAG).get(FIRST_INDEX);
String etagToBase64 = MemqUtils.etagToBase64(eTagHex.replace("\"", ""));
if (!contentMD5.equals(etagToBase64)) {
logger.severe("Request failed due to etag mismatch url:" + presignedUrl);
responseCode = ERROR_CODE;
}
} catch (Exception e) {
logger.log(Level.SEVERE, "Unable to parse the returnedetag", e);
}
}
connection.disconnect();
return new UploadResult(responseCode, header.capacity(), connection.getHeaderFields(),
internalLatency.stop());
} finally {
internalLatency.stop();
}
}
public static class UploadResult {
private int responseCode;
private int memqBatchHeaderSize;
private Map<String, List<String>> httpResponseHeaders;
private long time;
public UploadResult(int responseCode,
int memqBatchHeaderSize,
Map<String, List<String>> httpResponseHeaders,
long time) {
this.responseCode = responseCode;
this.memqBatchHeaderSize = memqBatchHeaderSize;
this.httpResponseHeaders = httpResponseHeaders;
this.time = time;
}
public int getResponseCode() {
return responseCode;
}
public void setResponseCode(int responseCode) {
this.responseCode = responseCode;
}
public int getMemqBatchHeaderSize() {
return memqBatchHeaderSize;
}
public void setMemqBatchHeaderSize(int memqBatchHeaderSize) {
this.memqBatchHeaderSize = memqBatchHeaderSize;
}
public Map<String, List<String>> getHttpResponseHeaders() {
return httpResponseHeaders;
}
public void setHttpResponseHeaders(Map<String, List<String>> httpResponseHeaders) {
this.httpResponseHeaders = httpResponseHeaders;
}
public long getTime() {
return time;
}
public void setTime(long time) {
this.time = time;
}
}
private StringBuilder createKey(List<Message> messages, int attempt) {
Message firstMessage = messages.get(0);
StringBuilder keyBuilder = new StringBuilder();
if (enableHashing) {
String hash = DigestUtils.md2Hex(String.valueOf(firstMessage.getClientRequestId()));
keyBuilder.append(hash.substring(0, 2));
keyBuilder.append(SLASH);
}
keyBuilder.append(path);
keyBuilder.append(SLASH);
keyBuilder.append(firstMessage.getClientRequestId());
keyBuilder.append(SEPARATOR);
keyBuilder.append(firstMessage.getServerRequestId());
keyBuilder.append(SEPARATOR);
keyBuilder.append(System.currentTimeMillis());
keyBuilder.append(SEPARATOR);
keyBuilder.append(attempt);
keyBuilder.append(SEPARATOR);
keyBuilder.append(HOSTNAME);
return keyBuilder;
}
public void closeWriter() {
notificationSink.close();
}
public KafkaNotificationSink getNotificationSink() {
return notificationSink;
}
@Override
public String getReadUrl() {
return notificationSink.getReadUrl();
}
/** @return this handler's logger instance */
@Override
public Logger getLogger() {
return logger;
}
} | java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
/**
* Copyright 2022 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.memq.commons.storage.s3;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import java.util.logging.Logger;
import org.apache.commons.compress.utils.IOUtils;
import com.codahale.metrics.MetricRegistry;
import com.google.gson.JsonObject;
import com.pinterest.memq.commons.BatchHeader;
import com.pinterest.memq.commons.BatchHeader.IndexEntry;
import com.pinterest.memq.commons.MemqLogMessage;
import com.pinterest.memq.commons.protocol.BatchData;
import com.pinterest.memq.commons.storage.StorageHandler;
import com.pinterest.memq.commons.storage.s3.S3Exception.ForbiddenException;
import com.pinterest.memq.commons.storage.s3.S3Exception.InternalServerErrorException;
import com.pinterest.memq.commons.storage.s3.S3Exception.NotFoundException;
import com.pinterest.memq.commons.storage.s3.S3Exception.ServiceUnavailableException;
import com.pinterest.memq.commons.storage.s3.reader.client.ApacheRequestClient;
import com.pinterest.memq.commons.storage.s3.reader.client.ReactorNettyRequestClient;
import com.pinterest.memq.commons.storage.s3.reader.client.RequestClient;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
/**
 * Base class for S3-backed {@link StorageHandler} implementations.
 *
 * Provides the read path: fetching whole batch objects, batch headers, and
 * individual messages from S3 via ranged HTTP GETs issued through a
 * {@link RequestClient}. Concrete subclasses supply the write path and a
 * {@link #getLogger()}.
 */
public abstract class AbstractS3StorageHandler implements StorageHandler {

  // NOTE(review): these shared exception instances are created once at class-load
  // time, so their stack traces point here and are meaningless; they avoid a
  // per-error allocation. Kept as-is because they are part of the public API.
  public static final NotFoundException NOT_FOUND_EXCEPTION = new NotFoundException();
  public static final InternalServerErrorException ISE_EXCEPTION = new InternalServerErrorException();
  public static final ServiceUnavailableException UNAVAILABLE_EXCEPTION = new ServiceUnavailableException();
  public static final ForbiddenException FORBIDDEN_EXCEPTION = new ForbiddenException();

  public static final String OBJECT_FETCH_LATENCY_MS_HISTOGRAM_KEY = "objectFetchLatencyMs";
  public static final String OBJECT_FETCH_ERROR_KEY = "objectFetchErrorKey";

  // Field names of the JSON notification payload (see buildPayload()).
  public static final String TOPIC = "topic";
  public static final String KEY = "key";
  public static final String BUCKET = "bucket";
  public static final String HEADER_SIZE = "headerSize";
  public static final String REGION = "region";
  public static final String NUMBER_OF_MESSAGES_IN_BATCH = "numBatchMessages";
  public static final String CONTENT_MD5 = "contentMD5";
  public static final String NUM_ATTEMPTS = "numAttempts";
  public static final String USE_APACHE_HTTP_CLIENT = "useApacheHttpClient";
  protected static final String DEFAULT_REGION = "us-east-1";

  private MetricRegistry registry;
  protected RequestClient httpClient;
  protected Region region;

  /**
   * Initializes the read side: picks the HTTP client implementation (Apache when
   * the {@code useApacheHttpClient} property is true, Reactor Netty otherwise)
   * and resolves the AWS region from properties (default {@code us-east-1}).
   */
  @Override
  public void initReader(Properties properties, MetricRegistry registry) throws Exception {
    this.registry = registry;
    if (properties.containsKey(USE_APACHE_HTTP_CLIENT)
        && Boolean.parseBoolean(properties.get(USE_APACHE_HTTP_CLIENT).toString())) {
      this.httpClient = new ApacheRequestClient(registry);
    } else {
      this.httpClient = new ReactorNettyRequestClient(registry);
    }
    httpClient.initialize(properties);
    this.region = Region.of(
        properties.getProperty(REGION, DEFAULT_REGION).toLowerCase());
  }

  /** Logs bucket/key plus the notification's partition/offset; shared by both fetch paths. */
  private void logNotification(JsonObject notification, String bucket, String key, int objectSize) {
    getLogger().fine("Updating bucket and key: " + bucket + "/" + key + " {"
        + notification.get(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_ID)
            .getAsNumber()
        + ", " + notification
            .get(MemqLogMessage.INTERNAL_FIELD_NOTIFICATION_PARTITION_OFFSET).getAsNumber()
        + "}");
    getLogger().finest("Object size: " + objectSize);
  }

  /**
   * Streams the full batch object named by the notification. The caller owns
   * (and must close) the returned stream. Fetch latency is recorded in the
   * {@code objectFetchLatencyMs} histogram even when the request fails.
   */
  @Override
  public InputStream fetchBatchStreamForNotification(JsonObject nextNotificationToProcess) throws IOException {
    String currentBucket = nextNotificationToProcess.get(BUCKET).getAsString();
    String currentKey = nextNotificationToProcess.get(KEY).getAsString();
    int currentObjectSize = nextNotificationToProcess.get(SIZE).getAsInt();
    logNotification(nextNotificationToProcess, currentBucket, currentKey, currentObjectSize);
    long fetchStartTime = System.currentTimeMillis();
    try {
      return httpClient
          .tryObjectGet(GetObjectRequest.builder().bucket(currentBucket).key(currentKey).build());
    } finally {
      long fetchTime = System.currentTimeMillis() - fetchStartTime;
      getLogger().fine("Fetch Time:" + fetchTime);
      registry.histogram(OBJECT_FETCH_LATENCY_MS_HISTOGRAM_KEY).update(fetchTime);
    }
  }

  /**
   * Buffer-based variant of {@link #fetchBatchStreamForNotification(JsonObject)}:
   * returns the object as a {@link BatchData} sized from the notification's
   * {@code size} field. Latency is recorded in the same histogram.
   */
  @Override
  public BatchData fetchBatchStreamForNotificationBuf(JsonObject nextNotificationToProcess) throws IOException {
    String currentBucket = nextNotificationToProcess.get(BUCKET).getAsString();
    String currentKey = nextNotificationToProcess.get(KEY).getAsString();
    int currentObjectSize = nextNotificationToProcess.get(SIZE).getAsInt();
    logNotification(nextNotificationToProcess, currentBucket, currentKey, currentObjectSize);
    long fetchStartTime = System.currentTimeMillis();
    try {
      return new BatchData(currentObjectSize, httpClient.tryObjectGetAsBuffer(
          GetObjectRequest.builder().bucket(currentBucket).key(currentKey).build()));
    } finally {
      long fetchTime = System.currentTimeMillis() - fetchStartTime;
      getLogger().fine("Fetch Time:" + fetchTime);
      registry.histogram(OBJECT_FETCH_LATENCY_MS_HISTOGRAM_KEY).update(fetchTime);
    }
  }

  /**
   * Fetches only the batch header via a ranged GET of {@code bytes=0-headerSize}.
   * NOTE(review): HTTP byte ranges are inclusive, so this requests headerSize + 1
   * bytes; range preserved as-is — TODO confirm whether the extra byte is intentional.
   */
  @Override
  public BatchHeader fetchHeaderForBatch(JsonObject nextNotificationToProcess) throws IOException {
    String bucketName = nextNotificationToProcess.get(BUCKET).getAsString();
    String key = nextNotificationToProcess.get(KEY).getAsString();
    // Use the shared HEADER_SIZE constant instead of a duplicated string literal.
    int headerSize = nextNotificationToProcess.get(HEADER_SIZE).getAsInt();
    GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(key)
        .range("bytes=0-" + headerSize).build();
    // try-with-resources: the original leaked the stream if BatchHeader parsing threw.
    try (DataInputStream dis = new DataInputStream(httpClient.tryObjectGet(getObjectRequest))) {
      return new BatchHeader(dis);
    }
  }

  /**
   * Fetches the bytes of a single message via a ranged GET at the index entry's
   * offset/size. The caller owns (and must close) the returned stream.
   */
  @Override
  public DataInputStream fetchMessageAtIndex(JsonObject objectNotification,
                                             IndexEntry index) throws IOException {
    String bucketName = objectNotification.get(BUCKET).getAsString();
    String key = objectNotification.get(KEY).getAsString();
    GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(key)
        .range("bytes=" + index.getOffset() + "-" + (index.getOffset() + index.getSize())).build();
    InputStream is = httpClient.tryObjectGet(getObjectRequest);
    return new DataInputStream(is);
  }

  /**
   * Fully drains {@code objectContentStream} into memory and returns a stream over
   * the buffered bytes. The source stream is always closed, including when the
   * copy fails (the original leaked it on error).
   */
  public static DataInputStream convertS3StreamToInMemory(InputStream objectContentStream) throws IOException {
    ByteArrayOutputStream str = new ByteArrayOutputStream();
    try (InputStream in = objectContentStream) {
      IOUtils.copy(in, str);
    }
    return new DataInputStream(new ByteArrayInputStream(str.toByteArray()));
  }

  /**
   * Builds the JSON notification payload describing an uploaded batch object;
   * field names match the constants consumed by the fetch methods above.
   */
  public static JsonObject buildPayload(String topic,
                                        String bucket,
                                        int objectSize,
                                        int numberOfMessages,
                                        int batchHeaderLength,
                                        String key,
                                        int attempt) {
    JsonObject payload = new JsonObject();
    payload.addProperty(BUCKET, bucket);
    payload.addProperty(KEY, key);
    payload.addProperty(SIZE, objectSize);
    payload.addProperty(TOPIC, topic);
    payload.addProperty(HEADER_SIZE, batchHeaderLength);
    payload.addProperty(NUMBER_OF_MESSAGES_IN_BATCH, numberOfMessages);
    payload.addProperty(NUM_ATTEMPTS, attempt);
    return payload;
  }

  /** Closes the HTTP client; a failure to close is logged, not propagated. */
  @Override
  public void closeReader() {
    try {
      httpClient.close();
    } catch (IOException ignored) {
      getLogger().warning("Failed to close http client when closing S3 storage handler");
    }
  }

  /** @return the concrete handler's logger, used for all read-path diagnostics */
  public abstract Logger getLogger();
}
| java | Apache-2.0 | 1c4d02b96895be0a380d7f55d4f47e1f056f811e | 2026-01-05T02:41:45.251484Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.