file_path
stringlengths
3
280
file_language
stringclasses
66 values
content
stringlengths
1
1.04M
repo_name
stringlengths
5
92
repo_stars
int64
0
154k
repo_description
stringlengths
0
402
repo_primary_language
stringclasses
108 values
developer_username
stringlengths
1
25
developer_name
stringlengths
0
30
developer_company
stringlengths
0
82
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/config/StreamingWorkerConfig.java
Java
package org.ray.streaming.runtime.config;

import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Streaming job worker specified config.
 *
 * <p>Extends {@link StreamingGlobalConfig} so a worker sees the job-wide
 * settings plus anything worker-specific layered on top.
 */
public class StreamingWorkerConfig extends StreamingGlobalConfig {

  private static final Logger LOGGER =
      LoggerFactory.getLogger(StreamingWorkerConfig.class);

  /**
   * Builds a worker config backed by the given raw key/value map.
   *
   * @param conf raw configuration entries for this worker
   */
  public StreamingWorkerConfig(final Map<String, String> conf) {
    super(conf);
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/config/global/CommonConfig.java
Java
package org.ray.streaming.runtime.config.global;

import org.ray.streaming.runtime.config.Config;

/**
 * Job common config: configuration keys shared by every streaming job.
 */
public interface CommonConfig extends Config {

  String JOB_ID = "streaming.job.id";
  String JOB_NAME = "streaming.job.name";

  /**
   * Ray streaming job id. Non-custom.
   *
   * @return Job id with string type.
   */
  @DefaultValue("default-job-id")
  @Key(JOB_ID)
  String jobId();

  /**
   * Ray streaming job name. Non-custom.
   *
   * @return Job name with string type.
   */
  @DefaultValue("default-job-name")
  @Key(JOB_NAME)
  String jobName();
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/collector/OutputCollector.java
Java
package org.ray.streaming.runtime.core.collector;

import java.nio.ByteBuffer;
import java.util.Collection;
import org.ray.runtime.util.Serializer;
import org.ray.streaming.api.collector.Collector;
import org.ray.streaming.api.partition.Partition;
import org.ray.streaming.message.Record;
import org.ray.streaming.runtime.transfer.ChannelID;
import org.ray.streaming.runtime.transfer.DataWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Collector that serializes records and forwards them to the downstream
 * channels selected by the configured {@link Partition} strategy.
 */
public class OutputCollector implements Collector<Record> {
  private static final Logger LOGGER = LoggerFactory.getLogger(OutputCollector.class);

  // Immutable after construction; made final for clarity and safe publication.
  private final Partition partition;
  private final DataWriter writer;
  private final ChannelID[] outputQueues;

  /**
   * @param outputQueueIds hex string ids of the downstream channels
   * @param writer data writer used to push serialized records downstream
   * @param partition strategy choosing the target channel(s) per record
   */
  public OutputCollector(Collection<String> outputQueueIds, DataWriter writer,
      Partition partition) {
    this.outputQueues = outputQueueIds.stream().map(ChannelID::from).toArray(ChannelID[]::new);
    this.writer = writer;
    this.partition = partition;
    LOGGER.debug("OutputCollector constructed, outputQueueIds:{}, partition:{}.",
        outputQueueIds, this.partition);
  }

  /**
   * Serializes {@code record} once, then writes it to every channel chosen by
   * the partitioner.
   */
  @Override
  public void collect(Record record) {
    int[] partitions = this.partition.partition(record, outputQueues.length);
    ByteBuffer msgBuffer = ByteBuffer.wrap(Serializer.encode(record));
    // Renamed from "partition" — the original loop variable shadowed the field.
    for (int partitionIndex : partitions) {
      // NOTE(review): the same buffer instance is handed to write() repeatedly;
      // this assumes DataWriter.write does not advance the buffer position —
      // confirm, otherwise a duplicate() per write is required.
      writer.write(outputQueues[partitionIndex], msgBuffer);
    }
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/command/BatchInfo.java
Java
package org.ray.streaming.runtime.core.command;

import java.io.Serializable;

/**
 * Command payload carrying the id of a batch.
 */
public class BatchInfo implements Serializable {
  // Explicit version id so serialized instances stay compatible across builds;
  // the original relied on the compiler-generated (fragile) default.
  private static final long serialVersionUID = 1L;

  private long batchId;

  /**
   * @param batchId id of the batch this command refers to
   */
  public BatchInfo(long batchId) {
    this.batchId = batchId;
  }

  public long getBatchId() {
    return batchId;
  }

  public void setBatchId(long batchId) {
    this.batchId = batchId;
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/graph/ExecutionEdge.java
Java
package org.ray.streaming.runtime.core.graph;

import java.io.Serializable;
import org.ray.streaming.api.partition.Partition;

/**
 * An edge in the physical execution graph: connects a source execution node to
 * a target execution node along with the partition strategy applied to the
 * data flowing across it.
 */
public class ExecutionEdge implements Serializable {

  private int srcNodeId;
  private int targetNodeId;
  private Partition partition;

  public ExecutionEdge(int srcNodeId, int targetNodeId, Partition partition) {
    this.srcNodeId = srcNodeId;
    this.targetNodeId = targetNodeId;
    this.partition = partition;
  }

  public int getSrcNodeId() {
    return srcNodeId;
  }

  public void setSrcNodeId(int srcNodeId) {
    this.srcNodeId = srcNodeId;
  }

  public int getTargetNodeId() {
    return targetNodeId;
  }

  public void setTargetNodeId(int targetNodeId) {
    this.targetNodeId = targetNodeId;
  }

  public Partition getPartition() {
    return partition;
  }

  public void setPartition(Partition partition) {
    this.partition = partition;
  }

  /** @return a stream name derived from the two endpoint node ids */
  public String getStream() {
    return String.format("stream:%d-%d", srcNodeId, targetNodeId);
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/graph/ExecutionGraph.java
Java
package org.ray.streaming.runtime.core.graph;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.ray.api.RayActor;
import org.ray.streaming.runtime.worker.JobWorker;

/**
 * Physical execution graph: the set of execution nodes produced by the task
 * assigner, with cached views of the source and sink worker actors.
 */
public class ExecutionGraph implements Serializable {
  // Explicit version id so serialized graphs stay compatible across builds.
  private static final long serialVersionUID = 1L;

  private final long buildTime;
  private final List<ExecutionNode> executionNodeList;
  private final List<RayActor<JobWorker>> sourceWorkers = new ArrayList<>();
  private final List<RayActor<JobWorker>> sinkWorkers = new ArrayList<>();

  public ExecutionGraph(List<ExecutionNode> executionNodes) {
    this.executionNodeList = executionNodes;
    // Cache source/sink worker actors up front so callers don't rescan the graph.
    for (ExecutionNode executionNode : executionNodeList) {
      if (executionNode.getNodeType() == ExecutionNode.NodeType.SOURCE) {
        sourceWorkers.addAll(workersOf(executionNode));
      }
      if (executionNode.getNodeType() == ExecutionNode.NodeType.SINK) {
        sinkWorkers.addAll(workersOf(executionNode));
      }
    }
    buildTime = System.currentTimeMillis();
  }

  /** Collects the worker actor of every task on {@code node}. */
  private static List<RayActor<JobWorker>> workersOf(ExecutionNode node) {
    return node.getExecutionTasks().stream()
        .map(ExecutionTask::getWorker)
        .collect(Collectors.toList());
  }

  public List<RayActor<JobWorker>> getSourceWorkers() {
    return sourceWorkers;
  }

  public List<RayActor<JobWorker>> getSinkWorkers() {
    return sinkWorkers;
  }

  public List<ExecutionNode> getExecutionNodeList() {
    return executionNodeList;
  }

  /**
   * @return the task with the given id
   * @throws RuntimeException if no task has the given id
   */
  public ExecutionTask getExecutionTaskByTaskId(int taskId) {
    for (ExecutionNode executionNode : executionNodeList) {
      for (ExecutionTask executionTask : executionNode.getExecutionTasks()) {
        if (executionTask.getTaskId() == taskId) {
          return executionTask;
        }
      }
    }
    throw new RuntimeException("Task " + taskId + " does not exist!");
  }

  /**
   * @return the node with the given id
   * @throws RuntimeException if the node does not exist
   */
  public ExecutionNode getExecutionNodeByNodeId(int nodeId) {
    for (ExecutionNode executionNode : executionNodeList) {
      if (executionNode.getNodeId() == nodeId) {
        return executionNode;
      }
    }
    throw new RuntimeException("Node " + nodeId + " does not exist!");
  }

  /**
   * @return the node owning the task with the given id
   * @throws RuntimeException if no task has the given id
   */
  public ExecutionNode getExecutionNodeByTaskId(int taskId) {
    for (ExecutionNode executionNode : executionNodeList) {
      for (ExecutionTask executionTask : executionNode.getExecutionTasks()) {
        if (executionTask.getTaskId() == taskId) {
          return executionNode;
        }
      }
    }
    throw new RuntimeException("Task " + taskId + " does not exist!");
  }

  /**
   * @return map from task id to worker actor for the node with {@code nodeId}
   * @throws RuntimeException if the node does not exist
   */
  public Map<Integer, RayActor<JobWorker>> getTaskId2WorkerByNodeId(int nodeId) {
    // Reuses the node lookup above; the thrown exception is identical to the
    // original inline scan.
    ExecutionNode executionNode = getExecutionNodeByNodeId(nodeId);
    Map<Integer, RayActor<JobWorker>> taskId2Worker = new HashMap<>();
    for (ExecutionTask executionTask : executionNode.getExecutionTasks()) {
      taskId2Worker.put(executionTask.getTaskId(), executionTask.getWorker());
    }
    return taskId2Worker;
  }

  public long getBuildTime() {
    return buildTime;
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/graph/ExecutionNode.java
Java
package org.ray.streaming.runtime.core.graph;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import org.ray.streaming.jobgraph.VertexType;
import org.ray.streaming.operator.StreamOperator;

/**
 * A node in the physical execution graph. A node owns one
 * {@link ExecutionTask} per unit of parallelism plus its incoming and
 * outgoing edges.
 */
public class ExecutionNode implements Serializable {

  /** Role of a node inside the dataflow. */
  public enum NodeType {
    SOURCE,
    PROCESS,
    SINK,
  }

  private int nodeId;
  private int parallelism;
  private NodeType nodeType;
  private StreamOperator streamOperator;
  private List<ExecutionTask> executionTasks;
  private List<ExecutionEdge> inputsEdges;
  private List<ExecutionEdge> outputEdges;

  public ExecutionNode(int nodeId, int parallelism) {
    this.nodeId = nodeId;
    this.parallelism = parallelism;
    this.executionTasks = new ArrayList<>();
    this.inputsEdges = new ArrayList<>();
    this.outputEdges = new ArrayList<>();
  }

  public int getNodeId() {
    return nodeId;
  }

  public void setNodeId(int nodeId) {
    this.nodeId = nodeId;
  }

  public int getParallelism() {
    return parallelism;
  }

  public void setParallelism(int parallelism) {
    this.parallelism = parallelism;
  }

  public List<ExecutionTask> getExecutionTasks() {
    return executionTasks;
  }

  public void setExecutionTasks(List<ExecutionTask> executionTasks) {
    this.executionTasks = executionTasks;
  }

  public List<ExecutionEdge> getOutputEdges() {
    return outputEdges;
  }

  public void setOutputEdges(List<ExecutionEdge> outputEdges) {
    this.outputEdges = outputEdges;
  }

  public void addExecutionEdge(ExecutionEdge executionEdge) {
    this.outputEdges.add(executionEdge);
  }

  public void addInputEdge(ExecutionEdge executionEdge) {
    this.inputsEdges.add(executionEdge);
  }

  public List<ExecutionEdge> getInputsEdges() {
    return inputsEdges;
  }

  public StreamOperator getStreamOperator() {
    return streamOperator;
  }

  public void setStreamOperator(StreamOperator streamOperator) {
    this.streamOperator = streamOperator;
  }

  public NodeType getNodeType() {
    return nodeType;
  }

  /**
   * Maps the logical vertex type onto the physical node type; anything that is
   * neither source nor sink is a plain processing node.
   */
  public void setNodeType(VertexType vertexType) {
    if (vertexType == VertexType.SOURCE) {
      this.nodeType = NodeType.SOURCE;
    } else if (vertexType == VertexType.SINK) {
      this.nodeType = NodeType.SINK;
    } else {
      this.nodeType = NodeType.PROCESS;
    }
  }

  @Override
  public String toString() {
    // Same rendered text as the original StringBuilder version.
    return "ExecutionNode{nodeId=" + nodeId
        + ", parallelism=" + parallelism
        + ", nodeType=" + nodeType
        + ", streamOperator=" + streamOperator + "}";
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/graph/ExecutionTask.java
Java
package org.ray.streaming.runtime.core.graph;

import java.io.Serializable;
import org.ray.api.RayActor;
import org.ray.streaming.runtime.worker.JobWorker;

/**
 * ExecutionTask is the minimal execution unit.
 *
 * <p>An ExecutionNode has n ExecutionTasks if its parallelism is n; each task
 * is bound to the worker actor that executes it.
 */
public class ExecutionTask implements Serializable {

  // Task id, assigned globally by the task assigner.
  private int taskId;
  // Index of this task within its node (0 .. parallelism - 1).
  private int taskIndex;
  private RayActor<JobWorker> worker;

  public ExecutionTask(int taskId, int taskIndex, RayActor<JobWorker> worker) {
    this.taskId = taskId;
    this.taskIndex = taskIndex;
    this.worker = worker;
  }

  public int getTaskId() {
    return taskId;
  }

  public void setTaskId(int taskId) {
    this.taskId = taskId;
  }

  public int getTaskIndex() {
    return taskIndex;
  }

  public void setTaskIndex(int taskIndex) {
    this.taskIndex = taskIndex;
  }

  public RayActor<JobWorker> getWorker() {
    return worker;
  }

  public void setWorker(RayActor<JobWorker> worker) {
    this.worker = worker;
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/processor/OneInputProcessor.java
Java
package org.ray.streaming.runtime.core.processor; import org.ray.streaming.message.Record; import org.ray.streaming.operator.OneInputOperator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class OneInputProcessor<T> extends StreamProcessor<Record<T>, OneInputOperator<T>> { private static final Logger LOGGER = LoggerFactory.getLogger(OneInputProcessor.class); public OneInputProcessor(OneInputOperator<T> operator) { super(operator); } @Override public void process(Record<T> record) { try { this.operator.processElement(record); } catch (Exception e) { throw new RuntimeException(e); } } @Override public void close() { this.operator.close(); } }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/processor/ProcessBuilder.java
Java
package org.ray.streaming.runtime.core.processor; import org.ray.streaming.operator.OneInputOperator; import org.ray.streaming.operator.OperatorType; import org.ray.streaming.operator.StreamOperator; import org.ray.streaming.operator.TwoInputOperator; import org.ray.streaming.operator.impl.SourceOperator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class ProcessBuilder { private static final Logger LOGGER = LoggerFactory.getLogger(ProcessBuilder.class); public static StreamProcessor buildProcessor(StreamOperator streamOperator) { OperatorType type = streamOperator.getOpType(); LOGGER.info("Building StreamProcessor, operator type = {}, operator = {}.", type, streamOperator.getClass().getSimpleName().toString()); switch (type) { case SOURCE: return new SourceProcessor<>((SourceOperator) streamOperator); case ONE_INPUT: return new OneInputProcessor<>((OneInputOperator) streamOperator); case TWO_INPUT: return new TwoInputProcessor((TwoInputOperator) streamOperator); default: throw new RuntimeException("current operator type is not support"); } } }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/processor/Processor.java
Java
package org.ray.streaming.runtime.core.processor;

import java.io.Serializable;
import java.util.List;
import org.ray.streaming.api.collector.Collector;
import org.ray.streaming.api.context.RuntimeContext;

/**
 * A processing unit executed by a worker. Implementations receive elements via
 * {@link #process} and emit downstream through the collectors supplied to
 * {@link #open}. Serializable so it can be shipped to remote workers.
 *
 * @param <T> type of the elements this processor consumes
 */
public interface Processor<T> extends Serializable {

  /**
   * Initializes the processor with its output collectors and runtime context
   * before any element is processed.
   */
  void open(List<Collector> collectors, RuntimeContext runtimeContext);

  /** Processes a single element. */
  void process(T t);

  /** Releases any resources held by the processor. */
  void close();
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/processor/SourceProcessor.java
Java
package org.ray.streaming.runtime.core.processor;

import org.ray.streaming.message.Record;
import org.ray.streaming.operator.impl.SourceOperator;

/**
 * The processor for the stream sources, containing a SourceOperator.
 *
 * <p>A source has no upstream, so {@link #process} is unsupported; data
 * production is driven through {@link #run()} instead.
 *
 * @param <T> The type of source data.
 */
public class SourceProcessor<T> extends StreamProcessor<Record, SourceOperator<T>> {

  public SourceProcessor(SourceOperator<T> operator) {
    super(operator);
  }

  /** Sources never receive records; always throws. */
  @Override
  public void process(Record record) {
    throw new UnsupportedOperationException("SourceProcessor should not process record");
  }

  /** Delegates to the wrapped source operator's run(). */
  public void run() {
    operator.run();
  }

  /** Nothing to release here. */
  @Override
  public void close() {
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/processor/StreamProcessor.java
Java
package org.ray.streaming.runtime.core.processor;

import java.util.List;
import org.ray.streaming.api.collector.Collector;
import org.ray.streaming.api.context.RuntimeContext;
import org.ray.streaming.operator.Operator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * StreamingProcessor is a process unit for an operator: it holds the wrapped
 * operator together with the collectors and runtime context wired in at open
 * time.
 *
 * @param <T> The type of process data.
 * @param <P> Type of the specific operator class.
 */
public abstract class StreamProcessor<T, P extends Operator> implements Processor<T> {

  private static final Logger LOGGER = LoggerFactory.getLogger(StreamProcessor.class);

  protected List<Collector> collectors;
  protected RuntimeContext runtimeContext;
  protected P operator;

  public StreamProcessor(P operator) {
    this.operator = operator;
  }

  /**
   * Stores the runtime wiring and opens the wrapped operator, when present.
   */
  @Override
  public void open(List<Collector> collectors, RuntimeContext runtimeContext) {
    this.collectors = collectors;
    this.runtimeContext = runtimeContext;
    if (operator != null) {
      operator.open(collectors, runtimeContext);
    }
    LOGGER.info("opened {}", this);
  }

  @Override
  public String toString() {
    return getClass().getSimpleName();
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/core/processor/TwoInputProcessor.java
Java
package org.ray.streaming.runtime.core.processor; import org.ray.streaming.message.Record; import org.ray.streaming.operator.TwoInputOperator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class TwoInputProcessor<T, O> extends StreamProcessor<Record, TwoInputOperator<T, O>> { private static final Logger LOGGER = LoggerFactory.getLogger(TwoInputProcessor.class); private String leftStream; private String rightStream; public TwoInputProcessor(TwoInputOperator<T, O> operator) { super(operator); } @Override public void process(Record record) { try { if (record.getStream().equals(leftStream)) { this.operator.processElement(record, null); } else { this.operator.processElement(null, record); } } catch (Exception e) { throw new RuntimeException(e); } } @Override public void close() { this.operator.close(); } public String getLeftStream() { return leftStream; } public void setLeftStream(String leftStream) { this.leftStream = leftStream; } public String getRightStream() { return rightStream; } public void setRightStream(String rightStream) { this.rightStream = rightStream; } }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/schedule/JobSchedulerImpl.java
Java
package org.ray.streaming.runtime.schedule;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.RayObject;
import org.ray.streaming.jobgraph.JobGraph;
import org.ray.streaming.jobgraph.JobVertex;
import org.ray.streaming.runtime.cluster.ResourceManager;
import org.ray.streaming.runtime.core.graph.ExecutionGraph;
import org.ray.streaming.runtime.core.graph.ExecutionNode;
import org.ray.streaming.runtime.core.graph.ExecutionTask;
import org.ray.streaming.runtime.worker.JobWorker;
import org.ray.streaming.runtime.worker.context.WorkerContext;
import org.ray.streaming.schedule.JobScheduler;

/**
 * JobSchedulerImpl schedules workers by the Plan and the resource information
 * from ResourceManager.
 */
public class JobSchedulerImpl implements JobScheduler {
  private JobGraph jobGraph;
  private Map<String, String> jobConfig;
  // Collaborators are fixed at construction time.
  private final ResourceManager resourceManager;
  private final TaskAssigner taskAssigner;

  public JobSchedulerImpl() {
    this.resourceManager = new ResourceManager();
    this.taskAssigner = new TaskAssignerImpl();
  }

  /**
   * Schedule physical plan to execution graph, and call streaming worker to
   * init and run.
   *
   * @param jobGraph the logical plan to schedule
   * @param jobConfig job-level configuration forwarded to every worker
   */
  @Override
  public void schedule(JobGraph jobGraph, Map<String, String> jobConfig) {
    this.jobConfig = jobConfig;
    this.jobGraph = jobGraph;
    // NOTE(review): mutates a process-global JVM property before Ray.init();
    // presumably required so each java worker process hosts a single worker —
    // confirm against the raylet configuration docs.
    System.setProperty("ray.raylet.config.num_workers_per_process_java", "1");
    Ray.init();

    List<RayActor<JobWorker>> workers = this.resourceManager.createWorkers(getPlanWorker());
    ExecutionGraph executionGraph = this.taskAssigner.assign(this.jobGraph, workers);
    List<ExecutionNode> executionNodes = executionGraph.getExecutionNodeList();
    List<RayObject<Boolean>> waits = new ArrayList<>();
    for (ExecutionNode executionNode : executionNodes) {
      List<ExecutionTask> executionTasks = executionNode.getExecutionTasks();
      for (ExecutionTask executionTask : executionTasks) {
        int taskId = executionTask.getTaskId();
        RayActor<JobWorker> streamWorker = executionTask.getWorker();
        waits.add(Ray.call(JobWorker::init, streamWorker,
            new WorkerContext(taskId, executionGraph, jobConfig)));
      }
    }
    // Block until every worker has finished initialization.
    Ray.wait(waits);
  }

  /** @return the total worker count (sum of all vertex parallelisms). */
  private int getPlanWorker() {
    // mapToInt().sum() avoids the boxed reduce of the original implementation.
    return jobGraph.getJobVertexList().stream()
        .mapToInt(JobVertex::getParallelism)
        .sum();
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/schedule/TaskAssigner.java
Java
package org.ray.streaming.runtime.schedule;

import java.io.Serializable;
import java.util.List;
import org.ray.api.RayActor;
import org.ray.streaming.jobgraph.JobGraph;
import org.ray.streaming.runtime.core.graph.ExecutionGraph;
import org.ray.streaming.runtime.worker.JobWorker;

/**
 * Interface of the task assigning strategy.
 */
public interface TaskAssigner extends Serializable {

  /**
   * Assign logical plan to physical execution graph.
   *
   * @param jobGraph the logical plan of the job
   * @param workers worker actors available for task placement
   * @return the physical execution graph with tasks bound to workers
   */
  ExecutionGraph assign(JobGraph jobGraph, List<RayActor<JobWorker>> workers);
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/schedule/TaskAssignerImpl.java
Java
package org.ray.streaming.runtime.schedule;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.ray.api.RayActor;
import org.ray.streaming.jobgraph.JobEdge;
import org.ray.streaming.jobgraph.JobGraph;
import org.ray.streaming.jobgraph.JobVertex;
import org.ray.streaming.runtime.core.graph.ExecutionEdge;
import org.ray.streaming.runtime.core.graph.ExecutionGraph;
import org.ray.streaming.runtime.core.graph.ExecutionNode;
import org.ray.streaming.runtime.core.graph.ExecutionTask;
import org.ray.streaming.runtime.worker.JobWorker;

/**
 * Default {@link TaskAssigner}: maps each job vertex to an execution node and
 * binds one worker actor per unit of parallelism.
 */
public class TaskAssignerImpl implements TaskAssigner {

  /**
   * Assign an optimized logical plan to execution graph.
   *
   * @param jobGraph The logical plan.
   * @param workers The worker actors.
   * @return The physical execution graph.
   */
  @Override
  public ExecutionGraph assign(JobGraph jobGraph, List<RayActor<JobWorker>> workers) {
    List<JobVertex> jobVertices = jobGraph.getJobVertexList();
    List<JobEdge> jobEdges = jobGraph.getJobEdgeList();

    // LinkedHashMap keeps the resulting node list in vertex-declaration order
    // instead of HashMap's unspecified iteration order, so the built execution
    // graph is deterministic.
    Map<Integer, ExecutionNode> idToExecutionNode = new LinkedHashMap<>();
    int taskId = 0;
    for (JobVertex jobVertex : jobVertices) {
      ExecutionNode executionNode =
          new ExecutionNode(jobVertex.getVertexId(), jobVertex.getParallelism());
      executionNode.setNodeType(jobVertex.getVertexType());
      // One task (and one worker) per unit of parallelism; taskId is global,
      // so workers.get(taskId) consumes the worker list in order.
      List<ExecutionTask> vertexTasks = new ArrayList<>();
      for (int taskIndex = 0; taskIndex < jobVertex.getParallelism(); taskIndex++) {
        vertexTasks.add(new ExecutionTask(taskId, taskIndex, workers.get(taskId)));
        taskId++;
      }
      executionNode.setExecutionTasks(vertexTasks);
      executionNode.setStreamOperator(jobVertex.getStreamOperator());
      idToExecutionNode.put(executionNode.getNodeId(), executionNode);
    }

    // Wire every logical edge into both of its physical endpoints.
    for (JobEdge jobEdge : jobEdges) {
      int srcNodeId = jobEdge.getSrcVertexId();
      int targetNodeId = jobEdge.getTargetVertexId();
      ExecutionEdge executionEdge =
          new ExecutionEdge(srcNodeId, targetNodeId, jobEdge.getPartition());
      idToExecutionNode.get(srcNodeId).addExecutionEdge(executionEdge);
      idToExecutionNode.get(targetNodeId).addInputEdge(executionEdge);
    }

    // A plain copy constructor beats the original stream().collect(toList()).
    return new ExecutionGraph(new ArrayList<>(idToExecutionNode.values()));
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/transfer/ChannelID.java
Java
package org.ray.streaming.runtime.transfer;

import com.google.common.base.FinalizablePhantomReference;
import com.google.common.base.FinalizableReferenceQueue;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import com.google.common.io.BaseEncoding;
import java.lang.ref.Reference;
import java.nio.ByteBuffer;
import java.util.Random;
import java.util.Set;
import sun.nio.ch.DirectBuffer;

/**
 * ChannelID is used to identify a transfer channel between a upstream worker
 * and downstream worker.
 *
 * <p>Each instance copies its 20-byte id into a direct buffer and hands the
 * buffer address to native code; the resulting native handle is released via a
 * phantom reference once the java object becomes unreachable.
 */
public class ChannelID {
  public static final int ID_LENGTH = 20;

  private static final FinalizableReferenceQueue REFERENCE_QUEUE = new FinalizableReferenceQueue();
  // This ensures that the FinalizablePhantomReference itself is not garbage-collected.
  private static final Set<Reference<?>> references = Sets.newConcurrentHashSet();

  private final byte[] bytes;
  private final String strId;
  private final ByteBuffer buffer;
  private final long address;
  private final long nativeIdPtr;

  private ChannelID(String strId, byte[] idBytes) {
    this.strId = strId;
    this.bytes = idBytes;
    // Copy the id into a direct buffer so native code can read it by address.
    ByteBuffer directBuffer = ByteBuffer.allocateDirect(ID_LENGTH);
    directBuffer.put(bytes);
    directBuffer.rewind();
    this.buffer = directBuffer;
    this.address = ((DirectBuffer) (buffer)).address();
    this.nativeIdPtr = createNativeID(address);
  }

  public byte[] getBytes() {
    return bytes;
  }

  public ByteBuffer getBuffer() {
    return buffer;
  }

  public long getAddress() {
    return address;
  }

  public long getNativeIdPtr() {
    if (nativeIdPtr == 0) {
      throw new IllegalStateException("native ID not available");
    }
    return nativeIdPtr;
  }

  @Override
  public String toString() {
    return strId;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    ChannelID that = (ChannelID) o;
    return strId.equals(that.strId);
  }

  @Override
  public int hashCode() {
    return strId.hashCode();
  }

  private static native long createNativeID(long idAddress);

  private static native void destroyNativeID(long nativeIdPtr);

  /**
   * @param id hex string representation of channel id
   */
  public static ChannelID from(String id) {
    return from(id, ChannelID.idStrToBytes(id));
  }

  /**
   * @param idBytes bytes representation of channel id
   */
  public static ChannelID from(byte[] idBytes) {
    return from(idBytesToStr(idBytes), idBytes);
  }

  private static ChannelID from(String strID, byte[] idBytes) {
    ChannelID id = new ChannelID(strID, idBytes);
    long nativeIdPtr = id.nativeIdPtr;
    if (nativeIdPtr != 0) {
      // Destroy the native handle once the ChannelID becomes unreachable.
      Reference<ChannelID> reference =
          new FinalizablePhantomReference<ChannelID>(id, REFERENCE_QUEUE) {
            @Override
            public void finalizeReferent() {
              destroyNativeID(nativeIdPtr);
              references.remove(this);
            }
          };
      references.add(reference);
    }
    return id;
  }

  /**
   * @return a random channel id string (hex characters 'A'..'F' only)
   */
  public static String genRandomIdStr() {
    StringBuilder sb = new StringBuilder();
    Random random = new Random();
    for (int i = 0; i < ChannelID.ID_LENGTH * 2; ++i) {
      sb.append((char) (random.nextInt(6) + 'A'));
    }
    return sb.toString();
  }

  /**
   * Generate a channel name encoding the two task ids. The id is
   * {@link #ID_LENGTH} (20) bytes long, i.e. 40 hex characters.
   *
   * @param fromTaskId upstream task id
   * @param toTaskId downstream task id
   * @param ts timestamp embedded into the id
   * @return channel name
   */
  public static String genIdStr(int fromTaskId, int toTaskId, long ts) {
    /*
      | Head    | Timestamp | Empty  | From   | To     |
      | 8 bytes | 4bytes    | 4bytes | 2bytes | 2bytes |
    */
    // Guava message templates substitute only %s; the original used %d (never
    // substituted) and passed fromTaskId in BOTH messages.
    Preconditions.checkArgument(fromTaskId < Short.MAX_VALUE,
        "fromTaskId %s is larger than %s", fromTaskId, Short.MAX_VALUE);
    Preconditions.checkArgument(toTaskId < Short.MAX_VALUE,
        "toTaskId %s is larger than %s", toTaskId, Short.MAX_VALUE);
    byte[] channelName = new byte[20];
    for (int i = 11; i >= 8; i--) {
      channelName[i] = (byte) (ts & 0xff);
      ts >>= 8;
    }
    channelName[16] = (byte) ((fromTaskId & 0xffff) >> 8);
    channelName[17] = (byte) (fromTaskId & 0xff);
    channelName[18] = (byte) ((toTaskId & 0xffff) >> 8);
    channelName[19] = (byte) (toTaskId & 0xff);
    return ChannelID.idBytesToStr(channelName);
  }

  /**
   * @param id hex string representation of channel id
   * @return bytes representation of channel id
   */
  static byte[] idStrToBytes(String id) {
    byte[] idBytes = BaseEncoding.base16().decode(id.toUpperCase());
    assert idBytes.length == ChannelID.ID_LENGTH;
    return idBytes;
  }

  /**
   * @param id bytes representation of channel id
   * @return hex string representation of channel id
   */
  static String idBytesToStr(byte[] id) {
    assert id.length == ChannelID.ID_LENGTH;
    return BaseEncoding.base16().encode(id).toLowerCase();
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/transfer/ChannelInitException.java
Java
package org.ray.streaming.runtime.transfer;

import java.util.ArrayList;
import java.util.List;

/**
 * Thrown when one or more transfer channels fail to initialize; carries the
 * raw ids of the abnormal channels.
 */
public class ChannelInitException extends Exception {

  private final List<byte[]> abnormalQueues;

  public ChannelInitException(String message, List<byte[]> abnormalQueues) {
    super(message);
    this.abnormalQueues = abnormalQueues;
  }

  /** @return raw byte ids of the channels that failed to initialize */
  public List<byte[]> getAbnormalChannels() {
    return abnormalQueues;
  }

  /** @return hex string ids of the channels that failed to initialize */
  public List<String> getAbnormalChannelsString() {
    List<String> result = new ArrayList<>(abnormalQueues.size());
    for (byte[] idBytes : abnormalQueues) {
      result.add(ChannelID.idBytesToStr(idBytes));
    }
    return result;
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/transfer/ChannelInterruptException.java
Java
package org.ray.streaming.runtime.transfer;

/**
 * Unchecked exception signalling that a channel operation was interrupted.
 */
public class ChannelInterruptException extends RuntimeException {

  public ChannelInterruptException() {
    super();
  }

  public ChannelInterruptException(String message) {
    super(message);
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/transfer/ChannelUtils.java
Java
package org.ray.streaming.runtime.transfer;

import java.util.Map;
import org.ray.streaming.runtime.generated.Streaming;
import org.ray.streaming.util.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Helpers for converting a java configuration map into the serialized protobuf
 * configuration consumed by the native transfer layer.
 */
public class ChannelUtils {
  private static final Logger LOGGER = LoggerFactory.getLogger(ChannelUtils.class);

  /**
   * Serializes the recognized entries of {@code conf} into a
   * {@link Streaming.StreamingConfig} byte array. Unknown keys are ignored.
   *
   * <p>Each key is looked up once (the original did containsKey + get), and a
   * key mapped to a null value is skipped instead of being passed to the
   * protobuf builder, which would throw a NullPointerException.
   *
   * @param conf job/worker configuration map
   * @return serialized native streaming configuration
   */
  static byte[] toNativeConf(Map<String, String> conf) {
    Streaming.StreamingConfig.Builder builder = Streaming.StreamingConfig.newBuilder();
    String jobName = conf.get(Config.STREAMING_JOB_NAME);
    if (jobName != null) {
      builder.setJobName(jobName);
    }
    String taskJobId = conf.get(Config.TASK_JOB_ID);
    if (taskJobId != null) {
      builder.setTaskJobId(taskJobId);
    }
    String workerName = conf.get(Config.STREAMING_WORKER_NAME);
    if (workerName != null) {
      builder.setWorkerName(workerName);
    }
    String opName = conf.get(Config.STREAMING_OP_NAME);
    if (opName != null) {
      builder.setOpName(opName);
    }
    String ringBufferCapacity = conf.get(Config.STREAMING_RING_BUFFER_CAPACITY);
    if (ringBufferCapacity != null) {
      builder.setRingBufferCapacity(Integer.parseInt(ringBufferCapacity));
    }
    String emptyMessageInterval = conf.get(Config.STREAMING_EMPTY_MESSAGE_INTERVAL);
    if (emptyMessageInterval != null) {
      builder.setEmptyMessageInterval(Integer.parseInt(emptyMessageInterval));
    }
    Streaming.StreamingConfig streamingConf = builder.build();
    LOGGER.info("Streaming native conf {}", streamingConf.toString());
    return streamingConf.toByteArray();
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/transfer/DataMessage.java
Java
package org.ray.streaming.runtime.transfer;

import java.nio.ByteBuffer;

/**
 * DataMessage represents data between upstream and downstream operator.
 */
public class DataMessage implements Message {

  private final ByteBuffer body;
  private final long msgId;
  private final long timestamp;
  private final String channelId;

  public DataMessage(ByteBuffer body, long timestamp, long msgId, String channelId) {
    this.body = body;
    this.timestamp = timestamp;
    this.msgId = msgId;
    this.channelId = channelId;
  }

  @Override
  public ByteBuffer body() {
    return body;
  }

  @Override
  public long timestamp() {
    return timestamp;
  }

  /**
   * @return message id
   */
  public long msgId() {
    return msgId;
  }

  /**
   * @return string id of channel where data is coming from
   */
  public String channelId() {
    return channelId;
  }

  @Override
  public String toString() {
    // Same rendered text as the original concatenation.
    return "DataMessage{body=" + body
        + ", msgId=" + msgId
        + ", timestamp=" + timestamp
        + ", channelId='" + channelId + "'}";
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/transfer/DataReader.java
Java
package org.ray.streaming.runtime.transfer;

import com.google.common.base.Preconditions;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import org.ray.api.id.ActorId;
import org.ray.streaming.runtime.util.Platform;
import org.ray.streaming.util.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * DataReader is a wrapper of the streaming C++ DataReader, which reads data
 * from channels of upstream workers.
 *
 * <p>Bundles are fetched from native memory one at a time; the messages of
 * the current bundle are buffered in {@code buf} and handed out by
 * {@link #read(long)}. Not thread-safe.
 */
public class DataReader {
  private static final Logger LOGGER = LoggerFactory.getLogger(DataReader.class);

  // Opaque pointer to the native reader; 0 after close().
  private long nativeReaderPtr;
  // Messages of the current bundle not yet returned to the caller.
  private Queue<DataMessage> buf = new LinkedList<>();

  /**
   * @param inputChannels string ids of the input channels, parallel to {@code fromActors}
   * @param fromActors    upstream actor ids, one per input channel
   * @param conf          configuration passed through to the native reader
   */
  public DataReader(List<String> inputChannels,
                    List<ActorId> fromActors,
                    Map<String, String> conf) {
    Preconditions.checkArgument(inputChannels.size() > 0);
    Preconditions.checkArgument(inputChannels.size() == fromActors.size());
    byte[][] inputChannelsBytes = inputChannels.stream()
        .map(ChannelID::idStrToBytes).toArray(byte[][]::new);
    byte[][] fromActorsBytes = fromActors.stream()
        .map(ActorId::getBytes).toArray(byte[][]::new);
    // Start every channel from offset 0 (no checkpoint restore here).
    long[] seqIds = new long[inputChannels.size()];
    long[] msgIds = new long[inputChannels.size()];
    for (int i = 0; i < inputChannels.size(); i++) {
      seqIds[i] = 0;
      msgIds[i] = 0;
    }
    long timerInterval = Long.parseLong(
        conf.getOrDefault(Config.TIMER_INTERVAL_MS, "-1"));
    String channelType = conf.getOrDefault(Config.CHANNEL_TYPE, Config.DEFAULT_CHANNEL_TYPE);
    boolean isMock = false;
    // Memory channel means an in-process mock transport (used in tests).
    if (Config.MEMORY_CHANNEL.equals(channelType)) {
      isMock = true;
    }
    boolean isRecreate = Boolean.parseBoolean(
        conf.getOrDefault(Config.IS_RECREATE, "false"));
    this.nativeReaderPtr = createDataReaderNative(
        inputChannelsBytes,
        fromActorsBytes,
        seqIds,
        msgIds,
        timerInterval,
        isRecreate,
        ChannelUtils.toNativeConf(conf),
        isMock
    );
    LOGGER.info("create DataReader succeed");
  }

  // params set by getBundleNative: bundle data address + size
  private final ByteBuffer getBundleParams = ByteBuffer.allocateDirect(24);
  // We use direct buffer to reduce gc overhead and memory copy.
  // bundleData is re-pointed at the native bundle memory by getBundle().
  private final ByteBuffer bundleData = Platform.wrapDirectBuffer(0, 0);
  private final ByteBuffer bundleMeta = ByteBuffer.allocateDirect(BundleMeta.LENGTH);

  {
    // Native side writes in host byte order; match it on all three buffers.
    getBundleParams.order(ByteOrder.nativeOrder());
    bundleData.order(ByteOrder.nativeOrder());
    bundleMeta.order(ByteOrder.nativeOrder());
  }

  /**
   * Read message from input channels, if timeout, return null.
   *
   * @param timeoutMillis timeout
   * @return message or null
   */
  public DataMessage read(long timeoutMillis) {
    if (buf.isEmpty()) {
      getBundle(timeoutMillis);
      // if bundle not empty. empty message still has data size + seqId + msgId
      if (bundleData.position() < bundleData.limit()) {
        BundleMeta bundleMeta = new BundleMeta(this.bundleMeta);
        // barrier
        if (bundleMeta.getBundleType() == DataBundleType.BARRIER) {
          // Barriers (checkpoint markers) are not handled by this reader yet.
          throw new UnsupportedOperationException(
              "Unsupported bundle type " + bundleMeta.getBundleType());
        } else if (bundleMeta.getBundleType() == DataBundleType.BUNDLE) {
          String channelID = bundleMeta.getChannelID();
          long timestamp = bundleMeta.getBundleTs();
          // Slice every message of the bundle into the local queue.
          for (int i = 0; i < bundleMeta.getMessageListSize(); i++) {
            buf.offer(getDataMessage(bundleData, channelID, timestamp));
          }
        } else if (bundleMeta.getBundleType() == DataBundleType.EMPTY) {
          // Empty bundle: keep-alive carrying only the last message id.
          long messageId = bundleMeta.getLastMessageId();
          buf.offer(new DataMessage(null, bundleMeta.getBundleTs(), messageId,
              bundleMeta.getChannelID()));
        }
      }
    }
    if (buf.isEmpty()) {
      return null;
    }
    return buf.poll();
  }

  // Parses one serialized message (size, msgId, msgType header, then payload)
  // from the bundle buffer, advancing its position past the payload.
  private DataMessage getDataMessage(ByteBuffer bundleData, String channelID, long timestamp) {
    int dataSize = bundleData.getInt();
    // msgId
    long msgId = bundleData.getLong();
    // msgType
    bundleData.getInt();
    // make `data.capacity() == data.remaining()`, because some code used `capacity()`
    // rather than `remaining()`
    int position = bundleData.position();
    int limit = bundleData.limit();
    bundleData.limit(position + dataSize);
    ByteBuffer data = bundleData.slice();
    bundleData.limit(limit);
    bundleData.position(position + dataSize);
    return new DataMessage(data, timestamp, msgId, channelID);
  }

  // Fetches the next bundle from native code; on return, bundleData is
  // re-pointed at the native bundle bytes and bundleMeta holds its header.
  private void getBundle(long timeoutMillis) {
    getBundleNative(nativeReaderPtr, timeoutMillis,
        Platform.getAddress(getBundleParams), Platform.getAddress(bundleMeta));
    bundleMeta.rewind();
    long bundleAddress = getBundleParams.getLong(0);
    int bundleSize = getBundleParams.getInt(8);
    // This has better performance than NewDirectBuffer or set address/capacity in jni.
    Platform.wrapDirectBuffer(bundleData, bundleAddress, bundleSize);
  }

  /**
   * Stop reader
   */
  public void stop() {
    stopReaderNative(nativeReaderPtr);
  }

  /**
   * Close reader to release resource. Idempotent: subsequent calls are no-ops.
   */
  public void close() {
    if (nativeReaderPtr == 0) {
      return;
    }
    LOGGER.info("closing DataReader.");
    closeReaderNative(nativeReaderPtr);
    nativeReaderPtr = 0;
    LOGGER.info("closing DataReader done.");
  }

  private static native long createDataReaderNative(
      byte[][] inputChannels,
      byte[][] inputActorIds,
      long[] seqIds,
      long[] msgIds,
      long timerInterval,
      boolean isRecreate,
      byte[] configBytes,
      boolean isMock);

  private native void getBundleNative(long nativeReaderPtr,
                                      long timeoutMillis,
                                      long params,
                                      long metaAddress);

  private native void stopReaderNative(long nativeReaderPtr);

  private native void closeReaderNative(long nativeReaderPtr);

  // Bundle kinds; codes must match the native serialization.
  enum DataBundleType {
    EMPTY(1),
    BARRIER(2),
    BUNDLE(3);

    int code;

    DataBundleType(int code) {
      this.code = code;
    }
  }

  /**
   * Java view of the native bundle header; field layout must mirror the
   * C++ StreamingMessageBundleMeta serialization exactly.
   */
  static class BundleMeta {
    // kMessageBundleHeaderSize + kUniqueIDSize:
    // magicNum(4b) + bundleTs(8b) + lastMessageId(8b) + messageListSize(4b)
    // + bundleType(4b) + rawBundleSize(4b) + channelID(20b)
    static final int LENGTH = 4 + 8 + 8 + 4 + 4 + 4 + 20;

    private int magicNum;
    private long bundleTs;
    private long lastMessageId;
    private int messageListSize;
    private DataBundleType bundleType;
    private String channelID;
    private int rawBundleSize;

    BundleMeta(ByteBuffer buffer) {
      // StreamingMessageBundleMeta Deserialization
      // magicNum
      magicNum = buffer.getInt();
      // messageBundleTs
      bundleTs = buffer.getLong();
      // lastOffsetSeqId
      lastMessageId = buffer.getLong();
      messageListSize = buffer.getInt();
      int typeInt = buffer.getInt();
      // Unknown codes fall back to EMPTY rather than failing.
      if (DataBundleType.BUNDLE.code == typeInt) {
        bundleType = DataBundleType.BUNDLE;
      } else if (DataBundleType.BARRIER.code == typeInt) {
        bundleType = DataBundleType.BARRIER;
      } else {
        bundleType = DataBundleType.EMPTY;
      }
      // rawBundleSize
      rawBundleSize = buffer.getInt();
      channelID = getQidString(buffer);
    }

    // Reads the fixed-length channel id bytes and renders them as hex.
    private String getQidString(ByteBuffer buffer) {
      byte[] bytes = new byte[ChannelID.ID_LENGTH];
      buffer.get(bytes);
      return ChannelID.idBytesToStr(bytes);
    }

    public int getMagicNum() {
      return magicNum;
    }

    public long getBundleTs() {
      return bundleTs;
    }

    public long getLastMessageId() {
      return lastMessageId;
    }

    public int getMessageListSize() {
      return messageListSize;
    }

    public DataBundleType getBundleType() {
      return bundleType;
    }

    public String getChannelID() {
      return channelID;
    }

    public int getRawBundleSize() {
      return rawBundleSize;
    }
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/transfer/DataWriter.java
Java
package org.ray.streaming.runtime.transfer;

import com.google.common.base.Preconditions;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.ray.api.id.ActorId;
import org.ray.streaming.runtime.util.Platform;
import org.ray.streaming.util.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * DataWriter is a wrapper of streaming c++ DataWriter, which sends data
 * to downstream workers.
 *
 * <p>Messages are staged in a reusable direct buffer whose native address is
 * handed to the JNI layer. Not thread-safe: the staging buffer is shared by
 * all write calls.
 */
public class DataWriter {
  private static final Logger LOGGER = LoggerFactory.getLogger(DataWriter.class);

  // Opaque pointer to the native writer; 0 after close().
  private long nativeWriterPtr;
  // Reusable staging buffer, grown on demand by ensureBuffer().
  private ByteBuffer buffer = ByteBuffer.allocateDirect(0);
  // Native address of `buffer`; must always track the current buffer.
  private long bufferAddress;

  {
    // BUGFIX: the previous initializer called ensureBuffer(0), which is a
    // no-op (capacity 0 is not < 0), leaving bufferAddress == 0 and the
    // buffer in default byte order. A zero-length write would then pass a
    // null address to native code. Initialize both explicitly instead.
    buffer.order(ByteOrder.nativeOrder());
    bufferAddress = Platform.getAddress(buffer);
  }

  /**
   * @param outputChannels output channels ids
   * @param toActors       downstream output actors
   * @param conf           configuration
   */
  public DataWriter(List<String> outputChannels,
                    List<ActorId> toActors,
                    Map<String, String> conf) {
    Preconditions.checkArgument(!outputChannels.isEmpty());
    Preconditions.checkArgument(outputChannels.size() == toActors.size());
    byte[][] outputChannelsBytes = outputChannels.stream()
        .map(ChannelID::idStrToBytes).toArray(byte[][]::new);
    byte[][] toActorsBytes = toActors.stream()
        .map(ActorId::getBytes).toArray(byte[][]::new);
    long channelSize = Long.parseLong(
        conf.getOrDefault(Config.CHANNEL_SIZE, Config.CHANNEL_SIZE_DEFAULT));
    // Start every channel at message id 0 (no checkpoint restore here).
    long[] msgIds = new long[outputChannels.size()];
    for (int i = 0; i < outputChannels.size(); i++) {
      msgIds[i] = 0;
    }
    String channelType = conf.getOrDefault(Config.CHANNEL_TYPE, Config.DEFAULT_CHANNEL_TYPE);
    boolean isMock = false;
    // Memory channel means an in-process mock transport (used in tests).
    if (Config.MEMORY_CHANNEL.equals(channelType)) {
      isMock = true;
    }
    this.nativeWriterPtr = createWriterNative(
        outputChannelsBytes,
        toActorsBytes,
        msgIds,
        channelSize,
        ChannelUtils.toNativeConf(conf),
        isMock
    );
    LOGGER.info("create DataWriter succeed");
  }

  /**
   * Write msg into the specified channel
   *
   * @param id   channel id
   * @param item message item data section is specified by [position, limit).
   *             Note: consumes {@code item} (its position advances to its limit).
   */
  public void write(ChannelID id, ByteBuffer item) {
    int size = item.remaining();
    ensureBuffer(size);
    buffer.clear();
    buffer.put(item);
    writeMessageNative(nativeWriterPtr, id.getNativeIdPtr(), bufferAddress, size);
  }

  /**
   * Write msg into the specified channels
   *
   * @param ids  channel ids
   * @param item message item data section is specified by [position, limit).
   *             item doesn't have to be a direct buffer, and is not consumed
   *             (a duplicate is read for each channel).
   */
  public void write(Set<ChannelID> ids, ByteBuffer item) {
    int size = item.remaining();
    ensureBuffer(size);
    for (ChannelID id : ids) {
      buffer.clear();
      // Duplicate so every channel reads from item's original position.
      buffer.put(item.duplicate());
      writeMessageNative(nativeWriterPtr, id.getNativeIdPtr(), bufferAddress, size);
    }
  }

  // Grows the staging buffer when needed, keeping order and native address
  // in sync. Never shrinks.
  private void ensureBuffer(int size) {
    if (buffer.capacity() < size) {
      buffer = ByteBuffer.allocateDirect(size);
      buffer.order(ByteOrder.nativeOrder());
      bufferAddress = Platform.getAddress(buffer);
    }
  }

  /**
   * stop writer
   */
  public void stop() {
    stopWriterNative(nativeWriterPtr);
  }

  /**
   * close writer to release resources. Idempotent: subsequent calls are no-ops.
   */
  public void close() {
    if (nativeWriterPtr == 0) {
      return;
    }
    LOGGER.info("closing data writer.");
    closeWriterNative(nativeWriterPtr);
    nativeWriterPtr = 0;
    LOGGER.info("closing data writer done.");
  }

  private static native long createWriterNative(
      byte[][] outputQueueIds,
      byte[][] outputActorIds,
      long[] msgIds,
      long channelSize,
      byte[] confBytes,
      boolean isMock);

  private native long writeMessageNative(
      long nativeQueueProducerPtr, long nativeIdPtr, long address, int size);

  private native void stopWriterNative(long nativeQueueProducerPtr);

  private native void closeWriterNative(long nativeQueueProducerPtr);
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/transfer/Message.java
Java
package org.ray.streaming.runtime.transfer;

import java.nio.ByteBuffer;

/**
 * A message received from an upstream worker via {@code DataReader}.
 */
public interface Message {

  /**
   * Message data
   *
   * Message body is a direct byte buffer, which may be invalid after call next
   * <code>DataReader#getBundleNative</code>. Please consume this buffer fully
   * before next call <code>getBundleNative</code>.
   *
   * @return message body; may be null for empty (keep-alive) messages
   */
  ByteBuffer body();

  /**
   * @return timestamp when item is written by upstream DataWriter
   */
  long timestamp();
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/transfer/TransferHandler.java
Java
package org.ray.streaming.runtime.transfer; import com.google.common.base.Preconditions; import org.ray.runtime.RayNativeRuntime; import org.ray.runtime.functionmanager.FunctionDescriptor; import org.ray.runtime.functionmanager.JavaFunctionDescriptor; import org.ray.runtime.util.JniUtils; /** * TransferHandler is used for handle direct call based data transfer between workers. * TransferHandler is used by streaming queue for data transfer. */ public class TransferHandler { static { try { Class.forName(RayNativeRuntime.class.getName()); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } JniUtils.loadLibrary("streaming_java"); } private long writerClientNative; private long readerClientNative; public TransferHandler(long coreWorkerNative, JavaFunctionDescriptor writerAsyncFunc, JavaFunctionDescriptor writerSyncFunc, JavaFunctionDescriptor readerAsyncFunc, JavaFunctionDescriptor readerSyncFunc) { Preconditions.checkArgument(coreWorkerNative != 0); writerClientNative = createWriterClientNative( coreWorkerNative, writerAsyncFunc, writerSyncFunc); readerClientNative = createReaderClientNative( coreWorkerNative, readerAsyncFunc, readerSyncFunc); } public void onWriterMessage(byte[] buffer) { handleWriterMessageNative(writerClientNative, buffer); } public byte[] onWriterMessageSync(byte[] buffer) { return handleWriterMessageSyncNative(writerClientNative, buffer); } public void onReaderMessage(byte[] buffer) { handleReaderMessageNative(readerClientNative, buffer); } public byte[] onReaderMessageSync(byte[] buffer) { return handleReaderMessageSyncNative(readerClientNative, buffer); } private native long createWriterClientNative( long coreWorkerNative, FunctionDescriptor asyncFunc, FunctionDescriptor syncFunc); private native long createReaderClientNative( long coreWorkerNative, FunctionDescriptor asyncFunc, FunctionDescriptor syncFunc); private native void handleWriterMessageNative(long handler, byte[] buffer); private native byte[] 
handleWriterMessageSyncNative(long handler, byte[] buffer); private native void handleReaderMessageNative(long handler, byte[] buffer); private native byte[] handleReaderMessageSyncNative(long handler, byte[] buffer); }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/util/EnvUtil.java
Java
package org.ray.streaming.runtime.util; import org.ray.runtime.RayNativeRuntime; import org.ray.runtime.util.JniUtils; public class EnvUtil { public static void loadNativeLibraries() { // Explicitly load `RayNativeRuntime`, to make sure `core_worker_library_java` // is loaded before `streaming_java`. try { Class.forName(RayNativeRuntime.class.getName()); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } JniUtils.loadLibrary("streaming_java"); } }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/util/Platform.java
Java
package org.ray.streaming.runtime.util;

import com.google.common.base.Preconditions;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import sun.misc.Unsafe;
import sun.nio.ch.DirectBuffer;

/**
 * Low-level helpers for wrapping raw native memory as direct
 * {@link ByteBuffer}s without a JNI round-trip, via {@link Unsafe}.
 *
 * Based on org.apache.spark.unsafe.Platform
 */
public final class Platform {

  public static final Unsafe UNSAFE;

  static {
    Unsafe unsafe;
    try {
      // `theUnsafe` is the JDK singleton; application code cannot call
      // Unsafe.getUnsafe(), so fetch the field reflectively.
      Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe");
      unsafeField.setAccessible(true);
      unsafe = (Unsafe) unsafeField.get(null);
    } catch (Throwable cause) {
      throw new UnsupportedOperationException("Unsafe is not supported in this platform.");
    }
    UNSAFE = unsafe;
  }

  // Access fields and constructors once and store them, for performance:
  private static final Constructor<?> DBB_CONSTRUCTOR;
  private static final long BUFFER_ADDRESS_FIELD_OFFSET;
  private static final long BUFFER_CAPACITY_FIELD_OFFSET;

  static {
    try {
      // Private DirectByteBuffer(long address, int capacity) constructor:
      // wraps existing native memory without allocating or owning it.
      Class<?> cls = Class.forName("java.nio.DirectByteBuffer");
      Constructor<?> constructor = cls.getDeclaredConstructor(Long.TYPE, Integer.TYPE);
      constructor.setAccessible(true);
      DBB_CONSTRUCTOR = constructor;
      // Offsets of Buffer.address / Buffer.capacity, poked directly by
      // the wrapDirectBuffer methods below.
      Field addressField = Buffer.class.getDeclaredField("address");
      BUFFER_ADDRESS_FIELD_OFFSET = UNSAFE.objectFieldOffset(addressField);
      Preconditions.checkArgument(BUFFER_ADDRESS_FIELD_OFFSET != 0);
      Field capacityField = Buffer.class.getDeclaredField("capacity");
      BUFFER_CAPACITY_FIELD_OFFSET = UNSAFE.objectFieldOffset(capacityField);
      Preconditions.checkArgument(BUFFER_CAPACITY_FIELD_OFFSET != 0);
    } catch (ClassNotFoundException | NoSuchMethodException | NoSuchFieldException e) {
      throw new IllegalStateException(e);
    }
  }

  // One zero-length direct buffer per thread; wrapDirectBuffer(long, int)
  // duplicates it and re-points the copy, so the reflective constructor is
  // invoked only once per thread, not on every wrap.
  private static final ThreadLocal<ByteBuffer> localEmptyBuffer =
      ThreadLocal.withInitial(() -> {
        try {
          return (ByteBuffer) DBB_CONSTRUCTOR.newInstance(0, 0);
        } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
          UNSAFE.throwException(e);
        }
        // throwException never returns normally; keep the compiler satisfied.
        throw new IllegalStateException("unreachable");
      });

  /**
   * Wrap a buffer [address, address + size) as a DirectByteBuffer.
   *
   * NOTE(review): the returned buffer does not own or free the native
   * memory; the caller manages its lifetime — confirm against native side.
   */
  public static ByteBuffer wrapDirectBuffer(long address, int size) {
    ByteBuffer buffer = localEmptyBuffer.get().duplicate();
    UNSAFE.putLong(buffer, BUFFER_ADDRESS_FIELD_OFFSET, address);
    UNSAFE.putInt(buffer, BUFFER_CAPACITY_FIELD_OFFSET, size);
    // clear() resets position/limit to cover the new [0, size) range.
    buffer.clear();
    return buffer;
  }

  /**
   * Wrap a buffer [address, address + size) into provided <code>buffer</code>.
   */
  public static void wrapDirectBuffer(ByteBuffer buffer, long address, int size) {
    UNSAFE.putLong(buffer, BUFFER_ADDRESS_FIELD_OFFSET, address);
    UNSAFE.putInt(buffer, BUFFER_CAPACITY_FIELD_OFFSET, size);
    buffer.clear();
  }

  /**
   * @param buffer a DirectBuffer backed by off-heap memory
   * @return address of off-heap memory
   */
  public static long getAddress(ByteBuffer buffer) {
    return ((DirectBuffer) buffer).address();
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/worker/JobWorker.java
Java
package org.ray.streaming.runtime.worker;

import java.io.Serializable;
import java.util.Map;
import org.ray.api.Ray;
import org.ray.api.annotation.RayRemote;
import org.ray.runtime.RayMultiWorkerNativeRuntime;
import org.ray.runtime.functionmanager.JavaFunctionDescriptor;
import org.ray.streaming.runtime.core.graph.ExecutionGraph;
import org.ray.streaming.runtime.core.graph.ExecutionNode;
import org.ray.streaming.runtime.core.graph.ExecutionNode.NodeType;
import org.ray.streaming.runtime.core.graph.ExecutionTask;
import org.ray.streaming.runtime.core.processor.OneInputProcessor;
import org.ray.streaming.runtime.core.processor.ProcessBuilder;
import org.ray.streaming.runtime.core.processor.SourceProcessor;
import org.ray.streaming.runtime.core.processor.StreamProcessor;
import org.ray.streaming.runtime.transfer.TransferHandler;
import org.ray.streaming.runtime.util.EnvUtil;
import org.ray.streaming.runtime.worker.context.WorkerContext;
import org.ray.streaming.runtime.worker.tasks.OneInputStreamTask;
import org.ray.streaming.runtime.worker.tasks.SourceStreamTask;
import org.ray.streaming.runtime.worker.tasks.StreamTask;
import org.ray.streaming.util.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * The stream job worker, it is a ray actor.
 *
 * <p>One JobWorker hosts one {@link StreamTask} (source or one-input) and,
 * when native channels are used, the {@link TransferHandler} callbacks that
 * peer actors invoke for data transfer.
 */
@RayRemote
public class JobWorker implements Serializable {
  private static final Logger LOGGER = LoggerFactory.getLogger(JobWorker.class);

  static {
    EnvUtil.loadNativeLibraries();
  }

  private int taskId;
  private Map<String, String> config;
  private WorkerContext workerContext;
  private ExecutionNode executionNode;
  private ExecutionTask executionTask;
  private ExecutionGraph executionGraph;
  private StreamProcessor streamProcessor;
  private NodeType nodeType;
  private StreamTask task;
  private TransferHandler transferHandler;

  /**
   * Initializes this worker from the driver-provided context and starts its
   * stream task. Called remotely by the job scheduler.
   *
   * @param workerContext task id, execution graph and job config for this worker
   * @return always {@code true} (remote-call acknowledgment)
   */
  public Boolean init(WorkerContext workerContext) {
    this.workerContext = workerContext;
    this.taskId = workerContext.getTaskId();
    this.config = workerContext.getConfig();
    // Resolve this worker's slice of the execution graph by task id.
    this.executionGraph = this.workerContext.getExecutionGraph();
    this.executionTask = executionGraph.getExecutionTaskByTaskId(taskId);
    this.executionNode = executionGraph.getExecutionNodeByTaskId(taskId);
    this.nodeType = executionNode.getNodeType();
    this.streamProcessor = ProcessBuilder
        .buildProcessor(executionNode.getStreamOperator());
    LOGGER.debug("Initializing StreamWorker, taskId: {}, operator: {}.",
        taskId, streamProcessor);

    // Native channels need the JNI transfer handler plus the descriptors of
    // the four callback methods below, so peers can invoke them on this actor.
    String channelType = (String) this.config.getOrDefault(
        Config.CHANNEL_TYPE, Config.DEFAULT_CHANNEL_TYPE);
    if (channelType.equals(Config.NATIVE_CHANNEL)) {
      transferHandler = new TransferHandler(
          getNativeCoreWorker(),
          new JavaFunctionDescriptor(JobWorker.class.getName(),
              "onWriterMessage", "([B)V"),
          new JavaFunctionDescriptor(JobWorker.class.getName(),
              "onWriterMessageSync", "([B)[B"),
          new JavaFunctionDescriptor(JobWorker.class.getName(),
              "onReaderMessage", "([B)V"),
          new JavaFunctionDescriptor(JobWorker.class.getName(),
              "onReaderMessageSync", "([B)[B"));
    }
    task = createStreamTask();
    task.start();
    return true;
  }

  // Picks the StreamTask implementation matching the processor kind.
  private StreamTask createStreamTask() {
    if (streamProcessor instanceof OneInputProcessor) {
      return new OneInputStreamTask(taskId, streamProcessor, this);
    } else if (streamProcessor instanceof SourceProcessor) {
      return new SourceStreamTask(taskId, streamProcessor, this);
    } else {
      throw new RuntimeException("Unsupported type: " + streamProcessor);
    }
  }

  public int getTaskId() {
    return taskId;
  }

  public Map<String, String> getConfig() {
    return config;
  }

  public WorkerContext getWorkerContext() {
    return workerContext;
  }

  public NodeType getNodeType() {
    return nodeType;
  }

  public ExecutionNode getExecutionNode() {
    return executionNode;
  }

  public ExecutionTask getExecutionTask() {
    return executionTask;
  }

  public ExecutionGraph getExecutionGraph() {
    return executionGraph;
  }

  public StreamProcessor getStreamProcessor() {
    return streamProcessor;
  }

  public StreamTask getTask() {
    return task;
  }

  /**
   * Used by upstream streaming queue to send data to this actor
   */
  public void onReaderMessage(byte[] buffer) {
    transferHandler.onReaderMessage(buffer);
  }

  /**
   * Used by upstream streaming queue to send data to this actor
   * and receive result from this actor
   */
  public byte[] onReaderMessageSync(byte[] buffer) {
    return transferHandler.onReaderMessageSync(buffer);
  }

  /**
   * Used by downstream streaming queue to send data to this actor
   */
  public void onWriterMessage(byte[] buffer) {
    transferHandler.onWriterMessage(buffer);
  }

  /**
   * Used by downstream streaming queue to send data to this actor
   * and receive result from this actor
   */
  public byte[] onWriterMessageSync(byte[] buffer) {
    return transferHandler.onWriterMessageSync(buffer);
  }

  // Returns the native core worker pointer, or 0 when the runtime is not a
  // multi-worker native runtime (e.g. in local/mock mode).
  private static long getNativeCoreWorker() {
    long pointer = 0;
    if (Ray.internal() instanceof RayMultiWorkerNativeRuntime) {
      pointer = ((RayMultiWorkerNativeRuntime) Ray.internal())
          .getCurrentRuntime().getNativeCoreWorkerPointer();
    }
    return pointer;
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/worker/context/RayRuntimeContext.java
Java
package org.ray.streaming.runtime.worker.context;

import static org.ray.streaming.util.Config.STREAMING_BATCH_MAX_COUNT;

import java.util.Map;
import org.ray.streaming.api.context.RuntimeContext;
import org.ray.streaming.runtime.core.graph.ExecutionTask;

/**
 * Use Ray to implement RuntimeContext.
 */
public class RayRuntimeContext implements RuntimeContext {

  private int taskId;
  private int taskIndex;
  private int parallelism;
  private Long batchId;
  private final Long maxBatch;
  private Map<String, String> config;

  public RayRuntimeContext(ExecutionTask executionTask, Map<String, String> config,
                           int parallelism) {
    this.taskId = executionTask.getTaskId();
    this.taskIndex = executionTask.getTaskIndex();
    this.parallelism = parallelism;
    this.config = config;
    // Unbounded unless the job explicitly caps the number of batches.
    this.maxBatch = config.containsKey(STREAMING_BATCH_MAX_COUNT)
        ? Long.valueOf(config.get(STREAMING_BATCH_MAX_COUNT))
        : Long.MAX_VALUE;
  }

  @Override
  public int getTaskId() {
    return taskId;
  }

  @Override
  public int getTaskIndex() {
    return taskIndex;
  }

  @Override
  public int getParallelism() {
    return parallelism;
  }

  @Override
  public Long getBatchId() {
    return batchId;
  }

  @Override
  public Long getMaxBatch() {
    return maxBatch;
  }

  public void setBatchId(Long batchId) {
    this.batchId = batchId;
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/worker/context/WorkerContext.java
Java
package org.ray.streaming.runtime.worker.context;

import java.io.Serializable;
import java.util.Map;
import org.ray.streaming.runtime.core.graph.ExecutionGraph;

/**
 * Encapsulate the context information for worker initialization.
 * Serialized by the driver and shipped to each JobWorker actor.
 */
public class WorkerContext implements Serializable {

  private int taskId;
  private ExecutionGraph executionGraph;
  private Map<String, String> config;

  public WorkerContext(int taskId, ExecutionGraph executionGraph, Map<String, String> jobConfig) {
    this.taskId = taskId;
    this.executionGraph = executionGraph;
    this.config = jobConfig;
  }

  /** The id of the task this worker executes. */
  public int getTaskId() {
    return taskId;
  }

  public void setTaskId(int taskId) {
    this.taskId = taskId;
  }

  /** The full execution graph of the job. */
  public ExecutionGraph getExecutionGraph() {
    return executionGraph;
  }

  public void setExecutionGraph(ExecutionGraph executionGraph) {
    this.executionGraph = executionGraph;
  }

  /** Job-level configuration as string key-value pairs. */
  public Map<String, String> getConfig() {
    return config;
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/worker/tasks/InputStreamTask.java
Java
package org.ray.streaming.runtime.worker.tasks; import org.ray.runtime.util.Serializer; import org.ray.streaming.runtime.core.processor.Processor; import org.ray.streaming.runtime.transfer.Message; import org.ray.streaming.runtime.worker.JobWorker; import org.ray.streaming.util.Config; public abstract class InputStreamTask extends StreamTask { private volatile boolean running = true; private volatile boolean stopped = false; private long readTimeoutMillis; public InputStreamTask(int taskId, Processor processor, JobWorker streamWorker) { super(taskId, processor, streamWorker); readTimeoutMillis = Long.parseLong((String) streamWorker.getConfig() .getOrDefault(Config.READ_TIMEOUT_MS, Config.DEFAULT_READ_TIMEOUT_MS)); } @Override protected void init() { } @Override public void run() { while (running) { Message item = reader.read(readTimeoutMillis); if (item != null) { byte[] bytes = new byte[item.body().remaining()]; item.body().get(bytes); Object obj = Serializer.decode(bytes); processor.process(obj); } } stopped = true; } @Override protected void cancelTask() throws Exception { running = false; while (!stopped) { } } @Override public String toString() { final StringBuilder sb = new StringBuilder("InputStreamTask{"); sb.append("taskId=").append(taskId); sb.append(", processor=").append(processor); sb.append('}'); return sb.toString(); } }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/worker/tasks/OneInputStreamTask.java
Java
package org.ray.streaming.runtime.worker.tasks;

import org.ray.streaming.runtime.core.processor.Processor;
import org.ray.streaming.runtime.worker.JobWorker;

/**
 * Stream task for operators with a single input stream; all behavior is
 * inherited from {@link InputStreamTask}.
 *
 * NOTE(review): the type parameter {@code IN} is currently unused by this
 * class (the processor is untyped here) — presumably kept for API symmetry;
 * confirm before removing.
 */
public class OneInputStreamTask<IN> extends InputStreamTask {

  public OneInputStreamTask(int taskId, Processor processor, JobWorker streamWorker) {
    super(taskId, processor, streamWorker);
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/worker/tasks/SourceStreamTask.java
Java
package org.ray.streaming.runtime.worker.tasks; import org.ray.streaming.runtime.core.processor.Processor; import org.ray.streaming.runtime.core.processor.SourceProcessor; import org.ray.streaming.runtime.worker.JobWorker; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public class SourceStreamTask<IN> extends StreamTask { private static final Logger LOGGER = LoggerFactory.getLogger(SourceStreamTask.class); public SourceStreamTask(int taskId, Processor processor, JobWorker worker) { super(taskId, processor, worker); } @Override protected void init() { } @Override public void run() { final SourceProcessor<IN> sourceProcessor = (SourceProcessor<IN>) this.processor; sourceProcessor.run(); } @Override protected void cancelTask() throws Exception { } }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/main/java/org/ray/streaming/runtime/worker/tasks/StreamTask.java
Java
package org.ray.streaming.runtime.worker.tasks; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import org.ray.api.Ray; import org.ray.api.RayActor; import org.ray.api.id.ActorId; import org.ray.streaming.api.collector.Collector; import org.ray.streaming.api.context.RuntimeContext; import org.ray.streaming.api.partition.Partition; import org.ray.streaming.runtime.core.collector.OutputCollector; import org.ray.streaming.runtime.core.graph.ExecutionEdge; import org.ray.streaming.runtime.core.graph.ExecutionGraph; import org.ray.streaming.runtime.core.graph.ExecutionNode; import org.ray.streaming.runtime.core.processor.Processor; import org.ray.streaming.runtime.transfer.ChannelID; import org.ray.streaming.runtime.transfer.DataReader; import org.ray.streaming.runtime.transfer.DataWriter; import org.ray.streaming.runtime.worker.JobWorker; import org.ray.streaming.runtime.worker.context.RayRuntimeContext; import org.ray.streaming.util.Config; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public abstract class StreamTask implements Runnable { private static final Logger LOG = LoggerFactory.getLogger(StreamTask.class); protected int taskId; protected Processor processor; protected JobWorker worker; protected DataReader reader; private Map<ExecutionEdge, DataWriter> writers; private Thread thread; public StreamTask(int taskId, Processor processor, JobWorker worker) { this.taskId = taskId; this.processor = processor; this.worker = worker; prepareTask(); this.thread = new Thread(Ray.wrapRunnable(this), this.getClass().getName() + "-" + System.currentTimeMillis()); this.thread.setDaemon(true); } private void prepareTask() { Map<String, String> queueConf = new HashMap<>(); worker.getConfig().forEach((k, v) -> queueConf.put(k, String.valueOf(v))); String queueSize = (String) worker.getConfig() .getOrDefault(Config.CHANNEL_SIZE, Config.CHANNEL_SIZE_DEFAULT); queueConf.put(Config.CHANNEL_SIZE, queueSize); 
queueConf.put(Config.TASK_JOB_ID, Ray.getRuntimeContext().getCurrentJobId().toString()); String channelType = (String) worker.getConfig() .getOrDefault(Config.CHANNEL_TYPE, Config.MEMORY_CHANNEL); queueConf.put(Config.CHANNEL_TYPE, channelType); ExecutionGraph executionGraph = worker.getExecutionGraph(); ExecutionNode executionNode = worker.getExecutionNode(); // writers writers = new HashMap<>(); List<ExecutionEdge> outputEdges = executionNode.getOutputEdges(); List<Collector> collectors = new ArrayList<>(); for (ExecutionEdge edge : outputEdges) { Map<String, ActorId> outputActorIds = new HashMap<>(); Map<Integer, RayActor<JobWorker>> taskId2Worker = executionGraph .getTaskId2WorkerByNodeId(edge.getTargetNodeId()); taskId2Worker.forEach((targetTaskId, targetActor) -> { String queueName = ChannelID.genIdStr(taskId, targetTaskId, executionGraph.getBuildTime()); outputActorIds.put(queueName, targetActor.getId()); }); if (!outputActorIds.isEmpty()) { List<String> channelIDs = new ArrayList<>(); List<ActorId> toActorIds = new ArrayList<>(); outputActorIds.forEach((k, v) -> { channelIDs.add(k); toActorIds.add(v); }); DataWriter writer = new DataWriter(channelIDs, toActorIds, queueConf); LOG.info("Create DataWriter succeed."); writers.put(edge, writer); Partition partition = edge.getPartition(); collectors.add(new OutputCollector(channelIDs, writer, partition)); } } // consumer List<ExecutionEdge> inputEdges = executionNode.getInputsEdges(); Map<String, ActorId> inputActorIds = new HashMap<>(); for (ExecutionEdge edge : inputEdges) { Map<Integer, RayActor<JobWorker>> taskId2Worker = executionGraph .getTaskId2WorkerByNodeId(edge.getSrcNodeId()); taskId2Worker.forEach((srcTaskId, srcActor) -> { String queueName = ChannelID.genIdStr(srcTaskId, taskId, executionGraph.getBuildTime()); inputActorIds.put(queueName, srcActor.getId()); }); } if (!inputActorIds.isEmpty()) { List<String> channelIDs = new ArrayList<>(); List<ActorId> fromActorIds = new ArrayList<>(); 
inputActorIds.forEach((k, v) -> { channelIDs.add(k); fromActorIds.add(v); }); LOG.info("Register queue consumer, queues {}.", channelIDs); reader = new DataReader(channelIDs, fromActorIds, queueConf); } RuntimeContext runtimeContext = new RayRuntimeContext( worker.getExecutionTask(), worker.getConfig(), executionNode.getParallelism()); processor.open(collectors, runtimeContext); Runtime.getRuntime().addShutdownHook(new Thread(() -> { try { // Make DataReader stop read data when MockQueue destructor gets called to avoid crash StreamTask.this.cancelTask(); } catch (Exception e) { e.printStackTrace(); } })); } protected abstract void init() throws Exception; protected abstract void cancelTask() throws Exception; public void start() { this.thread.start(); LOG.info("started {}-{}", this.getClass().getSimpleName(), taskId); } }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/test/java/org/ray/streaming/runtime/BaseUnitTest.java
Java
package org.ray.streaming.runtime;

import java.lang.reflect.Method;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;

/**
 * Base class for unit tests: toggles the process-wide UT flag around each test
 * class and logs test-case boundaries to make log inspection easier.
 */
public abstract class BaseUnitTest {

  private static final Logger LOG = LoggerFactory.getLogger(BaseUnitTest.class);

  /** Marks the process as running under unit tests before any test in the class runs. */
  @BeforeClass
  public void setUp() {
    TestHelper.setUTFlag();
  }

  /** Clears the unit-test marker after all tests in the class have finished. */
  @AfterClass
  public void tearDown() {
    TestHelper.clearUTFlag();
  }

  /** Logs the start of each test case. */
  @BeforeMethod
  public void testBegin(Method method) {
    // Parameterized logging (SLF4J idiom) instead of eager string concatenation.
    LOG.info(">>>>>>>>>>>>>>>>>>>> Test case: {} began >>>>>>>>>>>>>>>>>>>>", method.getName());
  }

  /** Logs the end of each test case. */
  @AfterMethod
  public void testEnd(Method method) {
    LOG.info(">>>>>>>>>>>>>>>>>>>> Test case: {} end >>>>>>>>>>>>>>>>>>", method.getName());
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/test/java/org/ray/streaming/runtime/TestHelper.java
Java
package org.ray.streaming.runtime; public class TestHelper { private static volatile boolean UT_FLAG = false; public static void setUTFlag() { UT_FLAG = true; } public static void clearUTFlag() { UT_FLAG = false; } public static boolean isUT() { return UT_FLAG; } }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/test/java/org/ray/streaming/runtime/config/ConfigTest.java
Java
package org.ray.streaming.runtime.config;

import java.util.HashMap;
import java.util.Map;
import org.aeonbits.owner.ConfigFactory;
import org.nustaq.serialization.FSTConfiguration;
import org.ray.streaming.runtime.BaseUnitTest;
import org.ray.streaming.runtime.config.global.CommonConfig;
import org.testng.Assert;
import org.testng.annotations.Test;

/**
 * Tests for the owner-based streaming configuration: default values, override
 * by custom map, map round-tripping, custom-key retention and FST serialization.
 */
public class ConfigTest extends BaseUnitTest {

  /** Default value is served when no override is given; a custom map overrides it. */
  @Test
  public void testBaseFunc() {
    // conf using
    CommonConfig commonConfig = ConfigFactory.create(CommonConfig.class);
    Assert.assertTrue(commonConfig.jobId().equals("default-job-id"));

    // override conf
    Map<String, String> customConf = new HashMap<>();
    customConf.put(CommonConfig.JOB_ID, "111");
    CommonConfig commonConfig2 = ConfigFactory.create(CommonConfig.class, customConf);
    Assert.assertTrue(commonConfig2.jobId().equals("111"));
  }

  /** Values fed into StreamingConfig are visible in the flattened whole-config map. */
  @Test
  public void testMapTransformation() {
    Map<String, String> conf = new HashMap<>();
    String testValue = "222";
    conf.put(CommonConfig.JOB_ID, testValue);

    StreamingConfig config = new StreamingConfig(conf);
    Map<String, String> wholeConfigMap = config.getMap();

    Assert.assertTrue(wholeConfigMap.get(CommonConfig.JOB_ID).equals(testValue));
  }

  /** Keys that are not declared in any Config interface are still preserved. */
  @Test
  public void testCustomConfKeeping() {
    Map<String, String> conf = new HashMap<>();
    String customKey = "test_key";
    String customValue = "test_value";
    conf.put(customKey, customValue);
    StreamingConfig config = new StreamingConfig(conf);
    Assert.assertEquals(config.getMap().get(customKey), customValue);
  }

  /** Config survives an FST serialize/deserialize round trip, defaults included. */
  @Test
  public void testSerialization() {
    Map<String, String> conf = new HashMap<>();
    String customKey = "test_key";
    String customValue = "test_value";
    conf.put(customKey, customValue);
    StreamingConfig config = new StreamingConfig(conf);

    FSTConfiguration fstConf = FSTConfiguration.createDefaultConfiguration();

    byte[] configBytes = fstConf.asByteArray(config);
    StreamingConfig deserializedConfig = (StreamingConfig) fstConf.asObject(configBytes);

    Assert.assertEquals(deserializedConfig.masterConfig.commonConfig.jobId(),
        "default-job-id");
    Assert.assertEquals(deserializedConfig.getMap().get(customKey), customValue);
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/test/java/org/ray/streaming/runtime/demo/WordCountTest.java
Java
package org.ray.streaming.runtime.demo;

import com.google.common.collect.ImmutableMap;
import org.ray.streaming.api.context.StreamingContext;
import org.ray.streaming.api.function.impl.FlatMapFunction;
import org.ray.streaming.api.function.impl.ReduceFunction;
import org.ray.streaming.api.function.impl.SinkFunction;
import org.ray.streaming.api.stream.DataStreamSource;
import org.ray.streaming.runtime.BaseUnitTest;
import org.ray.streaming.util.Config;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;

/**
 * End-to-end word-count demo over the in-memory channel: flatMap -> filter ->
 * keyBy -> reduce -> sink, with results collected into a static map.
 */
public class WordCountTest extends BaseUnitTest implements Serializable {

  private static final Logger LOGGER = LoggerFactory.getLogger(WordCountTest.class);

  // TODO(zhenxuanpan): this test only works in single-process mode, because we put
  //  results in this in-memory map.
  static Map<String, Integer> wordCount = new ConcurrentHashMap<>();

  @Test
  public void testWordCount() {
    StreamingContext streamingContext = StreamingContext.buildContext();
    Map<String, String> config = new HashMap<>();
    config.put(Config.STREAMING_BATCH_MAX_COUNT, "1");
    config.put(Config.CHANNEL_TYPE, Config.MEMORY_CHANNEL);
    streamingContext.withConfig(config);
    List<String> text = new ArrayList<>();
    text.add("hello world eagle eagle eagle");
    DataStreamSource<String> streamSource = DataStreamSource.buildSource(streamingContext, text);
    streamSource
        // Split each line into (word, 1) pairs.
        .flatMap((FlatMapFunction<String, WordAndCount>) (value, collector) -> {
          String[] records = value.split(" ");
          for (String record : records) {
            collector.collect(new WordAndCount(record, 1));
          }
        })
        // "world" is filtered out, so only "hello" and "eagle" reach the sink.
        .filter(pair -> !pair.word.contains("world"))
        .keyBy(pair -> pair.word)
        .reduce((ReduceFunction<WordAndCount>) (oldValue, newValue) ->
            new WordAndCount(oldValue.word, oldValue.count + newValue.count))
        .sink((SinkFunction<WordAndCount>)
            result -> wordCount.put(result.word, result.count));

    streamingContext.execute("testWordCount");

    // Sleep until the count for every word is computed.
    while (wordCount.size() < 2) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        LOGGER.warn("Got an exception while sleeping.", e);
      }
    }
    Assert.assertEquals(wordCount, ImmutableMap.of("eagle", 3, "hello", 1));
  }

  /** Immutable (word, count) pair used as the pipeline's record type. */
  private static class WordAndCount implements Serializable {

    public final String word;
    public final Integer count;

    public WordAndCount(String key, Integer count) {
      this.word = key;
      this.count = count;
    }
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/test/java/org/ray/streaming/runtime/schedule/TaskAssignerImplTest.java
Java
package org.ray.streaming.runtime.schedule;

import java.util.ArrayList;
import java.util.List;

import com.google.common.collect.Lists;
import org.ray.api.RayActor;
import org.ray.api.id.ActorId;
import org.ray.api.id.ObjectId;
import org.ray.runtime.actor.LocalModeRayActor;
import org.ray.streaming.api.context.StreamingContext;
import org.ray.streaming.api.partition.impl.RoundRobinPartition;
import org.ray.streaming.api.stream.DataStream;
import org.ray.streaming.api.stream.DataStreamSink;
import org.ray.streaming.api.stream.DataStreamSource;
import org.ray.streaming.runtime.BaseUnitTest;
import org.ray.streaming.runtime.core.graph.ExecutionEdge;
import org.ray.streaming.runtime.core.graph.ExecutionGraph;
import org.ray.streaming.runtime.core.graph.ExecutionNode;
import org.ray.streaming.runtime.core.graph.ExecutionNode.NodeType;
import org.ray.streaming.runtime.worker.JobWorker;
import org.ray.streaming.jobgraph.JobGraph;
import org.ray.streaming.jobgraph.JobGraphBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.Test;

/**
 * Verifies that TaskAssignerImpl maps a simple source->sink job graph onto a
 * two-node execution graph with the expected node types, task counts and
 * partition strategy on the connecting edge.
 */
public class TaskAssignerImplTest extends BaseUnitTest {

  private static final Logger LOGGER = LoggerFactory.getLogger(TaskAssignerImplTest.class);

  @Test
  public void testTaskAssignImpl() {
    JobGraph jobGraph = buildDataSyncPlan();

    // One local-mode actor per job vertex stands in for real workers.
    List<RayActor<JobWorker>> workers = new ArrayList<>();
    for (int i = 0; i < jobGraph.getJobVertexList().size(); i++) {
      workers.add(new LocalModeRayActor(ActorId.fromRandom(), ObjectId.fromRandom()));
    }

    TaskAssigner taskAssigner = new TaskAssignerImpl();
    ExecutionGraph executionGraph = taskAssigner.assign(jobGraph, workers);

    List<ExecutionNode> executionNodeList = executionGraph.getExecutionNodeList();

    Assert.assertEquals(executionNodeList.size(), 2);
    // Source node: one task, one outgoing edge using round-robin partitioning.
    ExecutionNode sourceNode = executionNodeList.get(0);
    Assert.assertEquals(sourceNode.getNodeType(), NodeType.SOURCE);
    Assert.assertEquals(sourceNode.getExecutionTasks().size(), 1);
    Assert.assertEquals(sourceNode.getOutputEdges().size(), 1);

    List<ExecutionEdge> sourceExecutionEdges = sourceNode.getOutputEdges();

    Assert.assertEquals(sourceExecutionEdges.size(), 1);
    ExecutionEdge source2Sink = sourceExecutionEdges.get(0);

    Assert.assertEquals(source2Sink.getPartition().getClass(), RoundRobinPartition.class);

    // Sink node: one task, no outgoing edges.
    ExecutionNode sinkNode = executionNodeList.get(1);
    Assert.assertEquals(sinkNode.getNodeType(), NodeType.SINK);
    Assert.assertEquals(sinkNode.getExecutionTasks().size(), 1);
    Assert.assertEquals(sinkNode.getOutputEdges().size(), 0);
  }

  /** Builds the minimal job graph under test: a list source piped into a log sink. */
  public JobGraph buildDataSyncPlan() {
    StreamingContext streamingContext = StreamingContext.buildContext();
    DataStream<String> dataStream = DataStreamSource.buildSource(streamingContext,
        Lists.newArrayList("a", "b", "c"));
    DataStreamSink streamSink = dataStream.sink(x -> LOGGER.info(x));
    JobGraphBuilder jobGraphBuilder = new JobGraphBuilder(Lists.newArrayList(streamSink));

    JobGraph jobGraph = jobGraphBuilder.build();
    return jobGraph;
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/test/java/org/ray/streaming/runtime/streamingqueue/StreamingQueueTest.java
Java
package org.ray.streaming.runtime.streamingqueue; import com.google.common.collect.ImmutableMap; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; import java.lang.management.ManagementFactory; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.ray.api.Ray; import org.ray.api.RayActor; import org.ray.api.options.ActorCreationOptions; import org.ray.api.options.ActorCreationOptions.Builder; import org.ray.streaming.api.context.StreamingContext; import org.ray.streaming.api.function.impl.FlatMapFunction; import org.ray.streaming.api.function.impl.ReduceFunction; import org.ray.streaming.api.stream.DataStreamSource; import org.ray.streaming.runtime.BaseUnitTest; import org.ray.streaming.runtime.transfer.ChannelID; import org.ray.streaming.runtime.util.EnvUtil; import org.ray.streaming.util.Config; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.Assert; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; public class StreamingQueueTest extends BaseUnitTest implements Serializable { private static Logger LOGGER = LoggerFactory.getLogger(StreamingQueueTest.class); static { EnvUtil.loadNativeLibraries(); } @org.testng.annotations.BeforeSuite public void suiteSetUp() throws Exception { LOGGER.info("Do set up"); String management = ManagementFactory.getRuntimeMXBean().getName(); String pid = management.split("@")[0]; LOGGER.info("StreamingQueueTest pid: {}", pid); LOGGER.info("java.library.path = {}", System.getProperty("java.library.path")); } @org.testng.annotations.AfterSuite public void suiteTearDown() throws Exception { LOGGER.warn("Do tear down"); } @BeforeClass public void setUp() { } 
@BeforeMethod void beforeMethod() { LOGGER.info("beforeTest"); Ray.shutdown(); System.setProperty("ray.resources", "CPU:4,RES-A:4"); System.setProperty("ray.raylet.config.num_workers_per_process_java", "1"); System.setProperty("ray.run-mode", "CLUSTER"); System.setProperty("ray.redirect-output", "true"); // ray init Ray.init(); } @AfterMethod void afterMethod() { LOGGER.info("afterTest"); Ray.shutdown(); System.clearProperty("ray.run-mode"); } @Test(timeOut = 3000000) public void testReaderWriter() { LOGGER.info("StreamingQueueTest.testReaderWriter run-mode: {}", System.getProperty("ray.run-mode")); Ray.shutdown(); System.setProperty("ray.resources", "CPU:4,RES-A:4"); System.setProperty("ray.raylet.config.num_workers_per_process_java", "1"); System.setProperty("ray.run-mode", "CLUSTER"); System.setProperty("ray.redirect-output", "true"); // ray init Ray.init(); ActorCreationOptions.Builder builder = new Builder(); RayActor<WriterWorker> writerActor = Ray.createActor(WriterWorker::new, "writer", builder.createActorCreationOptions()); RayActor<ReaderWorker> readerActor = Ray.createActor(ReaderWorker::new, "reader", builder.createActorCreationOptions()); LOGGER.info("call getName on writerActor: {}", Ray.call(WriterWorker::getName, writerActor).get()); LOGGER.info("call getName on readerActor: {}", Ray.call(ReaderWorker::getName, readerActor).get()); // LOGGER.info(Ray.call(WriterWorker::testCallReader, writerActor, readerActor).get()); List<String> outputQueueList = new ArrayList<>(); List<String> inputQueueList = new ArrayList<>(); int queueNum = 2; for (int i = 0; i < queueNum; ++i) { String qid = ChannelID.genRandomIdStr(); LOGGER.info("getRandomQueueId: {}", qid); inputQueueList.add(qid); outputQueueList.add(qid); readerActor.getId(); } final int msgCount = 100; Ray.call(ReaderWorker::init, readerActor, inputQueueList, writerActor, msgCount); try { Thread.sleep(1000); } catch (InterruptedException e) { e.printStackTrace(); } Ray.call(WriterWorker::init, 
writerActor, outputQueueList, readerActor, msgCount); long time = 0; while (time < 20000 && Ray.call(ReaderWorker::getTotalMsg, readerActor).get() < msgCount * queueNum) { try { Thread.sleep(1000); time += 1000; } catch (InterruptedException e) { e.printStackTrace(); } } Assert.assertEquals( Ray.call(ReaderWorker::getTotalMsg, readerActor).get().intValue(), msgCount * queueNum); } @Test(timeOut = 60000) public void testWordCount() { LOGGER.info("StreamingQueueTest.testWordCount run-mode: {}", System.getProperty("ray.run-mode")); String resultFile = "/tmp/org.ray.streaming.runtime.streamingqueue.testWordCount.txt"; deleteResultFile(resultFile); Map<String, Integer> wordCount = new ConcurrentHashMap<>(); StreamingContext streamingContext = StreamingContext.buildContext(); Map<String, String> config = new HashMap<>(); config.put(Config.STREAMING_BATCH_MAX_COUNT, "1"); config.put(Config.CHANNEL_TYPE, Config.NATIVE_CHANNEL); config.put(Config.CHANNEL_SIZE, "100000"); streamingContext.withConfig(config); List<String> text = new ArrayList<>(); text.add("hello world eagle eagle eagle"); DataStreamSource<String> streamSource = DataStreamSource.buildSource(streamingContext, text); streamSource .flatMap((FlatMapFunction<String, WordAndCount>) (value, collector) -> { String[] records = value.split(" "); for (String record : records) { collector.collect(new WordAndCount(record, 1)); } }) .keyBy(pair -> pair.word) .reduce((ReduceFunction<WordAndCount>) (oldValue, newValue) -> { LOGGER.info("reduce: {} {}", oldValue, newValue); return new WordAndCount(oldValue.word, oldValue.count + newValue.count); }) .sink(s -> { LOGGER.info("sink {} {}", s.word, s.count); wordCount.put(s.word, s.count); serializeResultToFile(resultFile, wordCount); }); streamingContext.execute("testWordCount"); Map<String, Integer> checkWordCount = (Map<String, Integer>) deserializeResultFromFile(resultFile); // Sleep until the count for every word is computed. 
while (checkWordCount == null || checkWordCount.size() < 3) { LOGGER.info("sleep"); try { Thread.sleep(1000); } catch (InterruptedException e) { LOGGER.warn("Got an exception while sleeping.", e); } checkWordCount = (Map<String, Integer>) deserializeResultFromFile(resultFile); } LOGGER.info("check"); Assert.assertEquals(checkWordCount, ImmutableMap.of("eagle", 3, "hello", 1, "world", 1)); } private void serializeResultToFile(String fileName, Object obj) { try { ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(fileName)); out.writeObject(obj); } catch (Exception e) { LOGGER.error(String.valueOf(e)); } } private Object deserializeResultFromFile(String fileName) { Map<String, Integer> checkWordCount = null; try { ObjectInputStream in = new ObjectInputStream(new FileInputStream(fileName)); checkWordCount = (Map<String, Integer>) in.readObject(); Assert.assertEquals(checkWordCount, ImmutableMap.of("eagle", 3, "hello", 1, "world", 1)); } catch (Exception e) { LOGGER.error(String.valueOf(e)); } return checkWordCount; } private static class WordAndCount implements Serializable { public final String word; public final Integer count; public WordAndCount(String key, Integer count) { this.word = key; this.count = count; } } private void deleteResultFile(String path) { File file = new File(path); file.deleteOnExit(); } }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/test/java/org/ray/streaming/runtime/streamingqueue/Worker.java
Java
package org.ray.streaming.runtime.streamingqueue;

import java.lang.management.ManagementFactory;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import org.ray.api.Ray;
import org.ray.api.RayActor;
import org.ray.api.annotation.RayRemote;
import org.ray.api.id.ActorId;
import org.ray.runtime.RayMultiWorkerNativeRuntime;
import org.ray.runtime.actor.NativeRayActor;
import org.ray.runtime.functionmanager.JavaFunctionDescriptor;
import org.ray.streaming.runtime.transfer.ChannelID;
import org.ray.streaming.runtime.transfer.DataMessage;
import org.ray.streaming.runtime.transfer.DataReader;
import org.ray.streaming.runtime.transfer.DataWriter;
import org.ray.streaming.runtime.transfer.TransferHandler;
import org.ray.streaming.util.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;

/**
 * Base actor for streaming-queue integration tests. The constructor registers a
 * {@link TransferHandler} with the native core worker so the four callback
 * methods below (identified by their JVM descriptors) receive transfer events.
 */
public class Worker {
  private static final Logger LOGGER = LoggerFactory.getLogger(Worker.class);

  protected TransferHandler transferHandler = null;

  public Worker() {
    transferHandler = new TransferHandler(((RayMultiWorkerNativeRuntime) Ray.internal())
        .getCurrentRuntime().getNativeCoreWorkerPointer(),
        new JavaFunctionDescriptor(Worker.class.getName(), "onWriterMessage", "([B)V"),
        new JavaFunctionDescriptor(Worker.class.getName(), "onWriterMessageSync", "([B)[B"),
        new JavaFunctionDescriptor(Worker.class.getName(), "onReaderMessage", "([B)V"),
        new JavaFunctionDescriptor(Worker.class.getName(), "onReaderMessageSync", "([B)[B"));
  }

  /** Async transfer callback for the reader side; delegates to the handler. */
  public void onReaderMessage(byte[] buffer) {
    transferHandler.onReaderMessage(buffer);
  }

  /** Sync (request/response) transfer callback for the reader side. */
  public byte[] onReaderMessageSync(byte[] buffer) {
    return transferHandler.onReaderMessageSync(buffer);
  }

  /** Async transfer callback for the writer side; delegates to the handler. */
  public void onWriterMessage(byte[] buffer) {
    transferHandler.onWriterMessage(buffer);
  }

  /** Sync (request/response) transfer callback for the writer side. */
  public byte[] onWriterMessageSync(byte[] buffer) {
    return transferHandler.onWriterMessageSync(buffer);
  }
}

/**
 * Reader-side test actor: builds a {@link DataReader} over the given queues and
 * consumes the expected number of messages on a background thread, validating
 * each message's size header and payload bytes.
 */
@RayRemote
class ReaderWorker extends Worker {
  private static final Logger LOGGER = LoggerFactory.getLogger(ReaderWorker.class);

  private String name = null;
  private List<String> inputQueueList = null;
  private List<ActorId> inputActorIds = new ArrayList<>();
  private DataReader dataReader = null;
  private long handler = 0;
  private RayActor peerActor = null;
  private int msgCount = 0;
  // Number of messages successfully consumed so far; read by the test driver.
  private int totalMsg = 0;

  public ReaderWorker(String name) {
    LOGGER.info("ReaderWorker constructor");
    this.name = name;
  }

  public String getName() {
    String management = ManagementFactory.getRuntimeMXBean().getName();
    String pid = management.split("@")[0];

    LOGGER.info("pid: {} name: {}", pid, name);
    return name;
  }

  public String testRayCall() {
    LOGGER.info("testRayCall called");
    return "testRayCall";
  }

  /**
   * Creates the DataReader and starts the consume loop on its own thread
   * (reads must not run on the RayCall thread — see comment below).
   */
  public boolean init(List<String> inputQueueList, RayActor peer, int msgCount) {

    this.inputQueueList = inputQueueList;
    this.peerActor = peer;
    this.msgCount = msgCount;

    LOGGER.info("ReaderWorker init");
    LOGGER.info("java.library.path = {}", System.getProperty("java.library.path"));

    // One peer actor id per input queue (all queues share the same peer here).
    for (String queue : this.inputQueueList) {
      inputActorIds.add(this.peerActor.getId());
      LOGGER.info("ReaderWorker actorId: {}", this.peerActor.getId());
    }

    Map<String, String> conf = new HashMap<>();

    conf.put(Config.CHANNEL_TYPE, Config.NATIVE_CHANNEL);
    conf.put(Config.CHANNEL_SIZE, "100000");
    conf.put(Config.STREAMING_JOB_NAME, "integrationTest1");
    dataReader = new DataReader(inputQueueList, inputActorIds, conf);

    // Should not GetBundle in RayCall thread
    Thread readThread = new Thread(Ray.wrapRunnable(new Runnable() {
      @Override
      public void run() {
        consume();
      }
    }));
    readThread.start();

    LOGGER.info("ReaderWorker init done");

    return true;
  }

  /**
   * Reads msgCount messages per input queue. Each message body starts with a
   * 4-byte size header that must equal the body's total byte count, followed by
   * payload bytes 0,1,2,... A null read (timeout) retries the same iteration.
   */
  public final void consume() {

    int checkPointId = 1;
    for (int i = 0; i < msgCount * inputQueueList.size(); ++i) {
      DataMessage dataMessage = dataReader.read(100);
      if (dataMessage == null) {
        LOGGER.error("dataMessage is null");
        i--;
        continue;
      }

      int bufferSize = dataMessage.body().remaining();
      int dataSize = dataMessage.body().getInt();

      // check size
      LOGGER.info("capacity {} bufferSize {} dataSize {}",
          dataMessage.body().capacity(), bufferSize, dataSize);
      Assert.assertEquals(bufferSize, dataSize);
      // NOTE(review): dataMessage is declared as DataMessage, so this
      // instanceof check is always true — presumably a leftover from a
      // multi-message-type protocol; confirm before removing.
      if (dataMessage instanceof DataMessage) {
        if (LOGGER.isInfoEnabled()) {
          LOGGER.info("{} : {}  message.", i, dataMessage.toString());
        }
        // check content
        for (int j = 0; j < dataSize - 4; ++j) {
          Assert.assertEquals(dataMessage.body().get(), (byte) j);
        }
      } else {
        LOGGER.error("unknown message type");
        Assert.fail();
      }

      totalMsg++;
    }

    LOGGER.info("ReaderWorker consume data done.");
  }

  void onQueueTransfer(long handler, byte[] buffer) {
  }

  // NOTE(review): consume() expects msgCount * inputQueueList.size() messages,
  // but this compares against msgCount alone — verify which total is intended.
  public boolean done() {
    return totalMsg == msgCount;
  }

  public int getTotalMsg() {
    return totalMsg;
  }
}

/**
 * Writer-side test actor: builds a {@link DataWriter} over the given queues and
 * produces size-prefixed random-length messages on a background thread.
 */
@RayRemote
class WriterWorker extends Worker {
  private static final Logger LOGGER = LoggerFactory.getLogger(WriterWorker.class);

  private String name = null;
  private List<String> outputQueueList = null;
  private List<ActorId> outputActorIds = new ArrayList<>();
  DataWriter dataWriter = null;
  RayActor peerActor = null;
  int msgCount = 0;

  public WriterWorker(String name) {
    this.name = name;
  }

  public String getName() {
    String management = ManagementFactory.getRuntimeMXBean().getName();
    String pid = management.split("@")[0];

    LOGGER.info("pid: {} name: {}", pid, name);
    return name;
  }

  public String testCallReader(RayActor readerActor) {
    String name = (String) Ray.call(ReaderWorker::getName, readerActor).get();
    LOGGER.info("testCallReader: {}", name);
    return name;
  }

  /**
   * Creates the DataWriter and starts the produce loop on its own thread.
   * Warms up the peer with a few plain Ray calls before opening channels.
   */
  public boolean init(List<String> outputQueueList, RayActor peer, int msgCount) {

    this.outputQueueList = outputQueueList;
    this.peerActor = peer;
    this.msgCount = msgCount;

    LOGGER.info("WriterWorker init:");

    // One peer actor id per output queue (all queues share the same peer here).
    for (String queue : this.outputQueueList) {
      outputActorIds.add(this.peerActor.getId());
      LOGGER.info("WriterWorker actorId: {}", this.peerActor.getId());
    }

    LOGGER.info("Peer isDirectActorCall: {}", ((NativeRayActor) peer).isDirectCallActor());
    int count = 3;
    while (count-- != 0) {
      Ray.call(ReaderWorker::testRayCall, peer).get();
    }

    try {
      Thread.sleep(2 * 1000);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
    Map<String, String> conf = new HashMap<>();

    conf.put(Config.CHANNEL_TYPE, Config.NATIVE_CHANNEL);
    conf.put(Config.CHANNEL_SIZE, "100000");
    conf.put(Config.STREAMING_JOB_NAME, "integrationTest1");

    dataWriter = new DataWriter(this.outputQueueList, this.outputActorIds, conf);
    Thread writerThread = new Thread(Ray.wrapRunnable(new Runnable() {
      @Override
      public void run() {
        produce();
      }
    }));
    writerThread.start();

    LOGGER.info("WriterWorker init done");
    return true;
  }

  /**
   * Writes msgCount messages to every output queue. Each message is a
   * random-size buffer carrying its own size as a leading int, then bytes
   * 0,1,2,... — the format consume() validates on the reader side.
   * NOTE(review): this hard-codes msgCount = 100, overriding the value passed
   * to init(); confirm that callers always expect 100.
   */
  public final void produce() {
    int checkPointId = 1;
    Random random = new Random();
    this.msgCount = 100;
    for (int i = 0; i < this.msgCount; ++i) {
      for (int j = 0; j < outputQueueList.size(); ++j) {
        LOGGER.info("WriterWorker produce");
        int dataSize = (random.nextInt(100)) + 10;
        if (LOGGER.isInfoEnabled()) {
          LOGGER.info("dataSize: {}", dataSize);
        }
        ByteBuffer bb = ByteBuffer.allocate(dataSize);
        bb.putInt(dataSize);
        for (int k = 0; k < dataSize - 4; ++k) {
          bb.put((byte) k);
        }

        // Reset position/limit so the writer sees the whole buffer.
        bb.clear();
        ChannelID qid = ChannelID.from(outputQueueList.get(j));
        dataWriter.write(qid, bb);
      }
    }
    try {
      Thread.sleep(20 * 1000);
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/streaming-runtime/src/test/java/org/ray/streaming/runtime/transfer/ChannelIDTest.java
Java
package org.ray.streaming.runtime.transfer;

import static org.testng.Assert.assertEquals;

import org.ray.streaming.runtime.BaseUnitTest;
import org.ray.streaming.runtime.util.EnvUtil;
import org.testng.annotations.Test;

/**
 * Tests for ChannelID string/byte conversions.
 */
public class ChannelIDTest extends BaseUnitTest {

  static {
    // ChannelID id generation is JNI-backed; load native libs first.
    EnvUtil.loadNativeLibraries();
  }

  /** A generated id string is hex (2 chars per byte) and round-trips to ID_LENGTH bytes. */
  @Test
  public void testIdStrToBytes() {
    String idStr = ChannelID.genRandomIdStr();
    assertEquals(idStr.length(), ChannelID.ID_LENGTH * 2);
    assertEquals(ChannelID.idStrToBytes(idStr).length, ChannelID.ID_LENGTH);
  }
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/java/test.sh
Shell
#!/usr/bin/env bash

# Builds and tests the Java streaming modules, then verifies maven install.

# Cause the script to exit if a single command fails.
set -e

# Show explicitly which commands are currently running.
set -x

ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd)

# Runs a TestNG invocation, tolerating exit code 2 (skipped tests).
run_testng() {
    # Initialize to 0: if the command succeeds, `|| exit_code=$?` never runs,
    # and an unset exit_code would make the test below expand to `[ -ne 2 ]`,
    # which is a syntax error that aborts the script under `set -e`.
    local exit_code=0
    "$@" || exit_code=$?
    # exit_code == 2 means there are skipped tests.
    if [ "$exit_code" -ne 2 ] && [ "$exit_code" -ne 0 ] ; then
        exit "$exit_code"
    fi
}

echo "build ray streaming"
bazel build //streaming/java:all

echo "Linting Java code with checkstyle."
bazel test //streaming/java:all --test_tag_filters="checkstyle" --build_tests_only

echo "Running streaming tests."
run_testng java -cp "$ROOT_DIR"/../../bazel-bin/streaming/java/all_streaming_tests_deploy.jar \
    org.testng.TestNG -d /tmp/ray_streaming_java_test_output "$ROOT_DIR"/testng.xml
echo "Streaming TestNG results"
cat /tmp/ray_streaming_java_test_output/testng-results.xml

echo "Testing maven install."
cd "$ROOT_DIR"/../../java
echo "build ray maven deps"
bazel build gen_maven_deps
echo "maven install ray"
mvn clean install -DskipTests
cd "$ROOT_DIR"
echo "maven install ray streaming"
mvn clean install -DskipTests
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/__init__.py
Python
# flake8: noqa # Ray should be imported before streaming import ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/_streaming.pyx
Cython
# Top-level Cython module for the _streaming extension; pulls in the transfer
# implementation via textual include.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3

include "includes/transfer.pxi"
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/communication.py
Python
import hashlib import logging import pickle import sys import time import ray import ray.streaming.runtime.transfer as transfer from ray.streaming.config import Config from ray.streaming.operator import PStrategy from ray.streaming.runtime.transfer import ChannelID logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) # Forward and broadcast stream partitioning strategies forward_broadcast_strategies = [PStrategy.Forward, PStrategy.Broadcast] # Used to choose output channel in case of hash-based shuffling def _hash(value): if isinstance(value, int): return value try: return int(hashlib.sha1(value.encode("utf-8")).hexdigest(), 16) except AttributeError: return int(hashlib.sha1(value).hexdigest(), 16) class DataChannel: """A data channel for actor-to-actor communication. Attributes: env (Environment): The environment the channel belongs to. src_operator_id (UUID): The id of the source operator of the channel. src_instance_index (int): The id of the source instance. dst_operator_id (UUID): The id of the destination operator of the channel. dst_instance_index (int): The id of the destination instance. """ def __init__(self, src_operator_id, src_instance_index, dst_operator_id, dst_instance_index, str_qid): self.src_operator_id = src_operator_id self.src_instance_index = src_instance_index self.dst_operator_id = dst_operator_id self.dst_instance_index = dst_instance_index self.str_qid = str_qid self.qid = ChannelID(str_qid) def __repr__(self): return "(src({},{}),dst({},{}), qid({}))".format( self.src_operator_id, self.src_instance_index, self.dst_operator_id, self.dst_instance_index, self.str_qid) _CLOSE_FLAG = b" " # Pulls and merges data from multiple input channels class DataInput: """An input gate of an operator instance. The input gate pulls records from all input channels in a round-robin fashion. Attributes: input_channels (list): The list of input channels. channel_index (int): The index of the next channel to pull from. 
max_index (int): The number of input channels. closed (list): A list of flags indicating whether an input channel has been marked as 'closed'. all_closed (bool): Denotes whether all input channels have been closed (True) or not (False). """ def __init__(self, env, channels): assert len(channels) > 0 self.env = env self.reader = None # created in `init` method self.input_channels = channels self.channel_index = 0 self.max_index = len(channels) # Tracks the channels that have been closed. qid: close status self.closed = {} def init(self): channels = [c.str_qid for c in self.input_channels] input_actors = [] for c in self.input_channels: actor = self.env.execution_graph.get_actor(c.src_operator_id, c.src_instance_index) input_actors.append(actor) logger.info("DataInput input_actors %s", input_actors) conf = { Config.TASK_JOB_ID: ray.runtime_context._get_runtime_context() .current_driver_id, Config.CHANNEL_TYPE: self.env.config.channel_type } self.reader = transfer.DataReader(channels, input_actors, conf) def pull(self): # pull from channel item = self.reader.read(100) while item is None: time.sleep(0.001) item = self.reader.read(100) msg_data = item.body() if msg_data == _CLOSE_FLAG: self.closed[item.channel_id] = True if len(self.closed) == len(self.input_channels): return None else: return self.pull() else: return pickle.loads(msg_data) def close(self): self.reader.stop() # Selects output channel(s) and pushes data class DataOutput: """An output gate of an operator instance. The output gate pushes records to output channels according to the user-defined partitioning scheme. Attributes: partitioning_schemes (dict): A mapping from destination operator ids to partitioning schemes (see: PScheme in operator.py). forward_channels (list): A list of channels to forward records. shuffle_channels (list(list)): A list of output channels to shuffle records grouped by destination operator. 
shuffle_key_channels (list(list)): A list of output channels to shuffle records by a key grouped by destination operator. shuffle_exists (bool): A flag indicating that there exists at least one shuffle_channel. shuffle_key_exists (bool): A flag indicating that there exists at least one shuffle_key_channel. """ def __init__(self, env, channels, partitioning_schemes): assert len(channels) > 0 self.env = env self.writer = None # created in `init` method self.channels = channels self.key_selector = None self.round_robin_indexes = [0] self.partitioning_schemes = partitioning_schemes # Prepare output -- collect channels by type self.forward_channels = [] # Forward and broadcast channels slots = sum(1 for scheme in self.partitioning_schemes.values() if scheme.strategy == PStrategy.RoundRobin) self.round_robin_channels = [[]] * slots # RoundRobin channels self.round_robin_indexes = [-1] * slots slots = sum(1 for scheme in self.partitioning_schemes.values() if scheme.strategy == PStrategy.Shuffle) # Flag used to avoid hashing when there is no shuffling self.shuffle_exists = slots > 0 self.shuffle_channels = [[]] * slots # Shuffle channels slots = sum(1 for scheme in self.partitioning_schemes.values() if scheme.strategy == PStrategy.ShuffleByKey) # Flag used to avoid hashing when there is no shuffling by key self.shuffle_key_exists = slots > 0 self.shuffle_key_channels = [[]] * slots # Shuffle by key channels # Distinct shuffle destinations shuffle_destinations = {} # Distinct shuffle by key destinations shuffle_by_key_destinations = {} # Distinct round robin destinations round_robin_destinations = {} index_1 = 0 index_2 = 0 index_3 = 0 for channel in channels: p_scheme = self.partitioning_schemes[channel.dst_operator_id] strategy = p_scheme.strategy if strategy in forward_broadcast_strategies: self.forward_channels.append(channel) elif strategy == PStrategy.Shuffle: pos = shuffle_destinations.setdefault(channel.dst_operator_id, index_1) 
self.shuffle_channels[pos].append(channel) if pos == index_1: index_1 += 1 elif strategy == PStrategy.ShuffleByKey: pos = shuffle_by_key_destinations.setdefault( channel.dst_operator_id, index_2) self.shuffle_key_channels[pos].append(channel) if pos == index_2: index_2 += 1 elif strategy == PStrategy.RoundRobin: pos = round_robin_destinations.setdefault( channel.dst_operator_id, index_3) self.round_robin_channels[pos].append(channel) if pos == index_3: index_3 += 1 else: # TODO (john): Add support for other strategies sys.exit("Unrecognized or unsupported partitioning strategy.") # A KeyedDataStream can only be shuffled by key assert not (self.shuffle_exists and self.shuffle_key_exists) def init(self): """init DataOutput which creates DataWriter""" channel_ids = [c.str_qid for c in self.channels] to_actors = [] for c in self.channels: actor = self.env.execution_graph.get_actor(c.dst_operator_id, c.dst_instance_index) to_actors.append(actor) logger.info("DataOutput output_actors %s", to_actors) conf = { Config.TASK_JOB_ID: ray.runtime_context._get_runtime_context() .current_driver_id, Config.CHANNEL_TYPE: self.env.config.channel_type } self.writer = transfer.DataWriter(channel_ids, to_actors, conf) def close(self): """Close the channel (True) by propagating _CLOSE_FLAG _CLOSE_FLAG is used as special type of record that is propagated from sources to sink to notify that the end of data in a stream. 
""" for c in self.channels: self.writer.write(c.qid, _CLOSE_FLAG) # must ensure DataWriter send None flag to peer actor self.writer.stop() def push(self, record): target_channels = [] # Forward record for c in self.forward_channels: logger.debug("[writer] Push record '{}' to channel {}".format( record, c)) target_channels.append(c) # Forward record index = 0 for channels in self.round_robin_channels: self.round_robin_indexes[index] += 1 if self.round_robin_indexes[index] == len(channels): self.round_robin_indexes[index] = 0 # Reset index c = channels[self.round_robin_indexes[index]] logger.debug("[writer] Push record '{}' to channel {}".format( record, c)) target_channels.append(c) index += 1 # Hash-based shuffling by key if self.shuffle_key_exists: key, _ = record h = _hash(key) for channels in self.shuffle_key_channels: num_instances = len(channels) # Downstream instances c = channels[h % num_instances] logger.debug( "[key_shuffle] Push record '{}' to channel {}".format( record, c)) target_channels.append(c) elif self.shuffle_exists: # Hash-based shuffling per destination h = _hash(record) for channels in self.shuffle_channels: num_instances = len(channels) # Downstream instances c = channels[h % num_instances] logger.debug("[shuffle] Push record '{}' to channel {}".format( record, c)) target_channels.append(c) else: # TODO (john): Handle rescaling pass msg_data = pickle.dumps(record) for c in target_channels: # send data to channel self.writer.write(c.qid, msg_data) def push_all(self, records): for record in records: self.push(record)
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/config.py
Python
class Config: STREAMING_JOB_NAME = "streaming.job.name" STREAMING_OP_NAME = "streaming.op_name" TASK_JOB_ID = "streaming.task_job_id" STREAMING_WORKER_NAME = "streaming.worker_name" # channel CHANNEL_TYPE = "channel_type" MEMORY_CHANNEL = "memory_channel" NATIVE_CHANNEL = "native_channel" CHANNEL_SIZE = "channel_size" CHANNEL_SIZE_DEFAULT = 10**8 IS_RECREATE = "streaming.is_recreate" # return from StreamingReader.getBundle if only empty message read in this # interval. TIMER_INTERVAL_MS = "timer_interval_ms" STREAMING_RING_BUFFER_CAPACITY = "streaming.ring_buffer_capacity" # write an empty message if there is no data to be written in this # interval. STREAMING_EMPTY_MESSAGE_INTERVAL = "streaming.empty_message_interval" # operator type OPERATOR_TYPE = "operator_type"
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/examples/key_selectors.py
Python
import argparse import logging import time import ray from ray.streaming.streaming import Environment logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser() parser.add_argument("--input-file", required=True, help="the input text file") # A class used to check attribute-based key selection class Record: def __init__(self, record): k, _ = record self.word = k self.record = record # Splits input line into words and outputs objects of type Record # each one consisting of a key (word) and a tuple (word,1) def splitter(line): records = [] words = line.split() for w in words: records.append(Record((w, 1))) return records # Receives an object of type Record and returns the actual tuple def as_tuple(record): return record.record if __name__ == "__main__": # Get program parameters args = parser.parse_args() input_file = str(args.input_file) ray.init() ray.register_custom_serializer(Record, use_dict=True) # A Ray streaming environment with the default configuration env = Environment() env.set_parallelism(2) # Each operator will be executed by two actors # 'key_by("word")' physically partitions the stream of records # based on the hash value of the 'word' attribute (see Record class above) # 'map(as_tuple)' maps a record of type Record into a tuple # 'sum(1)' sums the 2nd element of the tuple, i.e. the word count stream = env.read_text_file(input_file) \ .round_robin() \ .flat_map(splitter) \ .key_by("word") \ .map(as_tuple) \ .sum(1) \ .inspect(print) # Prints the content of the # stream to stdout start = time.time() env_handle = env.execute() # Deploys and executes the dataflow ray.get(env_handle) # Stay alive until execution finishes end = time.time() logger.info("Elapsed time: {} secs".format(end - start)) logger.debug("Output stream id: {}".format(stream.id))
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/examples/simple.py
Python
import argparse import logging import time import ray from ray.streaming.config import Config from ray.streaming.streaming import Environment, Conf logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser() parser.add_argument("--input-file", required=True, help="the input text file") # Test functions def splitter(line): return line.split() def filter_fn(word): if "f" in word: return True return False if __name__ == "__main__": args = parser.parse_args() ray.init(local_mode=False) # A Ray streaming environment with the default configuration env = Environment(config=Conf(channel_type=Config.NATIVE_CHANNEL)) # Stream represents the ouput of the filter and # can be forked into other dataflows stream = env.read_text_file(args.input_file) \ .shuffle() \ .flat_map(splitter) \ .set_parallelism(2) \ .filter(filter_fn) \ .set_parallelism(2) \ .inspect(lambda x: print("result", x)) # Prints the contents of the # stream to stdout start = time.time() env_handle = env.execute() ray.get(env_handle) # Stay alive until execution finishes env.wait_finish() end = time.time() logger.info("Elapsed time: {} secs".format(end - start)) logger.debug("Output stream id: {}".format(stream.id))
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/examples/wordcount.py
Python
import argparse import logging import time import ray import wikipedia from ray.streaming.streaming import Environment logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser() parser.add_argument( "--titles-file", required=True, help="the file containing the wikipedia titles to lookup") # A custom data source that reads articles from wikipedia # Custom data sources need to implement a get_next() method # that returns the next data element, in this case sentences class Wikipedia: def __init__(self, title_file): # Titles in this file will be as queries self.title_file = title_file # TODO (john): Handle possible exception here self.title_reader = iter(list(open(self.title_file, "r").readlines())) self.done = False self.article_done = True self.sentences = iter([]) # Returns next sentence from a wikipedia article def get_next(self): if self.done: return None # Source exhausted while True: if self.article_done: try: # Try next title next_title = next(self.title_reader) except StopIteration: self.done = True # Source exhausted return None # Get next article logger.debug("Next article: {}".format(next_title)) article = wikipedia.page(next_title).content # Split article in sentences self.sentences = iter(article.split(".")) self.article_done = False try: # Try next sentence sentence = next(self.sentences) logger.debug("Next sentence: {}".format(sentence)) return sentence except StopIteration: self.article_done = True # Splits input line into words and # outputs records of the form (word,1) def splitter(line): records = [] words = line.split() for w in words: records.append((w, 1)) return records # Returns the first attribute of a tuple def key_selector(tuple): return tuple[0] # Returns the second attribute of a tuple def attribute_selector(tuple): return tuple[1] if __name__ == "__main__": # Get program parameters args = parser.parse_args() titles_file = str(args.titles_file) ray.init() # A Ray streaming environment 
with the default configuration env = Environment() env.set_parallelism(2) # Each operator will be executed by two actors # The following dataflow is a simple streaming wordcount # with a rolling sum operator. # It reads articles from wikipedia, splits them in words, # shuffles words, and counts the occurences of each word. stream = env.source(Wikipedia(titles_file)) \ .round_robin() \ .flat_map(splitter) \ .key_by(key_selector) \ .sum(attribute_selector) \ .inspect(print) # Prints the contents of the # stream to stdout start = time.time() env_handle = env.execute() # Deploys and executes the dataflow ray.get(env_handle) # Stay alive until execution finishes env.wait_finish() end = time.time() logger.info("Elapsed time: {} secs".format(end - start)) logger.debug("Output stream id: {}".format(stream.id))
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/includes/libstreaming.pxd
Cython
# cython: profile=False # distutils: language = c++ # cython: embedsignature = True # cython: language_level = 3 # flake8: noqa from libc.stdint cimport * from libcpp cimport bool as c_bool from libcpp.memory cimport shared_ptr from libcpp.vector cimport vector as c_vector from libcpp.list cimport list as c_list from cpython cimport PyObject cimport cpython cdef inline object PyObject_to_object(PyObject* o): # Cast to "object" increments reference count cdef object result = <object> o cpython.Py_DECREF(result) return result from ray.includes.common cimport ( CLanguage, CRayObject, CRayStatus, CRayFunction ) from ray.includes.unique_ids cimport ( CActorID, CJobID, CTaskID, CObjectID, ) from ray.includes.libcoreworker cimport CCoreWorker cdef extern from "status.h" namespace "ray::streaming" nogil: cdef cppclass CStreamingStatus "ray::streaming::StreamingStatus": pass cdef CStreamingStatus StatusOK "ray::streaming::StreamingStatus::OK" cdef CStreamingStatus StatusReconstructTimeOut "ray::streaming::StreamingStatus::ReconstructTimeOut" cdef CStreamingStatus StatusQueueIdNotFound "ray::streaming::StreamingStatus::QueueIdNotFound" cdef CStreamingStatus StatusResubscribeFailed "ray::streaming::StreamingStatus::ResubscribeFailed" cdef CStreamingStatus StatusEmptyRingBuffer "ray::streaming::StreamingStatus::EmptyRingBuffer" cdef CStreamingStatus StatusFullChannel "ray::streaming::StreamingStatus::FullChannel" cdef CStreamingStatus StatusNoSuchItem "ray::streaming::StreamingStatus::NoSuchItem" cdef CStreamingStatus StatusInitQueueFailed "ray::streaming::StreamingStatus::InitQueueFailed" cdef CStreamingStatus StatusGetBundleTimeOut "ray::streaming::StreamingStatus::GetBundleTimeOut" cdef CStreamingStatus StatusSkipSendEmptyMessage "ray::streaming::StreamingStatus::SkipSendEmptyMessage" cdef CStreamingStatus StatusInterrupted "ray::streaming::StreamingStatus::Interrupted" cdef CStreamingStatus StatusWaitQueueTimeOut "ray::streaming::StreamingStatus::WaitQueueTimeOut" cdef 
CStreamingStatus StatusOutOfMemory "ray::streaming::StreamingStatus::OutOfMemory" cdef CStreamingStatus StatusInvalid "ray::streaming::StreamingStatus::Invalid" cdef CStreamingStatus StatusUnknownError "ray::streaming::StreamingStatus::UnknownError" cdef CStreamingStatus StatusTailStatus "ray::streaming::StreamingStatus::TailStatus" cdef cppclass CStreamingCommon "ray::streaming::StreamingCommon": void SetConfig(const uint8_t *, uint32_t size) cdef extern from "runtime_context.h" namespace "ray::streaming" nogil: cdef cppclass CRuntimeContext "ray::streaming::RuntimeContext": CRuntimeContext() void SetConfig(const uint8_t *data, uint32_t size) inline void MarkMockTest() inline c_bool IsMockTest() cdef extern from "message/message.h" namespace "ray::streaming" nogil: cdef cppclass CStreamingMessageType "ray::streaming::StreamingMessageType": pass cdef CStreamingMessageType MessageTypeBarrier "ray::streaming::StreamingMessageType::Barrier" cdef CStreamingMessageType MessageTypeMessage "ray::streaming::StreamingMessageType::Message" cdef cppclass CStreamingMessage "ray::streaming::StreamingMessage": inline uint8_t *RawData() const inline uint32_t GetDataSize() const inline CStreamingMessageType GetMessageType() const inline uint64_t GetMessageSeqId() const cdef extern from "message/message_bundle.h" namespace "ray::streaming" nogil: cdef cppclass CStreamingMessageBundleType "ray::streaming::StreamingMessageBundleType": pass cdef CStreamingMessageBundleType BundleTypeEmpty "ray::streaming::StreamingMessageBundleType::Empty" cdef CStreamingMessageBundleType BundleTypeBarrier "ray::streaming::StreamingMessageBundleType::Barrier" cdef CStreamingMessageBundleType BundleTypeBundle "ray::streaming::StreamingMessageBundleType::Bundle" cdef cppclass CStreamingMessageBundleMeta "ray::streaming::StreamingMessageBundleMeta": CStreamingMessageBundleMeta() inline uint64_t GetMessageBundleTs() const inline uint64_t GetLastMessageId() const inline uint32_t GetMessageListSize() const 
inline CStreamingMessageBundleType GetBundleType() const inline c_bool IsBarrier() inline c_bool IsBundle() ctypedef shared_ptr[CStreamingMessageBundleMeta] CStreamingMessageBundleMetaPtr uint32_t kMessageBundleHeaderSize "ray::streaming::kMessageBundleHeaderSize" cdef cppclass CStreamingMessageBundle "ray::streaming::StreamingMessageBundle"(CStreamingMessageBundleMeta): @staticmethod void GetMessageListFromRawData(const uint8_t *data, uint32_t size, uint32_t msg_nums, c_list[shared_ptr[CStreamingMessage]] &msg_list); cdef extern from "queue/queue_client.h" namespace "ray::streaming" nogil: cdef cppclass CReaderClient "ray::streaming::ReaderClient": CReaderClient(CCoreWorker *core_worker, CRayFunction &async_func, CRayFunction &sync_func) void OnReaderMessage(shared_ptr[CLocalMemoryBuffer] buffer); shared_ptr[CLocalMemoryBuffer] OnReaderMessageSync(shared_ptr[CLocalMemoryBuffer] buffer); cdef cppclass CWriterClient "ray::streaming::WriterClient": CWriterClient(CCoreWorker *core_worker, CRayFunction &async_func, CRayFunction &sync_func) void OnWriterMessage(shared_ptr[CLocalMemoryBuffer] buffer); shared_ptr[CLocalMemoryBuffer] OnWriterMessageSync(shared_ptr[CLocalMemoryBuffer] buffer); cdef extern from "data_reader.h" namespace "ray::streaming" nogil: cdef cppclass CDataBundle "ray::streaming::DataBundle": uint8_t *data uint32_t data_size CObjectID c_from "from" uint64_t seq_id CStreamingMessageBundleMetaPtr meta cdef cppclass CDataReader "ray::streaming::DataReader"(CStreamingCommon): CDataReader(shared_ptr[CRuntimeContext] &runtime_context) void Init(const c_vector[CObjectID] &input_ids, const c_vector[CActorID] &actor_ids, const c_vector[uint64_t] &seq_ids, const c_vector[uint64_t] &msg_ids, int64_t timer_interval); CStreamingStatus GetBundle(const uint32_t timeout_ms, shared_ptr[CDataBundle] &message) void Stop() cdef extern from "data_writer.h" namespace "ray::streaming" nogil: cdef cppclass CDataWriter "ray::streaming::DataWriter"(CStreamingCommon): 
CDataWriter(shared_ptr[CRuntimeContext] &runtime_context) CStreamingStatus Init(const c_vector[CObjectID] &channel_ids, const c_vector[CActorID] &actor_ids, const c_vector[uint64_t] &message_ids, const c_vector[uint64_t] &queue_size_vec); long WriteMessageToBufferRing( const CObjectID &q_id, uint8_t *data, uint32_t data_size) void Run() void Stop() cdef extern from "ray/common/buffer.h" nogil: cdef cppclass CLocalMemoryBuffer "ray::LocalMemoryBuffer": uint8_t *Data() const size_t Size() const
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/jobworker.py
Python
import logging import pickle import threading import ray import ray.streaming._streaming as _streaming from ray.streaming.config import Config from ray.function_manager import FunctionDescriptor from ray.streaming.communication import DataInput, DataOutput logger = logging.getLogger(__name__) @ray.remote class JobWorker: """A streaming job worker. Attributes: worker_id: The id of the instance. input_channels: The input gate that manages input channels of the instance (see: DataInput in communication.py). output_channels (DataOutput): The output gate that manages output channels of the instance (see: DataOutput in communication.py). the operator instance. """ def __init__(self, worker_id, operator, input_channels, output_channels): self.env = None self.worker_id = worker_id self.operator = operator processor_name = operator.processor_class.__name__ processor_instance = operator.processor_class(operator) self.processor_name = processor_name self.processor_instance = processor_instance self.input_channels = input_channels self.output_channels = output_channels self.input_gate = None self.output_gate = None self.reader_client = None self.writer_client = None def init(self, env): """init streaming actor""" env = pickle.loads(env) self.env = env logger.info("init operator instance %s", self.processor_name) if env.config.channel_type == Config.NATIVE_CHANNEL: core_worker = ray.worker.global_worker.core_worker reader_async_func = FunctionDescriptor( __name__, self.on_reader_message.__name__, self.__class__.__name__) reader_sync_func = FunctionDescriptor( __name__, self.on_reader_message_sync.__name__, self.__class__.__name__) self.reader_client = _streaming.ReaderClient( core_worker, reader_async_func, reader_sync_func) writer_async_func = FunctionDescriptor( __name__, self.on_writer_message.__name__, self.__class__.__name__) writer_sync_func = FunctionDescriptor( __name__, self.on_writer_message_sync.__name__, self.__class__.__name__) self.writer_client = 
_streaming.WriterClient( core_worker, writer_async_func, writer_sync_func) if len(self.input_channels) > 0: self.input_gate = DataInput(env, self.input_channels) self.input_gate.init() if len(self.output_channels) > 0: self.output_gate = DataOutput( env, self.output_channels, self.operator.partitioning_strategies) self.output_gate.init() logger.info("init operator instance %s succeed", self.processor_name) return True # Starts the actor def start(self): self.t = threading.Thread(target=self.run, daemon=True) self.t.start() actor_id = ray.worker.global_worker.actor_id logger.info("%s %s started, actor id %s", self.__class__.__name__, self.processor_name, actor_id) def run(self): logger.info("%s start running", self.processor_name) self.processor_instance.run(self.input_gate, self.output_gate) logger.info("%s finished running", self.processor_name) self.close() def close(self): if self.input_gate: self.input_gate.close() if self.output_gate: self.output_gate.close() def is_finished(self): return not self.t.is_alive() def on_reader_message(self, buffer: bytes): """used in direct call mode""" self.reader_client.on_reader_message(buffer) def on_reader_message_sync(self, buffer: bytes): """used in direct call mode""" if self.reader_client is None: return b" " * 4 # special flag to indicate this actor not ready result = self.reader_client.on_reader_message_sync(buffer) return result.to_pybytes() def on_writer_message(self, buffer: bytes): """used in direct call mode""" self.writer_client.on_writer_message(buffer) def on_writer_message_sync(self, buffer: bytes): """used in direct call mode""" if self.writer_client is None: return b" " * 4 # special flag to indicate this actor not ready result = self.writer_client.on_writer_message_sync(buffer) return result.to_pybytes()
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/operator.py
Python
import enum import logging import cloudpickle logger = logging.getLogger(__name__) logger.setLevel("DEBUG") # Stream partitioning schemes class PScheme: def __init__(self, strategy, partition_fn=None): self.strategy = strategy self.partition_fn = partition_fn def __repr__(self): return "({},{})".format(self.strategy, self.partition_fn) # Partitioning strategies class PStrategy(enum.Enum): Forward = 0 # Default Shuffle = 1 Rescale = 2 RoundRobin = 3 Broadcast = 4 Custom = 5 ShuffleByKey = 6 # ... # Operator types class OpType(enum.Enum): Source = 0 Map = 1 FlatMap = 2 Filter = 3 TimeWindow = 4 KeyBy = 5 Sink = 6 WindowJoin = 7 Inspect = 8 ReadTextFile = 9 Reduce = 10 Sum = 11 # ... # A logical dataflow operator class Operator: def __init__(self, id, op_type, processor_class, name="", logic=None, num_instances=1, other=None, state_actor=None): self.id = id self.type = op_type self.processor_class = processor_class self.name = name self._logic = cloudpickle.dumps(logic) # The operator's logic self.num_instances = num_instances # One partitioning strategy per downstream operator (default: forward) self.partitioning_strategies = {} self.other_args = other # Depends on the type of the operator self.state_actor = state_actor # Actor to query state # Sets the partitioning scheme for an output stream of the operator def _set_partition_strategy(self, stream_id, partitioning_scheme, dest_operator=None): self.partitioning_strategies[stream_id] = (partitioning_scheme, dest_operator) # Retrieves the partitioning scheme for the given # output stream of the operator # Returns None is no strategy has been defined for the particular stream def _get_partition_strategy(self, stream_id): return self.partitioning_strategies.get(stream_id) # Cleans metatada from all partitioning strategies that lack a # destination operator # Valid entries are re-organized as # 'destination operator id -> partitioning scheme' # Should be called only after the logical dataflow has been constructed def 
_clean(self): strategies = {} for _, v in self.partitioning_strategies.items(): strategy, destination_operator = v if destination_operator is not None: strategies.setdefault(destination_operator, strategy) self.partitioning_strategies = strategies def print(self): log = "Operator<\nID = {}\nName = {}\nprocessor_class = {}\n" log += "Logic = {}\nNumber_of_Instances = {}\n" log += "Partitioning_Scheme = {}\nOther_Args = {}>\n" logger.debug( log.format(self.id, self.name, self.processor_class, self.logic, self.num_instances, self.partitioning_strategies, self.other_args)) @property def logic(self): return cloudpickle.loads(self._logic)
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/processor.py
Python
import logging import sys import time import types logger = logging.getLogger(__name__) logger.setLevel("INFO") def _identity(element): return element class ReadTextFile: """A source operator instance that reads a text file line by line. Attributes: filepath (string): The path to the input file. """ def __init__(self, operator): self.filepath = operator.other_args # TODO (john): Handle possible exception here self.reader = open(self.filepath, "r") # Read input file line by line def run(self, input_gate, output_gate): while True: record = self.reader.readline() # Reader returns empty string ('') on EOF if not record: self.reader.close() return output_gate.push( record[:-1]) # Push after removing newline characters class Map: """A map operator instance that applies a user-defined stream transformation. A map produces exactly one output record for each record in the input stream. """ def __init__(self, operator): self.map_fn = operator.logic # Applies the mapper each record of the input stream(s) # and pushes resulting records to the output stream(s) def run(self, input_gate, output_gate): elements = 0 while True: record = input_gate.pull() if record is None: return output_gate.push(self.map_fn(record)) elements += 1 class FlatMap: """A map operator instance that applies a user-defined stream transformation. A flatmap produces one or more output records for each record in the input stream. Attributes: flatmap_fn (function): The user-defined function. """ def __init__(self, operator): self.flatmap_fn = operator.logic # Applies the splitter to the records of the input stream(s) # and pushes resulting records to the output stream(s) def run(self, input_gate, output_gate): while True: record = input_gate.pull() if record is None: return output_gate.push_all(self.flatmap_fn(record)) class Filter: """A filter operator instance that applies a user-defined filter to each record of the stream. Output records are those that pass the filter, i.e. 
those for which the filter function returns True. Attributes: filter_fn (function): The user-defined boolean function. """ def __init__(self, operator): self.filter_fn = operator.logic # Applies the filter to the records of the input stream(s) # and pushes resulting records to the output stream(s) def run(self, input_gate, output_gate): while True: record = input_gate.pull() if record is None: return if self.filter_fn(record): output_gate.push(record) class Inspect: """A inspect operator instance that inspects the content of the stream. Inspect is useful for printing the records in the stream. """ def __init__(self, operator): self.inspect_fn = operator.logic def run(self, input_gate, output_gate): # Applies the inspect logic (e.g. print) to the records of # the input stream(s) # and leaves stream unaffected by simply pushing the records to # the output stream(s) while True: record = input_gate.pull() if record is None: return if output_gate: output_gate.push(record) self.inspect_fn(record) class Reduce: """A reduce operator instance that combines a new value for a key with the last reduced one according to a user-defined logic. """ def __init__(self, operator): self.reduce_fn = operator.logic # Set the attribute selector self.attribute_selector = operator.other_args if self.attribute_selector is None: self.attribute_selector = _identity elif isinstance(self.attribute_selector, int): self.key_index = self.attribute_selector self.attribute_selector =\ lambda record: record[self.attribute_selector] elif isinstance(self.attribute_selector, str): self.attribute_selector =\ lambda record: vars(record)[self.attribute_selector] elif not isinstance(self.attribute_selector, types.FunctionType): sys.exit("Unrecognized or unsupported key selector.") self.state = {} # key -> value # Combines the input value for a key with the last reduced # value for that key to produce a new value. 
# Outputs the result as (key,new value) def run(self, input_gate, output_gate): while True: record = input_gate.pull() if record is None: return key, rest = record new_value = self.attribute_selector(rest) # TODO (john): Is there a way to update state with # a single dictionary lookup? try: old_value = self.state[key] new_value = self.reduce_fn(old_value, new_value) self.state[key] = new_value except KeyError: # Key does not exist in state self.state.setdefault(key, new_value) output_gate.push((key, new_value)) # Returns the state of the actor def get_state(self): return self.state class KeyBy: """A key_by operator instance that physically partitions the stream based on a key. """ def __init__(self, operator): # Set the key selector self.key_selector = operator.other_args if isinstance(self.key_selector, int): self.key_selector = lambda r: r[self.key_selector] elif isinstance(self.key_selector, str): self.key_selector = lambda record: vars(record)[self.key_selector] elif not isinstance(self.key_selector, types.FunctionType): sys.exit("Unrecognized or unsupported key selector.") # The actual partitioning is done by the output gate def run(self, input_gate, output_gate): while True: record = input_gate.pull() if record is None: return key = self.key_selector(record) output_gate.push((key, record)) # A custom source actor class Source: def __init__(self, operator): # The user-defined source with a get_next() method self.source = operator.logic # Starts the source by calling get_next() repeatedly def run(self, input_gate, output_gate): start = time.time() elements = 0 while True: record = self.source.get_next() if not record: logger.debug("[writer] puts per second: {}".format( elements / (time.time() - start))) return output_gate.push(record) elements += 1
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/runtime/transfer.py
Python
import logging
import random
from queue import Queue
from typing import List

import ray
import ray.streaming._streaming as _streaming
import ray.streaming.generated.streaming_pb2 as streaming_pb
from ray.actor import ActorHandle, ActorID
from ray.streaming.config import Config

CHANNEL_ID_LEN = 20


class ChannelID:
    """Identifies a transfer channel between an upstream worker and a
    downstream worker.
    """

    def __init__(self, channel_id_str: str):
        """
        Args:
            channel_id_str: string representation of channel id
        """
        self.channel_id_str = channel_id_str
        self.object_qid = ray.ObjectID(
            channel_id_str_to_bytes(channel_id_str))

    def __eq__(self, other):
        if other is None:
            return False
        if type(other) is ChannelID:
            return self.channel_id_str == other.channel_id_str
        else:
            return False

    def __hash__(self):
        return hash(self.channel_id_str)

    def __repr__(self):
        return self.channel_id_str

    @staticmethod
    def gen_random_id():
        """Generate a random channel id string"""
        # 40 hex characters ('A'-'F') -> 20 bytes when parsed
        res = ""
        for _ in range(CHANNEL_ID_LEN * 2):
            res += str(chr(random.randint(0, 5) + ord("A")))
        return res

    @staticmethod
    def gen_id(from_index, to_index, ts):
        """Generate channel id, which is 20 character"""
        channel_id = bytearray(20)
        # bytes 8..11 hold the timestamp (big-endian, low 4 bytes)
        for i in range(11, 7, -1):
            channel_id[i] = ts & 0xff
            ts >>= 8
        # bytes 16..19 hold the from/to task indices (16 bits each)
        channel_id[16] = (from_index & 0xffff) >> 8
        channel_id[17] = (from_index & 0xff)
        channel_id[18] = (to_index & 0xffff) >> 8
        channel_id[19] = (to_index & 0xff)
        return channel_bytes_to_str(bytes(channel_id))


def channel_id_str_to_bytes(channel_id_str):
    """
    Args:
        channel_id_str: string representation of channel id

    Returns:
        bytes representation of channel id
    """
    assert type(channel_id_str) in [str, bytes]
    if isinstance(channel_id_str, bytes):
        return channel_id_str
    qid_bytes = bytes.fromhex(channel_id_str)
    assert len(qid_bytes) == CHANNEL_ID_LEN
    return qid_bytes


def channel_bytes_to_str(id_bytes):
    """
    Args:
        id_bytes: bytes representation of channel id

    Returns:
        string representation of channel id
    """
    assert type(id_bytes) in [str, bytes]
    if isinstance(id_bytes, str):
        return id_bytes
    return bytes.hex(id_bytes)


class DataMessage:
    """DataMessage represents data between upstream and downstream
    operator.
    """

    def __init__(self,
                 body,
                 timestamp,
                 channel_id,
                 message_id_,
                 is_empty_message=False):
        self.__body = body
        self.__timestamp = timestamp
        self.__channel_id = channel_id
        self.__message_id = message_id_
        self.__is_empty_message = is_empty_message

    def __len__(self):
        return len(self.__body)

    def body(self):
        """Message data"""
        return self.__body

    def timestamp(self):
        """Get timestamp when item is written by upstream DataWriter
        """
        return self.__timestamp

    def channel_id(self):
        """Get string id of channel where data is coming from
        """
        return self.__channel_id

    def is_empty_message(self):
        """Whether this message is an empty message.

        Upstream DataWriter will send an empty message when there is no
        data in the specified interval.
        """
        return self.__is_empty_message

    @property
    def message_id(self):
        return self.__message_id


logger = logging.getLogger(__name__)


class DataWriter:
    """Data Writer is a wrapper of streaming c++ DataWriter, which sends
    data to downstream workers.
    """

    def __init__(self, output_channels, to_actors: List[ActorHandle],
                 conf: dict):
        """Get DataWriter of output channels

        Args:
            output_channels: output channels ids
            to_actors: downstream output actors

        Returns:
            DataWriter
        """
        assert len(output_channels) > 0
        py_output_channels = [
            channel_id_str_to_bytes(qid_str) for qid_str in output_channels
        ]
        output_actor_ids: List[ActorID] = [
            handle._ray_actor_id for handle in to_actors
        ]
        channel_size = conf.get(Config.CHANNEL_SIZE,
                                Config.CHANNEL_SIZE_DEFAULT)
        py_msg_ids = [0 for _ in range(len(output_channels))]
        config_bytes = _to_native_conf(conf)
        is_mock = conf[Config.CHANNEL_TYPE] == Config.MEMORY_CHANNEL
        self.writer = _streaming.DataWriter.create(
            py_output_channels, output_actor_ids, channel_size, py_msg_ids,
            config_bytes, is_mock)
        logger.info("create DataWriter succeed")

    def write(self, channel_id: ChannelID, item: bytes):
        """Write data into native channel

        Args:
            channel_id: channel id
            item: bytes data

        Returns:
            msg_id
        """
        assert type(item) == bytes
        msg_id = self.writer.write(channel_id.object_qid, item)
        return msg_id

    def stop(self):
        logger.info("stopping channel writer.")
        self.writer.stop()
        # destruct DataWriter
        self.writer = None

    def close(self):
        logger.info("closing channel writer.")


class DataReader:
    """Data Reader is wrapper of streaming c++ DataReader, which reads
    data from channels of upstream workers.
    """

    def __init__(self, input_channels: List, from_actors: List[ActorHandle],
                 conf: dict):
        """Get DataReader of input channels

        Args:
            input_channels: input channels
            from_actors: upstream input actors

        Returns:
            DataReader
        """
        assert len(input_channels) > 0
        py_input_channels = [
            channel_id_str_to_bytes(qid_str) for qid_str in input_channels
        ]
        input_actor_ids: List[ActorID] = [
            handle._ray_actor_id for handle in from_actors
        ]
        py_seq_ids = [0 for _ in range(len(input_channels))]
        py_msg_ids = [0 for _ in range(len(input_channels))]
        timer_interval = int(conf.get(Config.TIMER_INTERVAL_MS, -1))
        is_recreate = bool(conf.get(Config.IS_RECREATE, False))
        config_bytes = _to_native_conf(conf)
        self.__queue = Queue(10000)
        is_mock = conf[Config.CHANNEL_TYPE] == Config.MEMORY_CHANNEL
        self.reader = _streaming.DataReader.create(
            py_input_channels, input_actor_ids, py_seq_ids, py_msg_ids,
            timer_interval, is_recreate, config_bytes, is_mock)
        logger.info("create DataReader succeed")

    def read(self, timeout_millis):
        """Read data from channel

        Args:
            timeout_millis: timeout millis when there is no data in
                channel for this duration

        Returns:
            channel item
        """
        # Refill the local buffer from the native reader only when empty
        if self.__queue.empty():
            msgs = self.reader.read(timeout_millis)
            for msg in msgs:
                msg_bytes, msg_id, timestamp, qid_bytes = msg
                data_msg = DataMessage(msg_bytes, timestamp,
                                       channel_bytes_to_str(qid_bytes),
                                       msg_id)
                self.__queue.put(data_msg)
        if self.__queue.empty():
            return None
        return self.__queue.get()

    def stop(self):
        logger.info("stopping Data Reader.")
        self.reader.stop()
        # destruct DataReader
        self.reader = None

    def close(self):
        logger.info("closing Data Reader.")


def _to_native_conf(conf):
    # Translate the Python conf dict into the protobuf config consumed
    # by the native (C++) transfer layer.
    config = streaming_pb.StreamingConfig()
    if Config.STREAMING_JOB_NAME in conf:
        config.job_name = conf[Config.STREAMING_JOB_NAME]
    if Config.TASK_JOB_ID in conf:
        job_id = conf[Config.TASK_JOB_ID]
        config.task_job_id = job_id.hex()
    if Config.STREAMING_WORKER_NAME in conf:
        config.worker_name = conf[Config.STREAMING_WORKER_NAME]
    if Config.STREAMING_OP_NAME in conf:
        config.op_name = conf[Config.STREAMING_OP_NAME]
    # TODO set operator type
    if Config.STREAMING_RING_BUFFER_CAPACITY in conf:
        config.ring_buffer_capacity = \
            conf[Config.STREAMING_RING_BUFFER_CAPACITY]
    if Config.STREAMING_EMPTY_MESSAGE_INTERVAL in conf:
        config.empty_message_interval = \
            conf[Config.STREAMING_EMPTY_MESSAGE_INTERVAL]
    logger.info("conf: %s", str(config))
    return config.SerializeToString()


class ChannelInitException(Exception):
    def __init__(self, msg, abnormal_channels):
        self.abnormal_channels = abnormal_channels
        self.msg = msg


class ChannelInterruptException(Exception):
    def __init__(self, msg=None):
        self.msg = msg
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/streaming.py
Python
import logging
import pickle
import sys
import time

import networkx as nx

import ray
import ray.streaming.processor as processor
import ray.streaming.runtime.transfer as transfer
from ray.streaming.communication import DataChannel
from ray.streaming.config import Config
from ray.streaming.jobworker import JobWorker
from ray.streaming.operator import Operator, OpType
from ray.streaming.operator import PScheme, PStrategy

logger = logging.getLogger(__name__)
logger.setLevel("INFO")


# Rolling sum's logic
def _sum(value_1, value_2):
    return value_1 + value_2


# Partitioning strategies that require all-to-all instance communication
all_to_all_strategies = [
    PStrategy.Shuffle, PStrategy.ShuffleByKey, PStrategy.Broadcast,
    PStrategy.RoundRobin
]


class Conf:
    """Environment configuration.

    This class includes all information about the configuration of the
    streaming environment.
    """

    def __init__(self, parallelism=1, channel_type=Config.MEMORY_CHANNEL):
        self.parallelism = parallelism
        self.channel_type = channel_type
        # ...


class ExecutionGraph:
    def __init__(self, env):
        self.env = env
        self.physical_topo = nx.DiGraph()  # DAG
        # Handles to all actors in the physical dataflow
        self.actor_handles = []
        # (op_id, op_instance_index) -> ActorID
        self.actors_map = {}
        # execution graph build time: milliseconds since epoch
        self.build_time = 0
        self.task_id_counter = 0
        self.task_ids = {}
        self.input_channels = {}  # operator id -> input channels
        self.output_channels = {}  # operator id -> output channels

    # Constructs and deploys a Ray actor of a specific type
    # TODO (john): Actor placement information should be specified in
    # the environment's configuration
    def __generate_actor(self, instance_index, operator, input_channels,
                         output_channels):
        """Generates an actor that will execute a particular instance
        of the logical operator.

        Attributes:
            instance_index: The index of the instance the actor will
                execute.
            operator: The metadata of the logical operator.
            input_channels: The input channels of the instance.
            output_channels: The output channels of the instance.
        """
        worker_id = (operator.id, instance_index)
        # Record the physical dataflow graph (for debugging purposes)
        self.__add_channel(worker_id, output_channels)
        # Note direct_call only support pass by value
        return JobWorker._remote(
            args=[worker_id, operator, input_channels, output_channels],
            is_direct_call=True)

    # Constructs and deploys a Ray actor for each instance of
    # the given operator
    def __generate_actors(self, operator, upstream_channels,
                          downstream_channels):
        """Generates one actor for each instance of the given logical
        operator.

        Attributes:
            operator (Operator): The logical operator metadata.
            upstream_channels (list): A list of all upstream channels
                for all instances of the operator.
            downstream_channels (list): A list of all downstream
                channels for all instances of the operator.
        """
        num_instances = operator.num_instances
        logger.info("Generating {} actors of type {}...".format(
            num_instances, operator.type))
        handles = []
        for i in range(num_instances):
            # Collect input and output channels for the particular
            # instance
            ip = [c for c in upstream_channels if c.dst_instance_index == i]
            op = [
                c for c in downstream_channels if c.src_instance_index == i
            ]
            log = "Constructed {} input and {} output channels "
            log += "for the {}-th instance of the {} operator."
            logger.debug(log.format(len(ip), len(op), i, operator.type))
            handle = self.__generate_actor(i, operator, ip, op)
            if handle:
                handles.append(handle)
                self.actors_map[(operator.id, i)] = handle
        return handles

    # Adds a channel/edge to the physical dataflow graph
    def __add_channel(self, actor_id, output_channels):
        for c in output_channels:
            dest_actor_id = (c.dst_operator_id, c.dst_instance_index)
            self.physical_topo.add_edge(actor_id, dest_actor_id)

    # Generates all required data channels between an operator
    # and its downstream operators
    def _generate_channels(self, operator):
        """Generates all output data channels
        (see: DataChannel in communication.py) for all instances of the
        given logical operator.

        The function constructs one data channel for each pair of
        communicating operator instances (instance_1, instance_2), where
        instance_1 is an instance of the given operator and instance_2
        is an instance of a direct downstream operator.

        The number of total channels generated depends on the
        partitioning strategy specified by the user.
        """
        channels = {}  # destination operator id -> channels
        strategies = operator.partitioning_strategies
        for dst_operator, p_scheme in strategies.items():
            num_dest_instances = self.env.operators[
                dst_operator].num_instances
            entry = channels.setdefault(dst_operator, [])
            if p_scheme.strategy == PStrategy.Forward:
                for i in range(operator.num_instances):
                    # ID of destination instance to connect
                    id = i % num_dest_instances
                    qid = self._gen_str_qid(operator.id, i, dst_operator,
                                            id)
                    c = DataChannel(operator.id, i, dst_operator, id, qid)
                    entry.append(c)
            elif p_scheme.strategy in all_to_all_strategies:
                for i in range(operator.num_instances):
                    for j in range(num_dest_instances):
                        qid = self._gen_str_qid(operator.id, i,
                                                dst_operator, j)
                        c = DataChannel(operator.id, i, dst_operator, j,
                                        qid)
                        entry.append(c)
            else:
                # TODO (john): Add support for other partitioning
                # strategies
                sys.exit("Unrecognized or unsupported partitioning "
                         "strategy.")
        return channels

    def _gen_str_qid(self, src_operator_id, src_instance_index,
                     dst_operator_id, dst_instance_index):
        # Channel id is derived from the two endpoint task ids plus the
        # graph build time.
        from_task_id = self.env.execution_graph.get_task_id(
            src_operator_id, src_instance_index)
        to_task_id = self.env.execution_graph.get_task_id(
            dst_operator_id, dst_instance_index)
        return transfer.ChannelID.gen_id(from_task_id, to_task_id,
                                         self.build_time)

    def _gen_task_id(self):
        task_id = self.task_id_counter
        self.task_id_counter += 1
        return task_id

    def get_task_id(self, op_id, op_instance_id):
        return self.task_ids[(op_id, op_instance_id)]

    def get_actor(self, op_id, op_instance_id):
        return self.actors_map[(op_id, op_instance_id)]

    # Prints the physical dataflow graph
    def print_physical_graph(self):
        logger.info("===================================")
        logger.info("======Physical Dataflow Graph======")
        logger.info("===================================")
        # Print all data channels between operator instances
        log = "(Source Operator ID,Source Operator Name,Source Instance ID)"
        log += " --> "
        log += "(Destination Operator ID,Destination Operator Name,"
        log += "Destination Instance ID)"
        logger.info(log)
        for src_actor_id, dst_actor_id in self.physical_topo.edges:
            src_operator_id, src_instance_index = src_actor_id
            dst_operator_id, dst_instance_index = dst_actor_id
            logger.info("({},{},{}) --> ({},{},{})".format(
                src_operator_id, self.env.operators[src_operator_id].name,
                src_instance_index, dst_operator_id,
                self.env.operators[dst_operator_id].name,
                dst_instance_index))

    def build_graph(self):
        self.build_channels()
        # to support cyclic reference serialization
        try:
            ray.register_custom_serializer(Environment, use_pickle=True)
            ray.register_custom_serializer(ExecutionGraph, use_pickle=True)
            ray.register_custom_serializer(OpType, use_pickle=True)
            ray.register_custom_serializer(PStrategy, use_pickle=True)
        except Exception:
            # local mode can't use pickle
            pass
        # Each operator instance is implemented as a Ray actor.
        # Actors are deployed in topological order, as we traverse the
        # logical dataflow from sources to sinks.
        for node in nx.topological_sort(self.env.logical_topo):
            operator = self.env.operators[node]
            # Instantiate Ray actors
            handles = self.__generate_actors(
                operator, self.input_channels.get(node, []),
                self.output_channels.get(node, []))
            if handles:
                self.actor_handles.extend(handles)

    def build_channels(self):
        self.build_time = int(time.time() * 1000)
        # gen auto-incremented unique task id for every operator instance
        for node in nx.topological_sort(self.env.logical_topo):
            operator = self.env.operators[node]
            for i in range(operator.num_instances):
                operator_instance_id = (operator.id, i)
                self.task_ids[operator_instance_id] = self._gen_task_id()
        channels = {}
        for node in nx.topological_sort(self.env.logical_topo):
            operator = self.env.operators[node]
            # Generate downstream data channels
            downstream_channels = self._generate_channels(operator)
            channels[node] = downstream_channels
        # op_id -> channels
        input_channels = {}
        output_channels = {}
        for op_id, all_downstream_channels in channels.items():
            for dst_op_channels in all_downstream_channels.values():
                for c in dst_op_channels:
                    dst = input_channels.setdefault(c.dst_operator_id, [])
                    dst.append(c)
                    src = output_channels.setdefault(c.src_operator_id, [])
                    src.append(c)
        self.input_channels = input_channels
        self.output_channels = output_channels


class Environment:
    """A streaming environment.

    This class is responsible for constructing the logical and the
    physical dataflow.

    Attributes:
        logical_topo (DiGraph): The user-defined logical topology in
            NetworkX DiGRaph format. (See: https://networkx.github.io)
        physical_topo (DiGraph): The physical topology in NetworkX
            DiGRaph format. The physical dataflow is constructed by the
            environment based on logical_topo.
        operators (dict): A mapping from operator ids to operator
            metadata (See: Operator in operator.py).
        config (Config): The environment's configuration.
        topo_cleaned (bool): A flag that indicates whether the logical
            topology is garbage collected (True) or not (False).
        actor_handles (list): A list of all Ray actor handles that
            execute the streaming dataflow.
    """

    def __init__(self, config=Conf()):
        self.logical_topo = nx.DiGraph()  # DAG
        self.operators = {}  # operator id --> operator object
        self.config = config  # Environment's configuration
        self.topo_cleaned = False
        self.operator_id_counter = 0
        self.execution_graph = None  # set when executed

    def gen_operator_id(self):
        op_id = self.operator_id_counter
        self.operator_id_counter += 1
        return op_id

    # An edge denotes a flow of data between logical operators and may
    # correspond to multiple data channels in the physical dataflow
    def _add_edge(self, source, destination):
        self.logical_topo.add_edge(source, destination)

    # Cleans the logical dataflow graph to construct and
    # deploy the physical dataflow
    def _collect_garbage(self):
        if self.topo_cleaned is True:
            return
        for node in self.logical_topo:
            self.operators[node]._clean()
        self.topo_cleaned = True

    # Sets the level of parallelism for a registered operator
    # Overwrites the environment parallelism (if set)
    def _set_parallelism(self, operator_id, level_of_parallelism):
        self.operators[operator_id].num_instances = level_of_parallelism

    # Sets the same level of parallelism for all operators in the
    # environment
    def set_parallelism(self, parallelism):
        self.config.parallelism = parallelism

    # Creates and registers a user-defined data source
    # TODO (john): There should be different types of sources, e.g.
    # sources reading from Kafka, text files, etc.
    # TODO (john): Handle case where environment parallelism is set
    def source(self, source):
        source_id = self.gen_operator_id()
        source_stream = DataStream(self, source_id)
        self.operators[source_id] = Operator(
            source_id, OpType.Source, processor.Source, "Source",
            logic=source)
        return source_stream

    # Creates and registers a new data source that reads a text file
    # line by line
    # TODO (john): There should be different types of sources,
    # e.g. sources reading from Kafka, text files, etc.
    # TODO (john): Handle case where environment parallelism is set
    def read_text_file(self, filepath):
        source_id = self.gen_operator_id()
        source_stream = DataStream(self, source_id)
        self.operators[source_id] = Operator(
            source_id,
            OpType.ReadTextFile,
            processor.ReadTextFile,
            "Read Text File",
            other=filepath)
        return source_stream

    # Constructs and deploys the physical dataflow
    def execute(self):
        """Deploys and executes the physical dataflow."""
        self._collect_garbage()  # Make sure everything is clean
        # TODO (john): Check if dataflow has any 'logical
        # inconsistencies'. For example, if there is a forward
        # partitioning strategy but the number of downstream instances
        # is larger than the number of upstream instances, some of the
        # downstream instances will not be used at all
        self.execution_graph = ExecutionGraph(self)
        self.execution_graph.build_graph()
        logger.info("init...")
        # init
        init_waits = []
        for actor_handle in self.execution_graph.actor_handles:
            init_waits.append(actor_handle.init.remote(pickle.dumps(self)))
        for wait in init_waits:
            assert ray.get(wait) is True
        logger.info("running...")
        # start
        exec_handles = []
        for actor_handle in self.execution_graph.actor_handles:
            exec_handles.append(actor_handle.start.remote())
        return exec_handles

    def wait_finish(self):
        for actor_handle in self.execution_graph.actor_handles:
            while not ray.get(actor_handle.is_finished.remote()):
                time.sleep(1)

    # Prints the logical dataflow graph
    def print_logical_graph(self):
        self._collect_garbage()
        logger.info("==================================")
        logger.info("======Logical Dataflow Graph======")
        logger.info("==================================")
        # Print operators in topological order
        for node in nx.topological_sort(self.logical_topo):
            downstream_neighbors = list(self.logical_topo.neighbors(node))
            logger.info("======Current Operator======")
            operator = self.operators[node]
            operator.print()
            logger.info("======Downstream Operators======")
            if len(downstream_neighbors) == 0:
                logger.info("None\n")
            for downstream_node in downstream_neighbors:
                self.operators[downstream_node].print()


# TODO (john): We also need KeyedDataStream and WindowedDataStream as
# subclasses of DataStream to prevent ill-defined logical dataflows


class DataStream:
    """A data stream.

    This class contains all information about a logical stream, i.e. an
    edge in the logical topology. It is the main class exposed to the
    user.

    Attributes:
        id (UUID): The id of the stream
        env (Environment): The environment the stream belongs to.
        src_operator_id (UUID): The id of the source operator of the
            stream.
        dst_operator_id (UUID): The id of the destination operator of
            the stream.
        is_partitioned (bool): Denotes if there is a partitioning
            strategy (e.g. shuffle) for the stream or not (default
            stategy: Forward).
    """
    stream_id_counter = 0

    def __init__(self,
                 environment,
                 source_id=None,
                 dest_id=None,
                 is_partitioned=False):
        self.env = environment
        self.id = DataStream.stream_id_counter
        DataStream.stream_id_counter += 1
        self.src_operator_id = source_id
        self.dst_operator_id = dest_id
        # True if a partitioning strategy for this stream exists,
        # false otherwise
        self.is_partitioned = is_partitioned

    # Generates a new stream after a data transformation is applied
    def __expand(self):
        stream = DataStream(self.env)
        assert (self.dst_operator_id is not None)
        stream.src_operator_id = self.dst_operator_id
        stream.dst_operator_id = None
        return stream

    # Assigns the partitioning strategy to a new 'open-ended' stream
    # and returns the stream. At this point, the partitioning strategy
    # is not associated with any destination operator. We expect this to
    # be done later, as we continue assembling the dataflow graph
    def __partition(self, strategy, partition_fn=None):
        scheme = PScheme(strategy, partition_fn)
        source_operator = self.env.operators[self.src_operator_id]
        new_stream = DataStream(
            self.env, source_id=source_operator.id, is_partitioned=True)
        source_operator._set_partition_strategy(new_stream.id, scheme)
        return new_stream

    # Registers the operator to the environment and returns a new
    # 'open-ended' stream. The registered operator serves as the
    # destination of the previously 'open' stream
    def __register(self, operator):
        """Registers the given logical operator to the environment and
        connects it to its upstream operator (if any).

        A call to this function adds a new edge to the logical topology.

        Attributes:
            operator (Operator): The metadata of the logical operator.
        """
        self.env.operators[operator.id] = operator
        self.dst_operator_id = operator.id
        logger.debug("Adding new dataflow edge ({},{}) --> ({},{})".format(
            self.src_operator_id,
            self.env.operators[self.src_operator_id].name,
            self.dst_operator_id,
            self.env.operators[self.dst_operator_id].name))
        # Update logical dataflow graphs
        self.env._add_edge(self.src_operator_id, self.dst_operator_id)
        # Keep track of the partitioning strategy and the destination
        # operator
        src_operator = self.env.operators[self.src_operator_id]
        if self.is_partitioned is True:
            partitioning, _ = src_operator._get_partition_strategy(self.id)
            src_operator._set_partition_strategy(self.id, partitioning,
                                                 operator.id)
        elif src_operator.type == OpType.KeyBy:
            # Set the output partitioning strategy to shuffle by key
            partitioning = PScheme(PStrategy.ShuffleByKey)
            src_operator._set_partition_strategy(self.id, partitioning,
                                                 operator.id)
        else:  # No partitioning strategy has been defined - set default
            partitioning = PScheme(PStrategy.Forward)
            src_operator._set_partition_strategy(self.id, partitioning,
                                                 operator.id)
        return self.__expand()

    # Sets the level of parallelism for an operator, i.e. its total
    # number of instances. Each operator instance corresponds to an
    # actor in the physical dataflow
    def set_parallelism(self, num_instances):
        """Sets the number of instances for the source operator of the
        stream.

        Attributes:
            num_instances (int): The level of parallelism for the
                source operator of the stream.
        """
        assert (num_instances > 0)
        self.env._set_parallelism(self.src_operator_id, num_instances)
        return self

    # Stream Partitioning Strategies #
    # TODO (john): Currently, only forward (default), shuffle,
    # and broadcast are supported

    # Hash-based record shuffling
    def shuffle(self):
        """Registers a shuffling partitioning strategy for the stream."""
        return self.__partition(PStrategy.Shuffle)

    # Broadcasts each record to all downstream instances
    def broadcast(self):
        """Registers a broadcast partitioning strategy for the stream."""
        return self.__partition(PStrategy.Broadcast)

    # Rescales load to downstream instances
    def rescale(self):
        """Registers a rescale partitioning strategy for the stream.

        Same as Flink's rescale (see: https://ci.apache.org/projects/flink/
        flink-docs-stable/dev/stream/operators/#physical-partitioning).
        """
        return self.__partition(PStrategy.Rescale)

    # Round-robin partitioning
    def round_robin(self):
        """Registers a round-robin partitioning strategy for the
        stream."""
        return self.__partition(PStrategy.RoundRobin)

    # User-defined partitioning
    def partition(self, partition_fn):
        """Registers a user-defined partitioning strategy for the
        stream.

        Attributes:
            partition_fn (function): The user-defined partitioning
                function.
        """
        return self.__partition(PStrategy.Custom, partition_fn)

    # Data Transformations #
    # TODO (john): Expand set of supported operators.
    # TODO (john): To support event-time windows we need a mechanism
    # for generating and processing watermarks

    # Registers map operator to the environment
    def map(self, map_fn, name="Map"):
        """Applies a map operator to the stream.

        Attributes:
            map_fn (function): The user-defined logic of the map.
        """
        op = Operator(
            self.env.gen_operator_id(),
            OpType.Map,
            processor.Map,
            name,
            map_fn,
            num_instances=self.env.config.parallelism)
        return self.__register(op)

    # Registers flatmap operator to the environment
    def flat_map(self, flatmap_fn):
        """Applies a flatmap operator to the stream.

        Attributes:
            flatmap_fn (function): The user-defined logic of the
                flatmap (e.g. split()).
        """
        op = Operator(
            self.env.gen_operator_id(),
            OpType.FlatMap,
            processor.FlatMap,
            "FlatMap",
            flatmap_fn,
            num_instances=self.env.config.parallelism)
        return self.__register(op)

    # Registers keyBy operator to the environment
    # TODO (john): This should return a KeyedDataStream
    def key_by(self, key_selector):
        """Applies a key_by operator to the stream.

        Attributes:
            key_attribute_index (int): The index of the key attributed
                (assuming tuple records).
        """
        op = Operator(
            self.env.gen_operator_id(),
            OpType.KeyBy,
            processor.KeyBy,
            "KeyBy",
            other=key_selector,
            num_instances=self.env.config.parallelism)
        return self.__register(op)

    # Registers Reduce operator to the environment
    def reduce(self, reduce_fn):
        """Applies a rolling sum operator to the stream.

        Attributes:
            sum_attribute_index (int): The index of the attribute to
                sum (assuming tuple records).
        """
        op = Operator(
            self.env.gen_operator_id(),
            OpType.Reduce,
            processor.Reduce,
            "Sum",
            reduce_fn,
            num_instances=self.env.config.parallelism)
        return self.__register(op)

    # Registers Sum operator to the environment
    def sum(self, attribute_selector, state_keeper=None):
        """Applies a rolling sum operator to the stream.

        Attributes:
            sum_attribute_index (int): The index of the attribute to
                sum (assuming tuple records).
        """
        op = Operator(
            self.env.gen_operator_id(),
            OpType.Sum,
            processor.Reduce,
            "Sum",
            _sum,
            other=attribute_selector,
            state_actor=state_keeper,
            num_instances=self.env.config.parallelism)
        return self.__register(op)

    # Registers window operator to the environment.
    # This is a system time window
    # TODO (john): This should return a WindowedDataStream
    def time_window(self, window_width_ms):
        """Applies a system time window to the stream.

        Attributes:
            window_width_ms (int): The length of the window in ms.
        """
        raise Exception("time_window is unsupported")

    # Registers filter operator to the environment
    def filter(self, filter_fn):
        """Applies a filter to the stream.

        Attributes:
            filter_fn (function): The user-defined filter function.
        """
        op = Operator(
            self.env.gen_operator_id(),
            OpType.Filter,
            processor.Filter,
            "Filter",
            filter_fn,
            num_instances=self.env.config.parallelism)
        return self.__register(op)

    # TODO (john): Registers window join operator to the environment
    def window_join(self, other_stream, join_attribute, window_width):
        op = Operator(
            self.env.gen_operator_id(),
            OpType.WindowJoin,
            processor.WindowJoin,
            "WindowJoin",
            num_instances=self.env.config.parallelism)
        return self.__register(op)

    # Registers inspect operator to the environment
    def inspect(self, inspect_logic):
        """Inspects the content of the stream.

        Attributes:
            inspect_logic (function): The user-defined inspect function.
        """
        op = Operator(
            self.env.gen_operator_id(),
            OpType.Inspect,
            processor.Inspect,
            "Inspect",
            inspect_logic,
            num_instances=self.env.config.parallelism)
        return self.__register(op)

    # Registers sink operator to the environment
    # TODO (john): A sink now just drops records but it should be able
    # to export data to other systems
    def sink(self):
        """Closes the stream with a sink operator."""
        op = Operator(
            self.env.gen_operator_id(),
            OpType.Sink,
            processor.Sink,
            "Sink",
            num_instances=self.env.config.parallelism)
        return self.__register(op)
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/tests/test_direct_transfer.py
Python
import pickle import threading import time import ray import ray.streaming._streaming as _streaming import ray.streaming.runtime.transfer as transfer from ray.function_manager import FunctionDescriptor from ray.streaming.config import Config @ray.remote class Worker: def __init__(self): core_worker = ray.worker.global_worker.core_worker writer_async_func = FunctionDescriptor( __name__, self.on_writer_message.__name__, self.__class__.__name__) writer_sync_func = FunctionDescriptor( __name__, self.on_writer_message_sync.__name__, self.__class__.__name__) self.writer_client = _streaming.WriterClient( core_worker, writer_async_func, writer_sync_func) reader_async_func = FunctionDescriptor( __name__, self.on_reader_message.__name__, self.__class__.__name__) reader_sync_func = FunctionDescriptor( __name__, self.on_reader_message_sync.__name__, self.__class__.__name__) self.reader_client = _streaming.ReaderClient( core_worker, reader_async_func, reader_sync_func) self.writer = None self.output_channel_id = None self.reader = None def init_writer(self, output_channel, reader_actor): conf = { Config.TASK_JOB_ID: ray.runtime_context._get_runtime_context() .current_driver_id, Config.CHANNEL_TYPE: Config.NATIVE_CHANNEL } self.writer = transfer.DataWriter([output_channel], [pickle.loads(reader_actor)], conf) self.output_channel_id = transfer.ChannelID(output_channel) def init_reader(self, input_channel, writer_actor): conf = { Config.TASK_JOB_ID: ray.runtime_context._get_runtime_context() .current_driver_id, Config.CHANNEL_TYPE: Config.NATIVE_CHANNEL } self.reader = transfer.DataReader([input_channel], [pickle.loads(writer_actor)], conf) def start_write(self, msg_nums): self.t = threading.Thread( target=self.run_writer, args=[msg_nums], daemon=True) self.t.start() def run_writer(self, msg_nums): for i in range(msg_nums): self.writer.write(self.output_channel_id, pickle.dumps(i)) print("WriterWorker done.") def start_read(self, msg_nums): self.t = threading.Thread( 
target=self.run_reader, args=[msg_nums], daemon=True) self.t.start() def run_reader(self, msg_nums): count = 0 msg = None while count != msg_nums: item = self.reader.read(100) if item is None: time.sleep(0.01) else: msg = pickle.loads(item.body()) count += 1 assert msg == msg_nums - 1 print("ReaderWorker done.") def is_finished(self): return not self.t.is_alive() def on_reader_message(self, buffer: bytes): """used in direct call mode""" self.reader_client.on_reader_message(buffer) def on_reader_message_sync(self, buffer: bytes): """used in direct call mode""" if self.reader_client is None: return b" " * 4 # special flag to indicate this actor not ready result = self.reader_client.on_reader_message_sync(buffer) return result.to_pybytes() def on_writer_message(self, buffer: bytes): """used in direct call mode""" self.writer_client.on_writer_message(buffer) def on_writer_message_sync(self, buffer: bytes): """used in direct call mode""" if self.writer_client is None: return b" " * 4 # special flag to indicate this actor not ready result = self.writer_client.on_writer_message_sync(buffer) return result.to_pybytes() def test_queue(): ray.init() writer = Worker._remote(is_direct_call=True) reader = Worker._remote(is_direct_call=True) channel_id_str = transfer.ChannelID.gen_random_id() inits = [ writer.init_writer.remote(channel_id_str, pickle.dumps(reader)), reader.init_reader.remote(channel_id_str, pickle.dumps(writer)) ] ray.get(inits) msg_nums = 1000 print("start read/write") reader.start_read.remote(msg_nums) writer.start_write.remote(msg_nums) while not ray.get(reader.is_finished.remote()): time.sleep(0.1) ray.shutdown() if __name__ == "__main__": test_queue()
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/tests/test_logical_graph.py
Python
from ray.streaming.streaming import Environment, ExecutionGraph from ray.streaming.operator import OpType, PStrategy def test_parallelism(): """Tests operator parallelism.""" env = Environment() # Try setting a common parallelism for all operators env.set_parallelism(2) stream = env.source(None).map(None).filter(None).flat_map(None) env._collect_garbage() for operator in env.operators.values(): if operator.type == OpType.Source: # TODO (john): Currently each source has only one instance assert operator.num_instances == 1, (operator.num_instances, 1) else: assert operator.num_instances == 2, (operator.num_instances, 2) # Check again after adding an operator with different parallelism stream.map(None, "Map1").shuffle().set_parallelism(3).map( None, "Map2").set_parallelism(4) env._collect_garbage() for operator in env.operators.values(): if operator.type == OpType.Source: assert operator.num_instances == 1, (operator.num_instances, 1) elif operator.name != "Map1" and operator.name != "Map2": assert operator.num_instances == 2, (operator.num_instances, 2) elif operator.name != "Map2": assert operator.num_instances == 3, (operator.num_instances, 3) else: assert operator.num_instances == 4, (operator.num_instances, 4) def test_partitioning(): """Tests stream partitioning.""" env = Environment() # Try defining multiple partitioning strategies for the same stream _ = env.source(None).shuffle().rescale().broadcast().map( None).broadcast().shuffle() env._collect_garbage() for operator in env.operators.values(): p_schemes = operator.partitioning_strategies for scheme in p_schemes.values(): # Only last defined strategy should be kept if operator.type == OpType.Source: assert scheme.strategy == PStrategy.Broadcast, ( scheme.strategy, PStrategy.Broadcast) else: assert scheme.strategy == PStrategy.Shuffle, ( scheme.strategy, PStrategy.Shuffle) def test_forking(): """Tests stream forking.""" env = Environment() # Try forking a stream stream = 
env.source(None).map(None).set_parallelism(2) # First branch with a shuffle partitioning strategy _ = stream.shuffle().key_by(0).sum(1) # Second branch with the default partitioning strategy _ = stream.key_by(1).sum(2) env._collect_garbage() # Operator ids source_id = None map_id = None keyby1_id = None keyby2_id = None sum1_id = None sum2_id = None # Collect ids for id, operator in env.operators.items(): if operator.type == OpType.Source: source_id = id elif operator.type == OpType.Map: map_id = id elif operator.type == OpType.KeyBy: if operator.other_args == 0: keyby1_id = id else: assert operator.other_args == 1, (operator.other_args, 1) keyby2_id = id elif operator.type == OpType.Sum: if operator.other_args == 1: sum1_id = id else: assert operator.other_args == 2, (operator.other_args, 2) sum2_id = id # Check generated streams and their partitioning for source, destination in env.logical_topo.edges: operator = env.operators[source] if source == source_id: assert destination == map_id, (destination, map_id) elif source == map_id: p_scheme = operator.partitioning_strategies[destination] strategy = p_scheme.strategy key_index = env.operators[destination].other_args if key_index == 0: # This must be the first branch assert strategy == PStrategy.Shuffle, (strategy, PStrategy.Shuffle) assert destination == keyby1_id, (destination, keyby1_id) else: # This must be the second branch assert key_index == 1, (key_index, 1) assert strategy == PStrategy.Forward, (strategy, PStrategy.Forward) assert destination == keyby2_id, (destination, keyby2_id) elif source == keyby1_id or source == keyby2_id: p_scheme = operator.partitioning_strategies[destination] strategy = p_scheme.strategy key_index = env.operators[destination].other_args if key_index == 1: # This must be the first branch assert strategy == PStrategy.ShuffleByKey, ( strategy, PStrategy.ShuffleByKey) assert destination == sum1_id, (destination, sum1_id) else: # This must be the second branch assert key_index == 2, 
(key_index, 2) assert strategy == PStrategy.ShuffleByKey, ( strategy, PStrategy.ShuffleByKey) assert destination == sum2_id, (destination, sum2_id) else: # This must be a sum operator assert operator.type == OpType.Sum, (operator.type, OpType.Sum) def _test_shuffle_channels(): """Tests shuffling connectivity.""" env = Environment() # Try defining a shuffle _ = env.source(None).shuffle().map(None).set_parallelism(4) expected = [(0, 0), (0, 1), (0, 2), (0, 3)] _test_channels(env, expected) def _test_forward_channels(): """Tests forward connectivity.""" env = Environment() # Try the default partitioning strategy _ = env.source(None).set_parallelism(4).map(None).set_parallelism(2) expected = [(0, 0), (1, 1), (2, 0), (3, 1)] _test_channels(env, expected) def _test_broadcast_channels(): """Tests broadcast connectivity.""" env = Environment() # Try broadcasting _ = env.source(None).set_parallelism(4).broadcast().map( None).set_parallelism(2) expected = [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1), (3, 0), (3, 1)] _test_channels(env, expected) def _test_round_robin_channels(): """Tests round-robin connectivity.""" env = Environment() # Try broadcasting _ = env.source(None).round_robin().map(None).set_parallelism(2) expected = [(0, 0), (0, 1)] _test_channels(env, expected) def _test_channels(environment, expected_channels): """Tests operator connectivity.""" environment._collect_garbage() map_id = None # Get id for id, operator in environment.operators.items(): if operator.type == OpType.Map: map_id = id # Collect channels environment.execution_graph = ExecutionGraph(environment) environment.execution_graph.build_channels() channels_per_destination = [] for operator in environment.operators.values(): channels_per_destination.append( environment.execution_graph._generate_channels(operator)) # Check actual connectivity actual = [] for destination in channels_per_destination: for channels in destination.values(): for channel in channels: src_instance_index = 
channel.src_instance_index dst_instance_index = channel.dst_instance_index connection = (src_instance_index, dst_instance_index) assert channel.dst_operator_id == map_id, ( channel.dst_operator_id, map_id) actual.append(connection) # Make sure connections are as expected set_1 = set(expected_channels) set_2 = set(actual) assert set_1 == set_2, (set_1, set_2) def test_channel_generation(): """Tests data channel generation.""" _test_shuffle_channels() _test_broadcast_channels() _test_round_robin_channels() _test_forward_channels() # TODO (john): Add simple wordcount test def test_wordcount(): """Tests a simple streaming wordcount.""" pass if __name__ == "__main__": test_channel_generation()
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/python/tests/test_word_count.py
Python
import ray from ray.streaming.config import Config from ray.streaming.streaming import Environment, Conf def test_word_count(): ray.init() env = Environment(config=Conf(channel_type=Config.NATIVE_CHANNEL)) env.read_text_file(__file__) \ .set_parallelism(1) \ .filter(lambda x: "word" in x) \ .inspect(lambda x: print("result", x)) env_handle = env.execute() ray.get(env_handle) # Stay alive until execution finishes env.wait_finish() ray.shutdown() if __name__ == "__main__": test_word_count()
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/channel.cc
C++
#include "channel.h" #include <unordered_map> namespace ray { namespace streaming { ProducerChannel::ProducerChannel(std::shared_ptr<Config> &transfer_config, ProducerChannelInfo &p_channel_info) : transfer_config_(transfer_config), channel_info(p_channel_info) {} ConsumerChannel::ConsumerChannel(std::shared_ptr<Config> &transfer_config, ConsumerChannelInfo &c_channel_info) : transfer_config_(transfer_config), channel_info(c_channel_info) {} StreamingQueueProducer::StreamingQueueProducer(std::shared_ptr<Config> &transfer_config, ProducerChannelInfo &p_channel_info) : ProducerChannel(transfer_config, p_channel_info) { STREAMING_LOG(INFO) << "Producer Init"; } StreamingQueueProducer::~StreamingQueueProducer() { STREAMING_LOG(INFO) << "Producer Destory"; } StreamingStatus StreamingQueueProducer::CreateTransferChannel() { CreateQueue(); uint64_t queue_last_seq_id = 0; uint64_t last_message_id_in_queue = 0; if (!last_message_id_in_queue) { if (last_message_id_in_queue < channel_info.current_message_id) { STREAMING_LOG(WARNING) << "last message id in queue : " << last_message_id_in_queue << " is less than message checkpoint loaded id : " << channel_info.current_message_id << ", an old queue object " << channel_info.channel_id << " was fond in store"; } last_message_id_in_queue = channel_info.current_message_id; } if (queue_last_seq_id == static_cast<uint64_t>(-1)) { queue_last_seq_id = 0; } channel_info.current_seq_id = queue_last_seq_id; STREAMING_LOG(WARNING) << "existing last message id => " << last_message_id_in_queue << ", message id in channel => " << channel_info.current_message_id << ", queue last seq id => " << queue_last_seq_id; channel_info.message_last_commit_id = last_message_id_in_queue; return StreamingStatus::OK; } StreamingStatus StreamingQueueProducer::CreateQueue() { STREAMING_LOG(INFO) << "CreateQueue qid: " << channel_info.channel_id << " data_size: " << channel_info.queue_size; auto upstream_handler = 
ray::streaming::UpstreamQueueMessageHandler::GetService(); if (upstream_handler->UpstreamQueueExists(channel_info.channel_id)) { RAY_LOG(INFO) << "StreamingQueueWriter::CreateQueue duplicate!!!"; return StreamingStatus::OK; } upstream_handler->SetPeerActorID(channel_info.channel_id, channel_info.actor_id); queue_ = upstream_handler->CreateUpstreamQueue( channel_info.channel_id, channel_info.actor_id, channel_info.queue_size); STREAMING_CHECK(queue_ != nullptr); std::vector<ObjectID> queue_ids, failed_queues; queue_ids.push_back(channel_info.channel_id); upstream_handler->WaitQueues(queue_ids, 10 * 1000, failed_queues); STREAMING_LOG(INFO) << "q id => " << channel_info.channel_id << ", queue size => " << channel_info.queue_size; return StreamingStatus::OK; } StreamingStatus StreamingQueueProducer::DestroyTransferChannel() { return StreamingStatus::OK; } StreamingStatus StreamingQueueProducer::ClearTransferCheckpoint( uint64_t checkpoint_id, uint64_t checkpoint_offset) { return StreamingStatus::OK; } StreamingStatus StreamingQueueProducer::NotifyChannelConsumed(uint64_t channel_offset) { queue_->SetQueueEvictionLimit(channel_offset); return StreamingStatus::OK; } StreamingStatus StreamingQueueProducer::ProduceItemToChannel(uint8_t *data, uint32_t data_size) { Status status = PushQueueItem(channel_info.current_seq_id + 1, data, data_size, current_time_ms()); if (status.code() != StatusCode::OK) { STREAMING_LOG(DEBUG) << channel_info.channel_id << " => Queue is full" << " meesage => " << status.message(); // Assume that only status OutOfMemory and OK are acceptable. // OutOfMemory means queue is full at that moment. 
STREAMING_CHECK(status.code() == StatusCode::OutOfMemory) << "status => " << status.message() << ", perhaps data block is so large that it can't be stored in" << ", data block size => " << data_size; return StreamingStatus::FullChannel; } return StreamingStatus::OK; } Status StreamingQueueProducer::PushQueueItem(uint64_t seq_id, uint8_t *data, uint32_t data_size, uint64_t timestamp) { STREAMING_LOG(INFO) << "StreamingQueueProducer::PushQueueItem:" << " qid: " << channel_info.channel_id << " seq_id: " << seq_id << " data_size: " << data_size; Status status = queue_->Push(seq_id, data, data_size, timestamp, false); if (status.IsOutOfMemory()) { status = queue_->TryEvictItems(); if (!status.ok()) { STREAMING_LOG(INFO) << "Evict fail."; return status; } status = queue_->Push(seq_id, data, data_size, timestamp, false); } queue_->Send(); return status; } StreamingQueueConsumer::StreamingQueueConsumer(std::shared_ptr<Config> &transfer_config, ConsumerChannelInfo &c_channel_info) : ConsumerChannel(transfer_config, c_channel_info) { STREAMING_LOG(INFO) << "Consumer Init"; } StreamingQueueConsumer::~StreamingQueueConsumer() { STREAMING_LOG(INFO) << "Consumer Destroy"; } StreamingStatus StreamingQueueConsumer::CreateTransferChannel() { auto downstream_handler = ray::streaming::DownstreamQueueMessageHandler::GetService(); STREAMING_LOG(INFO) << "GetQueue qid: " << channel_info.channel_id << " start_seq_id: " << channel_info.current_seq_id + 1; if (downstream_handler->DownstreamQueueExists(channel_info.channel_id)) { RAY_LOG(INFO) << "StreamingQueueReader::GetQueue duplicate!!!"; return StreamingStatus::OK; } downstream_handler->SetPeerActorID(channel_info.channel_id, channel_info.actor_id); STREAMING_LOG(INFO) << "Create ReaderQueue " << channel_info.channel_id << " pull from start_seq_id: " << channel_info.current_seq_id + 1; queue_ = downstream_handler->CreateDownstreamQueue(channel_info.channel_id, channel_info.actor_id); return StreamingStatus::OK; } StreamingStatus 
StreamingQueueConsumer::DestroyTransferChannel() { return StreamingStatus::OK; } StreamingStatus StreamingQueueConsumer::ClearTransferCheckpoint( uint64_t checkpoint_id, uint64_t checkpoint_offset) { return StreamingStatus::OK; } StreamingStatus StreamingQueueConsumer::ConsumeItemFromChannel(uint64_t &offset_id, uint8_t *&data, uint32_t &data_size, uint32_t timeout) { STREAMING_LOG(INFO) << "GetQueueItem qid: " << channel_info.channel_id; STREAMING_CHECK(queue_ != nullptr); QueueItem item = queue_->PopPendingBlockTimeout(timeout * 1000); if (item.SeqId() == QUEUE_INVALID_SEQ_ID) { STREAMING_LOG(INFO) << "GetQueueItem timeout."; data = nullptr; data_size = 0; offset_id = QUEUE_INVALID_SEQ_ID; return StreamingStatus::OK; } data = item.Buffer()->Data(); offset_id = item.SeqId(); data_size = item.Buffer()->Size(); STREAMING_LOG(DEBUG) << "GetQueueItem qid: " << channel_info.channel_id << " seq_id: " << offset_id << " msg_id: " << item.MaxMsgId() << " data_size: " << data_size; return StreamingStatus::OK; } StreamingStatus StreamingQueueConsumer::NotifyChannelConsumed(uint64_t offset_id) { STREAMING_CHECK(queue_ != nullptr); queue_->OnConsumed(offset_id); return StreamingStatus::OK; } // For mock queue transfer struct MockQueueItem { uint64_t seq_id; uint32_t data_size; std::shared_ptr<uint8_t> data; }; class MockQueue { public: std::unordered_map<ObjectID, std::shared_ptr<AbstractRingBufferImpl<MockQueueItem>>> message_buffer_; std::unordered_map<ObjectID, std::shared_ptr<AbstractRingBufferImpl<MockQueueItem>>> consumed_buffer_; static std::mutex mutex; static MockQueue &GetMockQueue() { static MockQueue mock_queue; return mock_queue; } }; std::mutex MockQueue::mutex; StreamingStatus MockProducer::CreateTransferChannel() { std::unique_lock<std::mutex> lock(MockQueue::mutex); MockQueue &mock_queue = MockQueue::GetMockQueue(); mock_queue.message_buffer_[channel_info.channel_id] = std::make_shared<RingBufferImplThreadSafe<MockQueueItem>>(500); 
mock_queue.consumed_buffer_[channel_info.channel_id] = std::make_shared<RingBufferImplThreadSafe<MockQueueItem>>(500); return StreamingStatus::OK; } StreamingStatus MockProducer::DestroyTransferChannel() { std::unique_lock<std::mutex> lock(MockQueue::mutex); MockQueue &mock_queue = MockQueue::GetMockQueue(); mock_queue.message_buffer_.erase(channel_info.channel_id); mock_queue.consumed_buffer_.erase(channel_info.channel_id); return StreamingStatus::OK; } StreamingStatus MockProducer::ProduceItemToChannel(uint8_t *data, uint32_t data_size) { std::unique_lock<std::mutex> lock(MockQueue::mutex); MockQueue &mock_queue = MockQueue::GetMockQueue(); auto &ring_buffer = mock_queue.message_buffer_[channel_info.channel_id]; if (ring_buffer->Full()) { return StreamingStatus::OutOfMemory; } MockQueueItem item; item.seq_id = channel_info.current_seq_id + 1; item.data.reset(new uint8_t[data_size]); item.data_size = data_size; std::memcpy(item.data.get(), data, data_size); ring_buffer->Push(item); return StreamingStatus::OK; } StreamingStatus MockConsumer::ConsumeItemFromChannel(uint64_t &offset_id, uint8_t *&data, uint32_t &data_size, uint32_t timeout) { std::unique_lock<std::mutex> lock(MockQueue::mutex); MockQueue &mock_queue = MockQueue::GetMockQueue(); auto &channel_id = channel_info.channel_id; if (mock_queue.message_buffer_.find(channel_id) == mock_queue.message_buffer_.end()) { return StreamingStatus::NoSuchItem; } if (mock_queue.message_buffer_[channel_id]->Empty()) { return StreamingStatus::NoSuchItem; } MockQueueItem item = mock_queue.message_buffer_[channel_id]->Front(); mock_queue.message_buffer_[channel_id]->Pop(); mock_queue.consumed_buffer_[channel_id]->Push(item); offset_id = item.seq_id; data = item.data.get(); data_size = item.data_size; return StreamingStatus::OK; } StreamingStatus MockConsumer::NotifyChannelConsumed(uint64_t offset_id) { std::unique_lock<std::mutex> lock(MockQueue::mutex); MockQueue &mock_queue = MockQueue::GetMockQueue(); auto &channel_id = 
channel_info.channel_id; auto &ring_buffer = mock_queue.consumed_buffer_[channel_id]; while (!ring_buffer->Empty() && ring_buffer->Front().seq_id <= offset_id) { ring_buffer->Pop(); } return StreamingStatus::OK; } } // namespace streaming } // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/channel.h
C/C++ Header
#ifndef RAY_CHANNEL_H #define RAY_CHANNEL_H #include "config/streaming_config.h" #include "queue/queue_handler.h" #include "ring_buffer.h" #include "status.h" #include "util/streaming_util.h" namespace ray { namespace streaming { struct StreamingQueueInfo { uint64_t first_seq_id = 0; uint64_t last_seq_id = 0; uint64_t target_seq_id = 0; uint64_t consumed_seq_id = 0; }; /// PrducerChannelinfo and ConsumerChannelInfo contains channel information and /// its metrics that help us to debug or show important messages in logging. struct ProducerChannelInfo { ObjectID channel_id; StreamingRingBufferPtr writer_ring_buffer; uint64_t current_message_id; uint64_t current_seq_id; uint64_t message_last_commit_id; StreamingQueueInfo queue_info; uint32_t queue_size; int64_t message_pass_by_ts; ActorID actor_id; }; struct ConsumerChannelInfo { ObjectID channel_id; uint64_t current_message_id; uint64_t current_seq_id; uint64_t barrier_id; uint64_t partial_barrier_id; StreamingQueueInfo queue_info; uint64_t last_queue_item_delay; uint64_t last_queue_item_latency; uint64_t last_queue_target_diff; uint64_t get_queue_item_times; ActorID actor_id; }; /// Two types of channel are presented: /// * ProducerChannel is supporting all writing operations for upperlevel. /// * ConsumerChannel is for all reader operations. 
/// They share similar interfaces: /// * ClearTransferCheckpoint(it's empty and unsupported now, we will add /// implementation in next PR) /// * NotifychannelConsumed (notify owner of channel which range data should // be release to avoid out of memory) /// but some differences in read/write function.(named ProduceItemTochannel and /// ConsumeItemFrom channel) class ProducerChannel { public: explicit ProducerChannel(std::shared_ptr<Config> &transfer_config, ProducerChannelInfo &p_channel_info); virtual ~ProducerChannel() = default; virtual StreamingStatus CreateTransferChannel() = 0; virtual StreamingStatus DestroyTransferChannel() = 0; virtual StreamingStatus ClearTransferCheckpoint(uint64_t checkpoint_id, uint64_t checkpoint_offset) = 0; virtual StreamingStatus ProduceItemToChannel(uint8_t *data, uint32_t data_size) = 0; virtual StreamingStatus NotifyChannelConsumed(uint64_t channel_offset) = 0; protected: std::shared_ptr<Config> transfer_config_; ProducerChannelInfo &channel_info; }; class ConsumerChannel { public: explicit ConsumerChannel(std::shared_ptr<Config> &transfer_config, ConsumerChannelInfo &c_channel_info); virtual ~ConsumerChannel() = default; virtual StreamingStatus CreateTransferChannel() = 0; virtual StreamingStatus DestroyTransferChannel() = 0; virtual StreamingStatus ClearTransferCheckpoint(uint64_t checkpoint_id, uint64_t checkpoint_offset) = 0; virtual StreamingStatus ConsumeItemFromChannel(uint64_t &offset_id, uint8_t *&data, uint32_t &data_size, uint32_t timeout) = 0; virtual StreamingStatus NotifyChannelConsumed(uint64_t offset_id) = 0; protected: std::shared_ptr<Config> transfer_config_; ConsumerChannelInfo &channel_info; }; class StreamingQueueProducer : public ProducerChannel { public: explicit StreamingQueueProducer(std::shared_ptr<Config> &transfer_config, ProducerChannelInfo &p_channel_info); ~StreamingQueueProducer() override; StreamingStatus CreateTransferChannel() override; StreamingStatus DestroyTransferChannel() override; 
StreamingStatus ClearTransferCheckpoint(uint64_t checkpoint_id, uint64_t checkpoint_offset) override; StreamingStatus ProduceItemToChannel(uint8_t *data, uint32_t data_size) override; StreamingStatus NotifyChannelConsumed(uint64_t offset_id) override; private: StreamingStatus CreateQueue(); Status PushQueueItem(uint64_t seq_id, uint8_t *data, uint32_t data_size, uint64_t timestamp); private: std::shared_ptr<WriterQueue> queue_; }; class StreamingQueueConsumer : public ConsumerChannel { public: explicit StreamingQueueConsumer(std::shared_ptr<Config> &transfer_config, ConsumerChannelInfo &c_channel_info); ~StreamingQueueConsumer() override; StreamingStatus CreateTransferChannel() override; StreamingStatus DestroyTransferChannel() override; StreamingStatus ClearTransferCheckpoint(uint64_t checkpoint_id, uint64_t checkpoint_offset) override; StreamingStatus ConsumeItemFromChannel(uint64_t &offset_id, uint8_t *&data, uint32_t &data_size, uint32_t timeout) override; StreamingStatus NotifyChannelConsumed(uint64_t offset_id) override; private: std::shared_ptr<ReaderQueue> queue_; }; /// MockProducer and Mockconsumer are independent implementation of channels that /// conduct a very simple memory channel for unit tests or intergation test. 
class MockProducer : public ProducerChannel { public: explicit MockProducer(std::shared_ptr<Config> &transfer_config, ProducerChannelInfo &channel_info) : ProducerChannel(transfer_config, channel_info){}; StreamingStatus CreateTransferChannel() override; StreamingStatus DestroyTransferChannel() override; StreamingStatus ClearTransferCheckpoint(uint64_t checkpoint_id, uint64_t checkpoint_offset) override { return StreamingStatus::OK; } StreamingStatus ProduceItemToChannel(uint8_t *data, uint32_t data_size) override; StreamingStatus NotifyChannelConsumed(uint64_t channel_offset) override { return StreamingStatus::OK; } }; class MockConsumer : public ConsumerChannel { public: explicit MockConsumer(std::shared_ptr<Config> &transfer_config, ConsumerChannelInfo &c_channel_info) : ConsumerChannel(transfer_config, c_channel_info){}; StreamingStatus CreateTransferChannel() override { return StreamingStatus::OK; } StreamingStatus DestroyTransferChannel() override { return StreamingStatus::OK; } StreamingStatus ClearTransferCheckpoint(uint64_t checkpoint_id, uint64_t checkpoint_offset) override { return StreamingStatus::OK; } StreamingStatus ConsumeItemFromChannel(uint64_t &offset_id, uint8_t *&data, uint32_t &data_size, uint32_t timeout) override; StreamingStatus NotifyChannelConsumed(uint64_t offset_id) override; }; } // namespace streaming } // namespace ray #endif // RAY_CHANNEL_H
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/config/streaming_config.cc
C++
#include <unistd.h> #include "streaming_config.h" #include "util/streaming_logging.h" namespace ray { namespace streaming { uint64_t StreamingConfig::TIME_WAIT_UINT = 1; uint32_t StreamingConfig::DEFAULT_RING_BUFFER_CAPACITY = 500; uint32_t StreamingConfig::DEFAULT_EMPTY_MESSAGE_TIME_INTERVAL = 20; // Time to force clean if barrier in queue, default 0ms const uint32_t StreamingConfig::MESSAGE_BUNDLE_MAX_SIZE = 2048; void StreamingConfig::FromProto(const uint8_t *data, uint32_t size) { proto::StreamingConfig config; STREAMING_CHECK(config.ParseFromArray(data, size)) << "Parse streaming conf failed"; if (!config.job_name().empty()) { SetJobName(config.job_name()); } if (!config.task_job_id().empty()) { STREAMING_CHECK(config.task_job_id().size() == 2 * JobID::Size()); SetTaskJobId(config.task_job_id()); } if (!config.worker_name().empty()) { SetWorkerName(config.worker_name()); } if (!config.op_name().empty()) { SetOpName(config.op_name()); } if (config.role() != proto::OperatorType::UNKNOWN) { SetOperatorType(config.role()); } if (config.ring_buffer_capacity() != 0) { SetRingBufferCapacity(config.ring_buffer_capacity()); } if (config.empty_message_interval() != 0) { SetEmptyMessageTimeInterval(config.empty_message_interval()); } } uint32_t StreamingConfig::GetRingBufferCapacity() const { return ring_buffer_capacity_; } void StreamingConfig::SetRingBufferCapacity(uint32_t ring_buffer_capacity) { StreamingConfig::ring_buffer_capacity_ = std::min(ring_buffer_capacity, StreamingConfig::MESSAGE_BUNDLE_MAX_SIZE); } uint32_t StreamingConfig::GetEmptyMessageTimeInterval() const { return empty_message_time_interval_; } void StreamingConfig::SetEmptyMessageTimeInterval(uint32_t empty_message_time_interval) { StreamingConfig::empty_message_time_interval_ = empty_message_time_interval; } streaming::proto::OperatorType StreamingConfig::GetOperatorType() const { return operator_type_; } void StreamingConfig::SetOperatorType(streaming::proto::OperatorType type) { 
StreamingConfig::operator_type_ = type; } const std::string &StreamingConfig::GetJobName() const { return job_name_; } void StreamingConfig::SetJobName(const std::string &job_name) { StreamingConfig::job_name_ = job_name; } const std::string &StreamingConfig::GetOpName() const { return op_name_; } void StreamingConfig::SetOpName(const std::string &op_name) { StreamingConfig::op_name_ = op_name; } const std::string &StreamingConfig::GetWorkerName() const { return worker_name_; } void StreamingConfig::SetWorkerName(const std::string &worker_name) { StreamingConfig::worker_name_ = worker_name; } const std::string &StreamingConfig::GetTaskJobId() const { return task_job_id_; } void StreamingConfig::SetTaskJobId(const std::string &task_job_id) { StreamingConfig::task_job_id_ = task_job_id; } } // namespace streaming } // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/config/streaming_config.h
C/C++ Header
#ifndef RAY_STREAMING_CONFIG_H #define RAY_STREAMING_CONFIG_H #include <cstdint> #include <string> #include "protobuf/streaming.pb.h" #include "ray/common/id.h" namespace ray { namespace streaming { class StreamingConfig { public: static uint64_t TIME_WAIT_UINT; static uint32_t DEFAULT_RING_BUFFER_CAPACITY; static uint32_t DEFAULT_EMPTY_MESSAGE_TIME_INTERVAL; static const uint32_t MESSAGE_BUNDLE_MAX_SIZE; private: uint32_t ring_buffer_capacity_ = DEFAULT_RING_BUFFER_CAPACITY; uint32_t empty_message_time_interval_ = DEFAULT_EMPTY_MESSAGE_TIME_INTERVAL; streaming::proto::OperatorType operator_type_ = streaming::proto::OperatorType::TRANSFORM; std::string job_name_ = "DEFAULT_JOB_NAME"; std::string op_name_ = "DEFAULT_OP_NAME"; std::string worker_name_ = "DEFAULT_WORKER_NAME"; std::string task_job_id_ = JobID::Nil().Hex(); public: void FromProto(const uint8_t *, uint32_t size); const std::string &GetTaskJobId() const; void SetTaskJobId(const std::string &task_job_id); const std::string &GetWorkerName() const; void SetWorkerName(const std::string &worker_name); const std::string &GetOpName() const; void SetOpName(const std::string &op_name); uint32_t GetEmptyMessageTimeInterval() const; void SetEmptyMessageTimeInterval(uint32_t empty_message_time_interval); uint32_t GetRingBufferCapacity() const; void SetRingBufferCapacity(uint32_t ring_buffer_capacity); streaming::proto::OperatorType GetOperatorType() const; void SetOperatorType(streaming::proto::OperatorType type); const std::string &GetJobName() const; void SetJobName(const std::string &job_name); }; } // namespace streaming } // namespace ray #endif // RAY_STREAMING_CONFIG_H
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/data_reader.cc
C++
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <thread>

#include "ray/util/logging.h"
#include "ray/util/util.h"

#include "data_reader.h"
#include "message/message_bundle.h"

namespace ray {
namespace streaming {

// Timeout (ms) for a single ConsumeItemFromChannel attempt; on expiry the
// reader simply retries until it gets data or the runtime is interrupted.
const uint32_t DataReader::kReadItemTimeout = 1000;

// Initialize the reader and additionally restore per-channel progress
// (channel seq ids and streaming message ids), e.g. after recovering from
// a checkpoint. Delegates basic setup to the other Init overload.
void DataReader::Init(const std::vector<ObjectID> &input_ids,
                      const std::vector<ActorID> &actor_ids,
                      const std::vector<uint64_t> &queue_seq_ids,
                      const std::vector<uint64_t> &streaming_msg_ids,
                      int64_t timer_interval) {
  Init(input_ids, actor_ids, timer_interval);
  // All three vectors are indexed in parallel by channel position.
  for (size_t i = 0; i < input_ids.size(); ++i) {
    auto &q_id = input_ids[i];
    channel_info_map_[q_id].current_seq_id = queue_seq_ids[i];
    channel_info_map_[q_id].current_message_id = streaming_msg_ids[i];
  }
}

// Set up bookkeeping for each upstream channel, then connect all channels.
void DataReader::Init(const std::vector<ObjectID> &input_ids,
                      const std::vector<ActorID> &actor_ids, int64_t timer_interval) {
  STREAMING_LOG(INFO) << input_ids.size() << " queue to init.";

  transfer_config_->Set(ConfigEnum::QUEUE_ID_VECTOR, input_ids);

  last_fetched_queue_item_ = nullptr;
  timer_interval_ = timer_interval;
  last_message_ts_ = 0;
  input_queue_ids_ = input_ids;
  last_message_latency_ = 0;
  last_bundle_unit_ = 0;

  for (size_t i = 0; i < input_ids.size(); ++i) {
    ObjectID q_id = input_ids[i];
    STREAMING_LOG(INFO) << "[Reader] Init queue id: " << q_id;
    auto &channel_info = channel_info_map_[q_id];
    channel_info.channel_id = q_id;
    channel_info.actor_id = actor_ids[i];
    channel_info.last_queue_item_delay = 0;
    channel_info.last_queue_item_latency = 0;
    channel_info.last_queue_target_diff = 0;
    channel_info.get_queue_item_times = 0;
  }

  /// Make the input id location stable.
  sort(input_queue_ids_.begin(), input_queue_ids_.end(),
       [](const ObjectID &a, const ObjectID &b) { return a.Hash() < b.Hash(); });

  // Every channel starts out "unready" until InitChannel connects it.
  std::copy(input_ids.begin(), input_ids.end(), std::back_inserter(unready_queue_ids_));
  InitChannel();
}

// Create a consumer channel (mock or real StreamingQueue) for every unready
// queue and attempt to open its transfer channel. Failures are logged but do
// not abort the remaining channels.
StreamingStatus DataReader::InitChannel() {
  STREAMING_LOG(INFO) << "[Reader] Getting queues. total queue num "
                      << input_queue_ids_.size() << ", unready queue num => "
                      << unready_queue_ids_.size();
  for (const auto &input_channel : unready_queue_ids_) {
    auto &channel_info = channel_info_map_[input_channel];
    std::shared_ptr<ConsumerChannel> channel;
    if (runtime_context_->IsMockTest()) {
      channel = std::make_shared<MockConsumer>(transfer_config_, channel_info);
    } else {
      channel = std::make_shared<StreamingQueueConsumer>(transfer_config_, channel_info);
    }

    channel_map_.emplace(input_channel, channel);
    StreamingStatus status = channel->CreateTransferChannel();
    if (StreamingStatus::OK != status) {
      STREAMING_LOG(ERROR) << "Initialize queue failed, id => " << input_channel;
    }
  }
  runtime_context_->SetRuntimeStatus(RuntimeStatus::Running);
  STREAMING_LOG(INFO) << "[Reader] Reader construction done!";
  return StreamingStatus::OK;
}

// (Re)build the priority-queue merger: evict the stale last-fetched item if
// channels changed, then seed the heap with one bundle from every unready
// channel so the merger always holds exactly one head item per channel.
StreamingStatus DataReader::InitChannelMerger() {
  STREAMING_LOG(INFO) << "[Reader] Initializing queue merger.";
  // Init reader merger by given comparator when it's first created.
  StreamingReaderMsgPtrComparator comparator;
  if (!reader_merger_) {
    reader_merger_.reset(
        new PriorityQueue<std::shared_ptr<DataBundle>, StreamingReaderMsgPtrComparator>(
            comparator));
  }

  // An old item in merger vector must be evicted before new queue item has been
  // pushed.
  if (!unready_queue_ids_.empty() && last_fetched_queue_item_) {
    STREAMING_LOG(INFO) << "pop old item from => " << last_fetched_queue_item_->from;
    RETURN_IF_NOT_OK(StashNextMessage(last_fetched_queue_item_))
    last_fetched_queue_item_.reset();
  }
  // Create initial heap for priority queue.
  for (auto &input_queue : unready_queue_ids_) {
    std::shared_ptr<DataBundle> msg = std::make_shared<DataBundle>();
    RETURN_IF_NOT_OK(GetMessageFromChannel(channel_info_map_[input_queue], msg))
    channel_info_map_[msg->from].current_seq_id = msg->seq_id;
    channel_info_map_[msg->from].current_message_id = msg->meta->GetLastMessageId();
    reader_merger_->push(msg);
  }
  STREAMING_LOG(INFO) << "[Reader] Initializing merger done.";
  return StreamingStatus::OK;
}

// Blocking fetch of one raw item from a single channel. Retries on per-attempt
// timeout until data arrives or the runtime is interrupted; on success fills
// message->from and parses the bundle meta from the raw bytes.
StreamingStatus DataReader::GetMessageFromChannel(ConsumerChannelInfo &channel_info,
                                                 std::shared_ptr<DataBundle> &message) {
  auto &qid = channel_info.channel_id;
  last_read_q_id_ = qid;
  STREAMING_LOG(DEBUG) << "[Reader] send get request queue seq id => " << qid;
  while (RuntimeStatus::Running == runtime_context_->GetRuntimeStatus() &&
         !message->data) {
    auto status = channel_map_[channel_info.channel_id]->ConsumeItemFromChannel(
        message->seq_id, message->data, message->data_size, kReadItemTimeout);
    channel_info.get_queue_item_times++;
    if (!message->data) {
      STREAMING_LOG(DEBUG) << "[Reader] Queue " << qid << " status " << status
                           << " get item timeout, resend notify "
                           << channel_info.current_seq_id;
      // TODO(lingxuan.zlx): notify consumed when it's timeout.
    }
  }
  if (RuntimeStatus::Interrupted == runtime_context_->GetRuntimeStatus()) {
    return StreamingStatus::Interrupted;
  }
  STREAMING_LOG(DEBUG) << "[Reader] recevied queue seq id => " << message->seq_id
                       << ", queue id => " << qid;
  message->from = qid;
  message->meta = StreamingMessageBundleMeta::FromBytes(message->data);
  return StreamingStatus::OK;
}

// Replace the merger's top item (the one just consumed) with the next bundle
// from the same channel, recording delay/latency metrics for that channel.
StreamingStatus DataReader::StashNextMessage(std::shared_ptr<DataBundle> &message) {
  // Push new message into priority queue and record the channel metrics in
  // channel info.
  std::shared_ptr<DataBundle> new_msg = std::make_shared<DataBundle>();
  auto &channel_info = channel_info_map_[message->from];
  reader_merger_->pop();
  int64_t cur_time = current_time_ms();
  RETURN_IF_NOT_OK(GetMessageFromChannel(channel_info, new_msg))
  reader_merger_->push(new_msg);
  channel_info.last_queue_item_delay =
      new_msg->meta->GetMessageBundleTs() - message->meta->GetMessageBundleTs();
  channel_info.last_queue_item_latency = current_time_ms() - cur_time;
  return StreamingStatus::OK;
}

// Pop the globally-ordered head bundle from the merger and update that
// channel's offsets. is_valid_break is set when the bundle should be handed to
// the caller: either it carries real data, or the timer interval elapsed (so
// an empty bundle is surfaced to keep the caller from blocking forever).
StreamingStatus DataReader::GetMergedMessageBundle(std::shared_ptr<DataBundle> &message,
                                                  bool &is_valid_break) {
  int64_t cur_time = current_time_ms();
  if (last_fetched_queue_item_) {
    RETURN_IF_NOT_OK(StashNextMessage(last_fetched_queue_item_))
  }
  message = reader_merger_->top();
  last_fetched_queue_item_ = message;
  auto &offset_info = channel_info_map_[message->from];

  uint64_t cur_queue_previous_msg_id = offset_info.current_message_id;
  STREAMING_LOG(DEBUG) << "[Reader] [Bundle] from q_id =>" << message->from << "cur => "
                       << cur_queue_previous_msg_id << ", message list size"
                       << message->meta->GetMessageListSize() << ", lst message id =>"
                       << message->meta->GetLastMessageId() << ", q seq id => "
                       << message->seq_id << ", last barrier id => "
                       << message->data_size << ", "
                       << message->meta->GetMessageBundleTs();

  if (message->meta->IsBundle()) {
    last_message_ts_ = cur_time;
    is_valid_break = true;
  } else if (timer_interval_ != -1 && cur_time - last_message_ts_ > timer_interval_) {
    // Throw empty message when reaching timer_interval.
    last_message_ts_ = cur_time;
    is_valid_break = true;
  }

  offset_info.current_message_id = message->meta->GetLastMessageId();
  offset_info.current_seq_id = message->seq_id;
  last_bundle_ts_ = message->meta->GetMessageBundleTs();

  STREAMING_LOG(DEBUG) << "[Reader] [Bundle] message type =>"
                       << static_cast<int>(message->meta->GetBundleType())
                       << " from id => " << message->from << ", queue seq id =>"
                       << message->seq_id << ", message id => "
                       << message->meta->GetLastMessageId();
  return StreamingStatus::OK;
}

// Public entry point: return the next merged bundle within timeout_ms.
// First acknowledges the previously returned bundle, lazily connects any
// still-unready channels, then loops over the merger until a bundle worth
// surfacing appears (empty bundles are acknowledged and skipped).
StreamingStatus DataReader::GetBundle(const uint32_t timeout_ms,
                                      std::shared_ptr<DataBundle> &message) {
  // Notify consumed every item in this mode.
  if (last_fetched_queue_item_) {
    NotifyConsumedItem(channel_info_map_[last_fetched_queue_item_->from],
                       last_fetched_queue_item_->seq_id);
  }

  /// DataBundle will be returned to the upper layer in the following cases:
  /// a batch of data is returned when the real data is read, or an empty message
  /// is returned to the upper layer when the given timeout period is reached to
  /// avoid blocking for too long.
  auto start_time = current_time_ms();
  bool is_valid_break = false;
  uint32_t empty_bundle_cnt = 0;
  while (!is_valid_break) {
    if (RuntimeStatus::Interrupted == runtime_context_->GetRuntimeStatus()) {
      return StreamingStatus::Interrupted;
    }
    auto cur_time = current_time_ms();
    auto dur = cur_time - start_time;
    if (dur > timeout_ms) {
      return StreamingStatus::GetBundleTimeOut;
    }
    if (!unready_queue_ids_.empty()) {
      StreamingStatus status = InitChannel();
      switch (status) {
      case StreamingStatus::InitQueueFailed:
        break;
      case StreamingStatus::WaitQueueTimeOut:
        STREAMING_LOG(ERROR)
            << "Wait upstream queue timeout, maybe some actors in deadlock";
        break;
      default:
        STREAMING_LOG(INFO) << "Init reader queue in GetBundle";
      }
      if (StreamingStatus::OK != status) {
        return status;
      }
      RETURN_IF_NOT_OK(InitChannelMerger())
      unready_queue_ids_.clear();
      auto &merge_vec = reader_merger_->getRawVector();
      for (auto &bundle : merge_vec) {
        STREAMING_LOG(INFO) << "merger vector item => " << bundle->from;
      }
    }

    RETURN_IF_NOT_OK(GetMergedMessageBundle(message, is_valid_break));
    if (!is_valid_break) {
      empty_bundle_cnt++;
      NotifyConsumedItem(channel_info_map_[message->from], message->seq_id);
    }
  }
  last_message_latency_ += current_time_ms() - start_time;
  if (message->meta->GetMessageListSize() > 0) {
    last_bundle_unit_ = message->data_size * 1.0 / message->meta->GetMessageListSize();
  }
  return StreamingStatus::OK;
}

// Expose the internal channel-offset map (used by checkpointing); the caller
// receives a pointer to live state, not a copy.
void DataReader::GetOffsetInfo(
    std::unordered_map<ObjectID, ConsumerChannelInfo> *&offset_map) {
  offset_map = &channel_info_map_;
  for (auto &offset_info : channel_info_map_) {
    STREAMING_LOG(INFO) << "[Reader] [GetOffsetInfo], q id " << offset_info.first
                        << ", seq id => " << offset_info.second.current_seq_id
                        << ", message id => " << offset_info.second.current_message_id;
  }
}

// Tell the upstream channel that every item up to `offset` has been consumed
// and may be reclaimed.
void DataReader::NotifyConsumedItem(ConsumerChannelInfo &channel_info, uint64_t offset) {
  channel_map_[channel_info.channel_id]->NotifyChannelConsumed(offset);
  if (offset == channel_info.queue_info.last_seq_id) {
    STREAMING_LOG(DEBUG) << "notify seq id equal to last seq id => " << offset;
  }
}

DataReader::DataReader(std::shared_ptr<RuntimeContext> &runtime_context)
    : transfer_config_(new Config()), runtime_context_(runtime_context) {}

DataReader::~DataReader() { STREAMING_LOG(INFO) << "Streaming reader deconstruct."; }

// Signal all consumption loops to exit by flipping the runtime status.
void DataReader::Stop() {
  runtime_context_->SetRuntimeStatus(RuntimeStatus::Interrupted);
}

// Bundle ordering: earlier bundle timestamp wins (this is a min-ordering under
// std::priority_queue semantics, hence the ">" comparisons); ties are broken
// by channel-id hash so the order is deterministic.
bool StreamingReaderMsgPtrComparator::operator()(const std::shared_ptr<DataBundle> &a,
                                                 const std::shared_ptr<DataBundle> &b) {
  STREAMING_CHECK(a->meta);
  // We use hash value of id for stability of message in sorting.
  if (a->meta->GetMessageBundleTs() == b->meta->GetMessageBundleTs()) {
    return a->from.Hash() > b->from.Hash();
  }
  return a->meta->GetMessageBundleTs() > b->meta->GetMessageBundleTs();
}

}  // namespace streaming
}  // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/data_reader.h
C/C++ Header
#ifndef RAY_DATA_READER_H
#define RAY_DATA_READER_H

#include <cstdlib>
#include <functional>
#include <queue>
#include <string>
#include <unordered_map>
#include <vector>

#include "channel.h"
#include "message/message_bundle.h"
#include "message/priority_queue.h"
#include "runtime_context.h"

namespace ray {
namespace streaming {

/// Databundle is super-bundle that contains channel information (upstream
/// channel id & bundle meta data) and raw buffer pointer.
struct DataBundle {
  uint8_t *data = nullptr;
  uint32_t data_size;
  ObjectID from;
  uint64_t seq_id;
  StreamingMessageBundleMetaPtr meta;
};

/// This is implementation of merger policy in StreamingReaderMsgPtrComparator.
struct StreamingReaderMsgPtrComparator {
  StreamingReaderMsgPtrComparator() = default;
  bool operator()(const std::shared_ptr<DataBundle> &a,
                  const std::shared_ptr<DataBundle> &b);
};

/// DataReader will fetch data bundles from channels of upstream workers, once
/// invoked by user thread. Firstly put them into a priority queue ordered by bundle
/// comparator that's related meta-data, then pop out the top bundle to user
/// thread every time, so that the order of the message can be guaranteed, which
/// will also facilitate our future implementation of fault tolerance. Finally
/// user thread can extract messages from the bundle and process one by one.
class DataReader {
 private:
  // All upstream channel ids, sorted by id hash for stable ordering.
  std::vector<ObjectID> input_queue_ids_;
  // Channels that have not been connected yet.
  std::vector<ObjectID> unready_queue_ids_;

  // Min-heap over one pending bundle per channel, ordered by bundle timestamp.
  std::unique_ptr<
      PriorityQueue<std::shared_ptr<DataBundle>, StreamingReaderMsgPtrComparator>>
      reader_merger_;

  // Bundle last returned to the caller; replaced in the merger on next fetch.
  std::shared_ptr<DataBundle> last_fetched_queue_item_;

  // Max ms without real data before an empty bundle is surfaced (-1 disables).
  int64_t timer_interval_;
  int64_t last_bundle_ts_;
  int64_t last_message_ts_;
  int64_t last_message_latency_;
  int64_t last_bundle_unit_;

  ObjectID last_read_q_id_;

  static const uint32_t kReadItemTimeout;

 protected:
  std::unordered_map<ObjectID, ConsumerChannelInfo> channel_info_map_;
  std::unordered_map<ObjectID, std::shared_ptr<ConsumerChannel>> channel_map_;
  std::shared_ptr<Config> transfer_config_;
  std::shared_ptr<RuntimeContext> runtime_context_;

 public:
  explicit DataReader(std::shared_ptr<RuntimeContext> &runtime_context);
  virtual ~DataReader();

  /// During initialization, only the channel parameters and necessary member properties
  /// are assigned. All channels will be connected in the first reading operation.
  /// \param input_ids
  /// \param actor_ids
  /// \param channel_seq_ids
  /// \param msg_ids
  /// \param timer_interval
  void Init(const std::vector<ObjectID> &input_ids,
            const std::vector<ActorID> &actor_ids,
            const std::vector<uint64_t> &channel_seq_ids,
            const std::vector<uint64_t> &msg_ids, int64_t timer_interval);

  void Init(const std::vector<ObjectID> &input_ids,
            const std::vector<ActorID> &actor_ids, int64_t timer_interval);

  /// Get latest message from input queues.
  /// \param timeout_ms
  /// \param message, return the latest message
  StreamingStatus GetBundle(uint32_t timeout_ms, std::shared_ptr<DataBundle> &message);

  /// Get offset information about channels for checkpoint.
  /// \param offset_map (return value)
  void GetOffsetInfo(std::unordered_map<ObjectID, ConsumerChannelInfo> *&offset_map);

  void Stop();

  /// Notify input queues to clear data whose seq id is equal or less than offset.
  /// It's used when checkpoint is done.
  /// \param channel_info
  /// \param offset
  ///
  void NotifyConsumedItem(ConsumerChannelInfo &channel_info, uint64_t offset);

 private:
  /// Create channels and connect to all upstream.
  StreamingStatus InitChannel();

  /// One item from every channel will be popped out, then collecting
  /// them to a merged queue. High priority items will be fetched one by one.
  /// When item pop from one channel where must produce new item for placeholder
  /// in merged queue.
  StreamingStatus InitChannelMerger();

  StreamingStatus StashNextMessage(std::shared_ptr<DataBundle> &message);

  StreamingStatus GetMessageFromChannel(ConsumerChannelInfo &channel_info,
                                        std::shared_ptr<DataBundle> &message);

  /// Get top item from priority queue.
  StreamingStatus GetMergedMessageBundle(std::shared_ptr<DataBundle> &message,
                                         bool &is_valid_break);
};
}  // namespace streaming
}  // namespace ray
#endif  // RAY_DATA_READER_H
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/data_writer.cc
C++
#include <memory>
#include <memory>
#include <signal.h>
#include <unistd.h>

#include <chrono>
#include <functional>
#include <list>
#include <numeric>

#include "data_writer.h"
#include "util/streaming_util.h"

namespace ray {
namespace streaming {

// Background transfer loop: round-robin over all output channels, flushing
// ring-buffer contents (or empty bundles on idle channels) until the runtime
// is no longer Running. Sleeps when every channel only produced empties.
void DataWriter::WriterLoopForward() {
  STREAMING_CHECK(RuntimeStatus::Running == runtime_context_->GetRuntimeStatus());
  while (true) {
    int64_t min_passby_message_ts = std::numeric_limits<int64_t>::max();
    uint32_t empty_messge_send_count = 0;

    for (auto &output_queue : output_queue_ids_) {
      if (RuntimeStatus::Running != runtime_context_->GetRuntimeStatus()) {
        return;
      }
      ProducerChannelInfo &channel_info = channel_info_map_[output_queue];
      bool is_push_empty_message = false;
      StreamingStatus write_status =
          WriteChannelProcess(channel_info, &is_push_empty_message);
      int64_t current_ts = current_time_ms();
      if (StreamingStatus::OK == write_status) {
        channel_info.message_pass_by_ts = current_ts;
        if (is_push_empty_message) {
          // Track the oldest empty-message timestamp to size the sleep below.
          min_passby_message_ts =
              std::min(channel_info.message_pass_by_ts, min_passby_message_ts);
          empty_messge_send_count++;
        }
      } else if (StreamingStatus::FullChannel == write_status) {
        // Downstream is full; retried on the next pass.
      } else {
        if (StreamingStatus::EmptyRingBuffer != write_status) {
          STREAMING_LOG(DEBUG) << "write buffer status => "
                               << static_cast<uint32_t>(write_status)
                               << ", is push empty message => " << is_push_empty_message;
        }
      }
    }

    if (empty_messge_send_count == output_queue_ids_.size()) {
      // Sleep if empty message was sent in all channel.
      uint64_t sleep_time_ = current_time_ms() - min_passby_message_ts;
      // Sleep_time can be bigger than time interval because of network jitter.
      if (sleep_time_ <= runtime_context_->GetConfig().GetEmptyMessageTimeInterval()) {
        std::this_thread::sleep_for(std::chrono::milliseconds(
            runtime_context_->GetConfig().GetEmptyMessageTimeInterval() - sleep_time_));
      }
    }
  }
}

// Flush one channel. If its ring buffer is empty and the empty-message
// interval has elapsed, send an empty bundle instead so downstream can
// observe progress; sets *is_empty_message accordingly.
StreamingStatus DataWriter::WriteChannelProcess(ProducerChannelInfo &channel_info,
                                               bool *is_empty_message) {
  // No message in buffer, empty message will be sent to downstream queue.
  uint64_t buffer_remain = 0;
  StreamingStatus write_queue_flag = WriteBufferToChannel(channel_info, buffer_remain);
  int64_t current_ts = current_time_ms();
  if (write_queue_flag == StreamingStatus::EmptyRingBuffer &&
      current_ts - channel_info.message_pass_by_ts >=
          runtime_context_->GetConfig().GetEmptyMessageTimeInterval()) {
    write_queue_flag = WriteEmptyMessage(channel_info);
    *is_empty_message = true;
    STREAMING_LOG(DEBUG) << "send empty message bundle in q_id =>"
                         << channel_info.channel_id;
  }
  return write_queue_flag;
}

// Move pending messages from the ring buffer into the channel: flush any
// leftover transient buffer first, otherwise batch a fresh bundle into the
// transient buffer and flush that.
StreamingStatus DataWriter::WriteBufferToChannel(ProducerChannelInfo &channel_info,
                                                 uint64_t &buffer_remain) {
  StreamingRingBufferPtr &buffer_ptr = channel_info.writer_ring_buffer;
  if (!IsMessageAvailableInBuffer(channel_info)) {
    return StreamingStatus::EmptyRingBuffer;
  }

  // Flush transient buffer to queue first.
  if (buffer_ptr->IsTransientAvaliable()) {
    return WriteTransientBufferToChannel(channel_info);
  }

  STREAMING_CHECK(CollectFromRingBuffer(channel_info, buffer_remain))
      << "empty data in ringbuffer, q id => " << channel_info.channel_id;

  return WriteTransientBufferToChannel(channel_info);
}

// Start the background transfer thread running WriterLoopForward.
void DataWriter::Run() {
  STREAMING_LOG(INFO) << "WriterLoopForward start";
  loop_thread_ = std::make_shared<std::thread>(&DataWriter::WriterLoopForward, this);
}

/// Since every memory ring buffer's size is limited, when the writing buffer is
/// full, the user thread will be blocked, which will cause backpressure
/// naturally.
uint64_t DataWriter::WriteMessageToBufferRing(const ObjectID &q_id, uint8_t *data,
                                              uint32_t data_size,
                                              StreamingMessageType message_type) {
  STREAMING_LOG(DEBUG) << "WriteMessageToBufferRing q_id: " << q_id
                       << " data_size: " << data_size;
  // TODO(lingxuan.zlx): currently, unsafe in multithreads
  ProducerChannelInfo &channel_info = channel_info_map_[q_id];
  // Write message id stands for current lastest message id and differs from
  // channel.current_message_id if it's barrier message.
  uint64_t &write_message_id = channel_info.current_message_id;
  write_message_id++;

  auto &ring_buffer_ptr = channel_info.writer_ring_buffer;
  // Block (spin-sleep) while the ring buffer is full — this is the
  // backpressure mechanism described above.
  while (ring_buffer_ptr->IsFull() &&
         runtime_context_->GetRuntimeStatus() == RuntimeStatus::Running) {
    std::this_thread::sleep_for(
        std::chrono::milliseconds(StreamingConfig::TIME_WAIT_UINT));
  }
  if (runtime_context_->GetRuntimeStatus() != RuntimeStatus::Running) {
    STREAMING_LOG(WARNING) << "stop in write message to ringbuffer";
    // 0 signals "not written" to the caller.
    return 0;
  }
  ring_buffer_ptr->Push(std::make_shared<StreamingMessage>(
      data, data_size, write_message_id, message_type));

  return write_message_id;
}

// Set up one output channel: seed its offsets, allocate its SPSC ring buffer,
// create the producer (mock or StreamingQueue) and open the transfer channel.
StreamingStatus DataWriter::InitChannel(const ObjectID &q_id, const ActorID &actor_id,
                                        uint64_t channel_message_id,
                                        uint64_t queue_size) {
  ProducerChannelInfo &channel_info = channel_info_map_[q_id];
  channel_info.current_message_id = channel_message_id;
  channel_info.channel_id = q_id;
  channel_info.actor_id = actor_id;
  channel_info.queue_size = queue_size;
  STREAMING_LOG(WARNING) << " Init queue [" << q_id << "]";
  channel_info.writer_ring_buffer = std::make_shared<StreamingRingBuffer>(
      runtime_context_->GetConfig().GetRingBufferCapacity(),
      StreamingRingBufferType::SPSC);
  channel_info.message_pass_by_ts = current_time_ms();
  std::shared_ptr<ProducerChannel> channel;

  if (runtime_context_->IsMockTest()) {
    channel = std::make_shared<MockProducer>(transfer_config_, channel_info);
  } else {
    channel = std::make_shared<StreamingQueueProducer>(transfer_config_, channel_info);
  }

  channel_map_.emplace(q_id, channel);
  RETURN_IF_NOT_OK(channel->CreateTransferChannel())
  return StreamingStatus::OK;
}

// Initialize all output channels (vectors are indexed in parallel) and mark
// the runtime Running. Aborts on the first channel that fails.
StreamingStatus DataWriter::Init(const std::vector<ObjectID> &queue_id_vec,
                                 const std::vector<ActorID> &actor_ids,
                                 const std::vector<uint64_t> &channel_message_id_vec,
                                 const std::vector<uint64_t> &queue_size_vec) {
  STREAMING_CHECK(!queue_id_vec.empty() && !channel_message_id_vec.empty());

  ray::JobID job_id =
      JobID::FromBinary(Util::Hexqid2str(runtime_context_->GetConfig().GetTaskJobId()));

  STREAMING_LOG(INFO) << "Job name => " << runtime_context_->GetConfig().GetJobName()
                      << ", job id => " << job_id;

  output_queue_ids_ = queue_id_vec;
  transfer_config_->Set(ConfigEnum::QUEUE_ID_VECTOR, queue_id_vec);

  for (size_t i = 0; i < queue_id_vec.size(); ++i) {
    StreamingStatus status = InitChannel(queue_id_vec[i], actor_ids[i],
                                         channel_message_id_vec[i], queue_size_vec[i]);
    if (status != StreamingStatus::OK) {
      return status;
    }
  }
  runtime_context_->SetRuntimeStatus(RuntimeStatus::Running);
  return StreamingStatus::OK;
}

DataWriter::DataWriter(std::shared_ptr<RuntimeContext> &runtime_context)
    : transfer_config_(new Config()), runtime_context_(runtime_context) {}

// Interrupt the transfer loop and join its thread before destruction.
DataWriter::~DataWriter() {
  // Return if fail to init streaming writer
  if (runtime_context_->GetRuntimeStatus() == RuntimeStatus::Init) {
    return;
  }
  runtime_context_->SetRuntimeStatus(RuntimeStatus::Interrupted);
  if (loop_thread_->joinable()) {
    STREAMING_LOG(INFO) << "Writer loop thread waiting for join";
    loop_thread_->join();
  }
  STREAMING_LOG(INFO) << "Writer client queue disconnect.";
}

// A channel has pending work when either the transient buffer still holds a
// partially-flushed bundle or the ring buffer has queued messages.
bool DataWriter::IsMessageAvailableInBuffer(ProducerChannelInfo &channel_info) {
  return channel_info.writer_ring_buffer->IsTransientAvaliable() ||
         !channel_info.writer_ring_buffer->IsEmpty();
}

// Produce an empty (heartbeat) bundle on an idle channel, unless there are
// still uncommitted real messages, in which case it is skipped.
StreamingStatus DataWriter::WriteEmptyMessage(ProducerChannelInfo &channel_info) {
  auto &q_id = channel_info.channel_id;
  if (channel_info.message_last_commit_id < channel_info.current_message_id) {
    // Abort to send empty message if ring buffer is not empty now.
    STREAMING_LOG(DEBUG) << "q_id =>" << q_id << " abort to send empty, last commit id =>"
                         << channel_info.message_last_commit_id << ", channel max id => "
                         << channel_info.current_message_id;
    return StreamingStatus::SkipSendEmptyMessage;
  }

  // Make an empty bundle, use old ts from reloaded meta if it's not nullptr.
  StreamingMessageBundlePtr bundle_ptr = std::make_shared<StreamingMessageBundle>(
      channel_info.current_message_id, current_time_ms());
  auto &q_ringbuffer = channel_info.writer_ring_buffer;
  q_ringbuffer->ReallocTransientBuffer(bundle_ptr->ClassBytesSize());
  bundle_ptr->ToBytes(q_ringbuffer->GetTransientBufferMutable());

  StreamingStatus status = channel_map_[q_id]->ProduceItemToChannel(
      const_cast<uint8_t *>(q_ringbuffer->GetTransientBuffer()),
      q_ringbuffer->GetTransientBufferSize());
  STREAMING_LOG(DEBUG) << "q_id =>" << q_id << " send empty message, meta info =>"
                       << bundle_ptr->ToString();

  q_ringbuffer->FreeTransientBuffer();
  RETURN_IF_NOT_OK(status)
  channel_info.current_seq_id++;
  channel_info.message_pass_by_ts = current_time_ms();
  return StreamingStatus::OK;
}

// Push the serialized bundle held in the transient buffer into the channel,
// then advance the channel's seq id and last-committed message id.
StreamingStatus DataWriter::WriteTransientBufferToChannel(
    ProducerChannelInfo &channel_info) {
  StreamingRingBufferPtr &buffer_ptr = channel_info.writer_ring_buffer;
  StreamingStatus status = channel_map_[channel_info.channel_id]->ProduceItemToChannel(
      buffer_ptr->GetTransientBufferMutable(), buffer_ptr->GetTransientBufferSize());
  RETURN_IF_NOT_OK(status)
  channel_info.current_seq_id++;
  auto transient_bundle_meta =
      StreamingMessageBundleMeta::FromBytes(buffer_ptr->GetTransientBuffer());
  bool is_barrier_bundle = transient_bundle_meta->IsBarrier();
  // Force delete to avoid super block memory isn't released so long
  // if it's barrier bundle.
  buffer_ptr->FreeTransientBuffer(is_barrier_bundle);
  channel_info.message_last_commit_id = transient_bundle_meta->GetLastMessageId();
  return StreamingStatus::OK;
}

// Drain messages of one type from the ring buffer into a single bundle and
// serialize it into the transient buffer. Stops at the configured capacity,
// at the queue-size byte limit, or on a message-type change. Returns true on
// success (callers check this with STREAMING_CHECK).
bool DataWriter::CollectFromRingBuffer(ProducerChannelInfo &channel_info,
                                       uint64_t &buffer_remain) {
  StreamingRingBufferPtr &buffer_ptr = channel_info.writer_ring_buffer;
  auto &q_id = channel_info.channel_id;

  std::list<StreamingMessagePtr> message_list;
  uint64_t bundle_buffer_size = 0;
  const uint32_t max_queue_item_size = channel_info.queue_size;
  while (message_list.size() < runtime_context_->GetConfig().GetRingBufferCapacity() &&
         !buffer_ptr->IsEmpty()) {
    StreamingMessagePtr &message_ptr = buffer_ptr->Front();
    uint32_t message_total_size = message_ptr->ClassBytesSize();
    if (!message_list.empty() &&
        bundle_buffer_size + message_total_size >= max_queue_item_size) {
      STREAMING_LOG(DEBUG) << "message total size " << message_total_size
                           << " max queue item size => " << max_queue_item_size;
      break;
    }
    // Bundles never mix message types (e.g. data vs. barrier).
    if (!message_list.empty() &&
        message_list.back()->GetMessageType() != message_ptr->GetMessageType()) {
      break;
    }
    // ClassBytesSize = DataSize + MetaDataSize
    // bundle_buffer_size += message_ptr->GetDataSize();
    bundle_buffer_size += message_total_size;
    message_list.push_back(message_ptr);
    buffer_ptr->Pop();
    buffer_remain = buffer_ptr->Size();
  }

  if (bundle_buffer_size >= channel_info.queue_size) {
    STREAMING_LOG(ERROR) << "bundle buffer is too large to store q id => " << q_id
                         << ", bundle size => " << bundle_buffer_size
                         << ", queue size => " << channel_info.queue_size;
  }

  StreamingMessageBundlePtr bundle_ptr;
  bundle_ptr = std::make_shared<StreamingMessageBundle>(
      std::move(message_list), current_time_ms(),
      message_list.back()->GetMessageSeqId(), StreamingMessageBundleType::Bundle,
      bundle_buffer_size);
  buffer_ptr->ReallocTransientBuffer(bundle_ptr->ClassBytesSize());
  bundle_ptr->ToBytes(buffer_ptr->GetTransientBufferMutable());

  STREAMING_CHECK(bundle_ptr->ClassBytesSize() == buffer_ptr->GetTransientBufferSize());
  return true;
}

// Graceful stop: wait for every ring buffer to drain, linger briefly so the
// loop thread can flush, then interrupt the runtime.
void DataWriter::Stop() {
  for (auto &output_queue : output_queue_ids_) {
    ProducerChannelInfo &channel_info = channel_info_map_[output_queue];
    while (!channel_info.writer_ring_buffer->IsEmpty()) {
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  }
  std::this_thread::sleep_for(std::chrono::milliseconds(200));
  runtime_context_->SetRuntimeStatus(RuntimeStatus::Interrupted);
}

}  // namespace streaming
}  // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/data_writer.h
C/C++ Header
#ifndef RAY_DATA_WRITER_H
#define RAY_DATA_WRITER_H

#include <cstring>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

#include "channel.h"
#include "config/streaming_config.h"
#include "message/message_bundle.h"
#include "runtime_context.h"

namespace ray {
namespace streaming {

/// DataWriter is designed for data transporting between upstream and downstream.
/// After the user sends the data, it does not immediately send the data to
/// downstream, but caches it in the corresponding memory ring buffer. There is
/// a separate transfer thread (setup in WriterLoopForward function) to collect
/// the messages from all the ringbuffers, and write them to the corresponding
/// transmission channels, which is backed by StreamingQueue. Actually, the
/// advantage is that the user thread will not be affected by the transmission
/// speed during the data transfer. And also the transfer thread can automatically
/// batch the cached data from memory buffer into a data bundle to reduce
/// transmission overhead. In addition, when there is no data in the ringbuffer,
/// it will also send an empty bundle, so downstream can know that and process
/// accordingly. It will sleep for a short interval to save cpu if all ring
/// buffers have no data in that moment.
class DataWriter {
 private:
  // Background transfer thread running WriterLoopForward.
  std::shared_ptr<std::thread> loop_thread_;
  // One channel have unique identity.
  std::vector<ObjectID> output_queue_ids_;

 protected:
  // ProducerTransfer is middle broker for data transporting.
  std::unordered_map<ObjectID, ProducerChannelInfo> channel_info_map_;
  std::unordered_map<ObjectID, std::shared_ptr<ProducerChannel>> channel_map_;
  std::shared_ptr<Config> transfer_config_;
  std::shared_ptr<RuntimeContext> runtime_context_;

 private:
  bool IsMessageAvailableInBuffer(ProducerChannelInfo &channel_info);

  /// This function handles two scenarios. When there is data in the transient
  /// buffer, the existing data is written into the channel first, otherwise a
  /// certain amount of message is first collected from the buffer and serialized
  /// into the transient buffer, and finally written to the channel.
  /// \param channel_info
  /// \param buffer_remain
  StreamingStatus WriteBufferToChannel(ProducerChannelInfo &channel_info,
                                       uint64_t &buffer_remain);

  /// Start the loop forward thread for collecting messages from all channels.
  /// Invoking stack:
  /// WriterLoopForward
  ///   -- WriteChannelProcess
  ///     -- WriteBufferToChannel
  ///       -- CollectFromRingBuffer
  ///       -- WriteTransientBufferToChannel
  ///     -- WriteEmptyMessage(if WriteChannelProcess return empty state)
  void WriterLoopForward();

  /// Push empty message when no valid message or bundle was produced each time
  /// interval.
  /// \param channel_info
  StreamingStatus WriteEmptyMessage(ProducerChannelInfo &channel_info);

  /// Flush all data from transient buffer to channel for transporting.
  /// \param channel_info
  StreamingStatus WriteTransientBufferToChannel(ProducerChannelInfo &channel_info);

  bool CollectFromRingBuffer(ProducerChannelInfo &channel_info,
                             uint64_t &buffer_remain);

  StreamingStatus WriteChannelProcess(ProducerChannelInfo &channel_info,
                                      bool *is_empty_message);

  StreamingStatus InitChannel(const ObjectID &q_id, const ActorID &actor_id,
                              uint64_t channel_message_id, uint64_t queue_size);

 public:
  explicit DataWriter(std::shared_ptr<RuntimeContext> &runtime_context);
  virtual ~DataWriter();

  /// Streaming writer client initialization.
  /// \param queue_id_vec queue id vector
  /// \param channel_message_id_vec channel seq id is related with message checkpoint
  /// \param queue_size queue size (memory size not length)
  StreamingStatus Init(const std::vector<ObjectID> &channel_ids,
                       const std::vector<ActorID> &actor_ids,
                       const std::vector<uint64_t> &channel_message_id_vec,
                       const std::vector<uint64_t> &queue_size_vec);

  /// To increase throughout, we employed an output buffer for message transformation,
  /// which means we merge a lot of message to a message bundle and no message will be
  /// pushed into queue directly util daemon thread does this action.
  /// Additionally, writing will block when buffer ring is full intentionally.
  /// \param q_id
  /// \param data
  /// \param data_size
  /// \param message_type
  /// \return message seq id
  uint64_t WriteMessageToBufferRing(
      const ObjectID &q_id, uint8_t *data, uint32_t data_size,
      StreamingMessageType message_type = StreamingMessageType::Message);

  void Run();

  void Stop();
};
}  // namespace streaming
}  // namespace ray
#endif  // RAY_DATA_WRITER_H
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/lib/java/org_ray_streaming_runtime_transfer_ChannelID.cc
C++
#include "org_ray_streaming_runtime_transfer_ChannelID.h" #include "streaming_jni_common.h" using namespace ray::streaming; JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_ChannelID_createNativeID( JNIEnv *env, jclass cls, jlong qid_address) { auto id = ray::ObjectID::FromBinary( std::string(reinterpret_cast<const char *>(qid_address), ray::ObjectID::Size())); return reinterpret_cast<jlong>(new ray::ObjectID(id)); } JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_ChannelID_destroyNativeID( JNIEnv *env, jclass cls, jlong native_id_ptr) { auto id = reinterpret_cast<ray::ObjectID *>(native_id_ptr); STREAMING_CHECK(id != nullptr); delete id; }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/lib/java/org_ray_streaming_runtime_transfer_ChannelID.h
C/C++ Header
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_ray_streaming_runtime_transfer_ChannelID */
/* NOTE(review): javah-generated stubs; regenerate from the Java class instead
 * of hand-editing. Implementations live in
 * org_ray_streaming_runtime_transfer_ChannelID.cc. */

#ifndef _Included_org_ray_streaming_runtime_transfer_ChannelID
#define _Included_org_ray_streaming_runtime_transfer_ChannelID
#ifdef __cplusplus
extern "C" {
#endif
#undef org_ray_streaming_runtime_transfer_ChannelID_ID_LENGTH
#define org_ray_streaming_runtime_transfer_ChannelID_ID_LENGTH 20L
/*
 * Class:     org_ray_streaming_runtime_transfer_ChannelID
 * Method:    createNativeID
 * Signature: (J)J
 */
JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_ChannelID_createNativeID
  (JNIEnv *, jclass, jlong);

/*
 * Class:     org_ray_streaming_runtime_transfer_ChannelID
 * Method:    destroyNativeID
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_ChannelID_destroyNativeID
  (JNIEnv *, jclass, jlong);

#ifdef __cplusplus
}
#endif
#endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/lib/java/org_ray_streaming_runtime_transfer_DataReader.cc
C++
#include "org_ray_streaming_runtime_transfer_DataReader.h" #include <cstdlib> #include "data_reader.h" #include "runtime_context.h" #include "streaming_jni_common.h" using namespace ray; using namespace ray::streaming; JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_DataReader_createDataReaderNative( JNIEnv *env, jclass, jobjectArray input_channels, jobjectArray input_actor_ids, jlongArray seq_id_array, jlongArray msg_id_array, jlong timer_interval, jboolean isRecreate, jbyteArray config_bytes, jboolean is_mock) { STREAMING_LOG(INFO) << "[JNI]: create DataReader."; std::vector<ray::ObjectID> input_channels_ids = jarray_to_object_id_vec(env, input_channels); std::vector<ray::ActorID> actor_ids = jarray_to_actor_id_vec(env, input_actor_ids); std::vector<uint64_t> seq_ids = LongVectorFromJLongArray(env, seq_id_array).data; std::vector<uint64_t> msg_ids = LongVectorFromJLongArray(env, msg_id_array).data; auto ctx = std::make_shared<RuntimeContext>(); RawDataFromJByteArray conf(env, config_bytes); if (conf.data_size > 0) { STREAMING_LOG(INFO) << "load config, config bytes size: " << conf.data_size; ctx->SetConfig(conf.data, conf.data_size); } if (is_mock) { ctx->MarkMockTest(); } auto reader = new DataReader(ctx); reader->Init(input_channels_ids, actor_ids, seq_ids, msg_ids, timer_interval); STREAMING_LOG(INFO) << "create native DataReader succeed"; return reinterpret_cast<jlong>(reader); } JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_DataReader_getBundleNative( JNIEnv *env, jobject, jlong reader_ptr, jlong timeout_millis, jlong out, jlong meta_addr) { std::shared_ptr<ray::streaming::DataBundle> bundle; auto reader = reinterpret_cast<ray::streaming::DataReader *>(reader_ptr); auto status = reader->GetBundle((uint32_t)timeout_millis, bundle); // over timeout, return empty array. 
if (StreamingStatus::Interrupted == status) { throwChannelInterruptException(env, "reader interrupted."); } else if (StreamingStatus::GetBundleTimeOut == status) { } else if (StreamingStatus::InitQueueFailed == status) { throwRuntimeException(env, "init channel failed"); } else if (StreamingStatus::WaitQueueTimeOut == status) { throwRuntimeException(env, "wait channel object timeout"); } if (StreamingStatus::OK != status) { *reinterpret_cast<uint64_t *>(out) = 0; *reinterpret_cast<uint32_t *>(out + 8) = 0; return; } // bundle data // In streaming queue, bundle data and metadata will be different args of direct call, // so we separate it here for future extensibility. *reinterpret_cast<uint64_t *>(out) = reinterpret_cast<uint64_t>(bundle->data + kMessageBundleHeaderSize); *reinterpret_cast<uint32_t *>(out + 8) = bundle->data_size - kMessageBundleHeaderSize; // bundle metadata auto meta = reinterpret_cast<uint8_t *>(meta_addr); // bundle header written by writer std::memcpy(meta, bundle->data, kMessageBundleHeaderSize); // append qid std::memcpy(meta + kMessageBundleHeaderSize, bundle->from.Data(), kUniqueIDSize); } JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_DataReader_stopReaderNative(JNIEnv *env, jobject thisObj, jlong ptr) { auto reader = reinterpret_cast<DataReader *>(ptr); reader->Stop(); } JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_DataReader_closeReaderNative(JNIEnv *env, jobject thisObj, jlong ptr) { delete reinterpret_cast<DataReader *>(ptr); }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/lib/java/org_ray_streaming_runtime_transfer_DataReader.h
C/C++ Header
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_ray_streaming_runtime_transfer_DataReader */
/* NOTE(review): javah-generated stubs; regenerate from the Java class instead
 * of hand-editing. Implementations live in
 * org_ray_streaming_runtime_transfer_DataReader.cc. */

#ifndef _Included_org_ray_streaming_runtime_transfer_DataReader
#define _Included_org_ray_streaming_runtime_transfer_DataReader
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Class:     org_ray_streaming_runtime_transfer_DataReader
 * Method:    createDataReaderNative
 * Signature: ([[B[[B[J[JJZ[BZ)J
 */
JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_DataReader_createDataReaderNative
  (JNIEnv *, jclass, jobjectArray, jobjectArray, jlongArray, jlongArray, jlong, jboolean, jbyteArray, jboolean);

/*
 * Class:     org_ray_streaming_runtime_transfer_DataReader
 * Method:    getBundleNative
 * Signature: (JJJJ)V
 */
JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_DataReader_getBundleNative
  (JNIEnv *, jobject, jlong, jlong, jlong, jlong);

/*
 * Class:     org_ray_streaming_runtime_transfer_DataReader
 * Method:    stopReaderNative
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_DataReader_stopReaderNative
  (JNIEnv *, jobject, jlong);

/*
 * Class:     org_ray_streaming_runtime_transfer_DataReader
 * Method:    closeReaderNative
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_DataReader_closeReaderNative
  (JNIEnv *, jobject, jlong);

#ifdef __cplusplus
}
#endif
#endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/lib/java/org_ray_streaming_runtime_transfer_DataWriter.cc
C++
#include "org_ray_streaming_runtime_transfer_DataWriter.h" #include "config/streaming_config.h" #include "data_writer.h" #include "streaming_jni_common.h" using namespace ray::streaming; JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_DataWriter_createWriterNative( JNIEnv *env, jclass, jobjectArray output_queue_ids, jobjectArray output_actor_ids, jlongArray msg_ids, jlong channel_size, jbyteArray conf_bytes_array, jboolean is_mock) { STREAMING_LOG(INFO) << "[JNI]: createDataWriterNative."; std::vector<ray::ObjectID> queue_id_vec = jarray_to_object_id_vec(env, output_queue_ids); for (auto id : queue_id_vec) { STREAMING_LOG(INFO) << "output channel id: " << id.Hex(); } STREAMING_LOG(INFO) << "total channel size: " << channel_size << "*" << queue_id_vec.size() << "=" << queue_id_vec.size() * channel_size; LongVectorFromJLongArray long_array_obj(env, msg_ids); std::vector<uint64_t> msg_ids_vec = LongVectorFromJLongArray(env, msg_ids).data; std::vector<uint64_t> queue_size_vec(long_array_obj.data.size(), channel_size); std::vector<ray::ObjectID> remain_id_vec; std::vector<ray::ActorID> actor_ids = jarray_to_actor_id_vec(env, output_actor_ids); STREAMING_LOG(INFO) << "actor_ids: " << actor_ids[0]; RawDataFromJByteArray conf(env, conf_bytes_array); STREAMING_CHECK(conf.data != nullptr); auto runtime_context = std::make_shared<RuntimeContext>(); if (conf.data_size > 0) { runtime_context->SetConfig(conf.data, conf.data_size); } if (is_mock) { runtime_context->MarkMockTest(); } auto *data_writer = new DataWriter(runtime_context); auto status = data_writer->Init(queue_id_vec, actor_ids, msg_ids_vec, queue_size_vec); if (status != StreamingStatus::OK) { STREAMING_LOG(WARNING) << "DataWriter init failed."; } else { STREAMING_LOG(INFO) << "DataWriter init success"; } data_writer->Run(); return reinterpret_cast<jlong>(data_writer); } JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_DataWriter_writeMessageNative( JNIEnv *env, jobject, jlong 
writer_ptr, jlong qid_ptr, jlong address, jint size) { auto *data_writer = reinterpret_cast<DataWriter *>(writer_ptr); auto qid = *reinterpret_cast<ray::ObjectID *>(qid_ptr); auto data = reinterpret_cast<uint8_t *>(address); auto data_size = static_cast<uint32_t>(size); jlong result = data_writer->WriteMessageToBufferRing(qid, data, data_size, StreamingMessageType::Message); if (result == 0) { STREAMING_LOG(INFO) << "writer interrupted, return 0."; throwChannelInterruptException(env, "writer interrupted."); } return result; } JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_DataWriter_stopWriterNative(JNIEnv *env, jobject thisObj, jlong ptr) { STREAMING_LOG(INFO) << "jni: stop writer."; auto *data_writer = reinterpret_cast<DataWriter *>(ptr); data_writer->Stop(); } JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_DataWriter_closeWriterNative(JNIEnv *env, jobject thisObj, jlong ptr) { auto *data_writer = reinterpret_cast<DataWriter *>(ptr); delete data_writer; }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/lib/java/org_ray_streaming_runtime_transfer_DataWriter.h
C/C++ Header
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_ray_streaming_runtime_transfer_DataWriter */
/* NOTE(review): javah-generated stubs; regenerate from the Java class instead
 * of hand-editing. Implementations live in
 * org_ray_streaming_runtime_transfer_DataWriter.cc. */

#ifndef _Included_org_ray_streaming_runtime_transfer_DataWriter
#define _Included_org_ray_streaming_runtime_transfer_DataWriter
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Class:     org_ray_streaming_runtime_transfer_DataWriter
 * Method:    createWriterNative
 * Signature: ([[B[[B[JJ[BZ)J
 */
JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_DataWriter_createWriterNative
  (JNIEnv *, jclass, jobjectArray, jobjectArray, jlongArray, jlong, jbyteArray, jboolean);

/*
 * Class:     org_ray_streaming_runtime_transfer_DataWriter
 * Method:    writeMessageNative
 * Signature: (JJJI)J
 */
JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_DataWriter_writeMessageNative
  (JNIEnv *, jobject, jlong, jlong, jlong, jint);

/*
 * Class:     org_ray_streaming_runtime_transfer_DataWriter
 * Method:    stopWriterNative
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_DataWriter_stopWriterNative
  (JNIEnv *, jobject, jlong);

/*
 * Class:     org_ray_streaming_runtime_transfer_DataWriter
 * Method:    closeWriterNative
 * Signature: (J)V
 */
JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_DataWriter_closeWriterNative
  (JNIEnv *, jobject, jlong);

#ifdef __cplusplus
}
#endif
#endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/lib/java/org_ray_streaming_runtime_transfer_TransferHandler.cc
C++
#include "org_ray_streaming_runtime_transfer_TransferHandler.h" #include "queue/queue_client.h" #include "streaming_jni_common.h" using namespace ray::streaming; static std::shared_ptr<ray::LocalMemoryBuffer> JByteArrayToBuffer(JNIEnv *env, jbyteArray bytes) { RawDataFromJByteArray buf(env, bytes); STREAMING_CHECK(buf.data != nullptr); return std::make_shared<ray::LocalMemoryBuffer>(buf.data, buf.data_size, true); } JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_createWriterClientNative( JNIEnv *env, jobject this_obj, jlong core_worker_ptr, jobject async_func, jobject sync_func) { auto ray_async_func = FunctionDescriptorToRayFunction(env, async_func); auto ray_sync_func = FunctionDescriptorToRayFunction(env, sync_func); auto *writer_client = new WriterClient(reinterpret_cast<ray::CoreWorker *>(core_worker_ptr), ray_async_func, ray_sync_func); return reinterpret_cast<jlong>(writer_client); } JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_createReaderClientNative( JNIEnv *env, jobject this_obj, jlong core_worker_ptr, jobject async_func, jobject sync_func) { ray::RayFunction ray_async_func = FunctionDescriptorToRayFunction(env, async_func); ray::RayFunction ray_sync_func = FunctionDescriptorToRayFunction(env, sync_func); auto *reader_client = new ReaderClient(reinterpret_cast<ray::CoreWorker *>(core_worker_ptr), ray_async_func, ray_sync_func); return reinterpret_cast<jlong>(reader_client); } JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_handleWriterMessageNative( JNIEnv *env, jobject this_obj, jlong ptr, jbyteArray bytes) { auto *writer_client = reinterpret_cast<WriterClient *>(ptr); writer_client->OnWriterMessage(JByteArrayToBuffer(env, bytes)); } JNIEXPORT jbyteArray JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_handleWriterMessageSyncNative( JNIEnv *env, jobject this_obj, jlong ptr, jbyteArray bytes) { auto *writer_client = 
reinterpret_cast<WriterClient *>(ptr); std::shared_ptr<ray::LocalMemoryBuffer> result_buffer = writer_client->OnWriterMessageSync(JByteArrayToBuffer(env, bytes)); jbyteArray arr = env->NewByteArray(result_buffer->Size()); env->SetByteArrayRegion(arr, 0, result_buffer->Size(), reinterpret_cast<jbyte *>(result_buffer->Data())); return arr; } JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_handleReaderMessageNative( JNIEnv *env, jobject this_obj, jlong ptr, jbyteArray bytes) { auto *reader_client = reinterpret_cast<ReaderClient *>(ptr); reader_client->OnReaderMessage(JByteArrayToBuffer(env, bytes)); } JNIEXPORT jbyteArray JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_handleReaderMessageSyncNative( JNIEnv *env, jobject this_obj, jlong ptr, jbyteArray bytes) { auto *reader_client = reinterpret_cast<ReaderClient *>(ptr); auto result_buffer = reader_client->OnReaderMessageSync(JByteArrayToBuffer(env, bytes)); jbyteArray arr = env->NewByteArray(result_buffer->Size()); env->SetByteArrayRegion(arr, 0, result_buffer->Size(), reinterpret_cast<jbyte *>(result_buffer->Data())); return arr; }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/lib/java/org_ray_streaming_runtime_transfer_TransferHandler.h
C/C++ Header
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class org_ray_streaming_runtime_transfer_TransferHandler */
/* NOTE(review): javah-generated stubs; regenerate from the Java class instead
 * of hand-editing. Implementations live in
 * org_ray_streaming_runtime_transfer_TransferHandler.cc. */

#ifndef _Included_org_ray_streaming_runtime_transfer_TransferHandler
#define _Included_org_ray_streaming_runtime_transfer_TransferHandler
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Class:     org_ray_streaming_runtime_transfer_TransferHandler
 * Method:    createWriterClientNative
 * Signature: (J)J
 */
/* NOTE(review): the "(J)J" signature comment above looks stale — the prototype
 * below takes (jlong, jobject, jobject); verify against the current Java
 * declaration and regenerate this header. */
JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_createWriterClientNative
  (JNIEnv *, jobject, jlong, jobject, jobject);

/*
 * Class:     org_ray_streaming_runtime_transfer_TransferHandler
 * Method:    createReaderClientNative
 * Signature: (J)J
 */
/* NOTE(review): same stale-signature concern as createWriterClientNative. */
JNIEXPORT jlong JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_createReaderClientNative
  (JNIEnv *, jobject, jlong, jobject, jobject);

/*
 * Class:     org_ray_streaming_runtime_transfer_TransferHandler
 * Method:    handleWriterMessageNative
 * Signature: (J[B)V
 */
JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_handleWriterMessageNative
  (JNIEnv *, jobject, jlong, jbyteArray);

/*
 * Class:     org_ray_streaming_runtime_transfer_TransferHandler
 * Method:    handleWriterMessageSyncNative
 * Signature: (J[B)[B
 */
JNIEXPORT jbyteArray JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_handleWriterMessageSyncNative
  (JNIEnv *, jobject, jlong, jbyteArray);

/*
 * Class:     org_ray_streaming_runtime_transfer_TransferHandler
 * Method:    handleReaderMessageNative
 * Signature: (J[B)V
 */
JNIEXPORT void JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_handleReaderMessageNative
  (JNIEnv *, jobject, jlong, jbyteArray);

/*
 * Class:     org_ray_streaming_runtime_transfer_TransferHandler
 * Method:    handleReaderMessageSyncNative
 * Signature: (J[B)[B
 */
JNIEXPORT jbyteArray JNICALL Java_org_ray_streaming_runtime_transfer_TransferHandler_handleReaderMessageSyncNative
  (JNIEnv *, jobject, jlong, jbyteArray);

#ifdef __cplusplus
}
#endif
#endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/lib/java/streaming_jni_common.cc
C++
#include "streaming_jni_common.h" std::vector<ray::ObjectID> jarray_to_object_id_vec(JNIEnv *env, jobjectArray jarr) { int stringCount = env->GetArrayLength(jarr); std::vector<ray::ObjectID> object_id_vec; for (int i = 0; i < stringCount; i++) { auto jstr = (jbyteArray) (env->GetObjectArrayElement(jarr, i)); UniqueIdFromJByteArray idFromJByteArray(env, jstr); object_id_vec.push_back(idFromJByteArray.PID); } return object_id_vec; } std::vector<ray::ActorID> jarray_to_actor_id_vec(JNIEnv *env, jobjectArray jarr) { int count = env->GetArrayLength(jarr); std::vector<ray::ActorID> actor_id_vec; for (int i = 0; i < count; i++) { auto bytes = (jbyteArray)(env->GetObjectArrayElement(jarr, i)); std::string id_str(ray::ActorID::Size(), 0); env->GetByteArrayRegion(bytes, 0, ray::ActorID::Size(), reinterpret_cast<jbyte *>(&id_str.front())); actor_id_vec.push_back(ActorID::FromBinary(id_str)); } return actor_id_vec; } jint throwRuntimeException(JNIEnv *env, const char *message) { jclass exClass; char className[] = "java/lang/RuntimeException"; exClass = env->FindClass(className); return env->ThrowNew(exClass, message); } jint throwChannelInitException(JNIEnv *env, const char *message, const std::vector<ray::ObjectID> &abnormal_queues) { jclass array_list_class = env->FindClass("java/util/ArrayList"); jmethodID array_list_constructor = env->GetMethodID(array_list_class, "<init>", "()V"); jmethodID array_list_add = env->GetMethodID(array_list_class, "add", "(Ljava/lang/Object;)Z"); jobject array_list = env->NewObject(array_list_class, array_list_constructor); for (auto &q_id : abnormal_queues) { jbyteArray jbyte_array = env->NewByteArray(kUniqueIDSize); env->SetByteArrayRegion(jbyte_array, 0, kUniqueIDSize, const_cast<jbyte*>(reinterpret_cast<const jbyte *>(q_id.Data()))); env->CallBooleanMethod(array_list, array_list_add, jbyte_array); } jclass ex_class = env->FindClass("org/ray/streaming/runtime/transfer/ChannelInitException"); jmethodID ex_constructor = 
env->GetMethodID(ex_class, "<init>", "(Ljava/lang/String;Ljava/util/List;)V"); jstring message_jstr = env->NewStringUTF(message); jobject ex_obj = env->NewObject(ex_class, ex_constructor, message_jstr, array_list); env->DeleteLocalRef(message_jstr); return env->Throw((jthrowable)ex_obj); } jint throwChannelInterruptException(JNIEnv *env, const char *message) { jclass ex_class = env->FindClass("org/ray/streaming/runtime/transfer/ChannelInterruptException"); return env->ThrowNew(ex_class, message); } jclass LoadClass(JNIEnv *env, const char *class_name) { jclass tempLocalClassRef = env->FindClass(class_name); jclass ret = (jclass)env->NewGlobalRef(tempLocalClassRef); STREAMING_CHECK(ret) << "Can't load Java class " << class_name; env->DeleteLocalRef(tempLocalClassRef); return ret; } template <typename NativeT> void JavaListToNativeVector( JNIEnv *env, jobject java_list, std::vector<NativeT> *native_vector, std::function<NativeT(JNIEnv *, jobject)> element_converter) { jclass java_list_class = LoadClass(env, "java/util/List"); jmethodID java_list_size = env->GetMethodID(java_list_class, "size", "()I"); jmethodID java_list_get = env->GetMethodID(java_list_class, "get", "(I)Ljava/lang/Object;"); int size = env->CallIntMethod(java_list, java_list_size); native_vector->clear(); for (int i = 0; i < size; i++) { native_vector->emplace_back( element_converter(env, env->CallObjectMethod(java_list, java_list_get, (jint)i))); } } /// Convert a Java String to C++ std::string. std::string JavaStringToNativeString(JNIEnv *env, jstring jstr) { const char *c_str = env->GetStringUTFChars(jstr, nullptr); std::string result(c_str); env->ReleaseStringUTFChars(static_cast<jstring>(jstr), c_str); return result; } /// Convert a Java List<String> to C++ std::vector<std::string>. 
void JavaStringListToNativeStringVector(JNIEnv *env, jobject java_list, std::vector<std::string> *native_vector) { JavaListToNativeVector<std::string>( env, java_list, native_vector, [](JNIEnv *env, jobject jstr) { return JavaStringToNativeString(env, static_cast<jstring>(jstr)); }); } ray::RayFunction FunctionDescriptorToRayFunction(JNIEnv *env, jobject functionDescriptor) { jclass java_language_class = LoadClass(env, "org/ray/runtime/generated/Common$Language"); jclass java_function_descriptor_class = LoadClass(env, "org/ray/runtime/functionmanager/FunctionDescriptor"); jmethodID java_language_get_number = env->GetMethodID(java_language_class, "getNumber", "()I"); jmethodID java_function_descriptor_get_language = env->GetMethodID(java_function_descriptor_class, "getLanguage", "()Lorg/ray/runtime/generated/Common$Language;"); jobject java_language = env->CallObjectMethod(functionDescriptor, java_function_descriptor_get_language); int language = env->CallIntMethod(java_language, java_language_get_number); std::vector<std::string> function_descriptor; jmethodID java_function_descriptor_to_list = env->GetMethodID(java_function_descriptor_class, "toList", "()Ljava/util/List;"); JavaStringListToNativeStringVector( env, env->CallObjectMethod(functionDescriptor, java_function_descriptor_to_list), &function_descriptor); ray::RayFunction ray_function{static_cast<::Language>(language), function_descriptor}; return ray_function; }
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/lib/java/streaming_jni_common.h
C/C++ Header
#ifndef RAY_STREAMING_JNI_COMMON_H
#define RAY_STREAMING_JNI_COMMON_H

#include <jni.h>
#include <string>

#include "ray/core_worker/common.h"
#include "util/streaming_logging.h"

// RAII view over a Java byte[] holding raw id bytes: pins the array elements
// on construction, parses them into `PID`, and releases them on destruction.
class UniqueIdFromJByteArray {
 private:
  JNIEnv *_env;
  jbyteArray _bytes;
  jbyte *b;  // pinned element pointer, released in the destructor

 public:
  // Parsed id; it owns its own bytes, so it stays valid after destruction.
  ray::ObjectID PID;

  UniqueIdFromJByteArray(JNIEnv *env, jbyteArray wid) {
    _env = env;
    _bytes = wid;
    b = reinterpret_cast<jbyte *>(_env->GetByteArrayElements(_bytes, nullptr));
    PID = ray::ObjectID::FromBinary(
        std::string(reinterpret_cast<const char *>(b), ray::ObjectID::Size()));
  }

  ~UniqueIdFromJByteArray() { _env->ReleaseByteArrayElements(_bytes, b, 0); }
};

// RAII view over a Java byte[]: exposes the pinned bytes via `data`/`data_size`.
// The pointers are only valid while this wrapper is alive.
class RawDataFromJByteArray {
 private:
  JNIEnv *_env;
  jbyteArray _bytes;

 public:
  uint8_t *data;       // pinned element pointer, released in the destructor
  uint32_t data_size;  // number of bytes in the array

  RawDataFromJByteArray(JNIEnv *env, jbyteArray bytes) {
    _env = env;
    _bytes = bytes;
    data_size = _env->GetArrayLength(_bytes);
    jbyte *b = reinterpret_cast<jbyte *>(_env->GetByteArrayElements(_bytes, nullptr));
    data = reinterpret_cast<uint8_t *>(b);
  }

  ~RawDataFromJByteArray() {
    _env->ReleaseByteArrayElements(_bytes, reinterpret_cast<jbyte *>(data), 0);
  }
};

// RAII copy of a Java String into `str`; releases the UTF chars on destruction.
class StringFromJString {
 private:
  JNIEnv *_env;
  const char *j_str;
  jstring jni_str;

 public:
  std::string str;  // owned copy, valid beyond the wrapper's lifetime

  StringFromJString(JNIEnv *env, jstring jni_str_) {
    jni_str = jni_str_;
    _env = env;
    j_str = env->GetStringUTFChars(jni_str, nullptr);
    str = std::string(j_str);
  }

  ~StringFromJString() { _env->ReleaseStringUTFChars(jni_str, j_str); }
};

// RAII copy of a Java long[] into `data` (as uint64_t); releases the pinned
// elements on destruction.
class LongVectorFromJLongArray {
 private:
  JNIEnv *_env;
  jlongArray long_array;
  jlong *long_array_ptr = nullptr;

 public:
  std::vector<uint64_t> data;  // owned copy, valid beyond the wrapper's lifetime

  LongVectorFromJLongArray(JNIEnv *env, jlongArray long_array_) {
    _env = env;
    long_array = long_array_;
    long_array_ptr = env->GetLongArrayElements(long_array, nullptr);
    jsize seq_id_size = env->GetArrayLength(long_array);
    data = std::vector<uint64_t>(long_array_ptr, long_array_ptr + seq_id_size);
  }

  ~LongVectorFromJLongArray() {
    _env->ReleaseLongArrayElements(long_array, long_array_ptr, 0);
  }
};

// Conversion helpers shared by the streaming JNI bindings.
std::vector<ray::ObjectID> jarray_to_object_id_vec(JNIEnv *env, jobjectArray jarr);
std::vector<ray::ActorID> jarray_to_actor_id_vec(JNIEnv *env, jobjectArray jarr);
// Exception helpers: each leaves a pending Java exception on `env`.
jint throwRuntimeException(JNIEnv *env, const char *message);
jint throwChannelInitException(JNIEnv *env, const char *message,
                               const std::vector<ray::ObjectID> &abnormal_queues);
jint throwChannelInterruptException(JNIEnv *env, const char *message);
ray::RayFunction FunctionDescriptorToRayFunction(JNIEnv *env, jobject functionDescriptor);

#endif  // RAY_STREAMING_JNI_COMMON_H
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/message/message.cc
C++
#include <utility>

#include <cstring>
#include <string>

#include "message.h"
#include "ray/common/status.h"
#include "util/streaming_logging.h"

namespace ray {
namespace streaming {

// Shares ownership of an existing payload buffer; no copy is made.
StreamingMessage::StreamingMessage(std::shared_ptr<uint8_t> &data, uint32_t data_size,
                                   uint64_t seq_id, StreamingMessageType message_type)
    : message_data_(data),
      data_size_(data_size),
      message_type_(message_type),
      message_id_(seq_id) {}

// Takes over an rvalue payload buffer; no copy is made.
StreamingMessage::StreamingMessage(std::shared_ptr<uint8_t> &&data, uint32_t data_size,
                                   uint64_t seq_id, StreamingMessageType message_type)
    : message_data_(data),
      data_size_(data_size),
      message_type_(message_type),
      message_id_(seq_id) {}

// Deep-copies `data_size` bytes from a raw buffer into freshly allocated storage.
StreamingMessage::StreamingMessage(const uint8_t *data, uint32_t data_size,
                                   uint64_t seq_id, StreamingMessageType message_type)
    : data_size_(data_size), message_type_(message_type), message_id_(seq_id) {
  message_data_.reset(new uint8_t[data_size], std::default_delete<uint8_t[]>());
  std::memcpy(message_data_.get(), data, data_size_);
}

// Shallow copy: the payload buffer is shared between the two messages.
StreamingMessage::StreamingMessage(const StreamingMessage &msg) {
  data_size_ = msg.data_size_;
  message_data_ = msg.message_data_;
  message_id_ = msg.message_id_;
  message_type_ = msg.message_type_;
}

// Deserializes one message from the layout produced by ToBytes:
// [data_size:u32][message_id:u64][message_type:u32][payload:data_size bytes].
// NOTE(review): `verifer_check` is accepted but currently unused here.
StreamingMessagePtr StreamingMessage::FromBytes(const uint8_t *bytes,
                                                bool verifer_check) {
  uint32_t byte_offset = 0;
  uint32_t data_size = *reinterpret_cast<const uint32_t *>(bytes + byte_offset);
  byte_offset += sizeof(data_size);

  uint64_t seq_id = *reinterpret_cast<const uint64_t *>(bytes + byte_offset);
  byte_offset += sizeof(seq_id);

  StreamingMessageType msg_type =
      *reinterpret_cast<const StreamingMessageType *>(bytes + byte_offset);
  byte_offset += sizeof(msg_type);

  // The payload is deep-copied so the returned message owns its own data.
  auto buf = new uint8_t[data_size];
  std::memcpy(buf, bytes + byte_offset, data_size);
  auto data_ptr = std::shared_ptr<uint8_t>(buf, std::default_delete<uint8_t[]>());
  return std::make_shared<StreamingMessage>(data_ptr, data_size, seq_id, msg_type);
}

// Serializes header + payload into `serlizable_data`, which must hold at least
// ClassBytesSize() bytes. Field order must stay in sync with FromBytes.
void StreamingMessage::ToBytes(uint8_t *serlizable_data) {
  uint32_t byte_offset = 0;
  std::memcpy(serlizable_data + byte_offset, reinterpret_cast<char *>(&data_size_),
              sizeof(data_size_));
  byte_offset += sizeof(data_size_);

  std::memcpy(serlizable_data + byte_offset, reinterpret_cast<char *>(&message_id_),
              sizeof(message_id_));
  byte_offset += sizeof(message_id_);

  std::memcpy(serlizable_data + byte_offset, reinterpret_cast<char *>(&message_type_),
              sizeof(message_type_));
  byte_offset += sizeof(message_type_);

  std::memcpy(serlizable_data + byte_offset,
              reinterpret_cast<char *>(message_data_.get()), data_size_);
  byte_offset += data_size_;

  // Sanity check: we wrote exactly the advertised serialized size.
  STREAMING_CHECK(byte_offset == this->ClassBytesSize());
}

// Messages compare equal when all header fields and payload bytes match.
bool StreamingMessage::operator==(const StreamingMessage &message) const {
  return GetDataSize() == message.GetDataSize() &&
         GetMessageSeqId() == message.GetMessageSeqId() &&
         GetMessageType() == message.GetMessageType() &&
         !std::memcmp(RawData(), message.RawData(), data_size_);
}

}  // namespace streaming
}  // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/message/message.h
C/C++ Header
#ifndef RAY_MESSAGE_H
#define RAY_MESSAGE_H

#include <memory>

namespace ray {
namespace streaming {

class StreamingMessage;

typedef std::shared_ptr<StreamingMessage> StreamingMessagePtr;

// On-the-wire message kinds; MIN/MAX bound the valid range for validation.
enum class StreamingMessageType : uint32_t {
  Barrier = 1,
  Message = 2,
  MIN = Barrier,
  MAX = Message
};

// Serialized header size: data_size (u32) + message_id (u64) + type (u32).
constexpr uint32_t kMessageHeaderSize =
    sizeof(uint32_t) + sizeof(uint64_t) + sizeof(StreamingMessageType);

/// All messages should be wrapped by this protocol.
// DataSize means length of raw data, message id is increasing from [1, +INF].
// MessageType will be used for barrier transporting and checkpoint.
/// +----------------+
/// | DataSize=U32   |
/// +----------------+
/// | MessageId=U64  |
/// +----------------+
/// | MessageType=U32|
/// +----------------+
/// | Data=var       |
/// +----------------+
class StreamingMessage {
 private:
  std::shared_ptr<uint8_t> message_data_;  // payload bytes (shared across copies)
  uint32_t data_size_;                     // payload length in bytes
  StreamingMessageType message_type_;
  uint64_t message_id_;

 public:
  /// Copy raw data from outside shared buffer.
  /// \param data raw data from user buffer
  /// \param data_size raw data size
  /// \param seq_id message id
  /// \param message_type
  StreamingMessage(std::shared_ptr<uint8_t> &data, uint32_t data_size, uint64_t seq_id,
                   StreamingMessageType message_type);

  /// Move outside raw data to message data.
  /// \param data raw data from user buffer
  /// \param data_size raw data size
  /// \param seq_id message id
  /// \param message_type
  StreamingMessage(std::shared_ptr<uint8_t> &&data, uint32_t data_size, uint64_t seq_id,
                   StreamingMessageType message_type);

  /// Copy raw data from outside buffer.
  /// \param data raw data from user buffer
  /// \param data_size raw data size
  /// \param seq_id message id
  /// \param message_type
  StreamingMessage(const uint8_t *data, uint32_t data_size, uint64_t seq_id,
                   StreamingMessageType message_type);

  // Shallow copy: payload buffer is shared, not duplicated.
  StreamingMessage(const StreamingMessage &);

  // Assignment is intentionally disabled.
  StreamingMessage operator=(const StreamingMessage &) = delete;

  virtual ~StreamingMessage() = default;

  // Accessors; RawData points into the shared payload buffer.
  inline uint8_t *RawData() const { return message_data_.get(); }

  inline uint32_t GetDataSize() const { return data_size_; }

  inline StreamingMessageType GetMessageType() const { return message_type_; }

  inline uint64_t GetMessageSeqId() const { return message_id_; }

  inline bool IsMessage() { return StreamingMessageType::Message == message_type_; }

  inline bool IsBarrier() { return StreamingMessageType::Barrier == message_type_; }

  bool operator==(const StreamingMessage &) const;

  // Serializes header + payload; see message.cc for the exact layout.
  virtual void ToBytes(uint8_t *data);

  // Inverse of ToBytes; NOTE(review): `verifer_check` is unused in the impl.
  static StreamingMessagePtr FromBytes(const uint8_t *data, bool verifer_check = true);

  // Total serialized size: fixed header plus payload.
  inline virtual uint32_t ClassBytesSize() { return kMessageHeaderSize + data_size_; }
};

}  // namespace streaming
}  // namespace ray

#endif  // RAY_MESSAGE_H
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/message/message_bundle.cc
C++
#include <cstring>
#include <string>

#include "ray/common/status.h"

#include "config/streaming_config.h"
#include "message_bundle.h"
#include "util/streaming_logging.h"

namespace ray {
namespace streaming {

// Build an empty bundle (no messages, no raw data) carrying only the last
// consumed message id and a timestamp.
StreamingMessageBundle::StreamingMessageBundle(uint64_t last_offset_seq_id,
                                               uint64_t message_bundle_ts)
    : StreamingMessageBundleMeta(message_bundle_ts, last_offset_seq_id, 0,
                                 StreamingMessageBundleType::Empty) {
  this->raw_bundle_size_ = 0;
}

// Deserialize meta fields directly into member memory. This relies on the wire
// layout after the magic number matching the member layout starting at
// message_bundle_ts_ (see GetFirstMemberAddress in the header).
StreamingMessageBundleMeta::StreamingMessageBundleMeta(const uint8_t *bytes) {
  std::memcpy(GetFirstMemberAddress(), bytes,
              kMessageBundleMetaHeaderSize - sizeof(uint32_t));
}

// Field-wise meta constructor; rejects bundles larger than the configured cap.
StreamingMessageBundleMeta::StreamingMessageBundleMeta(
    const uint64_t message_bundle_ts, const uint64_t last_offset_seq_id,
    const uint32_t message_list_size, const StreamingMessageBundleType bundle_type)
    : message_bundle_ts_(message_bundle_ts),
      last_message_id_(last_offset_seq_id),
      message_list_size_(message_list_size),
      bundle_type_(bundle_type) {
  STREAMING_CHECK(message_list_size <= StreamingConfig::MESSAGE_BUNDLE_MAX_SIZE);
}

// Serialize: magic number first, then the raw member block (same layout trick
// as the byte-based constructor above).
void StreamingMessageBundleMeta::ToBytes(uint8_t *bytes) {
  uint32_t magicNum = StreamingMessageBundleMeta::StreamingMessageBundleMagicNum;
  std::memcpy(bytes, reinterpret_cast<const uint8_t *>(&magicNum), sizeof(uint32_t));
  std::memcpy(bytes + sizeof(uint32_t), GetFirstMemberAddress(),
              kMessageBundleMetaHeaderSize - sizeof(uint32_t));
}

// Deserialize a meta header after validating the magic number.
// NOTE(review): the `check` parameter is currently unused.
StreamingMessageBundleMetaPtr StreamingMessageBundleMeta::FromBytes(const uint8_t *bytes,
                                                                    bool check) {
  STREAMING_CHECK(bytes);
  uint32_t byte_offset = 0;
  STREAMING_CHECK(CheckBundleMagicNum(bytes));
  byte_offset += sizeof(uint32_t);

  auto result = std::make_shared<StreamingMessageBundleMeta>(bytes + byte_offset);
  STREAMING_CHECK(result->GetMessageListSize() <=
                  StreamingConfig::MESSAGE_BUNDLE_MAX_SIZE);
  return result;
}

// Metas are equal iff all four header fields match.
bool StreamingMessageBundleMeta::operator==(StreamingMessageBundleMeta &meta) const {
  return this->message_list_size_ == meta.GetMessageListSize() &&
         this->message_bundle_ts_ == meta.GetMessageBundleTs() &&
         this->bundle_type_ == meta.GetBundleType() &&
         this->last_message_id_ == meta.GetLastMessageId();
}

bool StreamingMessageBundleMeta::operator==(StreamingMessageBundleMeta *meta) const {
  return operator==(*meta);
}

// Default meta: an Empty bundle with the numeric fields left uninitialized.
StreamingMessageBundleMeta::StreamingMessageBundleMeta()
    : bundle_type_(StreamingMessageBundleType::Empty) {}

// Move-construct a bundle from a message list. If the caller did not supply
// raw_data_size, compute it as the sum of the serialized sizes of the messages.
StreamingMessageBundle::StreamingMessageBundle(
    std::list<StreamingMessagePtr> &&message_list, uint64_t message_ts,
    uint64_t last_offset_seq_id, StreamingMessageBundleType bundle_type,
    uint32_t raw_data_size)
    : StreamingMessageBundleMeta(message_ts, last_offset_seq_id, message_list.size(),
                                 bundle_type),
      raw_bundle_size_(raw_data_size),
      message_list_(message_list) {
  if (bundle_type_ != StreamingMessageBundleType::Empty) {
    if (!raw_bundle_size_) {
      raw_bundle_size_ = std::accumulate(
          message_list_.begin(), message_list_.end(), 0,
          [](uint32_t x, StreamingMessagePtr &y) { return x + y->ClassBytesSize(); });
    }
  }
}

// Copying overload: duplicates the list, then delegates to the move overload.
StreamingMessageBundle::StreamingMessageBundle(
    std::list<StreamingMessagePtr> &message_list, uint64_t message_ts,
    uint64_t last_offset_seq_id, StreamingMessageBundleType bundle_type,
    uint32_t raw_data_size)
    : StreamingMessageBundle(std::list<StreamingMessagePtr>(message_list), message_ts,
                             last_offset_seq_id, bundle_type, raw_data_size) {}

// Copy constructor: shallow-copies the message pointers (messages are shared).
StreamingMessageBundle::StreamingMessageBundle(StreamingMessageBundle &bundle) {
  message_bundle_ts_ = bundle.message_bundle_ts_;
  message_list_size_ = bundle.message_list_size_;
  raw_bundle_size_ = bundle.raw_bundle_size_;
  bundle_type_ = bundle.bundle_type_;
  last_message_id_ = bundle.last_message_id_;
  message_list_ = bundle.message_list_;
}

// Serialize: meta header, then RawBundleSize, then the concatenated messages
// (only when there is raw data).
void StreamingMessageBundle::ToBytes(uint8_t *bytes) {
  uint32_t byte_offset = 0;
  StreamingMessageBundleMeta::ToBytes(bytes + byte_offset);
  byte_offset += StreamingMessageBundleMeta::ClassBytesSize();

  std::memcpy(bytes + byte_offset, reinterpret_cast<char *>(&raw_bundle_size_),
              sizeof(uint32_t));
  byte_offset += sizeof(uint32_t);

  if (raw_bundle_size_ > 0) {
    ConvertMessageListToRawData(message_list_, raw_bundle_size_, bytes + byte_offset);
  }
}

// Deserialize a full bundle: meta, RawBundleSize, then (for non-empty bundles)
// the message list. The final offset must equal the recomputed class size.
// NOTE(review): the `verifer_check` parameter is currently unused.
StreamingMessageBundlePtr StreamingMessageBundle::FromBytes(const uint8_t *bytes,
                                                            bool verifer_check) {
  uint32_t byte_offset = 0;
  StreamingMessageBundleMetaPtr meta_ptr =
      StreamingMessageBundleMeta::FromBytes(bytes + byte_offset);
  byte_offset += meta_ptr->ClassBytesSize();

  uint32_t raw_data_size = *reinterpret_cast<const uint32_t *>(bytes + byte_offset);
  byte_offset += sizeof(uint32_t);

  std::list<StreamingMessagePtr> message_list;
  // only message bundle own raw data
  if (meta_ptr->GetBundleType() != StreamingMessageBundleType::Empty) {
    GetMessageListFromRawData(bytes + byte_offset, raw_data_size,
                              meta_ptr->GetMessageListSize(), message_list);
    byte_offset += raw_data_size;
  }
  auto result = std::make_shared<StreamingMessageBundle>(
      message_list, meta_ptr->GetMessageBundleTs(), meta_ptr->GetLastMessageId(),
      meta_ptr->GetBundleType());
  STREAMING_CHECK(byte_offset == result->ClassBytesSize());
  return result;
}

// Parse exactly message_list_size messages laid end-to-end in `bytes`;
// the consumed byte count must match byte_size.
void StreamingMessageBundle::GetMessageListFromRawData(
    const uint8_t *bytes, uint32_t byte_size, uint32_t message_list_size,
    std::list<StreamingMessagePtr> &message_list) {
  uint32_t byte_offset = 0;

  // only message bundle own raw data
  for (size_t i = 0; i < message_list_size; ++i) {
    StreamingMessagePtr item = StreamingMessage::FromBytes(bytes + byte_offset);
    message_list.push_back(item);
    byte_offset += item->ClassBytesSize();
  }
  STREAMING_CHECK(byte_offset == byte_size);
}

// Copy out the (shared) message pointer list.
void StreamingMessageBundle::GetMessageList(
    std::list<StreamingMessagePtr> &message_list) {
  message_list = message_list_;
}

// Serialize each message back-to-back into `raw_data`; the written byte count
// must match the precomputed raw_data_size.
void StreamingMessageBundle::ConvertMessageListToRawData(
    const std::list<StreamingMessagePtr> &message_list, uint32_t raw_data_size,
    uint8_t *raw_data) {
  uint32_t byte_offset = 0;
  for (auto &message : message_list) {
    message->ToBytes(raw_data + byte_offset);
    byte_offset += message->ClassBytesSize();
  }
  STREAMING_CHECK(byte_offset == raw_data_size);
}

// Deep comparison: meta fields, sizes, then message-by-message equality.
bool StreamingMessageBundle::operator==(StreamingMessageBundle &bundle) const {
  if (!(StreamingMessageBundleMeta::operator==(&bundle) &&
        this->GetRawBundleSize() == bundle.GetRawBundleSize() &&
        this->GetMessageListSize() == bundle.GetMessageListSize())) {
    return false;
  }
  auto it1 = message_list_.begin();
  auto it2 = bundle.message_list_.begin();
  while (it1 != message_list_.end() && it2 != bundle.message_list_.end()) {
    if (!((*it1).get()->operator==(*(*it2).get()))) {
      return false;
    }
    it1++;
    it2++;
  }
  return true;
}

bool StreamingMessageBundle::operator==(StreamingMessageBundle *bundle) const {
  return this->operator==(*bundle);
}

}  // namespace streaming
}  // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/message/message_bundle.h
C/C++ Header
#ifndef RAY_MESSAGE_BUNDLE_H #define RAY_MESSAGE_BUNDLE_H #include <ctime> #include <list> #include <numeric> #include "message.h" namespace ray { namespace streaming { enum class StreamingMessageBundleType : uint32_t { Empty = 1, Barrier = 2, Bundle = 3, MIN = Empty, MAX = Bundle }; class StreamingMessageBundleMeta; class StreamingMessageBundle; typedef std::shared_ptr<StreamingMessageBundle> StreamingMessageBundlePtr; typedef std::shared_ptr<StreamingMessageBundleMeta> StreamingMessageBundleMetaPtr; constexpr uint32_t kMessageBundleMetaHeaderSize = sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(StreamingMessageBundleType); constexpr uint32_t kMessageBundleHeaderSize = kMessageBundleMetaHeaderSize + sizeof(uint32_t); class StreamingMessageBundleMeta { public: static const uint32_t StreamingMessageBundleMagicNum = 0xCAFEBABA; protected: uint64_t message_bundle_ts_; uint64_t last_message_id_; uint32_t message_list_size_; StreamingMessageBundleType bundle_type_; private: /// To speed up memory copy and serilization, we use memory layout of compiler related /// member variables. It's must be modified if any field is going to be inserted before /// first member property. /// Reference /// :/http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1113r0.html#2254). 
inline uint8_t *GetFirstMemberAddress() { return reinterpret_cast<uint8_t *>(&message_bundle_ts_); } public: explicit StreamingMessageBundleMeta(const uint8_t *bytes); explicit StreamingMessageBundleMeta(const uint64_t message_bunddle_tes, const uint64_t last_offset_seq_id, const uint32_t message_list_size, const StreamingMessageBundleType bundle_type); explicit StreamingMessageBundleMeta(StreamingMessageBundleMeta *); explicit StreamingMessageBundleMeta(); virtual ~StreamingMessageBundleMeta() = default; bool operator==(StreamingMessageBundleMeta &) const; bool operator==(StreamingMessageBundleMeta *) const; inline uint64_t GetMessageBundleTs() const { return message_bundle_ts_; } inline uint64_t GetLastMessageId() const { return last_message_id_; } inline uint32_t GetMessageListSize() const { return message_list_size_; } inline StreamingMessageBundleType GetBundleType() const { return bundle_type_; } inline bool IsBarrier() { return StreamingMessageBundleType::Barrier == bundle_type_; } inline bool IsBundle() { return StreamingMessageBundleType::Bundle == bundle_type_; } virtual void ToBytes(uint8_t *data); static StreamingMessageBundleMetaPtr FromBytes(const uint8_t *data, bool verifer_check = true); inline virtual uint32_t ClassBytesSize() { return kMessageBundleMetaHeaderSize; } inline static bool CheckBundleMagicNum(const uint8_t *bytes) { const uint32_t *magic_num = reinterpret_cast<const uint32_t *>(bytes); return *magic_num == StreamingMessageBundleMagicNum; } std::string ToString() { return std::to_string(last_message_id_) + "," + std::to_string(message_list_size_) + "," + std::to_string(message_bundle_ts_) + "," + std::to_string(static_cast<uint32_t>(bundle_type_)); } }; /// StreamingMessageBundle inherits from metadata class (StreamingMessageBundleMeta) /// with the following protocol: MagicNum = 0xcafebaba Timestamp 64bits timestamp /// (milliseconds from 1970) LastMessageId( the last id of bundle) (0,INF] /// MessageListSize(bundle len of message) /// 
BundleType(a. bundle = 3 , b. barrier =2, c. empty = 1) /// RawBundleSize(binary length of data) /// RawData ( binary data) /// /// +--------------------+ /// | MagicNum=U32 | /// +--------------------+ /// | BundleTs=U64 | /// +--------------------+ /// | LastMessageId=U64 | /// +--------------------+ /// | MessageListSize=U32| /// +--------------------+ /// | BundleType=U32 | /// +--------------------+ /// | RawBundleSize=U32 | /// +--------------------+ /// | RawData=var(N*Msg) | /// +--------------------+ /// It should be noted that StreamingMessageBundle and StreamingMessageBundleMeta share /// almost same protocol but the last two fields (RawBundleSize and RawData). class StreamingMessageBundle : public StreamingMessageBundleMeta { private: uint32_t raw_bundle_size_; // Lazy serlization/deserlization. std::list<StreamingMessagePtr> message_list_; public: explicit StreamingMessageBundle(std::list<StreamingMessagePtr> &&message_list, uint64_t bundle_ts, uint64_t offset, StreamingMessageBundleType bundle_type, uint32_t raw_data_size = 0); // Duplicated copy if left reference in constructor. explicit StreamingMessageBundle(std::list<StreamingMessagePtr> &message_list, uint64_t bundle_ts, uint64_t offset, StreamingMessageBundleType bundle_type, uint32_t raw_data_size = 0); // New a empty bundle by passing last message id and timestamp. 
explicit StreamingMessageBundle(uint64_t, uint64_t); explicit StreamingMessageBundle(StreamingMessageBundle &bundle); virtual ~StreamingMessageBundle() = default; inline uint32_t GetRawBundleSize() const { return raw_bundle_size_; } bool operator==(StreamingMessageBundle &bundle) const; bool operator==(StreamingMessageBundle *bundle_ptr) const; void GetMessageList(std::list<StreamingMessagePtr> &message_list); const std::list<StreamingMessagePtr> &GetMessageList() const { return message_list_; } virtual void ToBytes(uint8_t *data); static StreamingMessageBundlePtr FromBytes(const uint8_t *data, bool verifer_check = true); inline virtual uint32_t ClassBytesSize() { return kMessageBundleHeaderSize + raw_bundle_size_; }; static void GetMessageListFromRawData(const uint8_t *bytes, uint32_t bytes_size, uint32_t message_list_size, std::list<StreamingMessagePtr> &message_list); static void ConvertMessageListToRawData( const std::list<StreamingMessagePtr> &message_list, uint32_t raw_data_size, uint8_t *raw_data); }; } // namespace streaming } // namespace ray #endif // RAY_MESSAGE_BUNDLE_H
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/message/priority_queue.h
C/C++ Header
#ifndef RAY_PRIORITY_QUEUE_H #define RAY_PRIORITY_QUEUE_H #include <algorithm> #include <memory> #include <vector> #include "util/streaming_logging.h" namespace ray { namespace streaming { template <class T, class C> class PriorityQueue { private: std::vector<T> merge_vec_; C comparator_; public: PriorityQueue(C &comparator) : comparator_(comparator){}; inline void push(T &&item) { merge_vec_.push_back(std::forward<T>(item)); std::push_heap(merge_vec_.begin(), merge_vec_.end(), comparator_); } inline void push(const T &item) { merge_vec_.push_back(item); std::push_heap(merge_vec_.begin(), merge_vec_.end(), comparator_); } inline void pop() { STREAMING_CHECK(!isEmpty()); std::pop_heap(merge_vec_.begin(), merge_vec_.end(), comparator_); merge_vec_.pop_back(); } inline void makeHeap() { std::make_heap(merge_vec_.begin(), merge_vec_.end(), comparator_); } inline T &top() { return merge_vec_.front(); } inline uint32_t size() { return merge_vec_.size(); } inline bool isEmpty() { return merge_vec_.empty(); } std::vector<T> &getRawVector() { return merge_vec_; } }; } // namespace streaming } // namespace ray #endif // RAY_PRIORITY_QUEUE_H
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/protobuf/streaming.proto
Protocol Buffers
syntax = "proto3";

package ray.streaming.proto;

option java_package = "org.ray.streaming.runtime.generated";

// Role an operator plays in the streaming job graph.
enum OperatorType {
  UNKNOWN = 0;
  TRANSFORM = 1;
  SOURCE = 2;
  SINK = 3;
}

// Per-worker streaming configuration handed from the Java runtime to native code.
// all string in this message is ASCII string
message StreamingConfig {
  string job_name = 1;
  string task_job_id = 2;
  string worker_name = 3;
  string op_name = 4;
  OperatorType role = 5;
  // NOTE(review): presumably the transfer ring buffer capacity in slots — confirm
  // against the consumer of this config.
  uint32 ring_buffer_capacity = 6;
  // NOTE(review): presumably the interval for emitting empty messages — confirm.
  uint32 empty_message_interval = 7;
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/protobuf/streaming_queue.proto
Protocol Buffers
syntax = "proto3";

package ray.streaming.queue.protobuf;

// Discriminates the message wrapped in a streaming-queue frame.
enum StreamingQueueMessageType {
  StreamingQueueDataMsgType = 0;
  StreamingQueueCheckMsgType = 1;
  StreamingQueueCheckRspMsgType = 2;
  StreamingQueueNotificationMsgType = 3;
  StreamingQueueTestInitMsgType = 4;
  StreamingQueueTestCheckStatusRspMsgType = 5;
}

enum StreamingQueueError {
  OK = 0;
  QUEUE_NOT_EXIST = 1;
  NO_VALID_DATA_TO_PULL = 2;
}

// Metadata for a chunk of queue data sent from writer to reader; the raw bytes
// travel outside this protobuf, appended after it in the same frame.
message StreamingQueueDataMsg {
  bytes src_actor_id = 1;
  bytes dst_actor_id = 2;
  bytes queue_id = 3;
  uint64 seq_id = 4;
  uint64 length = 5;  // byte length of the appended data section
  bool raw = 6;
}

// Writer -> reader probe: is the peer queue ready?
message StreamingQueueCheckMsg {
  bytes src_actor_id = 1;
  bytes dst_actor_id = 2;
  bytes queue_id = 3;
}

// Reader -> writer response to StreamingQueueCheckMsg.
message StreamingQueueCheckRspMsg {
  bytes src_actor_id = 1;
  bytes dst_actor_id = 2;
  bytes queue_id = 3;
  StreamingQueueError err_code = 4;
}

// Reader -> writer notification of the consumed offset.
message StreamingQueueNotificationMsg {
  bytes src_actor_id = 1;
  bytes dst_actor_id = 2;
  bytes queue_id = 3;
  uint64 seq_id = 4;
}

// for test
enum StreamingQueueTestRole {
  WRITER = 0;
  READER = 1;
}

// Driver -> test worker: initialize a test suite run.
message StreamingQueueTestInitMsg {
  StreamingQueueTestRole role = 1;
  bytes src_actor_id = 2;
  bytes dst_actor_id = 3;
  bytes actor_handle = 4;
  repeated bytes queue_ids = 5;
  repeated bytes rescale_queue_ids = 6;
  string test_suite_name = 7;
  string test_name = 8;
  uint64 param = 9;
}

// Test worker -> driver: pass/fail status for a named test.
message StreamingQueueTestCheckStatusRspMsg {
  string test_name = 1;
  bool status = 2;
}
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/message.cc
C++
#include "message.h"

#include <cstring>
#include <memory>
#include <string>
#include <vector>

namespace ray {
namespace streaming {

const uint32_t Message::MagicNum = 0xBABA0510;

namespace {
/// Every serialized frame starts with MagicNum(u32) + MessageType + PbLength(u64)
/// followed by the protobuf payload. Skip the fixed prefix, copy the protobuf
/// payload into `pb`, and return a pointer just past it (the start of the optional
/// appended data section). Factored out of the previously duplicated FromBytes
/// preambles.
uint8_t *ParseProtobufFrame(uint8_t *bytes, std::string *pb) {
  bytes += sizeof(uint32_t) + sizeof(queue::protobuf::StreamingQueueMessageType);
  uint64_t pb_length = *reinterpret_cast<uint64_t *>(bytes);
  bytes += sizeof(uint64_t);
  pb->assign(reinterpret_cast<char const *>(bytes), pb_length);
  return bytes + pb_length;
}
}  // namespace

/// Serialize this message into a frame:
/// MagicNum(u32) + type + pb_length(i64) + protobuf + [optional buffer_ bytes].
std::unique_ptr<LocalMemoryBuffer> Message::ToBytes() {
  std::string pboutput;
  ToProtobuf(&pboutput);
  int64_t fbs_length = pboutput.length();

  queue::protobuf::StreamingQueueMessageType type = Type();
  size_t total_len =
      sizeof(Message::MagicNum) + sizeof(type) + sizeof(fbs_length) + fbs_length;
  if (buffer_ != nullptr) {
    total_len += buffer_->Size();
  }
  // BUGFIX: the original paired `new uint8_t[total_len]` with scalar `delete`,
  // which is undefined behavior. A std::vector releases the scratch correctly.
  std::vector<uint8_t> bytes(total_len);

  uint8_t *p_cur = bytes.data();
  memcpy(p_cur, &Message::MagicNum, sizeof(Message::MagicNum));

  p_cur += sizeof(Message::MagicNum);
  memcpy(p_cur, &type, sizeof(type));

  p_cur += sizeof(type);
  memcpy(p_cur, &fbs_length, sizeof(fbs_length));

  p_cur += sizeof(fbs_length);
  memcpy(p_cur, pboutput.data(), fbs_length);

  if (buffer_ != nullptr) {
    p_cur += fbs_length;
    memcpy(p_cur, buffer_->Data(), buffer_->Size());
  }

  // COPY: LocalMemoryBuffer is constructed with copy=true, so the scratch vector
  // may be destroyed when this function returns.
  return std::unique_ptr<LocalMemoryBuffer>(
      new LocalMemoryBuffer(bytes.data(), total_len, true));
}

void DataMessage::ToProtobuf(std::string *output) {
  queue::protobuf::StreamingQueueDataMsg msg;
  msg.set_src_actor_id(actor_id_.Binary());
  msg.set_dst_actor_id(peer_actor_id_.Binary());
  msg.set_queue_id(queue_id_.Binary());
  msg.set_seq_id(seq_id_);
  msg.set_length(buffer_->Size());
  msg.set_raw(raw_);
  msg.SerializeToString(output);
}

std::shared_ptr<DataMessage> DataMessage::FromBytes(uint8_t *bytes) {
  std::string inputpb;
  bytes = ParseProtobufFrame(bytes, &inputpb);
  queue::protobuf::StreamingQueueDataMsg message;
  message.ParseFromString(inputpb);
  ActorID src_actor_id = ActorID::FromBinary(message.src_actor_id());
  ActorID dst_actor_id = ActorID::FromBinary(message.dst_actor_id());
  ObjectID queue_id = ObjectID::FromBinary(message.queue_id());
  uint64_t seq_id = message.seq_id();
  uint64_t length = message.length();
  bool raw = message.raw();

  /// Copy data and create a new buffer for streaming queue.
  std::shared_ptr<LocalMemoryBuffer> buffer =
      std::make_shared<LocalMemoryBuffer>(bytes, (size_t)length, true);
  return std::make_shared<DataMessage>(src_actor_id, dst_actor_id, queue_id, seq_id,
                                       buffer, raw);
}

void NotificationMessage::ToProtobuf(std::string *output) {
  queue::protobuf::StreamingQueueNotificationMsg msg;
  msg.set_src_actor_id(actor_id_.Binary());
  msg.set_dst_actor_id(peer_actor_id_.Binary());
  msg.set_queue_id(queue_id_.Binary());
  msg.set_seq_id(seq_id_);
  msg.SerializeToString(output);
}

std::shared_ptr<NotificationMessage> NotificationMessage::FromBytes(uint8_t *bytes) {
  std::string inputpb;
  ParseProtobufFrame(bytes, &inputpb);
  queue::protobuf::StreamingQueueNotificationMsg message;
  message.ParseFromString(inputpb);
  STREAMING_LOG(INFO) << "message.src_actor_id: " << message.src_actor_id();
  ActorID src_actor_id = ActorID::FromBinary(message.src_actor_id());
  ActorID dst_actor_id = ActorID::FromBinary(message.dst_actor_id());
  ObjectID queue_id = ObjectID::FromBinary(message.queue_id());
  uint64_t seq_id = message.seq_id();
  return std::make_shared<NotificationMessage>(src_actor_id, dst_actor_id, queue_id,
                                               seq_id);
}

void CheckMessage::ToProtobuf(std::string *output) {
  queue::protobuf::StreamingQueueCheckMsg msg;
  msg.set_src_actor_id(actor_id_.Binary());
  msg.set_dst_actor_id(peer_actor_id_.Binary());
  msg.set_queue_id(queue_id_.Binary());
  msg.SerializeToString(output);
}

std::shared_ptr<CheckMessage> CheckMessage::FromBytes(uint8_t *bytes) {
  std::string inputpb;
  ParseProtobufFrame(bytes, &inputpb);
  queue::protobuf::StreamingQueueCheckMsg message;
  message.ParseFromString(inputpb);
  ActorID src_actor_id = ActorID::FromBinary(message.src_actor_id());
  ActorID dst_actor_id = ActorID::FromBinary(message.dst_actor_id());
  ObjectID queue_id = ObjectID::FromBinary(message.queue_id());
  return std::make_shared<CheckMessage>(src_actor_id, dst_actor_id, queue_id);
}

void CheckRspMessage::ToProtobuf(std::string *output) {
  queue::protobuf::StreamingQueueCheckRspMsg msg;
  msg.set_src_actor_id(actor_id_.Binary());
  msg.set_dst_actor_id(peer_actor_id_.Binary());
  msg.set_queue_id(queue_id_.Binary());
  msg.set_err_code(err_code_);
  msg.SerializeToString(output);
}

std::shared_ptr<CheckRspMessage> CheckRspMessage::FromBytes(uint8_t *bytes) {
  std::string inputpb;
  ParseProtobufFrame(bytes, &inputpb);
  queue::protobuf::StreamingQueueCheckRspMsg message;
  message.ParseFromString(inputpb);
  ActorID src_actor_id = ActorID::FromBinary(message.src_actor_id());
  ActorID dst_actor_id = ActorID::FromBinary(message.dst_actor_id());
  ObjectID queue_id = ObjectID::FromBinary(message.queue_id());
  queue::protobuf::StreamingQueueError err_code = message.err_code();
  return std::make_shared<CheckRspMessage>(src_actor_id, dst_actor_id, queue_id,
                                           err_code);
}

void TestInitMessage::ToProtobuf(std::string *output) {
  queue::protobuf::StreamingQueueTestInitMsg msg;
  msg.set_role(role_);
  msg.set_src_actor_id(actor_id_.Binary());
  msg.set_dst_actor_id(peer_actor_id_.Binary());
  msg.set_actor_handle(actor_handle_serialized_);
  for (auto &queue_id : queue_ids_) {
    msg.add_queue_ids(queue_id.Binary());
  }
  for (auto &queue_id : rescale_queue_ids_) {
    msg.add_rescale_queue_ids(queue_id.Binary());
  }
  msg.set_test_suite_name(test_suite_name_);
  msg.set_test_name(test_name_);
  msg.set_param(param_);
  msg.SerializeToString(output);
}

std::shared_ptr<TestInitMessage> TestInitMessage::FromBytes(uint8_t *bytes) {
  std::string inputpb;
  ParseProtobufFrame(bytes, &inputpb);
  queue::protobuf::StreamingQueueTestInitMsg message;
  message.ParseFromString(inputpb);
  queue::protobuf::StreamingQueueTestRole role = message.role();
  ActorID src_actor_id = ActorID::FromBinary(message.src_actor_id());
  ActorID dst_actor_id = ActorID::FromBinary(message.dst_actor_id());
  std::string actor_handle_serialized = message.actor_handle();
  std::vector<ObjectID> queue_ids;
  for (int i = 0; i < message.queue_ids_size(); i++) {
    queue_ids.push_back(ObjectID::FromBinary(message.queue_ids(i)));
  }
  std::vector<ObjectID> rescale_queue_ids;
  for (int i = 0; i < message.rescale_queue_ids_size(); i++) {
    rescale_queue_ids.push_back(ObjectID::FromBinary(message.rescale_queue_ids(i)));
  }
  std::string test_suite_name = message.test_suite_name();
  std::string test_name = message.test_name();
  uint64_t param = message.param();
  return std::make_shared<TestInitMessage>(role, src_actor_id, dst_actor_id,
                                           actor_handle_serialized, queue_ids,
                                           rescale_queue_ids, test_suite_name,
                                           test_name, param);
}

void TestCheckStatusRspMsg::ToProtobuf(std::string *output) {
  queue::protobuf::StreamingQueueTestCheckStatusRspMsg msg;
  msg.set_test_name(test_name_);
  msg.set_status(status_);
  msg.SerializeToString(output);
}

std::shared_ptr<TestCheckStatusRspMsg> TestCheckStatusRspMsg::FromBytes(uint8_t *bytes) {
  std::string inputpb;
  ParseProtobufFrame(bytes, &inputpb);
  queue::protobuf::StreamingQueueTestCheckStatusRspMsg message;
  message.ParseFromString(inputpb);
  std::string test_name = message.test_name();
  bool status = message.status();
  return std::make_shared<TestCheckStatusRspMsg>(test_name, status);
}

}  // namespace streaming
}  // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/message.h
C/C++ Header
#ifndef _STREAMING_QUEUE_MESSAGE_H_
#define _STREAMING_QUEUE_MESSAGE_H_

#include "protobuf/streaming_queue.pb.h"
#include "ray/common/buffer.h"
#include "ray/common/id.h"
#include "util/streaming_logging.h"

namespace ray {
namespace streaming {

/// Base class of all message classes.
/// All payloads transferred through direct actor call are packed into a unified package,
/// consisting of protobuf-formatted metadata and data, including data and control
/// messages. These message classes wrap the package defined in
/// protobuf/streaming_queue.proto respectively.
class Message {
 public:
  /// Construct a Message instance.
  /// \param[in] actor_id ActorID of message sender.
  /// \param[in] peer_actor_id ActorID of message receiver.
  /// \param[in] queue_id queue id to identify which queue the message is sent to.
  /// \param[in] buffer an optional param, a chunk of data to send.
  Message(const ActorID &actor_id, const ActorID &peer_actor_id, const ObjectID &queue_id,
          std::shared_ptr<LocalMemoryBuffer> buffer = nullptr)
      : actor_id_(actor_id),
        peer_actor_id_(peer_actor_id),
        queue_id_(queue_id),
        buffer_(buffer) {}
  // Default constructor leaves the id fields default-constructed.
  Message() {}
  virtual ~Message() {}

  ActorID ActorId() { return actor_id_; }
  ActorID PeerActorId() { return peer_actor_id_; }
  ObjectID QueueId() { return queue_id_; }
  std::shared_ptr<LocalMemoryBuffer> Buffer() { return buffer_; }

  /// Serialize all meta data and data to a LocalMemoryBuffer, which can be sent through
  /// direct actor call.
  /// \return serialized buffer.
  std::unique_ptr<LocalMemoryBuffer> ToBytes();

  /// Get message type.
  /// \return message type.
  virtual queue::protobuf::StreamingQueueMessageType Type() = 0;

  /// All subclasses should implement `ToProtobuf` to serialize their own protobuf data.
  virtual void ToProtobuf(std::string *output) = 0;

 protected:
  ActorID actor_id_;       // sender
  ActorID peer_actor_id_;  // receiver
  ObjectID queue_id_;      // target queue
  std::shared_ptr<LocalMemoryBuffer> buffer_;  // optional data section

 public:
  /// A magic number to identify a valid message.
  static const uint32_t MagicNum;
};

/// Wrap StreamingQueueDataMsg in streaming_queue.proto.
/// DataMessage encapsulates the memory buffer of QueueItem; a one-to-one relationship
/// exists between DataMessage and QueueItem.
class DataMessage : public Message {
 public:
  DataMessage(const ActorID &actor_id, const ActorID &peer_actor_id, ObjectID queue_id,
              uint64_t seq_id, std::shared_ptr<LocalMemoryBuffer> buffer, bool raw)
      : Message(actor_id, peer_actor_id, queue_id, buffer), seq_id_(seq_id), raw_(raw) {}
  virtual ~DataMessage() {}

  /// Deserialize a DataMessage from a serialized frame.
  static std::shared_ptr<DataMessage> FromBytes(uint8_t *bytes);
  virtual void ToProtobuf(std::string *output);

  uint64_t SeqId() { return seq_id_; }
  bool IsRaw() { return raw_; }
  queue::protobuf::StreamingQueueMessageType Type() { return type_; }

 private:
  uint64_t seq_id_;
  bool raw_;
  const queue::protobuf::StreamingQueueMessageType type_ =
      queue::protobuf::StreamingQueueMessageType::StreamingQueueDataMsgType;
};

/// Wrap StreamingQueueNotificationMsg in streaming_queue.proto.
/// NotificationMessage: downstream queues send to upstream queues, for the data reader
/// to inform the data writer of the consumed offset.
class NotificationMessage : public Message {
 public:
  NotificationMessage(const ActorID &actor_id, const ActorID &peer_actor_id,
                      const ObjectID &queue_id, uint64_t seq_id)
      : Message(actor_id, peer_actor_id, queue_id), seq_id_(seq_id) {}
  virtual ~NotificationMessage() {}

  static std::shared_ptr<NotificationMessage> FromBytes(uint8_t *bytes);
  virtual void ToProtobuf(std::string *output);

  uint64_t SeqId() { return seq_id_; }
  queue::protobuf::StreamingQueueMessageType Type() { return type_; }

 private:
  uint64_t seq_id_;
  const queue::protobuf::StreamingQueueMessageType type_ =
      queue::protobuf::StreamingQueueMessageType::StreamingQueueNotificationMsgType;
};

/// Wrap StreamingQueueCheckMsg in streaming_queue.proto.
/// CheckMessage: upstream queues send to downstream queues, for the data writer to check
/// whether the corresponding downstream queue is ready or not.
class CheckMessage : public Message {
 public:
  CheckMessage(const ActorID &actor_id, const ActorID &peer_actor_id,
               const ObjectID &queue_id)
      : Message(actor_id, peer_actor_id, queue_id) {}
  virtual ~CheckMessage() {}

  static std::shared_ptr<CheckMessage> FromBytes(uint8_t *bytes);
  virtual void ToProtobuf(std::string *output);

  queue::protobuf::StreamingQueueMessageType Type() { return type_; }

 private:
  const queue::protobuf::StreamingQueueMessageType type_ =
      queue::protobuf::StreamingQueueMessageType::StreamingQueueCheckMsgType;
};

/// Wrap StreamingQueueCheckRspMsg in streaming_queue.proto.
/// CheckRspMessage: downstream queues send to upstream queues, the response message to
/// CheckMessage to indicate whether the downstream queue is ready or not.
class CheckRspMessage : public Message {
 public:
  CheckRspMessage(const ActorID &actor_id, const ActorID &peer_actor_id,
                  const ObjectID &queue_id, queue::protobuf::StreamingQueueError err_code)
      : Message(actor_id, peer_actor_id, queue_id), err_code_(err_code) {}
  virtual ~CheckRspMessage() {}

  static std::shared_ptr<CheckRspMessage> FromBytes(uint8_t *bytes);
  virtual void ToProtobuf(std::string *output);

  queue::protobuf::StreamingQueueMessageType Type() { return type_; }
  queue::protobuf::StreamingQueueError Error() { return err_code_; }

 private:
  queue::protobuf::StreamingQueueError err_code_;
  const queue::protobuf::StreamingQueueMessageType type_ =
      queue::protobuf::StreamingQueueMessageType::StreamingQueueCheckRspMsgType;
};

/// Wrap StreamingQueueTestInitMsg in streaming_queue.proto.
/// TestInitMessage, used for test, driver sends to test workers to init test suite.
class TestInitMessage : public Message {
 public:
  TestInitMessage(const queue::protobuf::StreamingQueueTestRole role,
                  const ActorID &actor_id, const ActorID &peer_actor_id,
                  const std::string actor_handle_serialized,
                  const std::vector<ObjectID> &queue_ids,
                  const std::vector<ObjectID> &rescale_queue_ids,
                  std::string test_suite_name, std::string test_name, uint64_t param)
      // The first queue id doubles as the message's queue id; assumes
      // queue_ids is non-empty — TODO confirm all call sites guarantee that.
      : Message(actor_id, peer_actor_id, queue_ids[0]),
        actor_handle_serialized_(actor_handle_serialized),
        queue_ids_(queue_ids),
        rescale_queue_ids_(rescale_queue_ids),
        role_(role),
        test_suite_name_(test_suite_name),
        test_name_(test_name),
        param_(param) {}
  virtual ~TestInitMessage() {}

  /// Deserialize a TestInitMessage from a serialized byte buffer.
  static std::shared_ptr<TestInitMessage> FromBytes(uint8_t *bytes);
  virtual void ToProtobuf(std::string *output);
  queue::protobuf::StreamingQueueMessageType Type() { return type_; }
  std::string ActorHandleSerialized() { return actor_handle_serialized_; }
  queue::protobuf::StreamingQueueTestRole Role() { return role_; }
  std::vector<ObjectID> QueueIds() { return queue_ids_; }
  std::vector<ObjectID> RescaleQueueIds() { return rescale_queue_ids_; }
  std::string TestSuiteName() { return test_suite_name_; }
  std::string TestName() { return test_name_; }
  uint64_t Param() { return param_; }

  /// Human-readable dump of all fields, for logging.
  std::string ToString() {
    std::ostringstream os;
    os << "actor_handle_serialized: " << actor_handle_serialized_;
    os << " actor_id: " << ActorId();
    os << " peer_actor_id: " << PeerActorId();
    os << " queue_ids:[";
    for (auto &qid : queue_ids_) {
      os << qid << ",";
    }
    os << "], rescale_queue_ids:[";
    for (auto &qid : rescale_queue_ids_) {
      os << qid << ",";
    }
    os << "],";
    os << " role:" << queue::protobuf::StreamingQueueTestRole_Name(role_);
    os << " suite_name: " << test_suite_name_;
    os << " test_name: " << test_name_;
    os << " param: " << param_;
    return os.str();
  }

 private:
  const queue::protobuf::StreamingQueueMessageType type_ =
      queue::protobuf::StreamingQueueMessageType::StreamingQueueTestInitMsgType;
  std::string actor_handle_serialized_;
  std::vector<ObjectID> queue_ids_;
  std::vector<ObjectID> rescale_queue_ids_;
  queue::protobuf::StreamingQueueTestRole role_;
  std::string test_suite_name_;
  std::string test_name_;
  uint64_t param_;
};

/// Wrap StreamingQueueTestCheckStatusRspMsg in streaming_queue.proto.
/// TestCheckStatusRspMsg, used for test; the driver sends it to test workers to
/// check whether a test has completed or failed.
class TestCheckStatusRspMsg : public Message {
 public:
  TestCheckStatusRspMsg(const std::string test_name, bool status)
      : test_name_(test_name), status_(status) {}
  virtual ~TestCheckStatusRspMsg() {}

  /// Deserialize a TestCheckStatusRspMsg from a serialized byte buffer.
  static std::shared_ptr<TestCheckStatusRspMsg> FromBytes(uint8_t *bytes);
  virtual void ToProtobuf(std::string *output);
  queue::protobuf::StreamingQueueMessageType Type() { return type_; }
  std::string TestName() { return test_name_; }
  /// True when the test has passed — presumably; verify against the test
  /// worker that fills this field.
  bool Status() { return status_; }

 private:
  const queue::protobuf::StreamingQueueMessageType type_ = queue::protobuf::
      StreamingQueueMessageType::StreamingQueueTestCheckStatusRspMsgType;
  std::string test_name_;
  bool status_;
};

}  // namespace streaming
}  // namespace ray
#endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/queue.cc
C++
#include "queue.h"

#include <chrono>
#include <thread>

#include "queue_handler.h"
#include "util/streaming_util.h"

namespace ray {
namespace streaming {

/// Append `item` to the pending region. Every Queue method locks `mutex_`, so
/// the queue is safe for one producer and one consumer thread.
/// \return false (item not enqueued) if it would exceed `max_data_size_`.
bool Queue::Push(QueueItem item) {
  std::unique_lock<std::mutex> lock(mutex_);
  if (max_data_size_ < item.DataSize() + data_size_) return false;
  buffer_queue_.push_back(item);
  data_size_ += item.DataSize();
  readable_cv_.notify_one();
  return true;
}

/// Peek the oldest item in processed state, or an invalid sentinel item when
/// the processed region is empty.
QueueItem Queue::FrontProcessed() {
  std::unique_lock<std::mutex> lock(mutex_);
  STREAMING_CHECK(buffer_queue_.size() != 0) << "WriterQueue Pop fail";
  if (watershed_iter_ == buffer_queue_.begin()) {
    return InvalidQueueItem();
  }
  QueueItem item = buffer_queue_.front();
  return item;
}

/// Remove and return the oldest processed item, shrinking both byte counters.
/// Returns an invalid sentinel item when the processed region is empty.
QueueItem Queue::PopProcessed() {
  std::unique_lock<std::mutex> lock(mutex_);
  STREAMING_CHECK(buffer_queue_.size() != 0) << "WriterQueue Pop fail";
  if (watershed_iter_ == buffer_queue_.begin()) {
    return InvalidQueueItem();
  }
  QueueItem item = buffer_queue_.front();
  buffer_queue_.pop_front();
  data_size_ -= item.DataSize();
  data_size_sent_ -= item.DataSize();
  return item;
}

/// Move the oldest pending item across the watershed into the processed
/// region and return it.
/// Precondition: the pending region is non-empty (check IsPendingEmpty first);
/// the first pending element is dereferenced unconditionally.
QueueItem Queue::PopPending() {
  std::unique_lock<std::mutex> lock(mutex_);
  auto it = std::next(watershed_iter_);
  QueueItem item = *it;
  data_size_sent_ += it->DataSize();
  buffer_queue_.splice(watershed_iter_, buffer_queue_, it, std::next(it));
  return item;
}

/// Like PopPending, but blocks for up to `timeout_us` microseconds waiting for
/// a pending item. Returns an invalid sentinel item on timeout.
QueueItem Queue::PopPendingBlockTimeout(uint64_t timeout_us) {
  std::unique_lock<std::mutex> lock(mutex_);
  std::chrono::system_clock::time_point point =
      std::chrono::system_clock::now() + std::chrono::microseconds(timeout_us);
  if (readable_cv_.wait_until(lock, point, [this] {
        return std::next(watershed_iter_) != buffer_queue_.end();
      })) {
    auto it = std::next(watershed_iter_);
    QueueItem item = *it;
    data_size_sent_ += it->DataSize();
    buffer_queue_.splice(watershed_iter_, buffer_queue_, it, std::next(it));
    return item;
  } else {
    // Timed out: return the same sentinel used by the other accessors instead
    // of constructing one from a stack buffer that dies with this frame.
    return InvalidQueueItem();
  }
}

/// Return (a copy of) the newest pending item, or an invalid sentinel item
/// when the pending region is empty.
QueueItem Queue::BackPending() {
  std::unique_lock<std::mutex> lock(mutex_);
  if (std::next(watershed_iter_) == buffer_queue_.end()) {
    return InvalidQueueItem();
  }
  return buffer_queue_.back();
}

bool Queue::IsPendingEmpty() {
  std::unique_lock<std::mutex> lock(mutex_);
  return std::next(watershed_iter_) == buffer_queue_.end();
}

/// True if adding `data_size` more bytes would exceed the queue's byte limit.
bool Queue::IsPendingFull(uint64_t data_size) {
  std::unique_lock<std::mutex> lock(mutex_);
  return max_data_size_ < data_size + data_size_;
}

/// Item count in processed state, derived from the seq-id span between the
/// oldest and newest processed items.
size_t Queue::ProcessedCount() {
  std::unique_lock<std::mutex> lock(mutex_);
  if (watershed_iter_ == buffer_queue_.begin()) return 0;
  auto begin = buffer_queue_.begin();
  auto end = std::prev(watershed_iter_);
  return end->SeqId() + 1 - begin->SeqId();
}

/// Item count in pending state, derived from the seq-id span between the
/// oldest and newest pending items.
size_t Queue::PendingCount() {
  std::unique_lock<std::mutex> lock(mutex_);
  if (std::next(watershed_iter_) == buffer_queue_.end()) return 0;
  auto begin = std::next(watershed_iter_);
  auto end = std::prev(buffer_queue_.end());
  // BUGFIX: seq ids grow toward the back of the list (see ProcessedCount), so
  // the count is end - begin + 1. The original `begin->SeqId() - end->SeqId()
  // + 1` underflowed uint64_t whenever more than one item was pending.
  return end->SeqId() - begin->SeqId() + 1;
}

/// Copy `data` into a QueueItem and append it to the pending region.
/// \return OutOfMemory when the queue cannot hold `data_size` more bytes.
Status WriterQueue::Push(uint64_t seq_id, uint8_t *data, uint32_t data_size,
                         uint64_t timestamp, bool raw) {
  if (IsPendingFull(data_size)) {
    return Status::OutOfMemory("Queue Push OutOfMemory");
  }

  // Wait until the in-flight pull finishes so pushes do not interleave with it.
  while (is_pulling_) {
    STREAMING_LOG(INFO) << "This queue is sending pull data, wait.";
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }

  QueueItem item(seq_id, data, data_size, timestamp, raw);
  // BUGFIX: Queue::Push re-checks capacity under the lock and can fail; its
  // result used to be discarded, silently dropping the item yet returning OK.
  if (!Queue::Push(item)) {
    return Status::OutOfMemory("Queue Push OutOfMemory");
  }
  return Status::OK();
}

/// Drain the pending region, wrapping each item in a DataMessage and sending
/// it to the peer actor through the transport.
void WriterQueue::Send() {
  // FIXME: front -> send -> pop, so an item only becomes "processed" after the
  // transport has accepted it; today a failed send still marks it processed.
  while (!IsPendingEmpty()) {
    QueueItem item = PopPending();
    DataMessage msg(actor_id_, peer_actor_id_, queue_id_, item.SeqId(), item.Buffer(),
                    item.IsRaw());
    std::unique_ptr<LocalMemoryBuffer> buffer = msg.ToBytes();
    STREAMING_CHECK(transport_ != nullptr);
    transport_->Send(std::move(buffer),
                     DownstreamQueueMessageHandler::peer_async_function_);
  }
}

/// Evict processed items up to min(min_consumed_id_, eviction_limit_).
/// \return OutOfMemory when neither watermark allows eviction yet.
Status WriterQueue::TryEvictItems() {
  STREAMING_LOG(INFO) << "TryEvictItems";
  QueueItem item = FrontProcessed();

  uint64_t first_seq_id = item.SeqId();
  STREAMING_LOG(INFO) << "TryEvictItems first_seq_id: " << first_seq_id
                      << " min_consumed_id_: " << min_consumed_id_
                      << " eviction_limit_: " << eviction_limit_;
  if (min_consumed_id_ == QUEUE_INVALID_SEQ_ID || first_seq_id > min_consumed_id_) {
    return Status::OutOfMemory("The queue is full and some reader doesn't consume");
  }

  if (eviction_limit_ == QUEUE_INVALID_SEQ_ID || first_seq_id > eviction_limit_) {
    return Status::OutOfMemory("The queue is full and eviction limit block evict");
  }

  uint64_t evict_target_seq_id = std::min(min_consumed_id_, eviction_limit_);

  while (item.SeqId() <= evict_target_seq_id) {
    PopProcessed();
    STREAMING_LOG(INFO) << "TryEvictItems directly " << item.SeqId();
    item = FrontProcessed();
  }
  return Status::OK();
}

/// Callback (runs on the queue thread) when the downstream reader reports its
/// consumed offset; advances the eviction watermark.
void WriterQueue::OnNotify(std::shared_ptr<NotificationMessage> notify_msg) {
  STREAMING_LOG(INFO) << "OnNotify target seq_id: " << notify_msg->SeqId();
  min_consumed_id_ = notify_msg->SeqId();
}

/// Drop processed items with seq id <= `seq_id`, then notify the upstream
/// writer of the new consumed offset.
void ReaderQueue::OnConsumed(uint64_t seq_id) {
  STREAMING_LOG(INFO) << "OnConsumed: " << seq_id;
  QueueItem item = FrontProcessed();
  // NOTE(review): termination relies on InvalidQueueItem's seq id comparing
  // greater than `seq_id` once the processed region drains — verify the value
  // of QUEUE_INVALID_SEQ_ID.
  while (item.SeqId() <= seq_id) {
    PopProcessed();
    item = FrontProcessed();
  }
  Notify(seq_id);
}

/// Send a NotificationMessage carrying `seq_id` back to the upstream writer.
void ReaderQueue::Notify(uint64_t seq_id) {
  std::vector<TaskArg> task_args;
  CreateNotifyTask(seq_id, task_args);
  // SubmitActorTask
  NotificationMessage msg(actor_id_, peer_actor_id_, queue_id_, seq_id);
  std::unique_ptr<LocalMemoryBuffer> buffer = msg.ToBytes();

  transport_->Send(std::move(buffer), UpstreamQueueMessageHandler::peer_async_function_);
}

/// Placeholder: notification currently goes through the transport directly.
void ReaderQueue::CreateNotifyTask(uint64_t seq_id, std::vector<TaskArg> &task_args) {}

/// Accept an item pushed by the upstream writer. Items whose seq id is not the
/// next expected one are dropped with a warning.
void ReaderQueue::OnData(QueueItem &item) {
  if (item.SeqId() != expect_seq_id_) {
    STREAMING_LOG(WARNING) << "OnData ignore seq_id: " << item.SeqId()
                           << " expect_seq_id_: " << expect_seq_id_;
    return;
  }

  last_recv_seq_id_ = item.SeqId();
  STREAMING_LOG(DEBUG) << "ReaderQueue::OnData seq_id: " << last_recv_seq_id_;

  Push(item);
  expect_seq_id_++;
}

}  // namespace streaming
}  // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/queue.h
C/C++ Header
#ifndef _STREAMING_QUEUE_H_
#define _STREAMING_QUEUE_H_

#include <iterator>
#include <list>
#include <vector>

#include "ray/common/id.h"
#include "ray/util/util.h"

#include "queue_item.h"
#include "transport.h"
#include "util/streaming_logging.h"
#include "utils.h"

namespace ray {
namespace streaming {

using ray::ObjectID;

enum QueueType { UPSTREAM = 0, DOWNSTREAM };

/// A queue-like data structure, which does not delete its items after they are
/// popped. The lifecycle of each item is:
/// - Pending, an item is pushed into a queue, but has not been processed (sent
///   out or consumed),
/// - Processed, has been handled by the user, but should not be deleted.
/// - Evicted, useless to the user, should be popped and destroyed.
/// At present, this data structure is implemented with one std::list,
/// using a watershed iterator to divide the two regions: items before the
/// watershed are processed, items after it are pending.
class Queue {
 public:
  /// \param[in] queue_id the unique identification of a pair of queues (upstream and
  /// downstream). \param[in] size max size of the queue in bytes. \param[in] transport
  /// transport to send items to peer.
  /// Note: an invalid sentinel item is pushed first so the watershed iterator
  /// always points at a live list node.
  Queue(ObjectID queue_id, uint64_t size, std::shared_ptr<Transport> transport)
      : queue_id_(queue_id), max_data_size_(size), data_size_(0), data_size_sent_(0) {
    buffer_queue_.push_back(InvalidQueueItem());
    watershed_iter_ = buffer_queue_.begin();
  }

  virtual ~Queue() {}

  /// Push an item into the queue.
  /// \param[in] item the QueueItem object to be send to peer.
  /// \return false if the queue is full.
  bool Push(QueueItem item);

  /// Get the front of item which in processed state.
  QueueItem FrontProcessed();

  /// Pop the front of item which in processed state.
  QueueItem PopProcessed();

  /// Pop the front of item which in pending state, the item
  /// will not be evicted at this moment, its state turn to
  /// processed.
  QueueItem PopPending();

  /// PopPending with timeout in microseconds.
  QueueItem PopPendingBlockTimeout(uint64_t timeout_us);

  /// Return the last item in pending state.
  QueueItem BackPending();

  bool IsPendingEmpty();
  bool IsPendingFull(uint64_t data_size = 0);

  /// Return the size in bytes of all items in queue.
  uint64_t QueueSize() { return data_size_; }

  /// Return the size in bytes of all items in pending state.
  uint64_t PendingDataSize() { return data_size_ - data_size_sent_; }

  /// Return the size in bytes of all items in processed state.
  uint64_t ProcessedDataSize() { return data_size_sent_; }

  /// Return item count of the queue (includes the internal sentinel item).
  size_t Count() { return buffer_queue_.size(); }

  /// Return item count in pending state.
  size_t PendingCount();

  /// Return item count in processed state.
  size_t ProcessedCount();

 protected:
  ObjectID queue_id_;
  std::list<QueueItem> buffer_queue_;
  // Divider between regions: [begin, watershed) is processed, (watershed, end)
  // is pending.
  std::list<QueueItem>::iterator watershed_iter_;

  /// max data size in bytes
  uint64_t max_data_size_;
  // Total bytes currently held (processed + pending).
  uint64_t data_size_;
  // Bytes in processed state.
  uint64_t data_size_sent_;

  // Guards all queue state; readable_cv_ is signalled on Push.
  std::mutex mutex_;
  std::condition_variable readable_cv_;
};

/// Queue in upstream.
class WriterQueue : public Queue {
 public:
  /// \param queue_id, the unique ObjectID to identify a queue
  /// \param actor_id, the actor id of upstream worker
  /// \param peer_actor_id, the actor id of downstream worker
  /// \param size, max data size in bytes
  /// \param transport, transport
  WriterQueue(const ObjectID &queue_id, const ActorID &actor_id,
              const ActorID &peer_actor_id, uint64_t size,
              std::shared_ptr<Transport> transport)
      : Queue(queue_id, size, transport),
        actor_id_(actor_id),
        peer_actor_id_(peer_actor_id),
        eviction_limit_(QUEUE_INVALID_SEQ_ID),
        min_consumed_id_(QUEUE_INVALID_SEQ_ID),
        peer_last_msg_id_(0),
        peer_last_seq_id_(QUEUE_INVALID_SEQ_ID),
        transport_(transport),
        is_pulling_(false) {}

  /// Push a continuous buffer into queue.
  /// NOTE: the buffer should be copied.
  Status Push(uint64_t seq_id, uint8_t *data, uint32_t data_size, uint64_t timestamp,
              bool raw = false);

  /// Callback function, will be called when downstream queue notifies
  /// it has consumed some items.
  /// NOTE: this callback function is called in queue thread.
  void OnNotify(std::shared_ptr<NotificationMessage> notify_msg);

  /// Send items through direct call.
  void Send();

  /// Called when user pushes item into queue. The count of items
  /// that can be evicted is determined by eviction_limit_ and min_consumed_id_.
  Status TryEvictItems();

  void SetQueueEvictionLimit(uint64_t eviction_limit) {
    eviction_limit_ = eviction_limit;
  }

  uint64_t EvictionLimit() { return eviction_limit_; }

  uint64_t GetMinConsumedSeqID() { return min_consumed_id_; }

  void SetPeerLastIds(uint64_t msg_id, uint64_t seq_id) {
    peer_last_msg_id_ = msg_id;
    peer_last_seq_id_ = seq_id;
  }

  uint64_t GetPeerLastMsgId() { return peer_last_msg_id_; }

  uint64_t GetPeerLastSeqId() { return peer_last_seq_id_; }

 private:
  ActorID actor_id_;
  ActorID peer_actor_id_;
  // Highest seq id the writer is allowed to evict up to.
  uint64_t eviction_limit_;
  // Highest seq id the downstream reader has reported as consumed.
  uint64_t min_consumed_id_;
  uint64_t peer_last_msg_id_;
  uint64_t peer_last_seq_id_;
  std::shared_ptr<Transport> transport_;

  // Set while a pull is in flight; Push busy-waits on it.
  std::atomic<bool> is_pulling_;
};

/// Queue in downstream.
class ReaderQueue : public Queue {
 public:
  /// \param queue_id, the unique ObjectID to identify a queue
  /// \param actor_id, the actor id of upstream worker
  /// \param peer_actor_id, the actor id of downstream worker
  /// \param transport, transport
  /// NOTE: we do not restrict queue size of ReaderQueue
  ReaderQueue(const ObjectID &queue_id, const ActorID &actor_id,
              const ActorID &peer_actor_id, std::shared_ptr<Transport> transport)
      : Queue(queue_id, std::numeric_limits<uint64_t>::max(), transport),
        actor_id_(actor_id),
        peer_actor_id_(peer_actor_id),
        min_consumed_id_(QUEUE_INVALID_SEQ_ID),
        last_recv_seq_id_(QUEUE_INVALID_SEQ_ID),
        expect_seq_id_(1),
        transport_(transport) {}

  /// Delete processed items whose seq id <= seq_id,
  /// then notify upstream queue.
  void OnConsumed(uint64_t seq_id);

  void OnData(QueueItem &item);

  uint64_t GetMinConsumedSeqID() { return min_consumed_id_; }

  uint64_t GetLastRecvSeqId() { return last_recv_seq_id_; }

  void SetExpectSeqId(uint64_t expect) { expect_seq_id_ = expect; }

 private:
  void Notify(uint64_t seq_id);
  void CreateNotifyTask(uint64_t seq_id, std::vector<TaskArg> &task_args);

 private:
  ActorID actor_id_;
  ActorID peer_actor_id_;
  uint64_t min_consumed_id_;
  uint64_t last_recv_seq_id_;
  // Next seq id OnData will accept; out-of-order items are dropped.
  uint64_t expect_seq_id_;
  std::shared_ptr<PromiseWrapper> promise_for_pull_;
  std::shared_ptr<Transport> transport_;
};

}  // namespace streaming
}  // namespace ray
#endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/queue_client.cc
C++
#include "queue_client.h"

namespace ray {
namespace streaming {

/// Forward a peer message to the upstream queue service, fire-and-forget.
void WriterClient::OnWriterMessage(std::shared_ptr<LocalMemoryBuffer> buffer) {
  upstream_handler_->DispatchMessageAsync(buffer);
}

/// Forward a peer message to the upstream queue service and block for the
/// handler's response buffer.
std::shared_ptr<LocalMemoryBuffer> WriterClient::OnWriterMessageSync(
    std::shared_ptr<LocalMemoryBuffer> buffer) {
  return upstream_handler_->DispatchMessageSync(buffer);
}

/// Forward a peer message to the downstream queue service, fire-and-forget.
void ReaderClient::OnReaderMessage(std::shared_ptr<LocalMemoryBuffer> buffer) {
  downstream_handler_->DispatchMessageAsync(buffer);
}

/// Forward a peer message to the downstream queue service and block for the
/// handler's response buffer.
std::shared_ptr<LocalMemoryBuffer> ReaderClient::OnReaderMessageSync(
    std::shared_ptr<LocalMemoryBuffer> buffer) {
  return downstream_handler_->DispatchMessageSync(buffer);
}

}  // namespace streaming
}  // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/queue_client.h
C/C++ Header
#ifndef _STREAMING_QUEUE_CLIENT_H_
#define _STREAMING_QUEUE_CLIENT_H_

#include "queue_handler.h"
#include "transport.h"

namespace ray {
namespace streaming {

/// The interface of the streaming queue for DataReader.
/// A ReaderClient should be created before DataReader is created in Cython/Jni,
/// and is held by the JobWorker. When DataReader receives a buffer from an
/// upstream DataWriter (DataReader's raycall function is called), it calls
/// `OnReaderMessage` to pass the buffer to its own downstream queue, or
/// `OnReaderMessageSync` to wait for the handle result.
class ReaderClient {
 public:
  /// Construct a ReaderClient object.
  /// \param[in] core_worker CoreWorker C++ pointer of current actor
  /// \param[in] async_func DataReader's raycall function descriptor to be called by
  /// DataWriter, asynchronous semantics \param[in] sync_func DataReader's raycall
  /// function descriptor to be called by DataWriter, synchronous semantics
  /// Note: also publishes the two descriptors into DownstreamQueueMessageHandler's
  /// static fields and creates (or reuses) the singleton downstream service.
  ReaderClient(CoreWorker *core_worker, RayFunction &async_func, RayFunction &sync_func)
      : core_worker_(core_worker) {
    DownstreamQueueMessageHandler::peer_async_function_ = async_func;
    DownstreamQueueMessageHandler::peer_sync_function_ = sync_func;
    downstream_handler_ = ray::streaming::DownstreamQueueMessageHandler::CreateService(
        core_worker_, core_worker_->GetWorkerContext().GetCurrentActorID());
  }

  /// Post buffer to downstream queue service, asynchronously.
  void OnReaderMessage(std::shared_ptr<LocalMemoryBuffer> buffer);
  /// Post buffer to downstream queue service, synchronously.
  /// \return handle result.
  std::shared_ptr<LocalMemoryBuffer> OnReaderMessageSync(
      std::shared_ptr<LocalMemoryBuffer> buffer);

 private:
  CoreWorker *core_worker_;
  std::shared_ptr<DownstreamQueueMessageHandler> downstream_handler_;
};

/// Interface of streaming queue for DataWriter. Similar to ReaderClient.
class WriterClient {
 public:
  /// Construct a WriterClient; publishes the peer raycall descriptors into
  /// UpstreamQueueMessageHandler's static fields and creates (or reuses) the
  /// singleton upstream service.
  WriterClient(CoreWorker *core_worker, RayFunction &async_func, RayFunction &sync_func)
      : core_worker_(core_worker) {
    UpstreamQueueMessageHandler::peer_async_function_ = async_func;
    UpstreamQueueMessageHandler::peer_sync_function_ = sync_func;
    upstream_handler_ = ray::streaming::UpstreamQueueMessageHandler::CreateService(
        core_worker, core_worker_->GetWorkerContext().GetCurrentActorID());
  }

  /// Post buffer to upstream queue service, asynchronously.
  void OnWriterMessage(std::shared_ptr<LocalMemoryBuffer> buffer);
  /// Post buffer to upstream queue service, synchronously.
  /// \return handle result.
  std::shared_ptr<LocalMemoryBuffer> OnWriterMessageSync(
      std::shared_ptr<LocalMemoryBuffer> buffer);

 private:
  CoreWorker *core_worker_;
  std::shared_ptr<UpstreamQueueMessageHandler> upstream_handler_;
};

}  // namespace streaming
}  // namespace ray
#endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/queue_handler.cc
C++
#include "queue_handler.h"

#include "util/streaming_util.h"
#include "utils.h"

namespace ray {
namespace streaming {

// Timeout for synchronous peer round trips (check / check-rsp).
// NOTE(review): "TIMEOUTT" is a typo in the identifier; a rename would have to
// touch every use site, so it is only flagged here.
constexpr uint64_t COMMON_SYNC_CALL_TIMEOUTT_MS = 5 * 1000;

// Process-wide singleton instances of the two handler services.
std::shared_ptr<UpstreamQueueMessageHandler>
    UpstreamQueueMessageHandler::upstream_handler_ = nullptr;
std::shared_ptr<DownstreamQueueMessageHandler>
    DownstreamQueueMessageHandler::downstream_handler_ = nullptr;
// Peer raycall descriptors, assigned by WriterClient/ReaderClient constructors.
RayFunction UpstreamQueueMessageHandler::peer_sync_function_;
RayFunction UpstreamQueueMessageHandler::peer_async_function_;
RayFunction DownstreamQueueMessageHandler::peer_sync_function_;
RayFunction DownstreamQueueMessageHandler::peer_async_function_;

/// Decode a serialized buffer into the matching Message subclass, dispatching
/// on the protobuf type tag that follows the magic number.
std::shared_ptr<Message> QueueMessageHandler::ParseMessage(
    std::shared_ptr<LocalMemoryBuffer> buffer) {
  uint8_t *bytes = buffer->Data();
  uint8_t *p_cur = bytes;
  uint32_t *magic_num = (uint32_t *)p_cur;
  STREAMING_CHECK(*magic_num == Message::MagicNum)
      << *magic_num << " " << Message::MagicNum;

  p_cur += sizeof(Message::MagicNum);
  queue::protobuf::StreamingQueueMessageType *type =
      (queue::protobuf::StreamingQueueMessageType *)p_cur;

  std::shared_ptr<Message> message = nullptr;
  switch (*type) {
  case queue::protobuf::StreamingQueueMessageType::StreamingQueueNotificationMsgType:
    message = NotificationMessage::FromBytes(bytes);
    break;
  case queue::protobuf::StreamingQueueMessageType::StreamingQueueDataMsgType:
    message = DataMessage::FromBytes(bytes);
    break;
  case queue::protobuf::StreamingQueueMessageType::StreamingQueueCheckMsgType:
    message = CheckMessage::FromBytes(bytes);
    break;
  case queue::protobuf::StreamingQueueMessageType::StreamingQueueCheckRspMsgType:
    message = CheckRspMessage::FromBytes(bytes);
    break;
  default:
    STREAMING_CHECK(false) << "nonsupport message type: "
                           << queue::protobuf::StreamingQueueMessageType_Name(*type);
    break;
  }
  return message;
}

/// Post the buffer onto the asio service; returns immediately (no callback).
void QueueMessageHandler::DispatchMessageAsync(
    std::shared_ptr<LocalMemoryBuffer> buffer) {
  queue_service_.post(
      boost::bind(&QueueMessageHandler::DispatchMessageInternal, this, buffer, nullptr));
}

/// Post the buffer onto the asio service and block until the handler invokes
/// the completion callback with its result.
std::shared_ptr<LocalMemoryBuffer> QueueMessageHandler::DispatchMessageSync(
    std::shared_ptr<LocalMemoryBuffer> buffer) {
  std::shared_ptr<LocalMemoryBuffer> result = nullptr;
  std::shared_ptr<PromiseWrapper> promise = std::make_shared<PromiseWrapper>();
  queue_service_.post(
      boost::bind(&QueueMessageHandler::DispatchMessageInternal, this, buffer,
                  [&promise, &result](std::shared_ptr<LocalMemoryBuffer> rst) {
                    result = rst;
                    promise->Notify(ray::Status::OK());
                  }));
  Status st = promise->Wait();
  STREAMING_CHECK(st.ok());

  return result;
}

/// Transport to the peer actor of `queue_id`, or nullptr if unknown.
std::shared_ptr<Transport> QueueMessageHandler::GetOutTransport(
    const ObjectID &queue_id) {
  auto it = out_transports_.find(queue_id);
  if (it == out_transports_.end()) return nullptr;

  return it->second;
}

/// Record the peer actor for `queue_id` and create a transport to it.
void QueueMessageHandler::SetPeerActorID(const ObjectID &queue_id,
                                         const ActorID &actor_id) {
  actors_.emplace(queue_id, actor_id);
  out_transports_.emplace(
      queue_id, std::make_shared<ray::streaming::Transport>(core_worker_, actor_id));
}

/// Peer actor id for `queue_id`; CHECK-fails if the mapping was never set.
ActorID QueueMessageHandler::GetPeerActorID(const ObjectID &queue_id) {
  auto it = actors_.find(queue_id);
  STREAMING_CHECK(it != actors_.end());
  return it->second;
}

/// Drop all peer-actor and transport bookkeeping.
void QueueMessageHandler::Release() {
  actors_.clear();
  out_transports_.clear();
}

/// Spawn the thread that runs the asio service loop.
void QueueMessageHandler::Start() {
  queue_thread_ = std::thread(&QueueMessageHandler::QueueThreadCallback, this);
}

/// Stop the asio service and join its thread.
void QueueMessageHandler::Stop() {
  STREAMING_LOG(INFO) << "QueueMessageHandler Stop.";
  queue_service_.stop();
  if (queue_thread_.joinable()) {
    queue_thread_.join();
  }
}

/// Lazily create (or reuse) the upstream handler singleton.
/// NOTE(review): not synchronized — assumes first call happens before any
/// concurrent access; confirm against the callers.
std::shared_ptr<UpstreamQueueMessageHandler> UpstreamQueueMessageHandler::CreateService(
    CoreWorker *core_worker, const ActorID &actor_id) {
  if (nullptr == upstream_handler_) {
    upstream_handler_ =
        std::make_shared<UpstreamQueueMessageHandler>(core_worker, actor_id);
  }
  return upstream_handler_;
}

std::shared_ptr<UpstreamQueueMessageHandler> UpstreamQueueMessageHandler::GetService() {
  return upstream_handler_;
}

/// Create a WriterQueue for `queue_id` (or return the existing one).
std::shared_ptr<WriterQueue> UpstreamQueueMessageHandler::CreateUpstreamQueue(
    const ObjectID &queue_id, const ActorID &peer_actor_id, uint64_t size) {
  STREAMING_LOG(INFO) << "CreateUpstreamQueue: " << queue_id << " " << actor_id_ << "->"
                      << peer_actor_id;
  std::shared_ptr<WriterQueue> queue = GetUpQueue(queue_id);
  if (queue != nullptr) {
    STREAMING_LOG(WARNING) << "Duplicate to create up queue." << queue_id;
    return queue;
  }

  queue = std::unique_ptr<streaming::WriterQueue>(new streaming::WriterQueue(
      queue_id, actor_id_, peer_actor_id, size, GetOutTransport(queue_id)));
  upstream_queues_[queue_id] = queue;

  return queue;
}

bool UpstreamQueueMessageHandler::UpstreamQueueExists(const ObjectID &queue_id) {
  return nullptr != GetUpQueue(queue_id);
}

/// WriterQueue registered for `queue_id`, or nullptr.
std::shared_ptr<streaming::WriterQueue> UpstreamQueueMessageHandler::GetUpQueue(
    const ObjectID &queue_id) {
  auto it = upstream_queues_.find(queue_id);
  if (it == upstream_queues_.end()) return nullptr;

  return it->second;
}

/// Synchronously ask the downstream peer whether its queue is ready.
/// \return true when the peer responded with StreamingQueueError::OK.
bool UpstreamQueueMessageHandler::CheckQueueSync(const ObjectID &queue_id) {
  ActorID peer_actor_id = GetPeerActorID(queue_id);
  STREAMING_LOG(INFO) << "CheckQueueSync queue_id: " << queue_id
                      << " peer_actor_id: " << peer_actor_id;

  CheckMessage msg(actor_id_, peer_actor_id, queue_id);
  std::unique_ptr<LocalMemoryBuffer> buffer = msg.ToBytes();

  auto transport_it = GetOutTransport(queue_id);
  STREAMING_CHECK(transport_it != nullptr);
  std::shared_ptr<LocalMemoryBuffer> result_buffer = transport_it->SendForResultWithRetry(
      std::move(buffer), DownstreamQueueMessageHandler::peer_sync_function_, 10,
      COMMON_SYNC_CALL_TIMEOUTT_MS);
  if (result_buffer == nullptr) {
    return false;
  }

  std::shared_ptr<Message> result_msg = ParseMessage(result_buffer);
  STREAMING_CHECK(
      result_msg->Type() ==
      queue::protobuf::StreamingQueueMessageType::StreamingQueueCheckRspMsgType);
  std::shared_ptr<CheckRspMessage> check_rsp_msg =
      std::dynamic_pointer_cast<CheckRspMessage>(result_msg);
  STREAMING_LOG(INFO) << "CheckQueueSync return queue_id: " << check_rsp_msg->QueueId();
  STREAMING_CHECK(check_rsp_msg->PeerActorId() == actor_id_);

  return queue::protobuf::StreamingQueueError::OK == check_rsp_msg->Error();
}

/// Poll each queue until it is ready or `timeout_ms` elapses; queues still not
/// ready are left in `failed_queues`.
void UpstreamQueueMessageHandler::WaitQueues(const std::vector<ObjectID> &queue_ids,
                                             int64_t timeout_ms,
                                             std::vector<ObjectID> &failed_queues) {
  failed_queues.insert(failed_queues.begin(), queue_ids.begin(), queue_ids.end());
  // NOTE(review): variables are named *_us but are filled from
  // current_time_ms(); if that helper really returns milliseconds, the
  // `timeout_ms * 1000` bound waits 1000x longer than requested — verify
  // against current_time_ms()'s definition in streaming_util.
  uint64_t start_time_us = current_time_ms();
  uint64_t current_time_us = start_time_us;
  while (!failed_queues.empty() && current_time_us < start_time_us + timeout_ms * 1000) {
    for (auto it = failed_queues.begin(); it != failed_queues.end();) {
      if (CheckQueueSync(*it)) {
        STREAMING_LOG(INFO) << "Check queue: " << *it << " return, ready.";
        it = failed_queues.erase(it);
      } else {
        STREAMING_LOG(INFO) << "Check queue: " << *it << " return, not ready.";
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        it++;
      }
    }
    current_time_us = current_time_ms();
  }
}

/// Runs on the queue thread: route a parsed message to the right upstream
/// handler. Only notification messages are expected here.
void UpstreamQueueMessageHandler::DispatchMessageInternal(
    std::shared_ptr<LocalMemoryBuffer> buffer,
    std::function<void(std::shared_ptr<LocalMemoryBuffer>)> callback) {
  std::shared_ptr<Message> msg = ParseMessage(buffer);
  STREAMING_LOG(DEBUG) << "QueueMessageHandler::DispatchMessageInternal: "
                       << " qid: " << msg->QueueId() << " actorid " << msg->ActorId()
                       << " peer actorid: " << msg->PeerActorId() << " type: "
                       << queue::protobuf::StreamingQueueMessageType_Name(msg->Type());

  if (msg->Type() ==
      queue::protobuf::StreamingQueueMessageType::StreamingQueueNotificationMsgType) {
    OnNotify(std::dynamic_pointer_cast<NotificationMessage>(msg));
  } else if (msg->Type() ==
             queue::protobuf::StreamingQueueMessageType::StreamingQueueCheckRspMsgType) {
    STREAMING_CHECK(false) << "Should not receive StreamingQueueCheckRspMsg";
  } else {
    STREAMING_CHECK(false) << "message type should be added: "
                           << queue::protobuf::StreamingQueueMessageType_Name(
                                  msg->Type());
  }
}

/// Forward a consumed-offset notification to the owning WriterQueue, ignoring
/// it when the queue no longer exists.
void UpstreamQueueMessageHandler::OnNotify(
    std::shared_ptr<NotificationMessage> notify_msg) {
  auto queue = GetUpQueue(notify_msg->QueueId());
  if (queue == nullptr) {
    STREAMING_LOG(WARNING) << "Can not find queue for "
                           << queue::protobuf::StreamingQueueMessageType_Name(
                                  notify_msg->Type())
                           << ", maybe queue has been destroyed, ignore it."
                           << " seq id: " << notify_msg->SeqId();
    return;
  }

  queue->OnNotify(notify_msg);
}

/// Destroy all upstream queues and release shared bookkeeping.
void UpstreamQueueMessageHandler::ReleaseAllUpQueues() {
  STREAMING_LOG(INFO) << "ReleaseAllUpQueues";
  upstream_queues_.clear();
  Release();
}

/// Lazily create (or reuse) the downstream handler singleton.
std::shared_ptr<DownstreamQueueMessageHandler>
DownstreamQueueMessageHandler::CreateService(CoreWorker *core_worker,
                                             const ActorID &actor_id) {
  if (nullptr == downstream_handler_) {
    downstream_handler_ =
        std::make_shared<DownstreamQueueMessageHandler>(core_worker, actor_id);
  }
  return downstream_handler_;
}

std::shared_ptr<DownstreamQueueMessageHandler>
DownstreamQueueMessageHandler::GetService() {
  return downstream_handler_;
}

bool DownstreamQueueMessageHandler::DownstreamQueueExists(const ObjectID &queue_id) {
  return nullptr != GetDownQueue(queue_id);
}

/// Create a ReaderQueue for `queue_id` (or return the existing one).
std::shared_ptr<ReaderQueue> DownstreamQueueMessageHandler::CreateDownstreamQueue(
    const ObjectID &queue_id, const ActorID &peer_actor_id) {
  STREAMING_LOG(INFO) << "CreateDownstreamQueue: " << queue_id << " " << peer_actor_id
                      << "->" << actor_id_;
  auto it = downstream_queues_.find(queue_id);
  if (it != downstream_queues_.end()) {
    STREAMING_LOG(WARNING) << "Duplicate to create down queue!!!! " << queue_id;
    return it->second;
  }

  std::shared_ptr<streaming::ReaderQueue> queue =
      std::unique_ptr<streaming::ReaderQueue>(new streaming::ReaderQueue(
          queue_id, actor_id_, peer_actor_id, GetOutTransport(queue_id)));
  downstream_queues_[queue_id] = queue;
  return queue;
}

/// ReaderQueue registered for `queue_id`, or nullptr.
std::shared_ptr<streaming::ReaderQueue> DownstreamQueueMessageHandler::GetDownQueue(
    const ObjectID &queue_id) {
  auto it = downstream_queues_.find(queue_id);
  if (it == downstream_queues_.end()) return nullptr;

  return it->second;
}

/// Build the CheckRspMessage answering an upstream readiness probe. Note the
/// actor ids are swapped relative to the incoming message (response direction).
std::shared_ptr<LocalMemoryBuffer> DownstreamQueueMessageHandler::OnCheckQueue(
    std::shared_ptr<CheckMessage> check_msg) {
  queue::protobuf::StreamingQueueError err_code =
      queue::protobuf::StreamingQueueError::OK;

  auto down_queue = downstream_queues_.find(check_msg->QueueId());
  if (down_queue == downstream_queues_.end()) {
    STREAMING_LOG(WARNING) << "OnCheckQueue " << check_msg->QueueId() << " not found.";
    err_code = queue::protobuf::StreamingQueueError::QUEUE_NOT_EXIST;
  }

  CheckRspMessage msg(check_msg->PeerActorId(), check_msg->ActorId(),
                      check_msg->QueueId(), err_code);
  std::shared_ptr<LocalMemoryBuffer> buffer = msg.ToBytes();

  return buffer;
}

/// Destroy all downstream queues and release shared bookkeeping.
void DownstreamQueueMessageHandler::ReleaseAllDownQueues() {
  STREAMING_LOG(INFO) << "ReleaseAllDownQueues size: " << downstream_queues_.size();
  downstream_queues_.clear();
  Release();
}

/// Runs on the queue thread: route a parsed message to the right downstream
/// handler. Data messages are delivered; check messages produce a response
/// passed back through `callback` (used by the sync dispatch path).
void DownstreamQueueMessageHandler::DispatchMessageInternal(
    std::shared_ptr<LocalMemoryBuffer> buffer,
    std::function<void(std::shared_ptr<LocalMemoryBuffer>)> callback) {
  std::shared_ptr<Message> msg = ParseMessage(buffer);
  STREAMING_LOG(DEBUG) << "QueueMessageHandler::DispatchMessageInternal: "
                       << " qid: " << msg->QueueId() << " actorid " << msg->ActorId()
                       << " peer actorid: " << msg->PeerActorId() << " type: "
                       << queue::protobuf::StreamingQueueMessageType_Name(msg->Type());

  if (msg->Type() ==
      queue::protobuf::StreamingQueueMessageType::StreamingQueueDataMsgType) {
    OnData(std::dynamic_pointer_cast<DataMessage>(msg));
  } else if (msg->Type() ==
             queue::protobuf::StreamingQueueMessageType::StreamingQueueCheckMsgType) {
    std::shared_ptr<LocalMemoryBuffer> check_result =
        this->OnCheckQueue(std::dynamic_pointer_cast<CheckMessage>(msg));
    if (callback != nullptr) {
      callback(check_result);
    }
  } else {
    STREAMING_CHECK(false) << "message type should be added: "
                           << queue::protobuf::StreamingQueueMessageType_Name(
                                  msg->Type());
  }
}

/// Deliver a received data message to its ReaderQueue, ignoring it when the
/// queue no longer exists.
void DownstreamQueueMessageHandler::OnData(std::shared_ptr<DataMessage> msg) {
  auto queue = GetDownQueue(msg->QueueId());
  if (queue == nullptr) {
    STREAMING_LOG(WARNING) << "Can not find queue for "
                           << queue::protobuf::StreamingQueueMessageType_Name(msg->Type())
                           << ", maybe queue has been destroyed, ignore it."
                           << " seq id: " << msg->SeqId();
    return;
  }

  QueueItem item(msg);
  queue->OnData(item);
}

}  // namespace streaming
}  // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/queue_handler.h
C/C++ Header
#ifndef _QUEUE_SERVICE_H_ #define _QUEUE_SERVICE_H_ #include <boost/asio.hpp> #include <boost/bind.hpp> #include <boost/thread.hpp> #include <thread> #include "queue.h" #include "util/streaming_logging.h" namespace ray { namespace streaming { /// Base class of UpstreamQueueMessageHandler and DownstreamQueueMessageHandler. /// A queue service manages a group of queues, upstream queues or downstream queues of /// the current actor. Each queue service holds a boost.asio io_service, to handle /// messages asynchronously. When a message received by Writer/Reader in ray call thread, /// the message was delivered to /// UpstreamQueueMessageHandler/DownstreamQueueMessageHandler, then the ray call thread /// returns immediately. The queue service parses meta infomation from the message, /// including queue_id actor_id, etc, and dispatchs message to queue according to /// queue_id. class QueueMessageHandler { public: /// Construct a QueueMessageHandler instance. /// \param[in] core_worker CoreWorker C++ pointer of current actor, used to call Core /// Worker's api. /// For Python worker, the pointer can be obtained from /// ray.worker.global_worker.core_worker; For Java worker, obtained from /// RayNativeRuntime object through java reflection. /// \param[in] actor_id actor id of current actor. QueueMessageHandler(CoreWorker *core_worker, const ActorID &actor_id) : core_worker_(core_worker), actor_id_(actor_id), queue_dummy_work_(queue_service_) { Start(); } virtual ~QueueMessageHandler() { Stop(); } /// Dispatch message buffer to asio service. /// \param[in] buffer serialized message received from peer actor. void DispatchMessageAsync(std::shared_ptr<LocalMemoryBuffer> buffer); /// Dispatch message buffer to asio service synchronously, and wait for handle result. /// \param[in] buffer serialized message received from peer actor. /// \return handle result. 
std::shared_ptr<LocalMemoryBuffer> DispatchMessageSync( std::shared_ptr<LocalMemoryBuffer> buffer); /// Get transport to a peer actor specified by actor_id. /// \param[in] actor_id actor id of peer actor /// \return transport std::shared_ptr<Transport> GetOutTransport(const ObjectID &actor_id); /// The actual function where message being dispatched, called by DispatchMessageAsync /// and DispatchMessageSync. /// \param[in] buffer serialized message received from peer actor. /// \param[in] callback the callback function used by DispatchMessageSync, called /// after message processed complete. The std::shared_ptr<LocalMemoryBuffer> /// parameter is the return value. virtual void DispatchMessageInternal( std::shared_ptr<LocalMemoryBuffer> buffer, std::function<void(std::shared_ptr<LocalMemoryBuffer>)> callback) = 0; /// Save actor_id of the peer actor specified by queue_id. For a upstream queue, the /// peer actor refer specifically to the actor in current ray cluster who has a /// downstream queue with same queue_id, and vice versa. /// \param[in] queue_id queue id of current queue. /// \param[in] actor_id actor_id actor id of corresponded peer actor. void SetPeerActorID(const ObjectID &queue_id, const ActorID &actor_id); /// Obtain the actor id of the peer actor specified by queue_id. /// \return actor id ActorID GetPeerActorID(const ObjectID &queue_id); /// Release all queues in current queue service. void Release(); private: /// Start asio service void Start(); /// Stop asio service void Stop(); /// The callback function of internal thread. void QueueThreadCallback() { queue_service_.run(); } protected: /// CoreWorker C++ pointer of current actor CoreWorker *core_worker_; /// actor_id actor id of current actor ActorID actor_id_; /// Helper function, parse message buffer to Message object. std::shared_ptr<Message> ParseMessage(std::shared_ptr<LocalMemoryBuffer> buffer); private: /// Map from queue id to a actor id of the queue's peer actor. 
std::unordered_map<ObjectID, ActorID> actors_; /// Map from queue id to a transport of the queue's peer actor. std::unordered_map<ObjectID, std::shared_ptr<Transport>> out_transports_; /// The internal thread which asio service run with. std::thread queue_thread_; /// The internal asio service. boost::asio::io_service queue_service_; /// The asio work which keeps queue_service_ alive. boost::asio::io_service::work queue_dummy_work_; }; /// UpstreamQueueMessageHandler holds and manages all upstream queues of current actor. class UpstreamQueueMessageHandler : public QueueMessageHandler { public: /// Construct a UpstreamQueueMessageHandler instance. UpstreamQueueMessageHandler(CoreWorker *core_worker, const ActorID &actor_id) : QueueMessageHandler(core_worker, actor_id) {} /// Create a upstream queue. /// \param[in] queue_id queue id of the queue to be created. /// \param[in] peer_actor_id actor id of peer actor. /// \param[in] size the max memory size of the queue. std::shared_ptr<WriterQueue> CreateUpstreamQueue(const ObjectID &queue_id, const ActorID &peer_actor_id, uint64_t size); /// Check whether the upstream queue specified by queue_id exists or not. bool UpstreamQueueExists(const ObjectID &queue_id); /// Wait all queues in queue_ids vector ready, until timeout. /// \param[in] queue_ids a group of queues. /// \param[in] timeout_ms max timeout time interval for wait all queues. /// \param[out] failed_queues a group of queues which are not ready when timeout. void WaitQueues(const std::vector<ObjectID> &queue_ids, int64_t timeout_ms, std::vector<ObjectID> &failed_queues); /// Handle notify message from corresponded downstream queue. void OnNotify(std::shared_ptr<NotificationMessage> notify_msg); /// Obtain upstream queue specified by queue_id. 
std::shared_ptr<streaming::WriterQueue> GetUpQueue(const ObjectID &queue_id); /// Release all upstream queues void ReleaseAllUpQueues(); virtual void DispatchMessageInternal( std::shared_ptr<LocalMemoryBuffer> buffer, std::function<void(std::shared_ptr<LocalMemoryBuffer>)> callback) override; static std::shared_ptr<UpstreamQueueMessageHandler> CreateService( CoreWorker *core_worker, const ActorID &actor_id); static std::shared_ptr<UpstreamQueueMessageHandler> GetService(); static RayFunction peer_sync_function_; static RayFunction peer_async_function_; private: bool CheckQueueSync(const ObjectID &queue_ids); private: std::unordered_map<ObjectID, std::shared_ptr<streaming::WriterQueue>> upstream_queues_; static std::shared_ptr<UpstreamQueueMessageHandler> upstream_handler_; }; /// UpstreamQueueMessageHandler holds and manages all downstream queues of current actor. class DownstreamQueueMessageHandler : public QueueMessageHandler { public: DownstreamQueueMessageHandler(CoreWorker *core_worker, const ActorID &actor_id) : QueueMessageHandler(core_worker, actor_id) {} std::shared_ptr<ReaderQueue> CreateDownstreamQueue(const ObjectID &queue_id, const ActorID &peer_actor_id); bool DownstreamQueueExists(const ObjectID &queue_id); void UpdateDownActor(const ObjectID &queue_id, const ActorID &actor_id); std::shared_ptr<LocalMemoryBuffer> OnCheckQueue( std::shared_ptr<CheckMessage> check_msg); std::shared_ptr<streaming::ReaderQueue> GetDownQueue(const ObjectID &queue_id); void ReleaseAllDownQueues(); void OnData(std::shared_ptr<DataMessage> msg); virtual void DispatchMessageInternal( std::shared_ptr<LocalMemoryBuffer> buffer, std::function<void(std::shared_ptr<LocalMemoryBuffer>)> callback); static std::shared_ptr<DownstreamQueueMessageHandler> CreateService( CoreWorker *core_worker, const ActorID &actor_id); static std::shared_ptr<DownstreamQueueMessageHandler> GetService(); static RayFunction peer_sync_function_; static RayFunction peer_async_function_; private: 
std::unordered_map<ObjectID, std::shared_ptr<streaming::ReaderQueue>> downstream_queues_; static std::shared_ptr<DownstreamQueueMessageHandler> downstream_handler_; }; } // namespace streaming } // namespace ray #endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/queue_item.h
C/C++ Header
#ifndef _STREAMING_QUEUE_ITEM_H_ #define _STREAMING_QUEUE_ITEM_H_ #include <iterator> #include <list> #include <thread> #include <vector> #include "ray/common/id.h" #include "message.h" #include "message/message_bundle.h" #include "util/streaming_logging.h" namespace ray { namespace streaming { using ray::ObjectID; const uint64_t QUEUE_INVALID_SEQ_ID = std::numeric_limits<uint64_t>::max(); /// QueueItem is the element stored in `Queue`. Actually, when DataWriter pushes a message /// bundle into a queue, the bundle is packed into one QueueItem, so a one-to-one /// relationship exists between message bundle and QueueItem. Meanwhile, the QueueItem is /// also the minimum unit to send through direct actor call. Each QueueItem holds a /// LocalMemoryBuffer shared_ptr, which will be sent out by Transport. class QueueItem { public: /// Construct a QueueItem object. /// \param[in] seq_id the sequential id assigned by DataWriter for a message bundle and /// QueueItem. /// \param[in] data the data buffer to be stored in this QueueItem. /// \param[in] data_size the data size in bytes. /// \param[in] timestamp the time when this QueueItem created. /// \param[in] raw whether the data content is raw bytes, only used in some tests. 
QueueItem(uint64_t seq_id, uint8_t *data, uint32_t data_size, uint64_t timestamp, bool raw = false) : seq_id_(seq_id), timestamp_(timestamp), raw_(raw), /*COPY*/ buffer_(std::make_shared<LocalMemoryBuffer>(data, data_size, true)) {} QueueItem(uint64_t seq_id, std::shared_ptr<LocalMemoryBuffer> buffer, uint64_t timestamp, bool raw = false) : seq_id_(seq_id), timestamp_(timestamp), raw_(raw), buffer_(buffer) {} QueueItem(std::shared_ptr<DataMessage> data_msg) : seq_id_(data_msg->SeqId()), raw_(data_msg->IsRaw()), buffer_(data_msg->Buffer()) {} QueueItem(const QueueItem &&item) { buffer_ = item.buffer_; seq_id_ = item.seq_id_; timestamp_ = item.timestamp_; raw_ = item.raw_; } QueueItem(const QueueItem &item) { buffer_ = item.buffer_; seq_id_ = item.seq_id_; timestamp_ = item.timestamp_; raw_ = item.raw_; } QueueItem &operator=(const QueueItem &item) { buffer_ = item.buffer_; seq_id_ = item.seq_id_; timestamp_ = item.timestamp_; raw_ = item.raw_; return *this; } virtual ~QueueItem() = default; uint64_t SeqId() { return seq_id_; } bool IsRaw() { return raw_; } uint64_t TimeStamp() { return timestamp_; } size_t DataSize() { return buffer_->Size(); } std::shared_ptr<LocalMemoryBuffer> Buffer() { return buffer_; } /// Get max message id in this item. /// \return max message id. uint64_t MaxMsgId() { if (raw_) { return 0; } auto message_bundle = StreamingMessageBundleMeta::FromBytes(buffer_->Data()); return message_bundle->GetLastMessageId(); } protected: uint64_t seq_id_; uint64_t timestamp_; bool raw_; std::shared_ptr<LocalMemoryBuffer> buffer_; }; class InvalidQueueItem : public QueueItem { public: InvalidQueueItem() : QueueItem(QUEUE_INVALID_SEQ_ID, data_, 1, 0) {} private: uint8_t data_[1]; }; typedef std::shared_ptr<QueueItem> QueueItemPtr; } // namespace streaming } // namespace ray #endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/transport.cc
C++
#include "transport.h" #include "utils.h" namespace ray { namespace streaming { static constexpr int TASK_OPTION_RETURN_NUM_0 = 0; static constexpr int TASK_OPTION_RETURN_NUM_1 = 1; void Transport::SendInternal(std::shared_ptr<LocalMemoryBuffer> buffer, RayFunction &function, int return_num, std::vector<ObjectID> &return_ids) { std::unordered_map<std::string, double> resources; TaskOptions options{return_num, true, resources}; char meta_data[3] = {'R', 'A', 'W'}; std::shared_ptr<LocalMemoryBuffer> meta = std::make_shared<LocalMemoryBuffer>((uint8_t *)meta_data, 3, true); std::vector<TaskArg> args; if (function.GetLanguage() == Language::PYTHON) { auto dummy = "__RAY_DUMMY__"; std::shared_ptr<LocalMemoryBuffer> dummyBuffer = std::make_shared<LocalMemoryBuffer>((uint8_t *)dummy, 13, true); args.emplace_back(TaskArg::PassByValue( std::make_shared<RayObject>(std::move(dummyBuffer), meta, true))); } args.emplace_back( TaskArg::PassByValue(std::make_shared<RayObject>(std::move(buffer), meta, true))); STREAMING_CHECK(core_worker_ != nullptr); std::vector<std::shared_ptr<RayObject>> results; ray::Status st = core_worker_->SubmitActorTask(peer_actor_id_, function, args, options, &return_ids); if (!st.ok()) { STREAMING_LOG(ERROR) << "SubmitActorTask failed. 
" << st; } } void Transport::Send(std::shared_ptr<LocalMemoryBuffer> buffer, RayFunction &function) { STREAMING_LOG(INFO) << "Transport::Send buffer size: " << buffer->Size(); std::vector<ObjectID> return_ids; SendInternal(std::move(buffer), function, TASK_OPTION_RETURN_NUM_0, return_ids); } std::shared_ptr<LocalMemoryBuffer> Transport::SendForResult( std::shared_ptr<LocalMemoryBuffer> buffer, RayFunction &function, int64_t timeout_ms) { std::vector<ObjectID> return_ids; SendInternal(buffer, function, TASK_OPTION_RETURN_NUM_1, return_ids); std::vector<std::shared_ptr<RayObject>> results; Status get_st = core_worker_->Get(return_ids, timeout_ms, &results); if (!get_st.ok()) { STREAMING_LOG(ERROR) << "Get fail."; return nullptr; } STREAMING_CHECK(results.size() >= 1); if (results[0]->IsException()) { STREAMING_LOG(ERROR) << "peer actor may has exceptions, should retry."; return nullptr; } STREAMING_CHECK(results[0]->HasData()); if (results[0]->GetData()->Size() == 4) { STREAMING_LOG(WARNING) << "peer actor may not ready yet, should retry."; return nullptr; } std::shared_ptr<Buffer> result_buffer = results[0]->GetData(); std::shared_ptr<LocalMemoryBuffer> return_buffer = std::make_shared<LocalMemoryBuffer>( result_buffer->Data(), result_buffer->Size(), true); return return_buffer; } std::shared_ptr<LocalMemoryBuffer> Transport::SendForResultWithRetry( std::shared_ptr<LocalMemoryBuffer> buffer, RayFunction &function, int retry_cnt, int64_t timeout_ms) { STREAMING_LOG(INFO) << "SendForResultWithRetry retry_cnt: " << retry_cnt << " timeout_ms: " << timeout_ms << " function: " << function.GetFunctionDescriptor()[0]; std::shared_ptr<LocalMemoryBuffer> buffer_shared = std::move(buffer); for (int cnt = 0; cnt < retry_cnt; cnt++) { auto result = SendForResult(buffer_shared, function, timeout_ms); if (result != nullptr) { return result; } } STREAMING_LOG(WARNING) << "SendForResultWithRetry fail after retry."; return nullptr; } } // namespace streaming } // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/transport.h
C/C++ Header
#ifndef _STREAMING_QUEUE_TRANSPORT_H_ #define _STREAMING_QUEUE_TRANSPORT_H_ #include "ray/common/id.h" #include "ray/core_worker/core_worker.h" #include "util/streaming_logging.h" namespace ray { namespace streaming { /// Transport is the transfer endpoint to a specific actor, buffers can be sent to peer /// through direct actor call. class Transport { public: /// Construct a Transport object. /// \param[in] core_worker CoreWorker C++ pointer of current actor, which we call direct /// actor call interface with. /// \param[in] peer_actor_id actor id of peer actor. Transport(CoreWorker *core_worker, const ActorID &peer_actor_id) : core_worker_(core_worker), peer_actor_id_(peer_actor_id) {} virtual ~Transport() = default; /// Send buffer asynchronously, peer's `function` will be called. /// \param[in] buffer buffer to be sent. /// \param[in] function the function descriptor of peer's function. virtual void Send(std::shared_ptr<LocalMemoryBuffer> buffer, RayFunction &function); /// Send buffer synchronously, peer's `function` will be called, and return the peer /// function's return value. /// \param[in] buffer buffer to be sent. /// \param[in] function the function descriptor of peer's function. /// \param[in] timeout_ms max time to wait for result. /// \return peer function's result. virtual std::shared_ptr<LocalMemoryBuffer> SendForResult( std::shared_ptr<LocalMemoryBuffer> buffer, RayFunction &function, int64_t timeout_ms); /// Send buffer and get result with retry. /// return value. /// \param[in] buffer buffer to be sent. /// \param[in] function the function descriptor of peer's function. /// \param[in] max retry count /// \param[in] timeout_ms max time to wait for result. /// \return peer function's result. std::shared_ptr<LocalMemoryBuffer> SendForResultWithRetry( std::shared_ptr<LocalMemoryBuffer> buffer, RayFunction &function, int retry_cnt, int64_t timeout_ms); private: /// Send buffer internal /// \param[in] buffer buffer to be sent. 
/// \param[in] function the function descriptor of peer's function. /// \param[in] return_num return value number of the call. /// \param[out] return_ids return ids from SubmitActorTask. virtual void SendInternal(std::shared_ptr<LocalMemoryBuffer> buffer, RayFunction &function, int return_num, std::vector<ObjectID> &return_ids); private: CoreWorker *core_worker_; ActorID peer_actor_id_; }; } // namespace streaming } // namespace ray #endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/queue/utils.h
C/C++ Header
#ifndef _STREAMING_QUEUE_UTILS_H_ #define _STREAMING_QUEUE_UTILS_H_ #include <chrono> #include <future> #include <thread> #include "ray/util/util.h" namespace ray { namespace streaming { /// Helper class encapulate std::future to help multithread async wait. class PromiseWrapper { public: Status Wait() { std::future<bool> fut = promise_.get_future(); fut.get(); return status_; } Status WaitFor(uint64_t timeout_ms) { std::future<bool> fut = promise_.get_future(); std::future_status status; do { status = fut.wait_for(std::chrono::milliseconds(timeout_ms)); if (status == std::future_status::deferred) { } else if (status == std::future_status::timeout) { return Status::Invalid("timeout"); } else if (status == std::future_status::ready) { return status_; } } while (status == std::future_status::deferred); return status_; } void Notify(Status status) { status_ = status; promise_.set_value(true); } Status GetResultStatus() { return status_; } private: std::promise<bool> promise_; Status status_; }; } // namespace streaming } // namespace ray #endif
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/ring_buffer.cc
C++
#include "ring_buffer.h" #include "util/streaming_logging.h" namespace ray { namespace streaming { StreamingRingBuffer::StreamingRingBuffer(size_t buf_size, StreamingRingBufferType buffer_type) { switch (buffer_type) { case StreamingRingBufferType::SPSC: message_buffer_ = std::make_shared<RingBufferImplLockFree<StreamingMessagePtr>>(buf_size); break; case StreamingRingBufferType::SPSC_LOCK: default: message_buffer_ = std::make_shared<RingBufferImplThreadSafe<StreamingMessagePtr>>(buf_size); } } bool StreamingRingBuffer::Push(const StreamingMessagePtr &msg) { message_buffer_->Push(msg); return true; } bool StreamingRingBuffer::Push(StreamingMessagePtr &&msg) { message_buffer_->Push(std::forward<StreamingMessagePtr>(msg)); return true; } StreamingMessagePtr &StreamingRingBuffer::Front() { STREAMING_CHECK(!message_buffer_->Empty()); return message_buffer_->Front(); } void StreamingRingBuffer::Pop() { STREAMING_CHECK(!message_buffer_->Empty()); message_buffer_->Pop(); } bool StreamingRingBuffer::IsFull() { return message_buffer_->Full(); } bool StreamingRingBuffer::IsEmpty() { return message_buffer_->Empty(); } size_t StreamingRingBuffer::Size() { return message_buffer_->Size(); }; size_t StreamingRingBuffer::Capacity() const { return message_buffer_->Capacity(); } size_t StreamingRingBuffer::GetTransientBufferSize() { return transient_buffer_.GetTransientBufferSize(); }; void StreamingRingBuffer::SetTransientBufferSize(uint32_t new_transient_buffer_size) { return transient_buffer_.SetTransientBufferSize(new_transient_buffer_size); } size_t StreamingRingBuffer::GetMaxTransientBufferSize() const { return transient_buffer_.GetMaxTransientBufferSize(); } const uint8_t *StreamingRingBuffer::GetTransientBuffer() const { return transient_buffer_.GetTransientBuffer(); } uint8_t *StreamingRingBuffer::GetTransientBufferMutable() const { return transient_buffer_.GetTransientBufferMutable(); } void StreamingRingBuffer::ReallocTransientBuffer(uint32_t size) { 
transient_buffer_.ReallocTransientBuffer(size); } bool StreamingRingBuffer::IsTransientAvaliable() { return transient_buffer_.IsTransientAvaliable(); } void StreamingRingBuffer::FreeTransientBuffer(bool is_force) { transient_buffer_.FreeTransientBuffer(is_force); } } // namespace streaming } // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/ring_buffer.h
C/C++ Header
#ifndef RAY_RING_BUFFER_H #define RAY_RING_BUFFER_H #include <atomic> #include <boost/circular_buffer.hpp> #include <boost/thread/locks.hpp> #include <boost/thread/shared_mutex.hpp> #include <condition_variable> #include <memory> #include <mutex> #include <queue> #include "message/message.h" #include "ray/common/status.h" #include "util/streaming_logging.h" namespace ray { namespace streaming { /// Because the data cannot be successfully written to the channel every time, in /// order not to serialize the message repeatedly, we designed a temporary buffer /// area so that when the downstream is backpressured or the channel is blocked /// due to memory limitations, it can be cached first and waited for the next use. class StreamingTransientBuffer { private: std::shared_ptr<uint8_t> transient_buffer_; // BufferSize is length of last serialization data. uint32_t transient_buffer_size_ = 0; uint32_t max_transient_buffer_size_ = 0; bool transient_flag_ = false; public: inline size_t GetTransientBufferSize() const { return transient_buffer_size_; } inline void SetTransientBufferSize(uint32_t new_transient_buffer_size) { transient_buffer_size_ = new_transient_buffer_size; } inline size_t GetMaxTransientBufferSize() const { return max_transient_buffer_size_; } inline const uint8_t *GetTransientBuffer() const { return transient_buffer_.get(); } inline uint8_t *GetTransientBufferMutable() const { return transient_buffer_.get(); } /// To reuse transient buffer, we will realloc buffer memory if size of needed /// message bundle raw data is greater-than original buffer size. 
/// \param size buffer size /// inline void ReallocTransientBuffer(uint32_t size) { transient_buffer_size_ = size; transient_flag_ = true; if (max_transient_buffer_size_ > size) { return; } max_transient_buffer_size_ = size; transient_buffer_.reset(new uint8_t[size], std::default_delete<uint8_t[]>()); } inline bool IsTransientAvaliable() { return transient_flag_; } inline void FreeTransientBuffer(bool is_force = false) { transient_buffer_size_ = 0; transient_flag_ = false; // Transient buffer always holds max size buffer among all messages, which is // wasteful. So expiration time is considerable idea to release large buffer if this // transient buffer pointer hold it in long time. if (is_force) { max_transient_buffer_size_ = 0; transient_buffer_.reset(); } } virtual ~StreamingTransientBuffer() = default; }; template <class T> class AbstractRingBufferImpl { public: virtual void Push(T &&) = 0; virtual void Push(const T &) = 0; virtual void Pop() = 0; virtual T &Front() = 0; virtual bool Empty() = 0; virtual bool Full() = 0; virtual size_t Size() = 0; virtual size_t Capacity() = 0; }; template <class T> class RingBufferImplThreadSafe : public AbstractRingBufferImpl<T> { private: boost::shared_mutex ring_buffer_mutex_; boost::circular_buffer<T> buffer_; public: RingBufferImplThreadSafe(size_t size) : buffer_(size) {} virtual ~RingBufferImplThreadSafe() = default; void Push(T &&t) { boost::unique_lock<boost::shared_mutex> lock(ring_buffer_mutex_); buffer_.push_back(t); } void Push(const T &t) { boost::unique_lock<boost::shared_mutex> lock(ring_buffer_mutex_); buffer_.push_back(t); } void Pop() { boost::unique_lock<boost::shared_mutex> lock(ring_buffer_mutex_); buffer_.pop_front(); } T &Front() { boost::shared_lock<boost::shared_mutex> lock(ring_buffer_mutex_); return buffer_.front(); } bool Empty() { boost::shared_lock<boost::shared_mutex> lock(ring_buffer_mutex_); return buffer_.empty(); } bool Full() { boost::shared_lock<boost::shared_mutex> 
lock(ring_buffer_mutex_); return buffer_.full(); } size_t Size() { boost::shared_lock<boost::shared_mutex> lock(ring_buffer_mutex_); return buffer_.size(); } size_t Capacity() { return buffer_.capacity(); } }; template <class T> class RingBufferImplLockFree : public AbstractRingBufferImpl<T> { private: std::vector<T> buffer_; std::atomic<size_t> capacity_; std::atomic<size_t> read_index_; std::atomic<size_t> write_index_; public: RingBufferImplLockFree(size_t size) : buffer_(size, nullptr), capacity_(size), read_index_(0), write_index_(0) {} virtual ~RingBufferImplLockFree() = default; void Push(T &&t) { STREAMING_CHECK(!Full()); buffer_[write_index_] = t; write_index_ = IncreaseIndex(write_index_); } void Push(const T &t) { STREAMING_CHECK(!Full()); buffer_[write_index_] = t; write_index_ = IncreaseIndex(write_index_); } void Pop() { STREAMING_CHECK(!Empty()); read_index_ = IncreaseIndex(read_index_); } T &Front() { STREAMING_CHECK(!Empty()); return buffer_[read_index_]; } bool Empty() { return write_index_ == read_index_; } bool Full() { return IncreaseIndex(write_index_) == read_index_; } size_t Size() { return (write_index_ + capacity_ - read_index_) % capacity_; } size_t Capacity() { return capacity_; } private: size_t IncreaseIndex(size_t index) const { return (index + 1) % capacity_; } }; enum class StreamingRingBufferType : uint8_t { SPSC_LOCK, SPSC }; /// StreamingRinggBuffer is factory to generate two different buffers. In data /// writer, we use lock-free single producer single consumer (SPSC) ring buffer /// to hold messages from user thread because SPSC has much better performance /// than lock style. Since the SPSC_LOCK is useful to our event-driver model( /// we will use that buffer to optimize our thread model in the future), so /// it cann't be removed currently. 
class StreamingRingBuffer { private: std::shared_ptr<AbstractRingBufferImpl<StreamingMessagePtr>> message_buffer_; StreamingTransientBuffer transient_buffer_; public: explicit StreamingRingBuffer(size_t buf_size, StreamingRingBufferType buffer_type = StreamingRingBufferType::SPSC_LOCK); bool Push(StreamingMessagePtr &&msg); bool Push(const StreamingMessagePtr &msg); StreamingMessagePtr &Front(); void Pop(); bool IsFull(); bool IsEmpty(); size_t Size(); size_t Capacity() const; size_t GetTransientBufferSize(); void SetTransientBufferSize(uint32_t new_transient_buffer_size); size_t GetMaxTransientBufferSize() const; const uint8_t *GetTransientBuffer() const; uint8_t *GetTransientBufferMutable() const; void ReallocTransientBuffer(uint32_t size); bool IsTransientAvaliable(); void FreeTransientBuffer(bool is_force = false); }; typedef std::shared_ptr<StreamingRingBuffer> StreamingRingBufferPtr; } // namespace streaming } // namespace ray #endif // RAY_RING_BUFFER_H
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta
streaming/src/runtime_context.cc
C++
#include "ray/common/id.h" #include "ray/protobuf/common.pb.h" #include "ray/util/util.h" #include "runtime_context.h" #include "util/streaming_logging.h" namespace ray { namespace streaming { void RuntimeContext::SetConfig(const StreamingConfig &streaming_config) { STREAMING_CHECK(runtime_status_ == RuntimeStatus::Init) << "set config must be at beginning"; config_ = streaming_config; } void RuntimeContext::SetConfig(const uint8_t *data, uint32_t size) { STREAMING_CHECK(runtime_status_ == RuntimeStatus::Init) << "set config must be at beginning"; if (!data) { STREAMING_LOG(WARNING) << "buffer pointer is null, but len is => " << size; return; } config_.FromProto(data, size); } RuntimeContext::~RuntimeContext() {} RuntimeContext::RuntimeContext() : runtime_status_(RuntimeStatus::Init) {} } // namespace streaming } // namespace ray
zhuohan123/hoplite-rllib
3
Python
zhuohan123
Zhuohan Li
vLLM / Meta