index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langsmith-sdk/js | lc_public_repos/langsmith-sdk/js/src/langchain.ts | // These `@langchain/core` imports are intentionally not peer dependencies
// to avoid package manager issues around circular dependencies.
// eslint-disable-next-line import/no-extraneous-dependencies
import { CallbackManager } from "@langchain/core/callbacks/manager";
// eslint-disable-next-line import/no-extraneous-dependencies
import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain";
// eslint-disable-next-line import/no-extraneous-dependencies
import {
Runnable,
RunnableConfig,
patchConfig,
getCallbackManagerForConfig,
} from "@langchain/core/runnables";
import { RunTree } from "./run_trees.js";
import { Run } from "./schemas.js";
import {
TraceableFunction,
getCurrentRunTree,
isTraceableFunction,
} from "./traceable.js";
import { isAsyncIterable, isIteratorLike } from "./utils/asserts.js";
/**
* Converts the current run tree active within a traceable-wrapped function
* into a LangChain compatible callback manager. This is useful to handoff tracing
* from LangSmith to LangChain Runnables and LLMs.
*
* @param {RunTree | undefined} currentRunTree Current RunTree from within a traceable-wrapped function. If not provided, the current run tree will be inferred from AsyncLocalStorage.
* @returns {CallbackManager | undefined} Callback manager used by LangChain Runnable objects.
*/
export async function getLangchainCallbacks(
  currentRunTree?: RunTree | undefined
) {
  const runTree: RunTree | undefined = currentRunTree ?? getCurrentRunTree();
  if (!runTree) return undefined;

  // TODO: CallbackManager.configure() is only async due to LangChainTracer
  // factory being unnecessarily async.
  let callbacks = await CallbackManager.configure();
  if (!callbacks && runTree.tracingEnabled) {
    callbacks = new CallbackManager();
  }

  // Reuse an existing LangChainTracer handler when one is already configured.
  let langChainTracer = callbacks?.handlers.find(
    (handler): handler is LangChainTracer =>
      handler?.name === "langchain_tracer"
  );

  if (!langChainTracer && runTree.tracingEnabled) {
    langChainTracer = new LangChainTracer();
    callbacks?.addHandler(langChainTracer);
  }

  const runMap = new Map<string, Run>();

  // find upward root run (guard against accidental parent cycles)
  let rootRun = runTree;
  const rootVisited = new Set<string>();
  while (rootRun.parent_run) {
    if (rootVisited.has(rootRun.id)) break;
    rootVisited.add(rootRun.id);
    rootRun = rootRun.parent_run;
  }

  // BFS over the whole tree so the tracer can resolve any run by id.
  const queue = [rootRun];
  const visited = new Set<string>();

  while (queue.length > 0) {
    const current = queue.shift();
    if (!current || visited.has(current.id)) continue;
    visited.add(current.id);

    runMap.set(current.id, current);
    if (current.child_runs) {
      queue.push(...current.child_runs);
    }
  }

  if (callbacks != null) {
    Object.assign(callbacks, { _parentRunId: runTree.id });
  }

  if (langChainTracer != null) {
    if (
      "updateFromRunTree" in langChainTracer &&
      // BUGFIX: previously checked `typeof langChainTracer === "function"`,
      // which is never true for a tracer *instance*, so this branch was dead
      // and the manual Object.assign fallback always ran. Check the method.
      // eslint-disable-next-line @typescript-eslint/ban-ts-comment
      // @ts-ignore @langchain/core can use a different version of LangSmith
      typeof langChainTracer.updateFromRunTree === "function"
    ) {
      // eslint-disable-next-line @typescript-eslint/ban-ts-comment
      // @ts-ignore @langchain/core can use a different version of LangSmith
      langChainTracer.updateFromRunTree(runTree);
    } else {
      // Older @langchain/core: copy the relevant state onto the tracer.
      Object.assign(langChainTracer, {
        runMap,
        client: runTree.client,
        projectName: runTree.project_name || langChainTracer.projectName,
        exampleId: runTree.reference_example_id || langChainTracer.exampleId,
      });
    }
  }

  return callbacks;
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type AnyTraceableFunction = TraceableFunction<(...any: any[]) => any>;
/**
* RunnableTraceable is a Runnable that wraps a traceable function.
* This allows adding Langsmith traced functions into LangChain sequences.
*/
export class RunnableTraceable<RunInput, RunOutput> extends Runnable<
  RunInput,
  RunOutput
> {
  lc_serializable = false;

  lc_namespace = ["langchain_core", "runnables"];

  // The traceable-wrapped function this runnable delegates to.
  protected func: AnyTraceableFunction;

  constructor(fields: { func: AnyTraceableFunction }) {
    super(fields);

    // Only functions already wrapped by traceable() carry the run-tree
    // bookkeeping this class relies on.
    if (!isTraceableFunction(fields.func)) {
      throw new Error(
        "RunnableTraceable requires a function that is wrapped in traceable higher-order function"
      );
    }

    this.func = fields.func;
  }

  /** Invokes the wrapped traceable function, forwarding LangChain callbacks. */
  async invoke(input: RunInput, options?: Partial<RunnableConfig>) {
    const [config] = this._getOptionsList(options ?? {}, 1);
    const callbacks = await getCallbackManagerForConfig(config);
    const result = await this.func(patchConfig(config, { callbacks }), input);
    return result as RunOutput;
  }

  /**
   * Streams the result: async iterables and sync iterators are re-yielded
   * item by item; any other value is yielded once.
   */
  async *_streamIterator(
    input: RunInput,
    options?: Partial<RunnableConfig>
  ): AsyncGenerator<RunOutput> {
    const result = await this.invoke(input, options);

    if (isAsyncIterable(result)) {
      for await (const chunk of result) {
        yield chunk as RunOutput;
      }
      return;
    }

    if (isIteratorLike(result)) {
      let step: IteratorResult<unknown> = result.next();
      while (!step.done) {
        yield step.value as RunOutput;
        step = result.next();
      }
      return;
    }

    yield result;
  }

  /** Convenience factory mirroring other Runnable `.from` helpers. */
  static from(func: AnyTraceableFunction) {
    return new RunnableTraceable({ func });
  }
}
|
0 | lc_public_repos/langsmith-sdk/js | lc_public_repos/langsmith-sdk/js/src/vercel.ts | import type {
CoreAssistantMessage,
CoreMessage,
ToolCallPart,
generateText,
} from "ai";
import type { AISDKSpan } from "./vercel.types.js";
import { Client, RunTree } from "./index.js";
import { KVMap, RunCreate } from "./schemas.js";
import { v5 as uuid5, v4 as uuid4 } from "uuid";
import { getCurrentRunTree } from "./singletons/traceable.js";
import {
getLangSmithEnvironmentVariable,
getEnvironmentVariable,
} from "./utils/env.js";
import { isTracingEnabled } from "./env.js";
// `string & {}` keeps literal unions (e.g. "text" | "image_url") suggestible
// in editors while still accepting arbitrary strings.
// eslint-disable-next-line @typescript-eslint/ban-types
type AnyString = string & {};
// Telemetry settings shape accepted by the AI SDK's `experimental_telemetry`
// option, derived structurally from `generateText`'s signature.
export type AITelemetrySettings = Exclude<
  Parameters<typeof generateText>[0]["experimental_telemetry"],
  undefined
>;
// AI SDK telemetry settings extended with LangSmith-specific run options.
export interface TelemetrySettings extends AITelemetrySettings {
  /** ID of the run sent to LangSmith */
  runId?: string;
  /** Name of the run sent to LangSmith */
  runName?: string;
}
// Loose structural mirror of a LangChain serialized message `data` payload;
// used so messages render nicely in the LangSmith UI.
type LangChainMessageFields = {
  content:
    | string
    | Array<
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        Record<string, any> & { type?: "text" | "image_url" | AnyString }
      >;
  name?: string;
  id?: string;
  // OpenAI-style tool call payloads attached to assistant messages.
  additional_kwargs?: {
    tool_calls?: {
      id: string;
      function: { arguments: string; name: string };
      type: "function";
      index?: number;
    }[];
    [key: string]: unknown;
  };
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  response_metadata?: Record<string, any>;
};
// LangChain serialized message envelope: `type` names the message class
// ("ai" | "human" | "system" | "tool"), `data` carries its fields.
type LangChainLikeMessage = { type: string; data: LangChainMessageFields };
// Attempt to convert CoreMessage to a LangChain-compatible format
// which allows us to render messages more nicely in LangSmith
/**
 * Converts an AI SDK CoreMessage into a LangChain-like serialized message
 * (or an array of them for multi-result tool messages). Unknown roles are
 * passed through untouched.
 */
function convertCoreToSmith(
  message: CoreMessage
):
  | LangChainLikeMessage
  | CoreMessage
  | Array<LangChainLikeMessage | CoreMessage> {
  switch (message.role) {
    case "assistant": {
      const fields: LangChainMessageFields = { content: message.content };

      if (Array.isArray(message.content)) {
        // Normalize content parts into LangChain part shapes.
        fields.content = message.content.map((piece) => {
          if (piece.type === "text") {
            return {
              type: "text",
              text: piece.text,
              ...piece.experimental_providerMetadata,
            };
          }
          if (piece.type === "tool-call") {
            return {
              type: "tool_use",
              name: piece.toolName,
              id: piece.toolCallId,
              input: piece.args,
              ...piece.experimental_providerMetadata,
            };
          }
          return piece;
        });

        // Mirror tool calls into OpenAI-style additional_kwargs as well.
        const calls = message.content.filter(
          (piece): piece is ToolCallPart => piece.type === "tool-call"
        );
        if (calls.length > 0) {
          fields.additional_kwargs ??= {};
          fields.additional_kwargs.tool_calls = calls.map((piece) => ({
            id: piece.toolCallId,
            type: "function",
            function: {
              name: piece.toolName,
              id: piece.toolCallId,
              arguments: JSON.stringify(piece.args),
            },
          }));
        }
      }

      return { type: "ai", data: fields };
    }
    case "user": {
      const fields: LangChainMessageFields = { content: message.content };

      if (Array.isArray(message.content)) {
        fields.content = message.content.map((piece) => {
          if (piece.type === "text") {
            return {
              type: "text",
              text: piece.text,
              ...piece.experimental_providerMetadata,
            };
          }
          if (piece.type === "image") {
            return {
              type: "image_url",
              image_url: piece.image,
              ...piece.experimental_providerMetadata,
            };
          }
          return piece;
        });
      }

      return { type: "human", data: fields };
    }
    case "system":
      return { type: "system", data: { content: message.content } };
    case "tool": {
      // One tool message may carry several results; unwrap singletons.
      const converted = message.content.map((toolCall) => ({
        type: "tool",
        data: {
          content: JSON.stringify(toolCall.result),
          name: toolCall.toolName,
          tool_call_id: toolCall.toolCallId,
        },
      }));
      return converted.length === 1 ? converted[0] : converted;
    }
    default:
      return message;
  }
}
// Best-effort JSON.parse: strings that parse are decoded; everything else
// (non-strings, empty/falsy values, unparsable strings) is returned as-is.
const tryJson = (
  str:
    | string
    | number
    | boolean
    | Array<null | undefined | string>
    | Array<null | undefined | number>
    | Array<null | undefined | boolean>
    | undefined
) => {
  // Falsy values and non-strings pass through untouched.
  if (!str || typeof str !== "string") return str;
  try {
    return JSON.parse(str);
  } catch {
    return str;
  }
};
// Removes the ISO-8601 separators (dashes, colons, dots) from a timestamp
// string, leaving a compact dotted-order-friendly form.
function stripNonAlphanumeric(input: string) {
  return input.split(/[-:.]/).join("");
}
/**
 * Builds a LangSmith dotted-order segment from an hrtime start timestamp,
 * the run id, and the run's sibling execution order.
 */
function convertToDottedOrderFormat(
  [seconds, nanoseconds]: [seconds: number, nanoseconds: number],
  runId: string,
  executionOrder: number
) {
  // Date only has millisecond precision, so we use the microseconds to break
  // possible ties, avoiding incorrect run order.
  // BUGFIX: hrtime nanoseconds can have fewer than 9 digits; pad first so the
  // fixed slices really select milliseconds (digits 0-2) and microseconds
  // (digits 3-5). Without padding, e.g. nanoseconds=999 was read as 999 ms.
  const nanos = String(nanoseconds).padStart(9, "0");
  const ms = Number(nanos.slice(0, 3));
  const ns = String(Number(nanos.slice(3, 6)) + executionOrder)
    .padStart(3, "0")
    .slice(0, 3);

  return (
    stripNonAlphanumeric(
      `${new Date(seconds * 1000 + ms).toISOString().slice(0, -1)}${ns}Z`
    ) + runId
  );
}
/**
 * Converts an hrtime `[seconds, nanoseconds]` pair into epoch milliseconds.
 */
function convertToTimestamp([seconds, nanoseconds]: [
  seconds: number,
  nanoseconds: number
]) {
  // BUGFIX: pad nanoseconds to 9 digits before slicing; otherwise small
  // values corrupt the result (e.g. [1, 5] previously yielded 15, not 1000).
  const ms = String(nanoseconds).padStart(9, "0").slice(0, 3);
  return Number(String(seconds) + ms);
}
// Comparator for hrtime tuples: orders by seconds first, then nanoseconds.
// Returns -1, 0, or 1 (suitable for Array.prototype.sort).
function sortByHr(
  a: [seconds: number, nanoseconds: number],
  b: [seconds: number, nanoseconds: number]
): number {
  const secondsDelta = a[0] - b[0];
  return secondsDelta !== 0
    ? Math.sign(secondsDelta)
    : Math.sign(a[1] - b[1]);
}
// Sentinel parent key for root-level runs in the per-trace child map.
const ROOT = "$";
// Fixed UUIDv5 namespace used to derive deterministic run IDs from OTEL span IDs.
const RUN_ID_NAMESPACE = "5c718b20-9078-11ef-9a3d-325096b39f47";
// Each metadata key has two spellings: `input` is what callers write into
// `experimental_telemetry.metadata`; `output` is where the AI SDK surfaces it
// on the exported span attributes.
const RUN_ID_METADATA_KEY = {
  input: "langsmith:runId",
  output: "ai.telemetry.metadata.langsmith:runId",
};
const RUN_NAME_METADATA_KEY = {
  input: "langsmith:runName",
  output: "ai.telemetry.metadata.langsmith:runName",
};
const TRACE_METADATA_KEY = {
  input: "langsmith:trace",
  output: "ai.telemetry.metadata.langsmith:trace",
};
const BAGGAGE_METADATA_KEY = {
  input: "langsmith:baggage",
  output: "ai.telemetry.metadata.langsmith:baggage",
};
// Keys consumed by this exporter and therefore stripped from user metadata
// before runs are sent to LangSmith.
const RESERVED_METADATA_KEYS = [
  RUN_ID_METADATA_KEY.output,
  RUN_NAME_METADATA_KEY.output,
  TRACE_METADATA_KEY.output,
  BAGGAGE_METADATA_KEY.output,
];
// Bookkeeping for a single span-derived run awaiting export.
interface RunTask {
  id: string;
  parentId: string | undefined;
  startTime: [seconds: number, nanoseconds: number];
  run: RunCreate;
  sent: boolean; // true once the run has been handed to the LangSmith client
  executionOrder: number; // sibling index used to break start-time ties
}
// How a trace hooks into an existing LangSmith context, if at all:
// - "traceable": nest under a run tree propagated from a traceable() wrapper
// - "user": the user supplied a custom root run ID via metadata
type InteropType =
  | { type: "traceable"; parentRunTree: RunTree }
  | { type: "user"; userTraceId?: string }
  | undefined;
/**
* OpenTelemetry trace exporter for Vercel AI SDK.
*
* @example
* ```ts
* import { AISDKExporter } from "langsmith/vercel";
* import { Client } from "langsmith";
*
* import { generateText } from "ai";
* import { openai } from "@ai-sdk/openai";
*
* import { NodeSDK } from "@opentelemetry/sdk-node";
* import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
*
* const client = new Client();
*
* const sdk = new NodeSDK({
* traceExporter: new AISDKExporter({ client }),
* instrumentations: [getNodeAutoInstrumentations()],
* });
*
* sdk.start();
*
* const res = await generateText({
* model: openai("gpt-4o-mini"),
* messages: [
* {
* role: "user",
* content: "What color is the sky?",
* },
* ],
* experimental_telemetry: AISDKExporter.getSettings({
* runName: "langsmith_traced_call",
* metadata: { userId: "123", language: "english" },
* }),
* });
*
* await sdk.shutdown();
* ```
*/
export class AISDKExporter {
  private client: Client;

  // Buffered run state per OTEL trace id; populated and flushed in `export()`.
  private traceByMap: Record<
    string,
    {
      childMap: Record<string, RunTask[]>;
      nodeMap: Record<string, RunTask>;
      relativeExecutionOrder: Record<string, number>;
      interop?: InteropType;
    }
  > = {};

  private debug: boolean;

  constructor(args?: { client?: Client; debug?: boolean }) {
    this.client = args?.client ?? new Client();
    this.debug =
      args?.debug ?? getEnvironmentVariable("OTEL_LOG_LEVEL") === "DEBUG";

    this.logDebug("creating exporter", { tracingEnabled: isTracingEnabled() });
  }

  /**
   * Builds an `experimental_telemetry` settings object that routes AI SDK
   * spans to LangSmith, propagating the current traceable run tree (trace
   * header + baggage) when one is active.
   *
   * @throws If a custom `runId` is supplied while already inside a traceable
   *         function — the trace identity is owned by the run tree there.
   */
  static getSettings(settings?: TelemetrySettings) {
    const { runId, runName, ...rest } = settings ?? {};

    const metadata = { ...rest?.metadata };
    if (runId != null) metadata[RUN_ID_METADATA_KEY.input] = runId;
    if (runName != null) metadata[RUN_NAME_METADATA_KEY.input] = runName;

    // attempt to obtain the run tree if used within a traceable function
    let defaultEnabled = settings?.isEnabled ?? isTracingEnabled();
    try {
      const runTree = getCurrentRunTree();
      const headers = runTree.toHeaders();
      metadata[TRACE_METADATA_KEY.input] = headers["langsmith-trace"];
      metadata[BAGGAGE_METADATA_KEY.input] = headers["baggage"];

      // honor the tracingEnabled flag if coming from traceable
      if (runTree.tracingEnabled != null) {
        defaultEnabled = runTree.tracingEnabled;
      }
    } catch {
      // pass — not inside a traceable function
    }

    if (
      metadata[RUN_ID_METADATA_KEY.input] &&
      metadata[TRACE_METADATA_KEY.input]
    ) {
      throw new Error(
        "Cannot provide `runId` when used within traceable function."
      );
    }

    return { ...rest, isEnabled: rest.isEnabled ?? defaultEnabled, metadata };
  }

  /** @internal Reads a string-valued span attribute, if present. */
  protected getSpanAttributeKey = (
    span: AISDKSpan,
    key: string
  ): string | undefined => {
    const attributes = span.attributes as Record<string, unknown>;

    return key in attributes && typeof attributes[key] === "string"
      ? (attributes[key] as string)
      : undefined;
  };

  /**
   * @internal Determines how this trace attaches to LangSmith: nested under
   * a traceable run tree, rooted at a user-provided run ID, or standalone.
   */
  protected parseInteropFromMetadata(span: AISDKSpan): InteropType {
    const userTraceId = this.getSpanAttributeKey(
      span,
      RUN_ID_METADATA_KEY.output
    );
    const parentTrace = this.getSpanAttributeKey(
      span,
      TRACE_METADATA_KEY.output
    );

    if (parentTrace && userTraceId) {
      throw new Error(
        `Cannot provide both "${RUN_ID_METADATA_KEY.input}" and "${TRACE_METADATA_KEY.input}" metadata keys.`
      );
    }

    if (parentTrace) {
      const parentRunTree = RunTree.fromHeaders({
        "langsmith-trace": parentTrace,
        baggage:
          this.getSpanAttributeKey(span, BAGGAGE_METADATA_KEY.output) || "",
      });

      if (!parentRunTree)
        throw new Error("Unreachable code: empty parent run tree");
      return { type: "traceable", parentRunTree };
    }

    if (userTraceId) return { type: "user", userTraceId };
    return undefined;
  }

  /**
   * @internal Converts a supported AI SDK span into a LangSmith RunCreate
   * payload. Returns undefined for span types we do not ingest (e.g. embed).
   */
  protected getRunCreate(span: AISDKSpan): RunCreate | undefined {
    const runId = uuid5(span.spanContext().spanId, RUN_ID_NAMESPACE);
    const parentRunId = span.parentSpanId
      ? uuid5(span.parentSpanId, RUN_ID_NAMESPACE)
      : undefined;

    // Fills in the identity, metadata and timing fields shared by all
    // span-type-specific RunCreate payloads below.
    const asRunCreate = (rawConfig: RunCreate) => {
      // carry over user telemetry metadata, minus the exporter-reserved keys
      const aiMetadata = Object.keys(span.attributes)
        .filter(
          (key) =>
            key.startsWith("ai.telemetry.metadata.") &&
            !RESERVED_METADATA_KEYS.includes(key)
        )
        .reduce((acc, key) => {
          acc[key.slice("ai.telemetry.metadata.".length)] =
            span.attributes[key as keyof typeof span.attributes];
          return acc;
        }, {} as Record<string, unknown>);

      if (
        ("ai.telemetry.functionId" in span.attributes &&
          span.attributes["ai.telemetry.functionId"]) ||
        ("resource.name" in span.attributes && span.attributes["resource.name"])
      ) {
        aiMetadata["functionId"] =
          span.attributes["ai.telemetry.functionId"] ||
          span.attributes["resource.name"];
      }

      const parsedStart = convertToTimestamp(span.startTime);
      const parsedEnd = convertToTimestamp(span.endTime);

      let name = rawConfig.name;

      // if user provided a custom name, only use it if it's the root
      if (this.isRootRun(span)) {
        name =
          this.getSpanAttributeKey(span, RUN_NAME_METADATA_KEY.output) || name;
      }

      const config: RunCreate = {
        ...rawConfig,
        name,
        id: runId,
        parent_run_id: parentRunId,
        extra: {
          ...rawConfig.extra,
          metadata: {
            ...rawConfig.extra?.metadata,
            ...aiMetadata,
            "ai.operationId": span.attributes["ai.operationId"],
          },
        },
        session_name:
          getLangSmithEnvironmentVariable("PROJECT") ??
          getLangSmithEnvironmentVariable("SESSION"),
        // min/max guards against spans reporting end before start
        start_time: Math.min(parsedStart, parsedEnd),
        end_time: Math.max(parsedStart, parsedEnd),
      };

      return config;
    };

    switch (span.name) {
      case "ai.generateText.doGenerate":
      case "ai.generateText":
      case "ai.streamText.doStream":
      case "ai.streamText": {
        const inputs = ((): KVMap => {
          if ("ai.prompt.messages" in span.attributes) {
            return {
              messages: tryJson(span.attributes["ai.prompt.messages"]).flatMap(
                (i: CoreMessage) => convertCoreToSmith(i)
              ),
            };
          }

          if ("ai.prompt" in span.attributes) {
            const input = tryJson(span.attributes["ai.prompt"]);

            if (
              typeof input === "object" &&
              input != null &&
              "messages" in input &&
              Array.isArray(input.messages)
            ) {
              return {
                messages: input.messages.flatMap((i: CoreMessage) =>
                  convertCoreToSmith(i)
                ),
              };
            }

            return { input };
          }

          return {};
        })();

        const outputs = ((): KVMap | undefined => {
          let result: KVMap | undefined = undefined;

          if (span.attributes["ai.response.toolCalls"]) {
            let content = tryJson(span.attributes["ai.response.toolCalls"]);

            if (Array.isArray(content)) {
              content = content.map((i) => ({
                type: "tool-call",
                ...i,
                args: tryJson(i.args),
              }));
            }

            result = {
              llm_output: convertCoreToSmith({
                role: "assistant",
                content,
              } satisfies CoreAssistantMessage),
            };
          } else if (span.attributes["ai.response.text"]) {
            result = {
              llm_output: convertCoreToSmith({
                role: "assistant",
                content: span.attributes["ai.response.text"],
              }),
            };
          }

          if (span.attributes["ai.usage.completionTokens"]) {
            result ??= {};
            result.llm_output ??= {};
            result.llm_output.token_usage ??= {};
            result.llm_output.token_usage["completion_tokens"] =
              span.attributes["ai.usage.completionTokens"];
          }

          if (span.attributes["ai.usage.promptTokens"]) {
            result ??= {};
            result.llm_output ??= {};
            result.llm_output.token_usage ??= {};
            result.llm_output.token_usage["prompt_tokens"] =
              span.attributes["ai.usage.promptTokens"];
          }

          return result;
        })();

        const events: KVMap[] = [];
        const firstChunkEvent = span.events.find(
          (i) => i.name === "ai.stream.firstChunk"
        );
        if (firstChunkEvent) {
          events.push({
            name: "new_token",
            time: convertToTimestamp(firstChunkEvent.time),
          });
        }

        // TODO: add first_token_time
        return asRunCreate({
          run_type: "llm",
          name: span.attributes["ai.model.provider"],
          inputs,
          outputs,
          events,
          extra: {
            batch_size: 1,
            metadata: {
              ls_provider: span.attributes["ai.model.provider"]
                .split(".")
                .at(0),
              ls_model_type: span.attributes["ai.model.provider"]
                .split(".")
                .at(1),
              ls_model_name: span.attributes["ai.model.id"],
            },
          },
        });
      }

      case "ai.toolCall": {
        // unwrap object args/results to top level; wrap primitives
        const args = tryJson(span.attributes["ai.toolCall.args"]);
        let inputs: KVMap = { args };
        if (typeof args === "object" && args != null) {
          inputs = args;
        }

        const output = tryJson(span.attributes["ai.toolCall.result"]);
        let outputs: KVMap = { output };
        if (typeof output === "object" && output != null) {
          outputs = output;
        }

        return asRunCreate({
          run_type: "tool",
          name: span.attributes["ai.toolCall.name"],
          inputs,
          outputs,
        });
      }

      case "ai.streamObject":
      case "ai.streamObject.doStream":
      case "ai.generateObject":
      case "ai.generateObject.doGenerate": {
        const inputs = ((): KVMap => {
          if ("ai.prompt.messages" in span.attributes) {
            return {
              messages: tryJson(span.attributes["ai.prompt.messages"]).flatMap(
                (i: CoreMessage) => convertCoreToSmith(i)
              ),
            };
          }

          if ("ai.prompt" in span.attributes) {
            return { input: tryJson(span.attributes["ai.prompt"]) };
          }

          return {};
        })();

        const outputs = ((): KVMap | undefined => {
          let result: KVMap | undefined = undefined;

          if (span.attributes["ai.response.object"]) {
            result = {
              output: tryJson(span.attributes["ai.response.object"]),
            };
          }

          if (span.attributes["ai.usage.completionTokens"]) {
            result ??= {};
            result.llm_output ??= {};
            result.llm_output.token_usage ??= {};
            result.llm_output.token_usage["completion_tokens"] =
              span.attributes["ai.usage.completionTokens"];
          }

          if (span.attributes["ai.usage.promptTokens"]) {
            result ??= {};
            result.llm_output ??= {};
            result.llm_output.token_usage ??= {};
            result.llm_output.token_usage["prompt_tokens"] =
              +span.attributes["ai.usage.promptTokens"];
          }

          return result;
        })();

        const events: KVMap[] = [];
        const firstChunkEvent = span.events.find(
          (i) => i.name === "ai.stream.firstChunk"
        );
        if (firstChunkEvent) {
          events.push({
            name: "new_token",
            time: convertToTimestamp(firstChunkEvent.time),
          });
        }

        return asRunCreate({
          run_type: "llm",
          name: span.attributes["ai.model.provider"],
          inputs,
          outputs,
          events,
          extra: {
            batch_size: 1,
            metadata: {
              ls_provider: span.attributes["ai.model.provider"]
                .split(".")
                .at(0),
              ls_model_type: span.attributes["ai.model.provider"]
                .split(".")
                .at(1),
              ls_model_name: span.attributes["ai.model.id"],
            },
          },
        });
      }

      case "ai.embed":
      case "ai.embed.doEmbed":
      case "ai.embedMany":
      case "ai.embedMany.doEmbed":
      default:
        return undefined;
    }
  }

  /** @internal True for top-level AI SDK operation spans. */
  protected isRootRun(span: AISDKSpan): boolean {
    switch (span.name) {
      case "ai.generateText":
      case "ai.streamText":
      case "ai.generateObject":
      case "ai.streamObject":
      case "ai.embed":
      case "ai.embedMany":
        return true;
      default:
        return false;
    }
  }

  /**
   * OTEL SpanExporter entry point: converts the batch of spans into LangSmith
   * runs, computes dotted orders/trace identities (including interop with
   * traceable run trees and user-provided run IDs), and submits them.
   */
  export(
    spans: unknown[],
    resultCallback: (result: { code: 0 | 1; error?: Error }) => void
  ): void {
    this.logDebug("exporting spans", spans);

    const typedSpans = (spans as AISDKSpan[])
      .slice()
      .sort((a, b) => sortByHr(a.startTime, b.startTime));

    for (const span of typedSpans) {
      const { traceId, spanId } = span.spanContext();
      const parentId = span.parentSpanId ?? undefined;

      this.traceByMap[traceId] ??= {
        childMap: {},
        nodeMap: {},
        relativeExecutionOrder: {},
      };

      const runId = uuid5(spanId, RUN_ID_NAMESPACE);
      let parentRunId = parentId
        ? uuid5(parentId, RUN_ID_NAMESPACE)
        : undefined;

      // in LangSmith we currently only support certain spans
      // which may be deeply nested within other traces
      if (this.isRootRun(span)) parentRunId = undefined;

      const traceMap = this.traceByMap[traceId];

      const run = this.getRunCreate(span);
      if (!run) {
        this.logDebug("skipping span", span);
        continue;
      }

      traceMap.relativeExecutionOrder[parentRunId ?? ROOT] ??= -1;
      traceMap.relativeExecutionOrder[parentRunId ?? ROOT] += 1;

      traceMap.nodeMap[runId] ??= {
        id: runId,
        parentId: parentRunId,
        startTime: span.startTime,
        run,
        sent: false,
        executionOrder: traceMap.relativeExecutionOrder[parentRunId ?? ROOT],
      };

      if (this.debug) console.log(`[${span.name}] ${runId}`, run);

      traceMap.childMap[parentRunId ?? ROOT] ??= [];
      traceMap.childMap[parentRunId ?? ROOT].push(traceMap.nodeMap[runId]);
      traceMap.interop = this.parseInteropFromMetadata(span);
    }

    type OverrideRunCreate = {
      id: string;
      trace_id: string;
      dotted_order: string;
      parent_run_id: string | undefined;
    };

    // We separate the identity fields (`id`, `trace_id`, `dotted_order`,
    // `parent_run_id`) from the run body so interop can rewrite identities
    // without touching the payload itself.
    const sampled: [OverrideRunCreate, RunCreate][] = [];

    for (const traceId of Object.keys(this.traceByMap)) {
      type QueueItem = { item: RunTask; dottedOrder: string; traceId: string };

      const traceMap = this.traceByMap[traceId];

      const queue: QueueItem[] =
        traceMap.childMap[ROOT]?.map((item) => ({
          item,
          dottedOrder: convertToDottedOrderFormat(
            item.startTime,
            item.id,
            item.executionOrder
          ),
          traceId: item.id,
        })) ?? [];

      const seen = new Set<string>();
      while (queue.length) {
        // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
        const task = queue.shift()!;
        if (seen.has(task.item.id)) continue;
        // BUGFIX: record the visited node. Previously `seen` was never
        // populated, so the duplicate/cycle guard above was a no-op.
        seen.add(task.item.id);

        if (!task.item.sent) {
          let override: OverrideRunCreate = {
            id: task.item.id,
            parent_run_id: task.item.parentId,
            dotted_order: task.dottedOrder,
            trace_id: task.traceId,
          };

          if (traceMap.interop) {
            // attach the run to a parent run tree
            // - id: preserve
            // - parent_run_id: use existing parent run id or hook to the provided run tree
            // - dotted_order: append to the dotted_order of the parent run tree
            // - trace_id: use from the existing run tree
            if (traceMap.interop.type === "traceable") {
              override = {
                id: override.id,
                parent_run_id:
                  override.parent_run_id ?? traceMap.interop.parentRunTree.id,
                dotted_order: [
                  traceMap.interop.parentRunTree.dotted_order,
                  override.dotted_order,
                ]
                  .filter(Boolean)
                  .join("."),
                trace_id: traceMap.interop.parentRunTree.trace_id,
              };
            } else if (traceMap.interop.type === "user") {
              // Allow user to specify custom trace ID = run ID of the root run
              // - id: use user provided run ID if root run, otherwise preserve
              // - parent_run_id: use user provided run ID if root run, otherwise preserve
              // - dotted_order: replace the trace_id with the user provided run ID
              // - trace_id: use user provided run ID
              const userTraceId = traceMap.interop.userTraceId ?? uuid4();
              override = {
                id:
                  override.id === override.trace_id ? userTraceId : override.id,
                parent_run_id:
                  override.parent_run_id === override.trace_id
                    ? userTraceId
                    : override.parent_run_id,
                dotted_order: override.dotted_order.replace(
                  override.trace_id,
                  userTraceId
                ),
                trace_id: userTraceId,
              };
            }
          }

          sampled.push([override, task.item.run]);
          task.item.sent = true;
        }

        const children = traceMap.childMap[task.item.id] ?? [];
        queue.push(
          ...children.map((child) => {
            return {
              item: child,
              dottedOrder: [
                task.dottedOrder,
                convertToDottedOrderFormat(
                  child.startTime,
                  child.id,
                  child.executionOrder
                ),
              ].join("."),
              traceId: task.traceId,
            };
          })
        );
      }
    }

    this.logDebug(`sampled runs to be sent to LangSmith`, sampled);

    Promise.all(
      sampled.map(([override, value]) =>
        this.client.createRun({ ...value, ...override })
      )
    ).then(
      () => resultCallback({ code: 0 }),
      (error) => resultCallback({ code: 1, error })
    );
  }

  /** Warns about unsent runs, then waits for pending batches to flush. */
  async shutdown(): Promise<void> {
    // find nodes which are incomplete
    const incompleteNodes = Object.values(this.traceByMap).flatMap((trace) =>
      Object.values(trace.nodeMap).filter((i) => !i.sent)
    );
    this.logDebug("shutting down", { incompleteNodes: incompleteNodes.length });

    if (incompleteNodes.length > 0) {
      console.warn(
        "Some incomplete nodes were found before shutdown and not sent to LangSmith."
      );
    }

    await this.client?.awaitPendingTraceBatches();
  }

  /** Flushes pending run batches without shutting the exporter down. */
  async forceFlush?(): Promise<void> {
    await this.client?.awaitPendingTraceBatches();
  }

  // Timestamped debug logging, enabled via constructor arg or OTEL_LOG_LEVEL.
  protected logDebug(...args: Parameters<typeof console.debug>): void {
    if (!this.debug) return;
    console.debug(`[${new Date().toISOString()}] [LangSmith]`, ...args);
  }
}
|
0 | lc_public_repos/langsmith-sdk/js | lc_public_repos/langsmith-sdk/js/src/index.ts | export {
Client,
type ClientConfig,
type LangSmithTracingClientInterface,
} from "./client.js";
export type {
Dataset,
Example,
TracerSession,
Run,
Feedback,
RetrieverOutput,
} from "./schemas.js";
export { RunTree, type RunTreeConfig } from "./run_trees.js";
export { overrideFetchImplementation } from "./singletons/fetch.js";
// Update using yarn bump-version
export const __version__ = "0.2.10";
|
0 | lc_public_repos/langsmith-sdk/js | lc_public_repos/langsmith-sdk/js/src/traceable.ts | import { AsyncLocalStorage } from "node:async_hooks";
import {
RunTree,
RunTreeConfig,
RunnableConfigLike,
isRunTree,
isRunnableConfigLike,
} from "./run_trees.js";
import { Attachments, InvocationParamsSchema, KVMap } from "./schemas.js";
import { isTracingEnabled } from "./env.js";
import {
ROOT,
AsyncLocalStorageProviderSingleton,
} from "./singletons/traceable.js";
import { _LC_CONTEXT_VARIABLES_KEY } from "./singletons/constants.js";
import { TraceableFunction } from "./singletons/types.js";
import {
isKVMap,
isReadableStream,
isAsyncIterable,
isIteratorLike,
isThenable,
isGenerator,
isPromiseMethod,
} from "./utils/asserts.js";
// Install the process-wide AsyncLocalStorage instance used to propagate the
// current RunTree across async boundaries for traceable-wrapped functions.
AsyncLocalStorageProviderSingleton.initializeGlobalInstance(
  new AsyncLocalStorage<RunTree | undefined>()
);
// Normalizes a raw argument list into a KVMap for run inputs:
// no args -> {}; multiple args -> wrapped under "args"; one plain object ->
// used directly; any other single value -> wrapped under "input".
const runInputsToMap = (rawInputs: unknown[]): KVMap => {
  const firstInput = rawInputs[0];
  if (firstInput == null) return {};
  if (rawInputs.length > 1) return { args: rawInputs };
  return isKVMap(firstInput) ? firstInput : { input: firstInput };
};
// Applies the user-supplied input processor, falling back to the raw inputs
// (with a logged error) if it throws — tracing must never break the caller.
const handleRunInputs = (
  inputs: KVMap,
  processInputs: (inputs: Readonly<KVMap>) => KVMap
): KVMap => {
  let processed: KVMap;
  try {
    processed = processInputs(inputs);
  } catch (e) {
    console.error(
      "Error occurred during processInputs. Sending raw inputs:",
      e
    );
    return inputs;
  }
  return processed;
};
// Wraps non-map outputs under an "outputs" key, then applies the user-supplied
// output processor; on failure, logs and falls back to the unprocessed map.
const handleRunOutputs = (
  rawOutputs: unknown,
  processOutputs: (outputs: Readonly<KVMap>) => KVMap
): KVMap => {
  const outputs: KVMap = isKVMap(rawOutputs)
    ? rawOutputs
    : { outputs: rawOutputs };

  try {
    return processOutputs(outputs);
  } catch (e) {
    console.error(
      "Error occurred during processOutputs. Sending raw outputs:",
      e
    );
    return outputs;
  }
};
// Splits attachments out of the raw argument list via the optional extractor.
// Errors in the extractor are logged and treated as "no attachments".
const handleRunAttachments = (
  rawInputs: unknown[],
  extractAttachments?: (
    ...args: unknown[]
  ) => [Attachments | undefined, unknown[]]
): [Attachments | undefined, unknown[]] => {
  if (extractAttachments == null) return [undefined, rawInputs];

  try {
    return extractAttachments(...rawInputs);
  } catch (e) {
    console.error("Error occurred during extractAttachments:", e);
    return [undefined, rawInputs];
  }
};
// Prepares a RunTree for tracing a call: attaches extracted attachments,
// processed inputs, and inferred invocation params. Returns undefined when
// tracing is disabled for this tree.
const getTracingRunTree = <Args extends unknown[]>(
  runTree: RunTree,
  inputs: Args,
  getInvocationParams:
    | ((...args: Args) => InvocationParamsSchema | undefined)
    | undefined,
  processInputs: (inputs: Readonly<KVMap>) => KVMap,
  extractAttachments:
    | ((...args: Args) => [Attachments | undefined, KVMap])
    | undefined
): RunTree | undefined => {
  if (!isTracingEnabled(runTree.tracingEnabled)) return undefined;

  const [attachments, remainingInputs] = handleRunAttachments(
    inputs,
    extractAttachments as
      | ((...args: unknown[]) => [Attachments | undefined, unknown[]])
      | undefined
  );
  runTree.attachments = attachments;
  runTree.inputs = handleRunInputs(remainingInputs, processInputs);

  const invocationParams = getInvocationParams?.(...inputs);
  if (invocationParams != null) {
    runTree.extra ??= {};
    // Explicit user metadata takes precedence over inferred params.
    runTree.extra.metadata = {
      ...invocationParams,
      ...runTree.extra.metadata,
    };
  }

  return runTree;
};
// idea: store the state of the promise outside
// but only when the promise is "consumed"
//
// Wraps a promise in a Proxy that records its settled value (or rejection
// reason) as soon as a consumer attaches a handler via `then`/`catch`.
// A synthetic `toJSON` then exposes that captured state so the promise can
// be serialized into run inputs/outputs after the fact:
//   - never consumed -> undefined
//   - resolved       -> the resolved value
//   - rejected       -> { error: reason }
const getSerializablePromise = <T = unknown>(arg: Promise<T>) => {
  const proxyState: {
    current: ["resolve", unknown] | ["reject", unknown] | undefined;
  } = { current: undefined };

  const promiseProxy = new Proxy(arg, {
    get(target, prop, receiver) {
      if (prop === "then") {
        const boundThen = arg[prop].bind(arg);
        return (
          resolve: (value: unknown) => unknown,
          // default rejection handler rethrows so unhandled rejections
          // still propagate as they would on the bare promise
          reject: (error: unknown) => unknown = (x) => {
            throw x;
          }
        ) => {
          return boundThen(
            (value) => {
              // capture the settle state before handing off to the consumer
              proxyState.current = ["resolve", value];
              return resolve(value);
            },
            (error) => {
              proxyState.current = ["reject", error];
              return reject(error);
            }
          );
        };
      }

      if (prop === "catch") {
        const boundCatch = arg[prop].bind(arg);
        return (reject: (error: unknown) => unknown) => {
          return boundCatch((error) => {
            proxyState.current = ["reject", error];
            return reject(error);
          });
        };
      }

      if (prop === "toJSON") {
        return () => {
          // undefined if the promise was never awaited/consumed
          if (!proxyState.current) return undefined;
          const [type, value] = proxyState.current ?? [];
          if (type === "resolve") return value;
          return { error: value };
        };
      }

      return Reflect.get(target, prop, receiver);
    },
  });

  return promiseProxy as Promise<T> & { toJSON: () => unknown };
};
/**
 * Wraps streaming/lazy values (ReadableStream, async iterables, iterators,
 * thenables) in pass-through proxies that record consumed chunks, exposing
 * them later via a synthetic `toJSON` for run serialization. Plain values
 * are returned unchanged.
 */
const convertSerializableArg = (arg: unknown): unknown => {
  if (isReadableStream(arg)) {
    // Tee chunks into proxyState while passing them through unchanged.
    const proxyState: unknown[] = [];
    const transform = new TransformStream({
      start: () => void 0,
      transform: (chunk, controller) => {
        proxyState.push(chunk);
        controller.enqueue(chunk);
      },
      flush: () => void 0,
    });

    const pipeThrough = arg.pipeThrough(transform);
    Object.assign(pipeThrough, { toJSON: () => proxyState });
    return pipeThrough;
  }

  if (isAsyncIterable(arg)) {
    const proxyState: {
      current: (Promise<IteratorResult<unknown>> & {
        toJSON: () => unknown;
      })[];
    } = { current: [] };

    return new Proxy(arg, {
      get(target, prop, receiver) {
        if (prop === Symbol.asyncIterator) {
          return () => {
            const boundIterator = arg[Symbol.asyncIterator].bind(arg);
            const iterator = boundIterator();

            return new Proxy(iterator, {
              get(target, prop, receiver) {
                if (prop === "next" || prop === "return" || prop === "throw") {
                  // BUGFIX: bind the method actually being requested.
                  // Previously this always bound `iterator.next`, so calling
                  // `return()`/`throw()` on the proxied iterator advanced it
                  // instead of closing it / raising into it. (This also made
                  // a later `return`/`throw` branch unreachable dead code,
                  // now removed.)
                  const method = iterator[prop]?.bind(iterator);
                  // mirror native behavior: absent optional methods stay absent
                  if (method == null) return undefined;
                  return (
                    ...args: Parameters<
                      Exclude<
                        AsyncIterator<unknown>["next" | "return" | "throw"],
                        undefined
                      >
                    >
                  ) => {
                    // @ts-expect-error TS cannot infer the argument types for the bound function
                    const wrapped = getSerializablePromise(method(...args));
                    proxyState.current.push(wrapped);
                    return wrapped;
                  };
                }

                return Reflect.get(target, prop, receiver);
              },
            });
          };
        }

        if (prop === "toJSON") {
          return () => {
            const onlyNexts = proxyState.current;
            const serialized = onlyNexts.map(
              (next) => next.toJSON() as IteratorResult<unknown> | undefined
            );
            const chunks = serialized.reduce<unknown[]>((memo, next) => {
              if (next?.value) memo.push(next.value);
              return memo;
            }, []);

            return chunks;
          };
        }

        return Reflect.get(target, prop, receiver);
      },
    });
  }

  if (!Array.isArray(arg) && isIteratorLike(arg)) {
    const proxyState: Array<IteratorResult<unknown>> = [];

    return new Proxy(arg, {
      get(target, prop, receiver) {
        if (prop === "next" || prop === "return" || prop === "throw") {
          const bound = arg[prop]?.bind(arg);
          return (
            ...args: Parameters<
              Exclude<Iterator<unknown>["next" | "return" | "throw"], undefined>
            >
          ) => {
            // @ts-expect-error TS cannot infer the argument types for the bound function
            const next = bound?.(...args);
            if (next != null) proxyState.push(next);
            return next;
          };
        }

        if (prop === "toJSON") {
          return () => {
            const chunks = proxyState.reduce<unknown[]>((memo, next) => {
              if (next.value) memo.push(next.value);
              return memo;
            }, []);

            return chunks;
          };
        }

        return Reflect.get(target, prop, receiver);
      },
    });
  }

  if (isThenable(arg)) {
    return getSerializablePromise(arg);
  }

  return arg;
};
/**
* Higher-order function that takes function as input and returns a
* "TraceableFunction" - a wrapped version of the input that
* automatically handles tracing. If the returned traceable function calls any
* traceable functions, those are automatically traced as well.
*
* The returned TraceableFunction can accept a run tree or run tree config as
* its first argument. If omitted, it will default to the caller's run tree,
* or will be treated as a root run.
*
* @param wrappedFunc Targeted function to be traced
* @param config Additional metadata such as name, tags or providing
* a custom LangSmith client instance
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function traceable<Func extends (...args: any[]) => any>(
  wrappedFunc: Func,
  config?: Partial<RunTreeConfig> & {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    aggregator?: (args: any[]) => any;
    argsConfigPath?: [number] | [number, string];
    __finalTracedIteratorKey?: string;
    /**
     * Extract attachments from args and return remaining args.
     * @param args Arguments of the traced function
     * @returns Tuple of [Attachments, remaining args]
     */
    extractAttachments?: (
      ...args: Parameters<Func>
    ) => [Attachments | undefined, KVMap];
    /**
     * Extract invocation parameters from the arguments of the traced function.
     * This is useful for LangSmith to properly track common metadata like
     * provider, model name and temperature.
     *
     * @param args Arguments of the traced function
     * @returns Key-value map of the invocation parameters, which will be merged with the existing metadata
     */
    getInvocationParams?: (
      ...args: Parameters<Func>
    ) => InvocationParamsSchema | undefined;
    /**
     * Apply transformations to the inputs before logging.
     * This function should NOT mutate the inputs.
     * `processInputs` is not inherited by nested traceable functions.
     *
     * @param inputs Key-value map of the function inputs.
     * @returns Transformed key-value map
     */
    processInputs?: (inputs: Readonly<KVMap>) => KVMap;
    /**
     * Apply transformations to the outputs before logging.
     * This function should NOT mutate the outputs.
     * `processOutputs` is not inherited by nested traceable functions.
     *
     * @param outputs Key-value map of the function outputs
     * @returns Transformed key-value map
     */
    processOutputs?: (outputs: Readonly<KVMap>) => KVMap;
  }
) {
  type Inputs = Parameters<Func>;
  // Split traceable-specific options from the plain RunTreeConfig fields;
  // the rest spreads into each created run's config.
  const {
    aggregator,
    argsConfigPath,
    __finalTracedIteratorKey,
    processInputs,
    processOutputs,
    extractAttachments,
    ...runTreeConfig
  } = config ?? {};
  // Default to identity transforms / no attachments when hooks are omitted.
  const processInputsFn = processInputs ?? ((x) => x);
  const processOutputsFn = processOutputs ?? ((x) => x);
  const extractAttachmentsFn =
    extractAttachments ?? ((...x) => [undefined, runInputsToMap(x)]);
  const traceableFunc = (
    ...args: Inputs | [RunTree, ...Inputs] | [RunnableConfigLike, ...Inputs]
  ) => {
    let ensuredConfig: RunTreeConfig;
    try {
      // Optionally pull a runtime RunTreeConfig out of the call arguments
      // at the position (and optional nested key) given by argsConfigPath.
      let runtimeConfig: Partial<RunTreeConfig> | undefined;
      if (argsConfigPath) {
        const [index, path] = argsConfigPath;
        if (index === args.length - 1 && !path) {
          // Config is the last argument: remove it entirely.
          runtimeConfig = args.pop() as Partial<RunTreeConfig>;
        } else if (
          // NOTE(review): `index <= args.length` looks like it should be
          // `index < args.length`; args[args.length] is undefined, though the
          // typeof check below guards against it — confirm intent.
          index <= args.length &&
          typeof args[index] === "object" &&
          args[index] !== null
        ) {
          if (path) {
            // Config lives under a key of the argument object; strip it out.
            const { [path]: extracted, ...rest } = args[index];
            runtimeConfig = extracted as Partial<RunTreeConfig>;
            args[index] = rest;
          } else {
            runtimeConfig = args[index] as Partial<RunTreeConfig>;
            args.splice(index, 1);
          }
        }
      }
      // Merge static config with runtime config; tags are deduplicated,
      // metadata is shallow-merged (runtime wins).
      ensuredConfig = {
        name: wrappedFunc.name || "<lambda>",
        ...runTreeConfig,
        ...runtimeConfig,
        tags: [
          ...new Set([
            ...(runTreeConfig?.tags ?? []),
            ...(runtimeConfig?.tags ?? []),
          ]),
        ],
        metadata: {
          ...runTreeConfig?.metadata,
          ...runtimeConfig?.metadata,
        },
      };
    } catch (err) {
      // Never fail the user's call because of config extraction; fall back
      // to the static config only.
      console.warn(
        `Failed to extract runtime config from args for ${
          runTreeConfig?.name ?? wrappedFunc.name
        }`,
        err
      );
      ensuredConfig = {
        name: wrappedFunc.name || "<lambda>",
        ...runTreeConfig,
      };
    }
    const asyncLocalStorage = AsyncLocalStorageProviderSingleton.getInstance();
    // TODO: deal with possible nested promises and async iterables
    // Wrap streaming/lazy arguments so their consumed values can still be
    // serialized as run inputs.
    const processedArgs = args as unknown as Inputs;
    for (let i = 0; i < processedArgs.length; i++) {
      processedArgs[i] = convertSerializableArg(processedArgs[i]);
    }
    // Resolve which run tree this invocation belongs to and which arguments
    // are forwarded to the wrapped function.
    const [currentRunTree, rawInputs] = ((): [RunTree | undefined, Inputs] => {
      const [firstArg, ...restArgs] = processedArgs;
      // used for handoff between LangChain.JS and traceable functions
      if (isRunnableConfigLike(firstArg)) {
        return [
          getTracingRunTree(
            RunTree.fromRunnableConfig(firstArg, ensuredConfig),
            restArgs as Inputs,
            config?.getInvocationParams,
            processInputsFn,
            extractAttachmentsFn
          ),
          restArgs as Inputs,
        ];
      }
      // deprecated: legacy CallbackManagerRunTree used in runOnDataset
      // override ALS and do not pass-through the run tree
      if (
        isRunTree(firstArg) &&
        "callbackManager" in firstArg &&
        firstArg.callbackManager != null
      ) {
        return [firstArg, restArgs as Inputs];
      }
      // when ALS is unreliable, users can manually
      // pass in the run tree
      if (firstArg === ROOT || isRunTree(firstArg)) {
        const currentRunTree = getTracingRunTree(
          firstArg === ROOT
            ? new RunTree(ensuredConfig)
            : firstArg.createChild(ensuredConfig),
          restArgs as Inputs,
          config?.getInvocationParams,
          processInputsFn,
          extractAttachmentsFn
        );
        // Keep passing the run tree as first argument to nested calls.
        return [currentRunTree, [currentRunTree, ...restArgs] as Inputs];
      }
      // Node.JS uses AsyncLocalStorage (ALS) and AsyncResource
      // to allow storing context
      const prevRunFromStore = asyncLocalStorage.getStore();
      if (isRunTree(prevRunFromStore)) {
        return [
          getTracingRunTree(
            prevRunFromStore.createChild(ensuredConfig),
            processedArgs,
            config?.getInvocationParams,
            processInputsFn,
            extractAttachmentsFn
          ),
          processedArgs as Inputs,
        ];
      }
      // No parent found: start a new root run.
      const currentRunTree = getTracingRunTree(
        new RunTree(ensuredConfig),
        processedArgs,
        config?.getInvocationParams,
        processInputsFn,
        extractAttachmentsFn
      );
      // If a context var is set by LangChain outside of a traceable,
      // it will be an object with a single property and we should copy
      // context vars over into the new run tree.
      if (
        prevRunFromStore !== undefined &&
        _LC_CONTEXT_VARIABLES_KEY in prevRunFromStore
      ) {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        (currentRunTree as any)[_LC_CONTEXT_VARIABLES_KEY] =
          prevRunFromStore[_LC_CONTEXT_VARIABLES_KEY];
      }
      return [currentRunTree, processedArgs as Inputs];
    })();
    return asyncLocalStorage.run(currentRunTree, () => {
      const postRunPromise = currentRunTree?.postRun();
      // Reduce streamed chunks to a single output via the user-supplied
      // aggregator; fall back to the raw chunk list on aggregator failure.
      async function handleChunks(chunks: unknown[]) {
        if (aggregator !== undefined) {
          try {
            return await aggregator(chunks);
          } catch (e) {
            console.error(`[ERROR]: LangSmith aggregation failed: `, e);
          }
        }
        return chunks;
      }
      // Returns a new ReadableStream that forwards the original stream while
      // recording chunks, ending the run when the stream completes/cancels.
      function tapReadableStreamForTracing(
        stream: ReadableStream<unknown>,
        snapshot: ReturnType<typeof AsyncLocalStorage.snapshot> | undefined
      ) {
        const reader = stream.getReader();
        let finished = false;
        const chunks: unknown[] = [];
        const tappedStream = new ReadableStream({
          async start(controller) {
            // eslint-disable-next-line no-constant-condition
            while (true) {
              // Run reads inside the captured ALS snapshot when provided.
              const result = await (snapshot
                ? snapshot(() => reader.read())
                : reader.read());
              if (result.done) {
                finished = true;
                await currentRunTree?.end(
                  handleRunOutputs(await handleChunks(chunks), processOutputsFn)
                );
                await handleEnd();
                controller.close();
                break;
              }
              chunks.push(result.value);
              controller.enqueue(result.value);
            }
          },
          async cancel(reason) {
            // NOTE(review): on early cancel, end() is invoked twice (once
            // with "Cancelled", once with the partial outputs) — confirm
            // RunTree.end tolerates repeated calls.
            if (!finished) await currentRunTree?.end(undefined, "Cancelled");
            await currentRunTree?.end(
              handleRunOutputs(await handleChunks(chunks), processOutputsFn)
            );
            await handleEnd();
            return reader.cancel(reason);
          },
        });
        return tappedStream;
      }
      // Wraps an async iterator, recording yielded values and closing the
      // run on completion, error, or early consumer break.
      async function* wrapAsyncIteratorForTracing(
        iterator: AsyncIterator<unknown, unknown, undefined>,
        snapshot: ReturnType<typeof AsyncLocalStorage.snapshot> | undefined
      ) {
        let finished = false;
        const chunks: unknown[] = [];
        try {
          while (true) {
            const { value, done } = await (snapshot
              ? snapshot(() => iterator.next())
              : iterator.next());
            if (done) {
              finished = true;
              break;
            }
            chunks.push(value);
            yield value;
          }
        } catch (e) {
          await currentRunTree?.end(undefined, String(e));
          throw e;
        } finally {
          // NOTE(review): as in the stream cancel path, an unfinished
          // iterator triggers two end() calls — verify intended semantics.
          if (!finished) await currentRunTree?.end(undefined, "Cancelled");
          await currentRunTree?.end(
            handleRunOutputs(await handleChunks(chunks), processOutputsFn)
          );
          await handleEnd();
        }
      }
      // Dispatches to the stream or iterator wrapper; for plain async
      // iterables, replaces Symbol.asyncIterator in place.
      function wrapAsyncGeneratorForTracing(
        iterable: AsyncIterable<unknown>,
        snapshot: ReturnType<typeof AsyncLocalStorage.snapshot> | undefined
      ) {
        if (isReadableStream(iterable)) {
          return tapReadableStreamForTracing(iterable, snapshot);
        }
        const iterator = iterable[Symbol.asyncIterator]();
        const wrappedIterator = wrapAsyncIteratorForTracing(iterator, snapshot);
        iterable[Symbol.asyncIterator] = () => wrappedIterator;
        return iterable;
      }
      // Invokes the user's on_end hook (if any), then waits for the queued
      // POST and submits the final PATCH for the run.
      async function handleEnd() {
        const onEnd = config?.on_end;
        if (onEnd) {
          if (!currentRunTree) {
            console.warn(
              "Can not call 'on_end' if currentRunTree is undefined"
            );
          } else {
            onEnd(currentRunTree);
          }
        }
        await postRunPromise;
        await currentRunTree?.patchRun();
      }
      // Drains a sync iterator eagerly, including its final done result.
      function gatherAll(iterator: Iterator<unknown>) {
        const chunks: IteratorResult<unknown>[] = [];
        // eslint-disable-next-line no-constant-condition
        while (true) {
          const next = iterator.next();
          chunks.push(next);
          if (next.done) break;
        }
        return chunks;
      }
      let returnValue: unknown;
      try {
        returnValue = wrappedFunc(...rawInputs);
      } catch (err: unknown) {
        // Normalize synchronous throws into a rejected promise so the
        // rejection handler below records the error on the run.
        returnValue = Promise.reject(err);
      }
      if (isAsyncIterable(returnValue)) {
        const snapshot = AsyncLocalStorage.snapshot();
        return wrapAsyncGeneratorForTracing(returnValue, snapshot);
      }
      // Object containing an async iterable under a well-known key: wrap
      // just that property and return the rest unchanged.
      if (
        !Array.isArray(returnValue) &&
        typeof returnValue === "object" &&
        returnValue != null &&
        __finalTracedIteratorKey !== undefined &&
        isAsyncIterable(
          (returnValue as Record<string, any>)[__finalTracedIteratorKey]
        )
      ) {
        const snapshot = AsyncLocalStorage.snapshot();
        return {
          ...returnValue,
          [__finalTracedIteratorKey]: wrapAsyncGeneratorForTracing(
            (returnValue as Record<string, any>)[__finalTracedIteratorKey],
            snapshot
          ),
        };
      }
      const tracedPromise = new Promise<unknown>((resolve, reject) => {
        Promise.resolve(returnValue)
          .then(
            async (rawOutput) => {
              if (isAsyncIterable(rawOutput)) {
                const snapshot = AsyncLocalStorage.snapshot();
                return resolve(
                  wrapAsyncGeneratorForTracing(rawOutput, snapshot)
                );
              }
              if (
                !Array.isArray(rawOutput) &&
                typeof rawOutput === "object" &&
                rawOutput != null &&
                __finalTracedIteratorKey !== undefined &&
                isAsyncIterable(
                  (rawOutput as Record<string, any>)[__finalTracedIteratorKey]
                )
              ) {
                const snapshot = AsyncLocalStorage.snapshot();
                // Returned value flows to the trailing .then(resolve, reject).
                return {
                  ...rawOutput,
                  [__finalTracedIteratorKey]: wrapAsyncGeneratorForTracing(
                    (rawOutput as Record<string, any>)[
                      __finalTracedIteratorKey
                    ],
                    snapshot
                  ),
                };
              }
              // Sync generator: drain it eagerly, record chunks, then hand
              // back a replay generator so the caller still sees each value.
              if (isGenerator(wrappedFunc) && isIteratorLike(rawOutput)) {
                const chunks = gatherAll(rawOutput);
                try {
                  await currentRunTree?.end(
                    handleRunOutputs(
                      await handleChunks(
                        chunks.reduce<unknown[]>((memo, { value, done }) => {
                          if (!done || typeof value !== "undefined") {
                            memo.push(value);
                          }
                          return memo;
                        }, [])
                      ),
                      processOutputsFn
                    )
                  );
                  await handleEnd();
                } catch (e) {
                  console.error("Error occurred during handleEnd:", e);
                }
                return (function* () {
                  for (const ret of chunks) {
                    if (ret.done) return ret.value;
                    yield ret.value;
                  }
                })();
              }
              try {
                await currentRunTree?.end(
                  handleRunOutputs(rawOutput, processOutputsFn)
                );
                await handleEnd();
              } finally {
                // Deliberate: always give the caller the real output, even
                // if flushing the trace failed (errors are swallowed here).
                // eslint-disable-next-line no-unsafe-finally
                return rawOutput;
              }
            },
            async (error: unknown) => {
              await currentRunTree?.end(undefined, String(error));
              await handleEnd();
              throw error;
            }
          )
          .then(resolve, reject);
      });
      if (typeof returnValue !== "object" || returnValue === null) {
        return tracedPromise;
      }
      // Preserve extra properties on thenable return values while routing
      // promise methods through the traced promise.
      return new Proxy(returnValue, {
        get(target, prop, receiver) {
          if (isPromiseMethod(prop)) {
            return tracedPromise[prop].bind(tracedPromise);
          }
          return Reflect.get(target, prop, receiver);
        },
      });
    });
  };
  // Marker used by isTraceableFunction() to detect wrapped functions.
  Object.defineProperty(traceableFunc, "langsmith:traceable", {
    value: runTreeConfig,
  });
  return traceableFunc as TraceableFunction<Func>;
}
export {
getCurrentRunTree,
isTraceableFunction,
withRunTree,
ROOT,
} from "./singletons/traceable.js";
export type { RunTreeLike, TraceableFunction } from "./singletons/types.js";
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/anonymizer/index.ts | import set from "../utils/lodash/set.js";
/**
 * A string leaf discovered while walking a value, together with the
 * lodash-style path at which it was found ("" for a top-level string).
 */
export interface StringNode {
  // The string value found at `path`.
  value: string;
  // Lodash-style path to the value within the walked structure.
  path: string;
}
/**
 * Breadth-first walk over `data` collecting every string leaf together with a
 * lodash-style path: "a.b" for object keys, "a[0]" for array indices.
 * Traversal stops descending once `maxDepth` (default 10) is reached.
 *
 * @param data Arbitrary value to walk.
 * @param options Optional `maxDepth` limit for the traversal.
 * @returns String nodes in breadth-first discovery order.
 */
function extractStringNodes(data: unknown, options: { maxDepth?: number }) {
  const parsedOptions = { ...options, maxDepth: options.maxDepth ?? 10 };
  const queue: [value: unknown, depth: number, path: string][] = [
    [data, 0, ""],
  ];
  const result: { value: string; path: string }[] = [];
  while (queue.length > 0) {
    const task = queue.shift();
    if (task == null) continue;
    const [value, depth, path] = task;
    // Fix: arrays must be checked BEFORE the generic object branch — arrays
    // satisfy `typeof value === "object"`, so the array branch was previously
    // unreachable and array elements got dotted paths ("a.0") instead of
    // index paths ("a[0]").
    if (Array.isArray(value)) {
      if (depth >= parsedOptions.maxDepth) continue;
      for (let i = 0; i < value.length; i++) {
        queue.push([value[i], depth + 1, `${path}[${i}]`]);
      }
    } else if (typeof value === "object" && value != null) {
      if (depth >= parsedOptions.maxDepth) continue;
      for (const [key, nestedValue] of Object.entries(value)) {
        queue.push([nestedValue, depth + 1, path ? `${path}.${key}` : key]);
      }
    } else if (typeof value === "string") {
      result.push({ value, path });
    }
  }
  return result;
}
// Deep-copies `data` via a JSON round-trip. Note: like any JSON clone, this
// drops functions, `undefined` values, and prototypes.
function deepClone<T>(data: T): T {
  const encoded = JSON.stringify(data);
  return JSON.parse(encoded) as T;
}
/**
 * Masks a batch of string nodes. Implementations return ONLY the nodes whose
 * value changed; unchanged nodes are omitted from the result.
 */
export interface StringNodeProcessor {
  maskNodes: (nodes: StringNode[]) => StringNode[];
}
/**
 * Declarative masking rule: every match of `pattern` is replaced with
 * `replace` (defaults to "[redacted]"). `type`, when given, must be "pattern".
 */
export interface StringNodeRule {
  type?: "pattern";
  // String patterns are compiled to a global RegExp.
  pattern: RegExp | string;
  replace?: string;
}
/**
 * A replacer is either a masking callback (value, path) => masked value,
 * a list of pattern rules, or a custom node processor.
 */
export type ReplacerType =
  | ((value: string, path?: string) => string)
  | StringNodeRule[]
  | StringNodeProcessor;
/**
 * Builds an anonymizer function. The returned function deep-clones its input,
 * collects all string leaves (up to `options.maxDepth`), masks them with the
 * given replacer, and writes the masked values back at their original paths.
 *
 * @param replacer Masking callback, list of pattern rules, or node processor.
 * @param options Optional traversal depth limit.
 * @returns A function that returns an anonymized copy of its argument.
 */
export function createAnonymizer(
  replacer: ReplacerType,
  options?: { maxDepth?: number }
) {
  return <T>(data: T): T => {
    let result = deepClone(data);
    const stringNodes = extractStringNodes(result, {
      maxDepth: options?.maxDepth,
    });

    // Normalize the replacer into a StringNodeProcessor.
    let processor: StringNodeProcessor;
    if (Array.isArray(replacer)) {
      const compiled: [regex: RegExp, replace: string][] = replacer.map(
        ({ pattern, type, replace }) => {
          if (type != null && type !== "pattern")
            throw new Error("Invalid anonymizer type");
          const regex =
            typeof pattern === "string" ? new RegExp(pattern, "g") : pattern;
          return [regex, replace ?? "[redacted]"];
        }
      );
      if (compiled.length === 0) throw new Error("No replacers provided");
      processor = {
        maskNodes: (nodes: StringNode[]) =>
          nodes.reduce<StringNode[]>((memo, node) => {
            let masked = node.value;
            for (const [regex, replacement] of compiled) {
              masked = masked.replace(regex, replacement);
              // make sure we reset the state of regex
              regex.lastIndex = 0;
            }
            if (masked !== node.value) {
              memo.push({ value: masked, path: node.path });
            }
            return memo;
          }, []),
      };
    } else if (typeof replacer === "function") {
      processor = {
        maskNodes: (nodes: StringNode[]) =>
          nodes.reduce<StringNode[]>((memo, node) => {
            const masked = replacer(node.value, node.path);
            if (masked !== node.value) {
              memo.push({ value: masked, path: node.path });
            }
            return memo;
          }, []),
      };
    } else {
      processor = replacer;
    }

    // Write each masked value back; an empty path means the whole input
    // was a single string, so replace the result wholesale.
    for (const node of processor.maskNodes(stringNodes)) {
      if (node.path === "") {
        result = node.value as unknown as T;
      } else {
        set(result as unknown as object, node.path, node.value);
      }
    }
    return result;
  };
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/fetch.test.ts | /* eslint-disable no-process-env */
/* eslint-disable @typescript-eslint/no-explicit-any */
import { jest } from "@jest/globals";
import { Client } from "../client.js";
import { overrideFetchImplementation } from "../singletons/fetch.js";
import { traceable } from "../traceable.js";
// Runs the same suite twice: once against the mocked global fetch and once
// against an explicitly overridden implementation, asserting that the client
// routes every request through the expected one and never the other.
describe.each([[""], ["mocked"]])("Client uses %s fetch", (description) => {
  let globalFetchMock: jest.Mock;
  let overriddenFetch: jest.Mock;
  let expectedFetchMock: jest.Mock;
  let unexpectedFetchMock: jest.Mock;
  beforeEach(() => {
    // Both mocks return a minimal successful response advertising the
    // multipart batch-ingest endpoint.
    globalFetchMock = jest.fn(() =>
      Promise.resolve({
        ok: true,
        json: () =>
          Promise.resolve({
            batch_ingest_config: {
              use_multipart_endpoint: true,
            },
          }),
        text: () => Promise.resolve(""),
      })
    );
    overriddenFetch = jest.fn(() =>
      Promise.resolve({
        ok: true,
        json: () =>
          Promise.resolve({
            batch_ingest_config: {
              use_multipart_endpoint: true,
            },
          }),
        text: () => Promise.resolve(""),
      })
    );
    // Pick which mock should (and should not) receive traffic this pass.
    expectedFetchMock =
      description === "mocked" ? overriddenFetch : globalFetchMock;
    unexpectedFetchMock =
      description === "mocked" ? globalFetchMock : overriddenFetch;
    if (description === "mocked") {
      overrideFetchImplementation(overriddenFetch);
    } else {
      overrideFetchImplementation(globalFetchMock);
    }
    // Mock global fetch
    (globalThis as any).fetch = globalFetchMock;
  });
  afterEach(() => {
    jest.restoreAllMocks();
  });
  describe("createLLMExample", () => {
    it("should create an example with the given input and generation", async () => {
      const client = new Client({ apiKey: "test-api-key" });
      const input = "Hello, world!";
      const generation = "Bonjour, monde!";
      const options = { datasetName: "test-dataset" };
      await client.createLLMExample(input, generation, options);
      expect(expectedFetchMock).toHaveBeenCalled();
      expect(unexpectedFetchMock).not.toHaveBeenCalled();
    });
  });
  describe("createChatExample", () => {
    it("should convert LangChainBaseMessage objects to examples", async () => {
      const client = new Client({ apiKey: "test-api-key" });
      const input = [
        { text: "Hello", sender: "user" },
        { text: "Hi there", sender: "bot" },
      ];
      const generations = {
        type: "langchain",
        data: { text: "Bonjour", sender: "bot" },
      };
      const options = { datasetName: "test-dataset" };
      await client.createChatExample(input, generations, options);
      expect(expectedFetchMock).toHaveBeenCalled();
      expect(unexpectedFetchMock).not.toHaveBeenCalled();
    });
  });
  test("basic traceable implementation", async () => {
    // NOTE(review): this env var is not saved/restored, so it may leak into
    // subsequent tests in the same worker — confirm this is intended.
    process.env.LANGSMITH_TRACING_BACKGROUND = "false";
    const llm = traceable(
      async function* llm(input: string) {
        const response = input.repeat(2).split("");
        for (const char of response) {
          yield char;
        }
      },
      { tracingEnabled: true }
    );
    // Drain the traced generator; tracing flushes synchronously because
    // background tracing is disabled above.
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    for await (const _ of llm("Hello world")) {
      // pass
    }
    expect(expectedFetchMock).toHaveBeenCalled();
    expect(unexpectedFetchMock).not.toHaveBeenCalled();
  });
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/batch_client.int.test.ts | import { v4 as uuidv4 } from "uuid";
import * as fs from "node:fs";
import * as path from "node:path";
import { fileURLToPath } from "node:url";
import nodeFetch from "node-fetch";
import { Client } from "../client.js";
import { RunTree, convertToDottedOrderFormat } from "../run_trees.js";
import {
deleteProject,
waitUntilProjectFound,
waitUntilRunFound,
} from "./utils.js";
import { traceable } from "../traceable.js";
import { overrideFetchImplementation } from "../singletons/fetch.js";
// Creates a batched run, patches it with outputs, and verifies both are
// persisted and readable from the API before cleaning up the project.
test.concurrent(
  "Test persist update run",
  async () => {
    const langchainClient = new Client({
      autoBatchTracing: true,
      callerOptions: { maxRetries: 2 },
      timeout_ms: 30_000,
    });
    const projectName =
      "__test_persist_update_run_batch_1" + uuidv4().substring(0, 4);
    await deleteProject(langchainClient, projectName);
    const runId = uuidv4();
    // The dotted order encodes start time + run id; required for ingestion.
    const dottedOrder = convertToDottedOrderFormat(
      new Date().getTime() / 1000,
      runId
    );
    await langchainClient.createRun({
      id: runId,
      project_name: projectName,
      name: "test_run",
      run_type: "llm",
      inputs: { text: "hello world" },
      trace_id: runId,
      dotted_order: dottedOrder,
    });
    await langchainClient.updateRun(runId, {
      outputs: { output: ["Hi"] },
      dotted_order: dottedOrder,
      trace_id: runId,
    });
    await Promise.all([
      waitUntilRunFound(langchainClient, runId, true),
      waitUntilProjectFound(langchainClient, projectName),
    ]);
    const storedRun = await langchainClient.readRun(runId);
    expect(storedRun.id).toEqual(runId);
    await langchainClient.deleteProject({ projectName });
  },
  180_000
);
// With a 1-byte batch size limit, every run forces its own batch; three
// concurrent create/update cycles must all still persist correctly.
test.concurrent(
  "Test persist update runs above the batch size limit",
  async () => {
    const langchainClient = new Client({
      autoBatchTracing: true,
      callerOptions: { maxRetries: 2 },
      batchSizeBytesLimit: 1,
      timeout_ms: 30_000,
    });
    const projectName =
      "__test_persist_update_run_batch_above_bs_limit" +
      uuidv4().substring(0, 4);
    await deleteProject(langchainClient, projectName);
    const createRun = async () => {
      const runId = uuidv4();
      const dottedOrder = convertToDottedOrderFormat(
        new Date().getTime() / 1000,
        runId
      );
      await langchainClient.createRun({
        id: runId,
        project_name: projectName,
        name: "test_run",
        run_type: "llm",
        inputs: { text: "hello world" },
        trace_id: runId,
        dotted_order: dottedOrder,
      });
      await langchainClient.updateRun(runId, {
        outputs: { output: ["Hi"] },
        dotted_order: dottedOrder,
        trace_id: runId,
        end_time: Math.floor(new Date().getTime() / 1000),
      });
      await Promise.all([
        waitUntilRunFound(langchainClient, runId, true),
        waitUntilProjectFound(langchainClient, projectName),
      ]);
      const storedRun = await langchainClient.readRun(runId);
      expect(storedRun.id).toEqual(runId);
    };
    await Promise.all([createRun(), createRun(), createRun()]);
    await langchainClient.deleteProject({ projectName });
  },
  180_000
);
// Same create/update cycle, but with a 1 s gap between create and update so
// the two operations land in separate batches.
test.concurrent(
  "Test persist update run with delay",
  async () => {
    const langchainClient = new Client({
      autoBatchTracing: true,
      callerOptions: { maxRetries: 2 },
      timeout_ms: 30_000,
    });
    const projectName =
      "__test_persist_update_run_batch_with_delay" + uuidv4().substring(0, 4);
    await deleteProject(langchainClient, projectName);
    const runId = uuidv4();
    const dottedOrder = convertToDottedOrderFormat(
      new Date().getTime() / 1000,
      runId
    );
    await langchainClient.createRun({
      id: runId,
      project_name: projectName,
      name: "test_run",
      run_type: "llm",
      inputs: { text: "hello world" },
      trace_id: runId,
      dotted_order: dottedOrder,
    });
    // Let the first batch flush before sending the update.
    await new Promise((resolve) => setTimeout(resolve, 1000));
    await langchainClient.updateRun(runId, {
      outputs: { output: ["Hi"] },
      dotted_order: dottedOrder,
      trace_id: runId,
      end_time: Math.floor(new Date().getTime() / 1000),
    });
    await Promise.all([
      waitUntilRunFound(langchainClient, runId, true),
      waitUntilProjectFound(langchainClient, projectName),
    ]);
    const storedRun = await langchainClient.readRun(runId);
    expect(storedRun.id).toEqual(runId);
    await langchainClient.deleteProject({ projectName });
  },
  180_000
);
// Exercises the RunTree post/end/patch lifecycle and verifies the stored run
// matches the tree's identifiers, inputs, and outputs.
test.concurrent(
  "Test persist update run tree",
  async () => {
    const langchainClient = new Client({
      autoBatchTracing: true,
      callerOptions: { maxRetries: 2 },
      timeout_ms: 30_000,
    });
    const projectName =
      "__test_persist_update_run_tree" + uuidv4().substring(0, 4);
    await deleteProject(langchainClient, projectName);
    const runId = uuidv4();
    const runTree = new RunTree({
      name: "Test Run Tree",
      id: runId,
      inputs: { input: "foo1" },
      client: langchainClient,
      project_name: projectName,
    });
    await runTree.postRun();
    await runTree.end({ output: "foo2" });
    await runTree.patchRun();
    await Promise.all([
      waitUntilRunFound(langchainClient, runId, true),
      waitUntilProjectFound(langchainClient, projectName),
    ]);
    const storedRun = await langchainClient.readRun(runId);
    expect(storedRun.id).toEqual(runId);
    expect(storedRun.dotted_order).toEqual(runTree.dotted_order);
    expect(storedRun.trace_id).toEqual(runTree.trace_id);
    expect(storedRun.inputs).toEqual({ input: "foo1" });
    expect(storedRun.outputs).toEqual({ output: "foo2" });
    await langchainClient.deleteProject({ projectName });
  },
  180_000
);
// Creates a run carrying a binary attachment (a PNG fixture) and verifies the
// run round-trips through the batch ingestion path.
test.concurrent(
  "Test persist run with attachment",
  async () => {
    const langchainClient = new Client({
      autoBatchTracing: true,
      callerOptions: { maxRetries: 2 },
      timeout_ms: 30_000,
    });
    const projectName = "__test_create_attachment" + uuidv4().substring(0, 4);
    await deleteProject(langchainClient, projectName);
    const runId = uuidv4();
    const dottedOrder = convertToDottedOrderFormat(
      new Date().getTime() / 1000,
      runId
    );
    // Resolve the fixture relative to this test file (ESM-safe).
    const pathname = path.join(
      path.dirname(fileURLToPath(import.meta.url)),
      "test_data",
      "parrot-icon.png"
    );
    await langchainClient.createRun({
      id: runId,
      project_name: projectName,
      name: "test_run",
      run_type: "llm",
      inputs: { text: "hello world" },
      trace_id: runId,
      dotted_order: dottedOrder,
      attachments: {
        testimage: ["image/png", fs.readFileSync(pathname)],
      },
    });
    await langchainClient.updateRun(runId, {
      outputs: { output: ["Hi"] },
      dotted_order: dottedOrder,
      trace_id: runId,
      end_time: Math.floor(new Date().getTime() / 1000),
    });
    await Promise.all([
      waitUntilRunFound(langchainClient, runId, true),
      waitUntilProjectFound(langchainClient, projectName),
    ]);
    const storedRun = await langchainClient.readRun(runId);
    expect(storedRun.id).toEqual(runId);
    await langchainClient.deleteProject({ projectName });
  },
  180_000
);
// Stress test (skipped by default): ten concurrent traceables each producing
// a ~9 MB output, flushed via awaitPendingTraceBatches.
test.skip("very large runs", async () => {
  const langchainClient = new Client({
    autoBatchTracing: true,
    timeout_ms: 120_000,
  });
  const projectName = "__test_large_runs" + uuidv4().substring(0, 4);
  await deleteProject(langchainClient, projectName);
  console.time("largeRunTimer");
  const promises = [];
  for (let i = 0; i < 10; i++) {
    promises.push(
      traceable(
        async () => {
          return "x".repeat(9000000);
        },
        {
          project_name: projectName,
          client: langchainClient,
          tracingEnabled: true,
        }
      )()
    );
  }
  await Promise.all(promises);
  console.timeLog("largeRunTimer");
  await langchainClient.awaitPendingTraceBatches();
  console.timeLog("largeRunTimer");
  await Promise.all([waitUntilProjectFound(langchainClient, projectName)]);
  await langchainClient.deleteProject({ projectName });
}, 180_000);
// Verifies multipart batch ingestion still works when the fetch
// implementation is globally overridden with node-fetch.
// NOTE(review): overrideFetchImplementation is never restored afterwards,
// so later tests in this file run with node-fetch too — confirm intended.
test("multipart should work with overridden node-fetch", async () => {
  overrideFetchImplementation(nodeFetch);
  const langchainClient = new Client({
    autoBatchTracing: true,
    timeout_ms: 120_000,
  });
  const projectName = "__test_node_fetch" + uuidv4().substring(0, 4);
  await deleteProject(langchainClient, projectName);
  await traceable(
    async () => {
      return "testing with node fetch";
    },
    {
      project_name: projectName,
      client: langchainClient,
      tracingEnabled: true,
    }
  )();
  await langchainClient.awaitPendingTraceBatches();
  await Promise.all([waitUntilProjectFound(langchainClient, projectName)]);
  await langchainClient.deleteProject({ projectName });
  // Fix: pass the same 180 s timeout used by every other integration test in
  // this file; the Jest default is far too short for the network waits above.
}, 180_000);
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/lcls_handoff.int.test.ts | import { BaseMessage, HumanMessage } from "@langchain/core/messages";
import { RunnableConfig, RunnableLambda } from "@langchain/core/runnables";
import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain";
import { MessageGraph } from "@langchain/langgraph";
import { v4 as uuidv4 } from "uuid";
import { Client } from "../client.js";
import { Run } from "../schemas.js";
import { traceable } from "../traceable.js";
import { toArray, waitUntil } from "./utils.js";
// End-to-end handoff test: a traceable function called from inside a
// LangGraph node (LangChain callbacks -> traceable run tree) must appear in
// LangSmith as a nested run that shares the graph invocation's trace id.
// NOTE(review): no per-test timeout is passed although waitUntil below can
// block up to 120 s — presumably a global testTimeout is configured; verify.
test.concurrent(
  "Test handoff between run tree and LangChain code.",
  async () => {
    const projectName = `__test_handoff ${uuidv4()}`;
    // Define a new graph
    const workflow = new MessageGraph();
    const addValueTraceable = traceable(
      (msg: BaseMessage) => {
        return new HumanMessage({ content: msg.content + " world" });
      },
      {
        name: "add_negligible_value",
      }
    );
    // Passes the RunnableConfig as first argument so the traceable attaches
    // itself to the LangChain callback chain.
    const myFunc = async (messages: BaseMessage[], config?: RunnableConfig) => {
      const runnableConfig = config ?? { callbacks: [] };
      const newMsg = await addValueTraceable(
        runnableConfig,
        messages[0] as HumanMessage
      );
      return [newMsg];
    };
    // Define the two nodes we will cycle between
    workflow
      .addNode(
        "agent",
        new RunnableLambda({
          func: async () => new HumanMessage({ content: "Hello!" }),
        })
      )
      .addNode("action", new RunnableLambda({ func: myFunc }))
      .addEdge("__start__", "agent")
      .addEdge("agent", "action")
      .addEdge("action", "__end__");
    const app = workflow.compile();
    const tracer = new LangChainTracer({ projectName });
    const client = new Client({
      callerOptions: { maxRetries: 3 },
      timeout_ms: 30_000,
    });
    try {
      const runId = uuidv4();
      const result = await app.invoke(
        [new HumanMessage({ content: "Hello!" })],
        {
          callbacks: [tracer],
          runId,
        }
      );
      expect(result[result.length - 1].content).toEqual("Hello! world");
      // First wait until at least one trace is found in the project
      const getNestedFunction = (): Promise<Run[]> =>
        toArray(
          client.listRuns({
            projectName,
            filter: "eq(name, 'add_negligible_value')",
          })
        );
      await waitUntil(
        async () => {
          const traces = await getNestedFunction();
          return traces.length > 0;
        },
        120_000,
        10
      );
      const traces = await getNestedFunction();
      expect(traces.length).toEqual(1);
      const trace = traces[0];
      expect(trace.name).toEqual("add_negligible_value");
      expect(trace.parent_run_id).not.toBeNull();
      // Sharing the trace id proves the traceable nested under the graph run.
      expect(trace.trace_id).toEqual(runId);
    } catch (e) {
      console.error(e);
      throw e;
    } finally {
      await client.deleteProject({ projectName });
    }
  }
);
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/run_trees.test.ts | /* eslint-disable no-process-env, @typescript-eslint/no-explicit-any */
import { jest } from "@jest/globals";
import { Client } from "../client.js";
import { RunTree } from "../run_trees.js";
// Freeze Date.now so the dotted-order assertions below are deterministic
// (1620000000000 ms == 2021-05-03T00:00:00Z).
const _DATE = 1620000000000;
Date.now = jest.fn(() => _DATE);
// Verifies that passing the API key explicitly (with the env var removed)
// still enables batched ingestion — the batch caller must be invoked.
test("Should work with manually set API key", async () => {
  const key = process.env.LANGCHAIN_API_KEY;
  delete process.env.LANGCHAIN_API_KEY;
  try {
    const langchainClient = new Client({
      autoBatchTracing: true,
      callerOptions: { maxRetries: 0 },
      timeout_ms: 30_000,
      apiKey: key,
    });
    // Stub the network layer; we only assert that a call was attempted.
    const callSpy = jest
      .spyOn((langchainClient as any).batchIngestCaller, "call")
      .mockResolvedValue({
        ok: true,
        text: () => "",
      });
    const projectName = "__test_persist_update_run_tree";
    const runTree = new RunTree({
      name: "Test Run Tree",
      inputs: { input: "foo1" },
      client: langchainClient,
      project_name: projectName,
    });
    await runTree.postRun();
    // Give the background batch a moment to flush.
    await new Promise((resolve) => setTimeout(resolve, 1500));
    expect(callSpy).toHaveBeenCalled();
  } finally {
    // Always restore the env var for subsequent tests.
    process.env.LANGCHAIN_API_KEY = key;
  }
}, 180_000);
// With Date.now frozen above, each run's dotted order is the parent's order
// plus a timestamp whose trailing counter increments per sibling creation.
test("nested", () => {
  const id = "00000000-0000-0000-0000-00000000000";
  const date = "20210503T00000000000";
  const parent = new RunTree({ name: "parent_1", id: `${id}0` });
  const child1 = parent.createChild({ name: "child_1", id: `${id}1` });
  const child2 = parent.createChild({ name: "child_2", id: `${id}2` });
  const grandchild1 = child1.createChild({
    name: "grandchild_1",
    id: `${id}3`,
  });
  const grandchild2 = child1.createChild({
    name: "grandchild_2",
    id: `${id}4`,
  });
  const child3 = parent.createChild({ name: "child_3", id: `${id}5` });
  expect(parent.dotted_order).toBe(`${date}1Z${id}0`);
  expect(child1.dotted_order).toBe(`${date}1Z${id}0.${date}2Z${id}1`);
  expect(child2.dotted_order).toBe(`${date}1Z${id}0.${date}3Z${id}2`);
  expect(grandchild1.dotted_order).toBe(
    `${date}1Z${id}0.${date}2Z${id}1.${date}3Z${id}3`
  );
  expect(grandchild2.dotted_order).toBe(
    `${date}1Z${id}0.${date}2Z${id}1.${date}4Z${id}4`
  );
  expect(child3.dotted_order).toBe(`${date}1Z${id}0.${date}5Z${id}5`);
});
// JSON-serializing a run tree must preserve names, default run types, and
// the nested child_runs structure.
test("serializing run tree", () => {
  const root = new RunTree({ name: "parent_1" });
  const firstChild = root.createChild({ name: "child_1" });
  firstChild.createChild({ name: "child_2" });
  root.createChild({ name: "child_3" });
  const serialized = JSON.parse(JSON.stringify(root));
  expect(serialized).toMatchObject({
    name: "parent_1",
    run_type: "chain",
    child_runs: [
      {
        name: "child_1",
        run_type: "chain",
        child_runs: [{ name: "child_2", run_type: "chain", child_runs: [] }],
      },
      {
        name: "child_3",
        run_type: "chain",
        child_runs: [],
      },
    ],
  });
});
// Distributed tracing: a run tree serialized to headers and rehydrated on
// the "other side" must produce children whose dotted order chains onto the
// original parent's.
test("distributed", () => {
  const parent = new RunTree({
    name: "parent_1",
    id: "00000000-0000-0000-0000-00000000000",
    start_time: Date.parse("2021-05-03T00:00:00.000Z"),
  });
  const serialized = parent.toHeaders();
  const child2 = RunTree.fromHeaders(serialized)?.createChild({
    name: "child_2",
    id: "00000000-0000-0000-0000-00000000001",
    start_time: Date.parse("2021-05-03T00:00:01.000Z"),
  });
  expect(JSON.parse(JSON.stringify(child2))).toMatchObject({
    name: "child_2",
    run_type: "chain",
    dotted_order:
      "20210503T000000000001Z00000000-0000-0000-0000-00000000000.20210503T000001000002Z00000000-0000-0000-0000-00000000001",
  });
});
test("shared client between run trees", () => {
const runTree1 = new RunTree({ name: "tree_1" });
const runTree2 = new RunTree({ name: "tree_2" });
expect(runTree1.client).toBeDefined();
expect(runTree2.client).toBeDefined();
expect(runTree1.client).toBe(runTree2.client);
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/client.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import { jest } from "@jest/globals";
import { Client } from "../client.js";
import {
getEnvironmentVariables,
getLangChainEnvVars,
getLangChainEnvVarsMetadata,
} from "../utils/env.js";
import {
isVersionGreaterOrEqual,
parsePromptIdentifier,
} from "../utils/prompts.js";
describe("Client", () => {
describe("createLLMExample", () => {
it("should create an example with the given input and generation", async () => {
const client = new Client({ apiKey: "test-api-key" });
const createExampleSpy = jest
.spyOn(client, "createExample")
.mockResolvedValue({
id: "test-example-id",
dataset_id: "test-dataset-id",
inputs: {},
outputs: { text: "Bonjour, monde!" },
created_at: "2022-01-01T00:00:00.000Z",
modified_at: "2022-01-01T00:00:00.000Z",
runs: [],
});
const input = "Hello, world!";
const generation = "Bonjour, monde!";
const options = { datasetName: "test-dataset" };
await client.createLLMExample(input, generation, options);
expect(createExampleSpy).toHaveBeenCalledWith(
{ input },
{ output: generation },
options
);
});
});
  describe("createChatExample", () => {
    it("should convert LangChainBaseMessage objects to examples", async () => {
      const client = new Client({ apiKey: "test-api-key" });
      // Stub out the network call; we only inspect the arguments that
      // createChatExample forwards to createExample.
      const createExampleSpy = jest
        .spyOn(client, "createExample")
        .mockResolvedValue({
          id: "test-example-id",
          dataset_id: "test-dataset-id",
          inputs: {},
          outputs: { text: "Bonjour", sender: "bot" },
          created_at: "2022-01-01T00:00:00.000Z",
          modified_at: "2022-01-01T00:00:00.000Z",
          runs: [],
        });

      // Chat-style input: a list of message-shaped objects.
      const input = [
        { text: "Hello", sender: "user" },
        { text: "Hi there", sender: "bot" },
      ];
      // Generation in serialized LangChain form ({ type, data }).
      const generations = {
        type: "langchain",
        data: { text: "Bonjour", sender: "bot" },
      };
      const options = { datasetName: "test-dataset" };

      await client.createChatExample(input, generations, options);

      // The messages should be wrapped as { input } and the generation as
      // { output }, with options passed through unchanged.
      expect(createExampleSpy).toHaveBeenCalledWith(
        {
          input: [
            { text: "Hello", sender: "user" },
            { text: "Hi there", sender: "bot" },
          ],
        },
        {
          output: {
            data: { text: "Bonjour", sender: "bot" },
            type: "langchain",
          },
        },
        options
      );
    });
  });
it("should trim trailing slash on a passed apiUrl", () => {
const client = new Client({ apiUrl: "https://example.com/" });
const result = (client as any).apiUrl;
expect(result).toBe("https://example.com");
});
  // getHostUrl resolves the web UI URL from webUrl/apiUrl configuration.
  describe("getHostUrl", () => {
    it("should return the webUrl if it exists", () => {
      const client = new Client({
        webUrl: "http://example.com",
        apiKey: "test-api-key",
      });
      const result = (client as any).getHostUrl();
      expect(result).toBe("http://example.com");
    });
    it("should return 'http://localhost:3000' if apiUrl is localhost", () => {
      const client = new Client({ apiUrl: "http://localhost/api" });
      const result = (client as any).getHostUrl();
      expect(result).toBe("http://localhost:3000");
    });
    it("should return the webUrl without '/api' if apiUrl contains '/api'", () => {
      // NOTE(review): this constructs the client with webUrl only, so it
      // exercises the explicit-webUrl branch rather than '/api' stripping —
      // consider constructing with an apiUrl containing '/api' instead.
      const client = new Client({
        webUrl: "https://example.com",
        apiKey: "test-api-key",
      });
      const result = (client as any).getHostUrl();
      expect(result).toBe("https://example.com");
    });
    it("should trim trailing slash on a passed webUrl", () => {
      const client = new Client({ webUrl: "https://example.com/" });
      const result = (client as any).getHostUrl();
      expect(result).toBe("https://example.com");
    });
    it("should return 'https://dev.smith.langchain.com' if apiUrl contains 'dev'", () => {
      const client = new Client({
        apiUrl: "https://dev.smith.langchain.com/api",
        apiKey: "test-api-key",
      });
      const result = (client as any).getHostUrl();
      expect(result).toBe("https://dev.smith.langchain.com");
    });
    it("should return 'https://eu.smith.langchain.com' if apiUrl contains 'eu'", () => {
      const client = new Client({
        apiUrl: "https://eu.smith.langchain.com/api",
        apiKey: "test-api-key",
      });
      const result = (client as any).getHostUrl();
      expect(result).toBe("https://eu.smith.langchain.com");
    });
    it("should return 'https://smith.langchain.com' for any other apiUrl", () => {
      const client = new Client({
        apiUrl: "https://smith.langchain.com/api",
        apiKey: "test-api-key",
      });
      const result = (client as any).getHostUrl();
      expect(result).toBe("https://smith.langchain.com");
    });
  });
  describe("env functions", () => {
    it("should return the env variables correctly", async () => {
      // Seed process.env with a mix of sensitive keys, metadata keys, and an
      // unrelated variable.
      // NOTE(review): these env vars are not restored afterwards — they may
      // leak into other tests in the same process.
      // eslint-disable-next-line no-process-env
      process.env.LANGCHAIN_REVISION_ID = "test_revision_id";
      // eslint-disable-next-line no-process-env
      process.env.LANGCHAIN_API_KEY = "fake_api_key";
      // eslint-disable-next-line no-process-env
      process.env.LANGCHAIN_OTHER_KEY = "test_other_key";
      // eslint-disable-next-line no-process-env
      process.env.LANGCHAIN_OTHER_NON_SENSITIVE_METADATA = "test_some_metadata";
      // eslint-disable-next-line no-process-env
      process.env.LANGCHAIN_ENDPOINT = "https://example.com";
      // eslint-disable-next-line no-process-env
      process.env.SOME_RANDOM_THING = "random";

      const envVars = getEnvironmentVariables();
      const langchainEnvVars = getLangChainEnvVars();
      const langchainMetadataEnvVars = getLangChainEnvVarsMetadata();

      // getEnvironmentVariables returns everything unredacted.
      expect(envVars).toMatchObject({
        LANGCHAIN_REVISION_ID: "test_revision_id",
        LANGCHAIN_API_KEY: "fake_api_key",
        LANGCHAIN_OTHER_KEY: "test_other_key",
        LANGCHAIN_ENDPOINT: "https://example.com",
        SOME_RANDOM_THING: "random",
        LANGCHAIN_OTHER_NON_SENSITIVE_METADATA: "test_some_metadata",
      });
      // getLangChainEnvVars redacts values of *_KEY variables and drops
      // non-LangChain variables entirely.
      expect(langchainEnvVars).toMatchObject({
        LANGCHAIN_REVISION_ID: "test_revision_id",
        LANGCHAIN_API_KEY: "fa********ey",
        LANGCHAIN_OTHER_KEY: "te**********ey",
        LANGCHAIN_ENDPOINT: "https://example.com",
        LANGCHAIN_OTHER_NON_SENSITIVE_METADATA: "test_some_metadata",
      });
      expect(langchainEnvVars).not.toHaveProperty("SOME_RANDOM_THING");
      // Metadata extraction keeps only revision id plus non-sensitive
      // metadata keys.
      expect(langchainMetadataEnvVars).toEqual({
        revision_id: "test_revision_id",
        LANGCHAIN_OTHER_NON_SENSITIVE_METADATA: "test_some_metadata",
      });
    });
  });
describe("isVersionGreaterOrEqual", () => {
it("should return true if the version is greater or equal", () => {
// Test versions equal to 0.5.23
expect(isVersionGreaterOrEqual("0.5.23", "0.5.23")).toBe(true);
// Test versions greater than 0.5.23
expect(isVersionGreaterOrEqual("0.5.24", "0.5.23"));
expect(isVersionGreaterOrEqual("0.6.0", "0.5.23"));
expect(isVersionGreaterOrEqual("1.0.0", "0.5.23"));
// Test versions less than 0.5.23
expect(isVersionGreaterOrEqual("0.5.22", "0.5.23")).toBe(false);
expect(isVersionGreaterOrEqual("0.5.0", "0.5.23")).toBe(false);
expect(isVersionGreaterOrEqual("0.4.99", "0.5.23")).toBe(false);
});
});
describe("parsePromptIdentifier", () => {
it("should parse valid identifiers correctly", () => {
expect(parsePromptIdentifier("name")).toEqual(["-", "name", "latest"]);
expect(parsePromptIdentifier("owner/name")).toEqual([
"owner",
"name",
"latest",
]);
expect(parsePromptIdentifier("owner/name:commit")).toEqual([
"owner",
"name",
"commit",
]);
expect(parsePromptIdentifier("name:commit")).toEqual([
"-",
"name",
"commit",
]);
});
it("should throw an error for invalid identifiers", () => {
const invalidIdentifiers = [
"",
"/",
":",
"owner/",
"/name",
"owner//name",
"owner/name/",
"owner/name/extra",
":commit",
];
invalidIdentifiers.forEach((identifier) => {
expect(() => parsePromptIdentifier(identifier)).toThrowError(
`Invalid identifier format: ${identifier}`
);
});
});
});
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/wrapped_sdk.int.test.ts | import { jest } from "@jest/globals";
import { OpenAI } from "openai";
import { wrapSDK } from "../wrappers/index.js";
import { Client } from "../client.js";
// Integration test: calls the live OpenAI API through both a plain client
// and a wrapSDK-patched client, asserting the wrapper is transparent
// (identical results) while trace uploads go through the mocked caller.
test.concurrent("chat.completions", async () => {
  const client = new Client({ autoBatchTracing: false });
  // Spy on the client's HTTP caller so no trace data actually leaves the
  // process; the recorded calls are inspected at the end.
  const callSpy = jest
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    .spyOn((client as any).caller, "call")
    .mockResolvedValue({ ok: true, text: () => "" });
  const originalClient = new OpenAI();
  const patchedClient = wrapSDK(new OpenAI(), { client });
  // invoke: non-streaming completion should be unchanged by the wrapper
  // (temperature 0 + fixed seed keeps outputs comparable).
  const original = await originalClient.chat.completions.create({
    messages: [{ role: "user", content: `Say 'foo'` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
  });
  const patched = await patchedClient.chat.completions.create({
    messages: [{ role: "user", content: `Say 'foo'` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
  });
  expect(patched.choices).toEqual(original.choices);
  // stream: the wrapper must also pass streamed chunks through untouched.
  const originalStream = await originalClient.chat.completions.create({
    messages: [{ role: "user", content: `Say 'foo'` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
    stream: true,
  });
  const originalChoices = [];
  for await (const chunk of originalStream) {
    originalChoices.push(chunk.choices);
  }
  const patchedStream = await patchedClient.chat.completions.create({
    messages: [{ role: "user", content: `Say 'foo'` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
    stream: true,
  });
  const patchedChoices = [];
  for await (const chunk of patchedStream) {
    patchedChoices.push(chunk.choices);
  }
  expect(patchedChoices).toEqual(originalChoices);
  // Tracing side effect: at least one run upsert should have been attempted,
  // and every recorded request should be a run create (POST) or update (PATCH).
  expect(callSpy.mock.calls.length).toBeGreaterThanOrEqual(1);
  for (const call of callSpy.mock.calls) {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    expect(["POST", "PATCH"]).toContain((call[2] as any)["method"]);
  }
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/wrapped_ai_sdk.int.test.ts | import { openai } from "@ai-sdk/openai";
import {
generateObject,
generateText,
streamObject,
streamText,
tool,
} from "ai";
import { z } from "zod";
import { wrapAISDKModel } from "../wrappers/vercel.js";
// Integration smoke tests for wrapAISDKModel: each test wraps an OpenAI
// model from the Vercel AI SDK and exercises one SDK entry point against
// the live API. There are no assertions — the tests pass if the wrapped
// model completes without throwing. Flip DEBUG to true to print outputs.
const DEBUG = false;
test("AI SDK generateText", async () => {
  const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini"));
  const { text } = await generateText({
    model: modelWithTracing,
    prompt: "Write a vegetarian lasagna recipe for 4 people.",
  });
  DEBUG && console.log(text);
});
test("AI SDK generateText with a tool", async () => {
  const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini"));
  const { text } = await generateText({
    model: modelWithTracing,
    prompt:
      "Write a vegetarian lasagna recipe for 4 people. Get ingredients first.",
    tools: {
      getIngredients: tool({
        description: "get a list of ingredients",
        parameters: z.object({
          ingredients: z.array(z.string()),
        }),
        // Canned tool result; the model decides when to call it.
        execute: async () =>
          JSON.stringify(["pasta", "tomato", "cheese", "onions"]),
      }),
    },
    maxToolRoundtrips: 2,
  });
  DEBUG && console.log(text);
});
test("AI SDK generateObject", async () => {
  const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini"));
  const { object } = await generateObject({
    model: modelWithTracing,
    prompt: "Write a vegetarian lasagna recipe for 4 people.",
    schema: z.object({
      ingredients: z.array(z.string()),
    }),
  });
  DEBUG && console.log(object);
});
test("AI SDK streamText", async () => {
  const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini"));
  const { textStream } = await streamText({
    model: modelWithTracing,
    prompt: "Write a vegetarian lasagna recipe for 4 people.",
  });
  // Drain the stream so the call fully completes under tracing.
  for await (const chunk of textStream) {
    DEBUG && console.log(chunk);
  }
});
test("AI SDK streamObject", async () => {
  const modelWithTracing = wrapAISDKModel(openai("gpt-4o-mini"));
  const { partialObjectStream } = await streamObject({
    model: modelWithTracing,
    prompt: "Write a vegetarian lasagna recipe for 4 people.",
    schema: z.object({
      ingredients: z.array(z.string()),
    }),
  });
  // Drain the partial-object stream for the same reason.
  for await (const chunk of partialObjectStream) {
    DEBUG && console.log(chunk);
  }
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/anonymizer.test.ts | import { StringNodeRule, createAnonymizer } from "../anonymizer/index.js";
import { v4 as uuid } from "uuid";
import { traceable } from "../traceable.js";
import { BaseMessage, SystemMessage } from "@langchain/core/messages";
import { mockClient } from "./utils/mock_client.js";
import { getAssumedTreeFromCalls } from "./utils/tree.js";
// Matches e-mail-address-shaped substrings.
// NOTE(review): the '.' before the TLD is unescaped, so it matches any
// character there — confirm whether that looseness is intentional.
const EMAIL_REGEX = /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+.[a-zA-Z]{2,}/g;
// Matches RFC-4122-shaped UUIDs (8-4-4-4-12 hex groups), any version.
const UUID_REGEX =
  /[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}/g;
describe("replacer", () => {
  // A single replacer function that scrubs both e-mail addresses and UUIDs.
  const scrub = (text: string) =>
    text.replace(EMAIL_REGEX, "[email address]").replace(UUID_REGEX, "[uuid]");

  test("object", () => {
    const anonymize = createAnonymizer(scrub);
    const result = anonymize({
      message: "Hello, this is my email: hello@example.com",
      metadata: uuid(),
    });
    expect(result).toEqual({
      message: "Hello, this is my email: [email address]",
      metadata: "[uuid]",
    });
  });

  test("array", () => {
    const anonymize = createAnonymizer(scrub);
    expect(anonymize(["human", "hello@example.com"])).toEqual([
      "human",
      "[email address]",
    ]);
  });

  test("string", () => {
    const anonymize = createAnonymizer(scrub);
    expect(anonymize("hello@example.com")).toEqual("[email address]");
  });
});
describe("declared", () => {
  // Same scrubbing as the "replacer" suite, but expressed as declarative
  // pattern/replace rules instead of a function.
  const rules: StringNodeRule[] = [
    { pattern: EMAIL_REGEX, replace: "[email address]" },
    { pattern: UUID_REGEX, replace: "[uuid]" },
  ];

  test("object", () => {
    const anonymize = createAnonymizer(rules);
    const result = anonymize({
      message: "Hello, this is my email: hello@example.com",
      metadata: uuid(),
    });
    expect(result).toEqual({
      message: "Hello, this is my email: [email address]",
      metadata: "[uuid]",
    });
  });

  test("array", () => {
    const anonymize = createAnonymizer(rules);
    expect(anonymize(["human", "hello@example.com"])).toEqual([
      "human",
      "[email address]",
    ]);
  });

  test("string", () => {
    const anonymize = createAnonymizer(rules);
    expect(anonymize("hello@example.com")).toEqual("[email address]");
  });
});
// Verifies that an anonymizer attached to the Client scrubs traced
// inputs/outputs (what is uploaded) while leaving the actual function
// return values untouched.
describe("client", () => {
  test("messages", async () => {
    const anonymizer = createAnonymizer([
      { pattern: EMAIL_REGEX, replace: "[email]" },
      { pattern: UUID_REGEX, replace: "[uuid]" },
    ]);
    const { client, callSpy } = mockClient({ anonymizer });

    const id = uuid();
    // Child traceable: flattens messages and key/value pairs into one string.
    const child = traceable(
      (value: { messages: BaseMessage[]; values: Record<string, unknown> }) => {
        return [
          ...value.messages.map((message) => message.content.toString()),
          ...Object.entries(value.values).map((lst) => lst.join(": ")),
        ].join("\n");
      },
      { name: "child" }
    );

    // Parent traceable: injects a UUID-bearing system message, then calls
    // the child so both runs appear in the trace tree.
    const evaluate = traceable(
      (values: Record<string, unknown>) => {
        const messages = [new SystemMessage(`UUID: ${id}`)];
        return child({ messages, values });
      },
      { client, name: "evaluate", tracingEnabled: true }
    );

    const result = await evaluate({ email: "hello@example.com" });
    // The returned value is NOT anonymized — only what gets traced is.
    expect(result).toEqual(
      [`UUID: ${id}`, `email: hello@example.com`].join("\n")
    );
    // In the uploaded run tree, both the raw values and the serialized
    // LangChain message content are scrubbed.
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: ["evaluate:0", "child:1"],
      data: {
        "evaluate:0": {
          inputs: { email: "[email]" },
          outputs: { outputs: [`UUID: [uuid]`, `email: [email]`].join("\n") },
        },
        "child:1": {
          inputs: {
            messages: [
              {
                lc: 1,
                type: "constructor",
                id: ["langchain_core", "messages", "SystemMessage"],
                kwargs: { content: "UUID: [uuid]" },
              },
            ],
            values: { email: "[email]" },
          },
          outputs: { outputs: [`UUID: [uuid]`, `email: [email]`].join("\n") },
        },
      },
    });
  });
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/vercel.int.test.ts | import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
import {
generateText,
streamText,
generateObject,
streamObject,
tool,
} from "ai";
import { openai } from "@ai-sdk/openai";
import { v4 as uuid } from "uuid";
import { z } from "zod";
import { AISDKExporter } from "../vercel.js";
import { Client } from "../index.js";
import { traceable } from "../traceable.js";
import { waitUntilRunFound, toArray } from "./utils.js";
const client = new Client();
// Not using @opentelemetry/sdk-node because we need to force flush
// the spans to ensure they are sent to LangSmith between tests
const provider = new NodeTracerProvider();
provider.addSpanProcessor(
  new BatchSpanProcessor(new AISDKExporter({ client }))
);
provider.register();
// Each test below: run an AI SDK call with AISDKExporter telemetry enabled,
// force-flush the span processor, then poll LangSmith until the run with the
// pre-generated runId is readable.
test("generateText", async () => {
  const runId = uuid();
  await generateText({
    model: openai("gpt-4o-mini"),
    messages: [
      {
        role: "user",
        content: "What are my orders and where are they? My user ID is 123",
      },
    ],
    tools: {
      listOrders: tool({
        description: "list all orders",
        parameters: z.object({ userId: z.string() }),
        execute: async ({ userId }) =>
          `User ${userId} has the following orders: 1`,
      }),
      viewTrackingInformation: tool({
        description: "view tracking information for a specific order",
        parameters: z.object({ orderId: z.string() }),
        execute: async ({ orderId }) =>
          `Here is the tracking information for ${orderId}`,
      }),
    },
    experimental_telemetry: AISDKExporter.getSettings({
      isEnabled: true,
      runId,
      functionId: "functionId",
      metadata: { userId: "123", language: "english" },
    }),
    maxSteps: 10,
  });
  await provider.forceFlush();
  await waitUntilRunFound(client, runId, true);
  const storedRun = await client.readRun(runId);
  expect(storedRun.id).toEqual(runId);
});
test("generateText with image", async () => {
  const runId = uuid();
  // Multimodal message: text part plus an image URL part.
  await generateText({
    model: openai("gpt-4o-mini"),
    messages: [
      {
        role: "user",
        content: [
          {
            type: "text",
            text: "What's in this picture?",
          },
          {
            type: "image",
            image: new URL("https://picsum.photos/200/300"),
          },
        ],
      },
    ],
    experimental_telemetry: AISDKExporter.getSettings({
      isEnabled: true,
      runId,
      runName: "vercelImageTest",
      functionId: "functionId",
      metadata: { userId: "123", language: "english" },
    }),
  });
  await provider.forceFlush();
  await waitUntilRunFound(client, runId, true);
  const storedRun = await client.readRun(runId);
  expect(storedRun.id).toEqual(runId);
});
test.skip("streamText", async () => {
  const runId = uuid();
  const result = await streamText({
    model: openai("gpt-4o-mini"),
    messages: [
      {
        role: "user",
        content: "What are my orders and where are they? My user ID is 123",
      },
    ],
    tools: {
      listOrders: tool({
        description: "list all orders",
        parameters: z.object({ userId: z.string() }),
        execute: async ({ userId }) =>
          `User ${userId} has the following orders: 1`,
      }),
      viewTrackingInformation: tool({
        description: "view tracking information for a specific order",
        parameters: z.object({ orderId: z.string() }),
        execute: async ({ orderId }) =>
          `Here is the tracking information for ${orderId}`,
      }),
    },
    experimental_telemetry: AISDKExporter.getSettings({
      isEnabled: true,
      runId,
      functionId: "functionId",
      metadata: { userId: "123", language: "english" },
    }),
    maxSteps: 10,
  });
  // Drain the stream so spans for the whole call are emitted.
  await toArray(result.fullStream);
  await provider.forceFlush();
  await waitUntilRunFound(client, runId, true);
  const storedRun = await client.readRun(runId);
  expect(storedRun.id).toEqual(runId);
});
test("generateObject", async () => {
  const runId = uuid();
  await generateObject({
    model: openai("gpt-4o-mini", { structuredOutputs: true }),
    schema: z.object({
      weather: z.object({
        city: z.string(),
        unit: z.union([z.literal("celsius"), z.literal("fahrenheit")]),
      }),
    }),
    prompt: "What's the weather in Prague?",
    experimental_telemetry: AISDKExporter.getSettings({
      isEnabled: true,
      runId,
      functionId: "functionId",
      metadata: { userId: "123", language: "english" },
    }),
  });
  await provider.forceFlush();
  await waitUntilRunFound(client, runId, true);
  const storedRun = await client.readRun(runId);
  expect(storedRun.id).toEqual(runId);
});
test("streamObject", async () => {
  const runId = uuid();
  const result = await streamObject({
    model: openai("gpt-4o-mini", { structuredOutputs: true }),
    schema: z.object({
      weather: z.object({
        city: z.string(),
        unit: z.union([z.literal("celsius"), z.literal("fahrenheit")]),
      }),
    }),
    prompt: "What's the weather in Prague?",
    experimental_telemetry: AISDKExporter.getSettings({
      isEnabled: true,
      runId,
      functionId: "functionId",
      metadata: {
        userId: "123",
        language: "english",
      },
    }),
  });
  // Drain the partial-object stream so the call completes before flushing.
  await toArray(result.partialObjectStream);
  await provider.forceFlush();
  await waitUntilRunFound(client, runId, true);
  const storedRun = await client.readRun(runId);
  expect(storedRun.id).toEqual(runId);
});
// Nesting check: an AI SDK call inside a traceable() parent should attach
// to the parent run, and the parent's stored outputs should equal the
// traceable's return value.
test("traceable", async () => {
  const runId = uuid();
  const wrappedText = traceable(
    async (content: string) => {
      const { text } = await generateText({
        model: openai("gpt-4o-mini"),
        messages: [{ role: "user", content }],
        tools: {
          listOrders: tool({
            description: "list all orders",
            parameters: z.object({ userId: z.string() }),
            execute: async ({ userId }) =>
              `User ${userId} has the following orders: 1`,
          }),
          viewTrackingInformation: tool({
            description: "view tracking information for a specific order",
            parameters: z.object({ orderId: z.string() }),
            execute: async ({ orderId }) =>
              `Here is the tracking information for ${orderId}`,
          }),
        },
        experimental_telemetry: AISDKExporter.getSettings({
          isEnabled: true,
          functionId: "functionId",
          runName: "nestedVercelTrace",
          metadata: { userId: "123", language: "english" },
        }),
        maxSteps: 10,
      });
      // Sibling traceable to verify mixed nesting works.
      const foo = traceable(
        async () => {
          return "bar";
        },
        {
          name: "foo",
        }
      );
      await foo();
      return { text };
    },
    { name: "parentTraceable", id: runId }
  );
  const result = await wrappedText(
    "What are my orders and where are they? My user ID is 123. Use available tools."
  );
  await waitUntilRunFound(client, runId, true);
  const storedRun = await client.readRun(runId);
  expect(storedRun.outputs).toEqual(result);
});
afterAll(async () => {
  // Shut down the tracer provider so pending spans are flushed and the
  // process can exit cleanly.
  await provider.shutdown();
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/evaluate.int.test.ts | import {
EvaluationResult,
EvaluationResults,
} from "../evaluation/evaluator.js";
import { evaluate } from "../evaluation/_runner.js";
import { waitUntilRunFound } from "./utils.js";
import { Example, Run, TracerSession } from "../schemas.js";
import { Client } from "../index.js";
import { afterAll, beforeAll } from "@jest/globals";
import { RunnableLambda, RunnableSequence } from "@langchain/core/runnables";
import { v4 as uuidv4 } from "uuid";
// Unique per-run dataset names so concurrent CI runs don't collide.
const TESTING_DATASET_NAME = `test_dataset_js_evaluate_${uuidv4()}`;
const TESTING_DATASET_NAME2 = `my_splits_ds_${uuidv4()}`;
beforeAll(async () => {
  const client = new Client();
  if (!(await client.hasDataset({ datasetName: TESTING_DATASET_NAME }))) {
    // create a new dataset
    await client.createDataset(TESTING_DATASET_NAME, {
      description:
        "For testing purposed. Is created & deleted for each test run.",
    });
    // create examples: inputs 1 and 2 map to outputs 2 and 3
    const res = await client.createExamples({
      inputs: [{ input: 1 }, { input: 2 }],
      outputs: [{ output: 2 }, { output: 3 }],
      datasetName: TESTING_DATASET_NAME,
    });
    if (res.length !== 2) {
      throw new Error("Failed to create examples");
    }
  }
});
afterAll(async () => {
  const client = new Client();
  await client.deleteDataset({
    datasetName: TESTING_DATASET_NAME,
  });
  try {
    // NOTE(review): this deletes a hardcoded "my_splits_ds2", which does not
    // match TESTING_DATASET_NAME2 (`my_splits_ds_<uuid>`) declared above —
    // confirm which dataset name the splits tests actually create.
    await client.deleteDataset({
      datasetName: "my_splits_ds2",
    });
  } catch {
    //pass
  }
});
// Smoke test: evaluate() runs the target over every dataset example and
// returns one result per example (no evaluators attached here).
test("evaluate can evaluate", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };

  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    description: "Experiment from evaluate can evaluate integration test",
  });
  // console.log(evalRes.results)
  expect(evalRes.results).toHaveLength(2);

  expect(evalRes.results[0].run).toBeDefined();
  expect(evalRes.results[0].example).toBeDefined();
  expect(evalRes.results[0].evaluationResults).toBeDefined();

  const firstRun = evalRes.results[0].run;
  // The examples are not always in the same order, so it should always be 2 or 3
  expect(firstRun.outputs?.foo).toBeGreaterThanOrEqual(2);
  expect(firstRun.outputs?.foo).toBeLessThanOrEqual(3);

  // No evaluators were configured, so per-run results are empty.
  const firstRunResults = evalRes.results[0].evaluationResults;
  expect(firstRunResults.results).toHaveLength(0);

  expect(evalRes.results[1].run).toBeDefined();
  expect(evalRes.results[1].example).toBeDefined();
  expect(evalRes.results[1].evaluationResults).toBeDefined();

  const secondRun = evalRes.results[1].run;
  // The examples are not always in the same order, so it should always be 2 or 3
  expect(secondRun.outputs?.foo).toBeGreaterThanOrEqual(2);
  expect(secondRun.outputs?.foo).toBeLessThanOrEqual(3);

  const secondRunResults = evalRes.results[1].evaluationResults;
  expect(secondRunResults.results).toHaveLength(0);
});

// numRepetitions should multiply the result count: 2 examples x 3 reps = 6.
test("evaluate can repeat", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };

  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    description: "Experiment from evaluate can evaluate integration test",
    numRepetitions: 3,
  });
  expect(evalRes.results).toHaveLength(6);

  for (let i = 0; i < 6; i++) {
    expect(evalRes.results[i].run).toBeDefined();
    expect(evalRes.results[i].example).toBeDefined();
    expect(evalRes.results[i].evaluationResults).toBeDefined();
    const currRun = evalRes.results[i].run;
    // The examples are not always in the same order, so it should always be 2 or 3
    expect(currRun.outputs?.foo).toBeGreaterThanOrEqual(2);
    expect(currRun.outputs?.foo).toBeLessThanOrEqual(3);

    const firstRunResults = evalRes.results[i].evaluationResults;
    expect(firstRunResults.results).toHaveLength(0);
  }
});
// Evaluators supplied as objects with an `evaluateRun` method should be
// invoked once per run, receiving both the run and its example.
test("evaluate can evaluate with RunEvaluator evaluators", async () => {
  const targetFunc = (input: { input: number }) => {
    return { foo: input.input + 1 };
  };
  // Embeds the run/example ids in the comment so we can later verify the
  // evaluator received the right pairs.
  const customEvaluator = async (run: Run, example?: Example) => {
    return Promise.resolve({
      key: "key",
      score: 1,
      comment: `Run: ${run.id} Example: ${example?.id}`,
    });
  };
  const evaluator = {
    evaluateRun: customEvaluator,
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    evaluators: [evaluator],
    description: "evaluate can evaluate with RunEvaluator evaluators",
  });

  expect(evalRes.results).toHaveLength(2);

  expect(evalRes.results[0].run).toBeDefined();
  expect(evalRes.results[0].example).toBeDefined();
  expect(evalRes.results[0].evaluationResults).toBeDefined();

  const firstRun = evalRes.results[0].run;
  // The examples are not always in the same order, so it should always be 2 or 3
  expect(firstRun.outputs?.foo).toBeGreaterThanOrEqual(2);
  expect(firstRun.outputs?.foo).toBeLessThanOrEqual(3);

  const firstExample = evalRes.results[0].example;
  expect(firstExample).toBeDefined();

  const firstEvalResults = evalRes.results[0].evaluationResults;
  expect(firstEvalResults.results).toHaveLength(1);
  expect(firstEvalResults.results[0].key).toEqual("key");
  expect(firstEvalResults.results[0].score).toEqual(1);

  expect(evalRes.results[1].run).toBeDefined();
  expect(evalRes.results[1].example).toBeDefined();
  expect(evalRes.results[1].evaluationResults).toBeDefined();

  const secondRun = evalRes.results[1].run;
  // The examples are not always in the same order, so it should always be 2 or 3
  expect(secondRun.outputs?.foo).toBeGreaterThanOrEqual(2);
  expect(secondRun.outputs?.foo).toBeLessThanOrEqual(3);

  const secondExample = evalRes.results[1].example;
  expect(secondExample).toBeDefined();

  const secondEvalResults = evalRes.results[1].evaluationResults;
  expect(secondEvalResults.results).toHaveLength(1);
  expect(secondEvalResults.results[0].key).toEqual("key");
  expect(secondEvalResults.results[0].score).toEqual(1);

  // Test runs & examples were passed to customEvaluator
  // (order-independent: compare as sets via arrayContaining).
  const expectedCommentStrings = [
    `Run: ${secondRun.id} Example: ${secondExample?.id}`,
    `Run: ${firstRun.id} Example: ${firstExample?.id}`,
  ];
  const receivedCommentStrings = evalRes.results
    .map(({ evaluationResults }) => evaluationResults.results[0].comment)
    .filter((c): c is string => !!c);
  expect(receivedCommentStrings.length).toBe(2);
  expect(receivedCommentStrings).toEqual(
    expect.arrayContaining(expectedCommentStrings)
  );
});

// Same contract as above, but the evaluator is passed as a bare function
// instead of an object with evaluateRun.
test("evaluate can evaluate with custom evaluators", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  const customEvaluator = (run: Run, example?: Example) => {
    return Promise.resolve({
      key: "key",
      score: 1,
      comment: `Run: ${run.id} Example: ${example?.id}`,
    });
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    evaluators: [customEvaluator],
    description: "evaluate can evaluate with custom evaluators",
  });

  expect(evalRes.results).toHaveLength(2);

  expect(evalRes.results[0].run).toBeDefined();
  expect(evalRes.results[0].example).toBeDefined();
  expect(evalRes.results[0].evaluationResults).toBeDefined();

  const firstRun = evalRes.results[0].run;
  // The examples are not always in the same order, so it should always be 2 or 3
  expect(firstRun.outputs?.foo).toBeGreaterThanOrEqual(2);
  expect(firstRun.outputs?.foo).toBeLessThanOrEqual(3);

  const firstExample = evalRes.results[0].example;
  expect(firstExample).toBeDefined();

  const firstEvalResults = evalRes.results[0].evaluationResults;
  expect(firstEvalResults.results).toHaveLength(1);
  expect(firstEvalResults.results[0].key).toEqual("key");
  expect(firstEvalResults.results[0].score).toEqual(1);

  expect(evalRes.results[1].run).toBeDefined();
  expect(evalRes.results[1].example).toBeDefined();
  expect(evalRes.results[1].evaluationResults).toBeDefined();

  const secondRun = evalRes.results[1].run;
  // The examples are not always in the same order, so it should always be 2 or 3
  expect(secondRun.outputs?.foo).toBeGreaterThanOrEqual(2);
  expect(secondRun.outputs?.foo).toBeLessThanOrEqual(3);

  const secondExample = evalRes.results[1].example;
  expect(secondExample).toBeDefined();

  const secondEvalResults = evalRes.results[1].evaluationResults;
  expect(secondEvalResults.results).toHaveLength(1);
  expect(secondEvalResults.results[0].key).toEqual("key");
  expect(secondEvalResults.results[0].score).toEqual(1);

  // Test runs & examples were passed to customEvaluator
  const expectedCommentStrings = [
    `Run: ${secondRun.id} Example: ${secondExample?.id}`,
    `Run: ${firstRun.id} Example: ${firstExample?.id}`,
  ];
  const receivedCommentStrings = evalRes.results
    .map(({ evaluationResults }) => evaluationResults.results[0].comment)
    .filter((c): c is string => !!c);
  expect(receivedCommentStrings.length).toBe(2);
  expect(receivedCommentStrings).toEqual(
    expect.arrayContaining(expectedCommentStrings)
  );
});
// Summary evaluators run ONCE over all runs/examples (not per run) and
// their results land in evalRes.summaryResults.
test("evaluate can evaluate with summary evaluators", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  const customSummaryEvaluator = (
    runs: Run[],
    examples?: Example[]
  ): Promise<EvaluationResult> => {
    const runIds = runs.map(({ id }) => id).join(", ");
    const exampleIds = examples?.map(({ id }) => id).join(", ");
    return Promise.resolve({
      key: "MyCustomScore",
      score: 1,
      comment: `Runs: ${runIds} Examples: ${exampleIds}`,
    });
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    summaryEvaluators: [customSummaryEvaluator],
    description: "evaluate can evaluate with summary evaluators",
  });

  expect(evalRes.summaryResults.results).toHaveLength(1);
  expect(evalRes.summaryResults.results[0].key).toBe("MyCustomScore");
  expect(evalRes.summaryResults.results[0].score).toBe(1);

  // The comment must list ALL run/example ids, in the same order the
  // results were produced — proves the evaluator saw the full batch.
  const allRuns = evalRes.results.map(({ run }) => run);
  const allExamples = evalRes.results.map(({ example }) => example);
  const runIds = allRuns.map(({ id }) => id).join(", ");
  const exampleIds = allExamples.map(({ id }) => id).join(", ");
  expect(evalRes.summaryResults.results[0].comment).toBe(
    `Runs: ${runIds} Examples: ${exampleIds}`
  );

  expect(evalRes.results).toHaveLength(2);

  expect(evalRes.results[0].run).toBeDefined();
  expect(evalRes.results[0].example).toBeDefined();
  expect(evalRes.results[0].evaluationResults).toBeDefined();

  const firstRun = evalRes.results[0].run;
  // The examples are not always in the same order, so it should always be 2 or 3
  expect(firstRun.outputs?.foo).toBeGreaterThanOrEqual(2);
  expect(firstRun.outputs?.foo).toBeLessThanOrEqual(3);

  expect(evalRes.results[1].run).toBeDefined();
  expect(evalRes.results[1].example).toBeDefined();
  expect(evalRes.results[1].evaluationResults).toBeDefined();

  const secondRun = evalRes.results[1].run;
  // The examples are not always in the same order, so it should always be 2 or 3
  expect(secondRun.outputs?.foo).toBeGreaterThanOrEqual(2);
  expect(secondRun.outputs?.foo).toBeLessThanOrEqual(3);
});

// Skipped: demonstrates async-iteration over an ExperimentResults object;
// has no assertions, only logging.
test.skip("can iterate over evaluate results", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  const customEvaluator = async (run: Run, example?: Example) => {
    return Promise.resolve({
      key: "key",
      score: 1,
      comment: `Run: ${run.id} Example: ${example?.id}`,
    });
  };
  const evaluator = {
    evaluateRun: customEvaluator,
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    evaluators: [evaluator],
    description: "can iterate over evaluate results",
  });
  for await (const item of evalRes) {
    console.log("item", item);
  }
});
// Multiple run-level evaluators should each be invoked for every example,
// producing one feedback entry apiece.
test("can pass multiple evaluators", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  const customEvaluatorOne = async (run: Run, example?: Example) => {
    return Promise.resolve({
      key: "key",
      score: 1,
      comment: `Run: ${run.id} Example: ${example?.id}`,
    });
  };
  const customEvaluatorTwo = async (run: Run, example?: Example) => {
    return Promise.resolve({
      key: "key",
      score: 1,
      comment: `Run: ${run.id} Example: ${example?.id}`,
    });
  };
  // Wrapped in the object form (`evaluateRun`) rather than passed bare.
  const evaluators = [
    { evaluateRun: customEvaluatorOne },
    { evaluateRun: customEvaluatorTwo },
  ];
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    evaluators: evaluators,
    description: "can pass multiple evaluators",
  });
  expect(evalRes.results).toHaveLength(2);
  const firstEvalResults = evalRes.results[0];
  // Two evaluators => two feedback results on the first example.
  expect(firstEvalResults.evaluationResults.results).toHaveLength(2);
  const firstRun = firstEvalResults.run;
  const firstExample = firstEvalResults.example;
  const receivedCommentStrings = firstEvalResults.evaluationResults.results
    .map(({ comment }) => comment)
    .filter((c): c is string => !!c);
  const expectedCommentStrings = `Run: ${firstRun.id} Example: ${firstExample?.id}`;
  // Checks that both evaluators were called with the expected run and example
  expect(receivedCommentStrings).toEqual(
    expect.arrayContaining([expectedCommentStrings, expectedCommentStrings])
  );
});
// Dataset splits used by an experiment should be persisted in the experiment
// (project) metadata under `extra.metadata.dataset_splits`.
test("split info saved correctly", async () => {
  const client = new Client();
  // create a new dataset
  await client.createDataset(TESTING_DATASET_NAME2, {
    description:
      "For testing purposes. Is created & deleted for each test run.",
  });
  // create examples, each tagged with one or more splits
  await client.createExamples({
    inputs: [{ input: 1 }, { input: 2 }, { input: 3 }],
    outputs: [{ output: 2 }, { output: 3 }, { output: 4 }],
    splits: [["test"], ["train"], ["validation", "test"]],
    datasetName: TESTING_DATASET_NAME2,
  });
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  await evaluate(targetFunc, {
    data: client.listExamples({ datasetName: TESTING_DATASET_NAME2 }),
    description: "splits info saved correctly",
  });
  const exp = client.listProjects({
    referenceDatasetName: TESTING_DATASET_NAME2,
  });
  // Pick the most recent experiment by start_time, consistent with the
  // selection logic used after the second and third evaluations below
  // (listProjects order is not guaranteed).
  let myExp: TracerSession | null = null;
  for await (const session of exp) {
    if (myExp === null || session.start_time > myExp.start_time) {
      myExp = session;
    }
  }
  // All three examples were evaluated -> all three splits recorded.
  expect(myExp?.extra?.metadata?.dataset_splits.sort()).toEqual(
    ["test", "train", "validation"].sort()
  );
  await evaluate(targetFunc, {
    data: client.listExamples({
      datasetName: TESTING_DATASET_NAME2,
      splits: ["test"],
    }),
    description: "splits info saved correctly",
  });
  const exp2 = client.listProjects({
    referenceDatasetName: TESTING_DATASET_NAME2,
  });
  let myExp2: TracerSession | null = null;
  for await (const session of exp2) {
    if (myExp2 === null || session.start_time > myExp2.start_time) {
      myExp2 = session;
    }
  }
  // The "test" split includes the example also tagged "validation".
  expect(myExp2?.extra?.metadata?.dataset_splits.sort()).toEqual(
    ["test", "validation"].sort()
  );
  await evaluate(targetFunc, {
    data: client.listExamples({
      datasetName: TESTING_DATASET_NAME2,
      splits: ["train"],
    }),
    description: "splits info saved correctly",
  });
  const exp3 = client.listProjects({
    referenceDatasetName: TESTING_DATASET_NAME2,
  });
  let myExp3: TracerSession | null = null;
  for await (const session of exp3) {
    if (myExp3 === null || session.start_time > myExp3.start_time) {
      myExp3 = session;
    }
  }
  expect(myExp3?.extra?.metadata?.dataset_splits.sort()).toEqual(
    ["train"].sort()
  );
});
// Multiple summary evaluators should each run once over the whole experiment.
test("can pass multiple summary evaluators", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  // Records the run/example ids it receives so we can assert both copies of
  // the evaluator saw the full experiment.
  const customSummaryEvaluator = (
    runs: Run[],
    examples?: Example[]
  ): Promise<EvaluationResult> => {
    const runIds = runs.map(({ id }) => id).join(", ");
    const exampleIds = examples?.map(({ id }) => id).join(", ");
    return Promise.resolve({
      key: "key",
      score: 1,
      comment: `Runs: ${runIds} Examples: ${exampleIds}`,
    });
  };
  // The same evaluator is passed twice deliberately.
  const summaryEvaluators = [customSummaryEvaluator, customSummaryEvaluator];
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    summaryEvaluators,
    description: "can pass multiple summary evaluators",
  });
  expect(evalRes.results).toHaveLength(2);
  const allRuns = evalRes.results.map(({ run }) => run);
  const allExamples = evalRes.results.map(({ example }) => example);
  const runIds = allRuns.map(({ id }) => id).join(", ");
  const exampleIds = allExamples.map(({ id }) => id).join(", ");
  const summaryResults = evalRes.summaryResults.results;
  // One summary result per evaluator.
  expect(summaryResults).toHaveLength(2);
  const receivedCommentStrings = summaryResults
    .map(({ comment }) => comment)
    .filter((c): c is string => !!c);
  const expectedCommentString = `Runs: ${runIds} Examples: ${exampleIds}`;
  // Checks that both evaluators were called with the expected run and example
  expect(receivedCommentStrings).toEqual(
    expect.arrayContaining([expectedCommentString, expectedCommentString])
  );
});
// An AsyncIterable data source (e.g. `client.listExamples`) should be
// accepted in place of a dataset name.
test("can pass AsyncIterable of Example's to evaluator instead of dataset name", async () => {
  const client = new Client();
  const examplesIterator = client.listExamples({
    datasetName: TESTING_DATASET_NAME,
  });
  const target = (input: Record<string, any>) => ({ foo: input.input + 1 });
  const commentingEvaluator = (run: Run, example?: Example) =>
    Promise.resolve({
      key: "key",
      score: 1,
      comment: `Run: ${run.id} Example: ${example?.id}`,
    });
  const evalRes = await evaluate(target, {
    data: examplesIterator,
    evaluators: [commentingEvaluator],
    description: "can pass AsyncIterable of Example's to evaluator",
  });
  expect(evalRes.results).toHaveLength(2);
  const [first] = evalRes.results;
  expect(first.evaluationResults.results).toHaveLength(1);
  // The evaluator's comment proves it received the matching run/example pair.
  expect(first.evaluationResults.results[0].comment).toEqual(
    `Run: ${first.run.id} Example: ${first.example.id}`
  );
});
// `maxConcurrency: 1` should still evaluate every example (serially) and
// produce the same feedback as an unbounded run.
test("max concurrency works with custom evaluators", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  const customEvaluator = (run: Run, example?: Example) => {
    return Promise.resolve({
      key: "key",
      score: 1,
      comment: `Run: ${run.id} Example: ${example?.id}`,
    });
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    evaluators: [customEvaluator],
    maxConcurrency: 1,
    description: "max concurrency works with custom evaluators",
  });
  // Both examples must be evaluated despite the concurrency cap.
  expect(evalRes.results).toHaveLength(2);
  const firstEvalResults = evalRes.results[0];
  const runId = firstEvalResults.run.id;
  const exampleId = firstEvalResults.example.id;
  const expectedCommentStrings = `Run: ${runId} Example: ${exampleId}`;
  const receivedCommentStrings =
    firstEvalResults.evaluationResults.results[0].comment;
  expect(firstEvalResults.evaluationResults.results).toHaveLength(1);
  expect(receivedCommentStrings).toEqual(expectedCommentStrings);
});
// `maxConcurrency: 1` should not affect summary evaluators: they still run
// once over all runs/examples after the serial evaluation completes.
test("max concurrency works with summary evaluators", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  const customSummaryEvaluator = (
    runs: Run[],
    examples?: Example[]
  ): Promise<EvaluationResult> => {
    const runIds = runs.map(({ id }) => id).join(", ");
    const exampleIds = examples?.map(({ id }) => id).join(", ");
    return Promise.resolve({
      key: "key",
      score: 1,
      comment: `Runs: ${runIds} Examples: ${exampleIds}`,
    });
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    summaryEvaluators: [customSummaryEvaluator],
    maxConcurrency: 1,
    description: "max concurrency works with summary evaluators",
  });
  expect(evalRes.results).toHaveLength(2);
  const allRuns = evalRes.results.map(({ run }) => run);
  const allExamples = evalRes.results.map(({ example }) => example);
  const runIds = allRuns.map(({ id }) => id).join(", ");
  const exampleIds = allExamples.map(({ id }) => id).join(", ");
  const summaryResults = evalRes.summaryResults.results;
  expect(summaryResults).toHaveLength(1);
  const receivedCommentStrings = summaryResults[0].comment;
  const expectedCommentString = `Runs: ${runIds} Examples: ${exampleIds}`;
  // Checks that both evaluators were called with the expected run and example
  expect(receivedCommentStrings).toEqual(expectedCommentString);
});
// Skipped: a LangChain Runnable should be usable as the evaluation target,
// with its internal steps appearing as child runs of the traced run.
test.skip("Target func can be a runnable", async () => {
  // Two-step sequence so we can assert both named steps show up as children.
  const targetFunc = RunnableSequence.from([
    RunnableLambda.from((input: Record<string, any>) => ({
      foo: input.input + 1,
    })).withConfig({ runName: "First Step" }),
    RunnableLambda.from((input: { foo: number }) => ({
      foo: input.foo + 1,
    })).withConfig({ runName: "Second Step" }),
  ]);
  const customEvaluator = async (run: Run, example?: Example) => {
    return Promise.resolve({
      key: "key",
      score: 1,
      comment: `Run: ${run.id} Example: ${example?.id}`,
    });
  };
  const evaluator = {
    evaluateRun: customEvaluator,
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    evaluators: [evaluator],
    description: "Target func can be a runnable",
  });
  expect(evalRes.results).toHaveLength(2);
  expect(evalRes.results[0].run).toBeDefined();
  expect(evalRes.results[0].example).toBeDefined();
  expect(evalRes.results[0].evaluationResults).toBeDefined();
  const firstRun = evalRes.results[0].run;
  // The examples are not always in the same order, so it should always be 2 or 3
  expect(firstRun.outputs?.foo).toBeGreaterThanOrEqual(2);
  expect(firstRun.outputs?.foo).toBeLessThanOrEqual(3);
  const firstExample = evalRes.results[0].example;
  expect(firstExample).toBeDefined();
  const firstEvalResults = evalRes.results[0].evaluationResults;
  expect(firstEvalResults.results).toHaveLength(1);
  expect(firstEvalResults.results[0].key).toEqual("key");
  expect(firstEvalResults.results[0].score).toEqual(1);
  // check if the evaluated function has valid children
  // BFS over the run tree, collecting child run names; `visited` guards
  // against cycles in the run graph.
  const gatheredChildRunNames = [];
  const queue = [firstRun];
  const visited = new Set<string>();
  while (queue.length > 0) {
    const current = queue.shift();
    if (!current || visited.has(current.id)) continue;
    visited.add(current.id);
    if (current.child_runs) {
      gatheredChildRunNames.push(...current.child_runs.map((run) => run.name));
      queue.push(...current.child_runs);
    }
  }
  expect(gatheredChildRunNames).toEqual(
    expect.arrayContaining(["RunnableSequence", "First Step", "Second Step"])
  );
});
// A plain in-memory array of Example objects should work as the `data`
// argument (in addition to dataset names and async iterables).
test("evaluate can accept array of examples", async () => {
  const client = new Client();
  const examples: Example[] = [];
  // Materialize the dataset into an array before evaluating.
  for await (const example of client.listExamples({
    datasetName: TESTING_DATASET_NAME,
  })) {
    examples.push(example);
  }
  const target = (input: Record<string, any>) => ({ foo: input.input + 1 });
  const commentingEvaluator = (run: Run, example?: Example) =>
    Promise.resolve({
      key: "key",
      score: 1,
      comment: `Run: ${run.id} Example: ${example?.id}`,
    });
  const evalRes = await evaluate(target, {
    data: examples,
    evaluators: [commentingEvaluator],
    description: "evaluate can accept array of examples",
  });
  expect(evalRes.results).toHaveLength(2);
  const [first] = evalRes.results;
  expect(first.evaluationResults.results).toHaveLength(1);
  // The evaluator's comment proves it received the matching run/example pair.
  expect(first.evaluationResults.results[0].comment).toEqual(
    `Run: ${first.run.id} Example: ${first.example.id}`
  );
});
// An evaluator may return an EvaluationResults object containing several
// feedback entries; all of them should be recorded for the run.
test("evaluate accepts evaluators which return multiple feedback keys", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return { foo: input.input + 1 };
  };
  // Returns two feedback results (different keys/scores) for one run.
  const customEvaluator = (
    run: Run,
    example?: Example
  ): Promise<EvaluationResults> => {
    return Promise.resolve({
      results: [
        {
          key: "first-key",
          score: 1,
          comment: `Run: ${run.id} Example: ${example?.id}`,
        },
        {
          key: "second-key",
          score: 2,
          comment: `Run: ${run.id} Example: ${example?.id}`,
        },
      ],
    });
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    evaluators: [customEvaluator],
    description: "evaluate can evaluate with custom evaluators",
  });
  expect(evalRes.results).toHaveLength(2);
  const comment = `Run: ${evalRes.results[0].run.id} Example: ${evalRes.results[0].example.id}`;
  // Both feedback entries must be present, in the order returned.
  expect(evalRes.results[0].evaluationResults.results).toMatchObject([
    { key: "first-key", score: 1, comment },
    { key: "second-key", score: 2, comment },
  ]);
});
// Evaluators may be written in the object-parameter style
// ({ inputs, outputs, referenceOutputs }) instead of (run, example).
test("evaluate can handle evaluators with object parameters", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  // Destructures the object-style arguments supplied by the framework.
  const objectEvaluator = ({
    inputs,
    outputs,
    referenceOutputs,
  }: {
    inputs?: Record<string, any>;
    outputs?: Record<string, any>;
    referenceOutputs?: Record<string, any>;
  }) => {
    return {
      key: "object_evaluator",
      score: outputs?.foo === referenceOutputs?.output ? 1 : 0,
      comment: `Input: ${inputs?.input}, Output: ${outputs?.foo}, Expected: ${referenceOutputs?.output}`,
    };
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    evaluators: [objectEvaluator],
    description: "evaluate can handle evaluators with object parameters",
  });
  expect(evalRes.results).toHaveLength(2);
  // Check first result
  const firstResult = evalRes.results[0];
  expect(firstResult.evaluationResults.results).toHaveLength(1);
  const firstEval = firstResult.evaluationResults.results[0];
  expect(firstEval.key).toBe("object_evaluator");
  expect(firstEval.score).toBeDefined();
  // The comment containing all three labels shows every object field was
  // populated when the evaluator ran.
  expect(firstEval.comment).toContain("Input:");
  expect(firstEval.comment).toContain("Output:");
  expect(firstEval.comment).toContain("Expected:");
  // Check second result
  const secondResult = evalRes.results[1];
  expect(secondResult.evaluationResults.results).toHaveLength(1);
  const secondEval = secondResult.evaluationResults.results[0];
  expect(secondEval.key).toBe("object_evaluator");
  expect(secondEval.score).toBeDefined();
  expect(secondEval.comment).toContain("Input:");
  expect(secondEval.comment).toContain("Output:");
  expect(secondEval.comment).toContain("Expected:");
});
// Traditional (run, example) and object-style evaluators should be usable
// side by side in the same `evaluators` array.
test("evaluate can mix evaluators with different parameter styles", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  // Traditional style evaluator
  const traditionalEvaluator = (run: Run, example?: Example) => {
    return {
      key: "traditional",
      score: run.outputs?.foo === example?.outputs?.output ? 1 : 0,
    };
  };
  // Object style evaluator
  const objectEvaluator = ({
    outputs,
    referenceOutputs,
  }: {
    outputs?: Record<string, any>;
    referenceOutputs?: Record<string, any>;
  }) => {
    return {
      key: "object_style",
      score: outputs?.foo === referenceOutputs?.output ? 1 : 0,
    };
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    evaluators: [traditionalEvaluator, objectEvaluator],
    description: "evaluate can mix evaluators with different parameter styles",
  });
  expect(evalRes.results).toHaveLength(2);
  // Check both evaluators ran for each example
  for (const result of evalRes.results) {
    expect(result.evaluationResults.results).toHaveLength(2);
    const traditionalResult = result.evaluationResults.results.find(
      (r) => r.key === "traditional"
    );
    expect(traditionalResult).toBeDefined();
    expect(typeof traditionalResult?.score).toBe("number");
    const objectResult = result.evaluationResults.results.find(
      (r) => r.key === "object_style"
    );
    expect(objectResult).toBeDefined();
    expect(typeof objectResult?.score).toBe("number");
  }
});
// Object-style evaluators may destructure only the fields they need;
// both subsets ({outputs, referenceOutputs} and {run, example}) must work.
test("evaluate handles partial object parameters correctly", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  // Evaluator that only uses outputs and referenceOutputs
  const outputOnlyEvaluator = ({
    outputs,
    referenceOutputs,
  }: {
    outputs?: Record<string, any>;
    referenceOutputs?: Record<string, any>;
  }) => {
    return {
      key: "output_only",
      score: outputs?.foo === referenceOutputs?.output ? 1 : 0,
    };
  };
  // Evaluator that only uses run and example
  const runOnlyEvaluator = ({
    run,
    example,
  }: {
    run?: Run;
    example?: Example;
  }) => {
    return {
      key: "run_only",
      score: run?.outputs?.foo === example?.outputs?.output ? 1 : 0,
    };
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    evaluators: [outputOnlyEvaluator, runOnlyEvaluator],
    description: "evaluate handles partial object parameters correctly",
  });
  expect(evalRes.results).toHaveLength(2);
  // Check both evaluators ran for each example
  for (const result of evalRes.results) {
    expect(result.evaluationResults.results).toHaveLength(2);
    const outputResult = result.evaluationResults.results.find(
      (r) => r.key === "output_only"
    );
    expect(outputResult).toBeDefined();
    expect(typeof outputResult?.score).toBe("number");
    const runResult = result.evaluationResults.results.find(
      (r) => r.key === "run_only"
    );
    expect(runResult).toBeDefined();
    expect(typeof runResult?.score).toBe("number");
  }
});
// Object-style evaluators that perform async work (awaiting inside) should
// be awaited by the framework before their result is recorded.
test("evaluate handles async object-style evaluators", async () => {
  const target = (input: Record<string, any>) => ({ foo: input.input + 1 });
  const asyncEvaluator = async ({
    outputs,
    referenceOutputs,
  }: {
    outputs?: Record<string, any>;
    referenceOutputs?: Record<string, any>;
  }) => {
    // Simulate async operation before scoring.
    await new Promise((resolve) => setTimeout(resolve, 10));
    return {
      key: "async_evaluator",
      score: outputs?.foo === referenceOutputs?.output ? 1 : 0,
    };
  };
  const evalRes = await evaluate(target, {
    data: TESTING_DATASET_NAME,
    evaluators: [asyncEvaluator],
    description: "evaluate handles async object-style evaluators",
  });
  expect(evalRes.results).toHaveLength(2);
  for (const { evaluationResults } of evalRes.results) {
    expect(evaluationResults.results).toHaveLength(1);
    const [evalResult] = evaluationResults.results;
    expect(evalResult.key).toBe("async_evaluator");
    expect(typeof evalResult.score).toBe("number");
  }
});
// Summary evaluators may also use the object-parameter style, receiving
// parallel arrays of runs/examples/inputs/outputs/referenceOutputs.
test("evaluate can evaluate with updated summary evaluators", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  // Echoes every array it receives into the comment so the assertion below
  // can verify all five fields were populated and ordered consistently.
  const customSummaryEvaluator = ({
    runs,
    examples,
    inputs,
    outputs,
    referenceOutputs,
  }: {
    runs?: Run[];
    examples?: Example[];
    inputs?: Record<string, any>[];
    outputs?: Record<string, any>[];
    referenceOutputs?: Record<string, any>[];
  }): Promise<EvaluationResult> => {
    const runIds = runs?.map(({ id }) => id).join(", ") || "";
    const exampleIds = examples?.map(({ id }) => id).join(", ");
    const inputValues = inputs?.map((input) => input.input).join(", ");
    const outputValues = outputs?.map((output) => output.foo).join(", ");
    const referenceOutputValues = referenceOutputs
      ?.map((ref) => ref.output)
      .join(", ");
    return Promise.resolve({
      key: "UpdatedSummaryEvaluator",
      score: 1,
      comment: `Runs: ${runIds} Examples: ${exampleIds} Inputs: ${inputValues} Outputs: ${outputValues} ReferenceOutputs: ${referenceOutputValues}`,
    });
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    summaryEvaluators: [customSummaryEvaluator],
    description: "evaluate can evaluate with updated summary evaluators",
  });
  expect(evalRes.summaryResults.results).toHaveLength(1);
  expect(evalRes.summaryResults.results[0].key).toBe("UpdatedSummaryEvaluator");
  expect(evalRes.summaryResults.results[0].score).toBe(1);
  // Rebuild the expected comment from the experiment results in order.
  const allRuns = evalRes.results.map(({ run }) => run);
  const allExamples = evalRes.results.map(({ example }) => example);
  const allInputs = evalRes.results.map(({ example }) => example.inputs);
  const allOutputs = evalRes.results.map(({ run }) => run.outputs);
  const allReferenceOutputs = evalRes.results.map(
    ({ example }) => example.outputs
  );
  const runIds = allRuns.map(({ id }) => id).join(", ");
  const exampleIds = allExamples.map(({ id }) => id).join(", ");
  const inputValues = allInputs.map((input) => input.input).join(", ");
  const outputValues = allOutputs.map((output) => output?.foo).join(", ");
  const referenceOutputValues = allReferenceOutputs
    .map((ref) => ref?.output)
    .join(", ");
  expect(evalRes.summaryResults.results[0].comment).toBe(
    `Runs: ${runIds} Examples: ${exampleIds} Inputs: ${inputValues} Outputs: ${outputValues} ReferenceOutputs: ${referenceOutputValues}`
  );
});
// Object-style summary evaluators may destructure only a subset of the
// available fields (here: inputs/outputs/referenceOutputs, no runs/examples).
test("evaluate handles partial summary evaluator parameters correctly", async () => {
  const targetFunc = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  // Summary evaluator that only uses inputs, outputs, and referenceOutputs
  const outputOnlySummaryEvaluator = ({
    inputs,
    outputs,
    referenceOutputs,
  }: {
    inputs?: Record<string, any>[];
    outputs?: Record<string, any>[];
    referenceOutputs?: Record<string, any>[];
  }): Promise<EvaluationResult> => {
    const inputValues = inputs?.map((input) => input.input).join(", ") || "";
    const outputValues = outputs?.map((output) => output.foo).join(", ") || "";
    const referenceOutputValues = referenceOutputs
      ?.map((ref) => ref?.output)
      .join(", ");
    // NOTE: despite the name, this is the *sum* of absolute differences
    // between outputs and reference outputs — there is no division.
    const avgDiff =
      outputs?.reduce((sum, output, i) => {
        return sum + Math.abs(output?.foo - referenceOutputs?.[i]?.output);
      }, 0) || 0;
    return Promise.resolve({
      key: "OutputOnlySummaryEvaluator",
      score: avgDiff === 0 ? 1 : 0,
      comment: `Inputs: ${inputValues} Outputs: ${outputValues} ReferenceOutputs: ${referenceOutputValues} AvgDiff: ${avgDiff}`,
    });
  };
  const evalRes = await evaluate(targetFunc, {
    data: TESTING_DATASET_NAME,
    summaryEvaluators: [outputOnlySummaryEvaluator],
    description: "evaluate handles partial summary evaluator parameters",
  });
  expect(evalRes.summaryResults.results).toHaveLength(1);
  const summaryResult = evalRes.summaryResults.results[0];
  expect(summaryResult.key).toBe("OutputOnlySummaryEvaluator");
  expect(typeof summaryResult.score).toBe("number");
  // Verify the comment contains all the expected parts
  const allInputs = evalRes.results.map(({ example }) => example.inputs);
  const allOutputs = evalRes.results.map(({ run }) => run.outputs);
  const allReferenceOutputs = evalRes.results.map(
    ({ example }) => example.outputs
  );
  const inputValues = allInputs.map((input) => input.input).join(", ");
  const outputValues = allOutputs.map((output) => output?.foo).join(", ");
  const referenceOutputValues = allReferenceOutputs
    .map((ref) => ref?.output)
    .join(", ");
  // Mirror the evaluator's computation exactly: a raw sum of absolute
  // differences with a `|| 0` fallback. (Previously this divided by
  // allOutputs.length — an average — which only matched because the diff
  // happens to be 0 for this dataset.)
  const expectedAvgDiff =
    allOutputs.reduce((sum, output, i) => {
      return sum + Math.abs(output?.foo - allReferenceOutputs[i]?.output);
    }, 0) || 0;
  expect(summaryResult.comment).toBe(
    `Inputs: ${inputValues} Outputs: ${outputValues} ReferenceOutputs: ${referenceOutputValues} AvgDiff: ${expectedAvgDiff}`
  );
});
// Passing an array of experiment names to `evaluate` should trigger
// comparative evaluation and return ComparisonEvaluationResults.
test("evaluate handles comparative target with ComparativeEvaluateOptions", async () => {
  const client = new Client();
  // First, create two experiments to compare
  const targetFunc1 = (input: Record<string, any>) => {
    return {
      foo: input.input + 1,
    };
  };
  const targetFunc2 = (input: Record<string, any>) => {
    return {
      foo: input.input + 2,
    };
  };
  // Run initial experiments
  const exp1 = await evaluate(targetFunc1, {
    data: TESTING_DATASET_NAME,
    description: "First experiment for comparison",
  });
  const exp2 = await evaluate(targetFunc2, {
    data: TESTING_DATASET_NAME,
    description: "Second experiment for comparison",
  });
  // Comparative evaluation reads runs back from the API, so wait until all
  // runs from both experiments are queryable.
  await Promise.all(
    [exp1, exp2].flatMap(({ results }) =>
      results.flatMap(({ run }) => waitUntilRunFound(client, run.id))
    )
  );
  // Create comparative evaluator: scores each paired run against the
  // example's reference output.
  const comparativeEvaluator = ({
    runs,
    example,
  }: {
    runs: Run[];
    example: Example;
  }) => {
    if (!runs || !example) throw new Error("Missing required parameters");
    // Compare outputs from both runs
    const scores = Object.fromEntries(
      runs.map((run) => [
        run.id,
        run.outputs?.foo === example.outputs?.output ? 1 : 0,
      ])
    );
    return {
      key: "comparative_score",
      scores,
    };
  };
  // Run comparative evaluation
  const compareRes = await evaluate(
    [exp1.experimentName, exp2.experimentName],
    {
      evaluators: [comparativeEvaluator],
      description: "Comparative evaluation test",
      randomizeOrder: true,
      loadNested: false,
    }
  );
  // Verify we got ComparisonEvaluationResults
  expect(compareRes.experimentName).toBeDefined();
  expect(compareRes.results).toBeDefined();
  expect(Array.isArray(compareRes.results)).toBe(true);
  // Check structure of comparison results
  for (const result of compareRes.results) {
    expect(result.key).toBe("comparative_score");
    expect(result.scores).toBeDefined();
    expect(Object.keys(result.scores)).toHaveLength(2); // Should have scores for both experiments
  }
});
// A standard (run, example) evaluator is both a compile-time error
// (@ts-expect-error) and a runtime rejection when used for comparative
// evaluation of two experiments.
test("evaluate enforces correct evaluator types for comparative evaluation at runtime", async () => {
  const exp1 = await evaluate(
    (input: Record<string, any>) => ({ foo: input.input + 1 }),
    {
      data: TESTING_DATASET_NAME,
    }
  );
  const exp2 = await evaluate(
    (input: Record<string, any>) => ({ foo: input.input + 2 }),
    {
      data: TESTING_DATASET_NAME,
    }
  );
  // Create a standard evaluator (wrong type)
  const standardEvaluator = (run: Run, example: Example) => ({
    key: "standard",
    score: run.outputs?.foo === example.outputs?.output ? 1 : 0,
  });
  await expect(
    // @ts-expect-error - Should error because standardEvaluator is not a ComparativeEvaluator
    evaluate([exp1.experimentName, exp2.experimentName], {
      evaluators: [standardEvaluator],
      description: "Should fail at runtime",
    })
  ).rejects.toThrow(); // You might want to be more specific about the error message
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/evaluate_comparative.int.test.ts | import { evaluate } from "../evaluation/_runner.js";
import {
evaluateComparative,
_ComparativeEvaluator,
} from "../evaluation/evaluate_comparative.js";
import { Client } from "../index.js";
import { Run } from "../schemas.js";
import { waitUntilRunFound } from "./utils.js";
import { v4 as uuidv4 } from "uuid";
// Unique dataset name per test run so parallel CI jobs don't collide.
const TESTING_DATASET_NAME = `test_evaluate_comparative_js_${uuidv4()}`;

// Create the shared two-example dataset once per suite (idempotent:
// skipped if a dataset with this name already exists).
beforeAll(async () => {
  const client = new Client();
  if (!(await client.hasDataset({ datasetName: TESTING_DATASET_NAME }))) {
    await client.createDataset(TESTING_DATASET_NAME, {
      description: "For testing purposes",
    });
    await client.createExamples({
      inputs: [{ input: 1 }, { input: 2 }],
      outputs: [{ output: 2 }, { output: 3 }],
      datasetName: TESTING_DATASET_NAME,
    });
  }
});
// Remove the per-run dataset created in beforeAll.
afterAll(async () => {
  const client = new Client();
  await client.deleteDataset({ datasetName: TESTING_DATASET_NAME });
});
describe("evaluate comparative", () => {
  // Two experiments are run against the same dataset, then compared with a
  // pairwise evaluator that always prefers the second run in each pair.
  test("basic", async () => {
    const client = new Client();
    const firstEval = await evaluate(
      (input) => ({ foo: `first:${input.input}` }),
      { data: TESTING_DATASET_NAME }
    );
    const secondEval = await evaluate(
      (input) => ({ foo: `second:${input.input}` }),
      { data: TESTING_DATASET_NAME }
    );
    // Comparative evaluation reads runs back from the API; wait until all
    // runs from both experiments are queryable.
    await Promise.all(
      [firstEval, secondEval].flatMap(({ results }) =>
        results.flatMap(({ run }) => waitUntilRunFound(client, run.id))
      )
    );
    const pairwise = await evaluateComparative(
      [firstEval.experimentName, secondEval.experimentName],
      {
        evaluators: [
          // Scores run i in each pair as i % 2: first run 0, second run 1.
          ({ runs }: { runs?: Run[] }) => ({
            key: "latter_precedence",
            scores: Object.fromEntries(
              runs?.map((run, i) => [run.id, i % 2]) ?? []
            ),
          }),
        ],
      }
    );
    // One pairwise result per example in the dataset.
    expect(pairwise.results.length).toEqual(2);
  });
  // The pending evaluate() promises can be passed directly to
  // evaluateComparative (no need to await them or pass experiment names).
  test("pass directly", async () => {
    const pairwise = await evaluateComparative(
      [
        evaluate((input) => ({ foo: `first:${input.input}` }), {
          data: TESTING_DATASET_NAME,
        }),
        evaluate((input) => ({ foo: `second:${input.input}` }), {
          data: TESTING_DATASET_NAME,
        }),
      ],
      {
        evaluators: [
          // Scores run i in each pair as i % 2: first run 0, second run 1.
          ({ runs }: { runs?: Run[] }) => ({
            key: "latter_precedence",
            scores: Object.fromEntries(
              runs?.map((run, i) => [run.id, i % 2]) ?? []
            ),
          }),
        ],
      }
    );
    // One pairwise result per example in the dataset.
    expect(pairwise.results.length).toEqual(2);
  });
describe("evaluator formats", () => {
    // Positional (runs, example) comparative evaluators — the legacy
    // signature — must still be supported.
    test("old format evaluator", async () => {
      const pairwise = await evaluateComparative(
        [
          evaluate((input) => ({ foo: `first:${input.input}` }), {
            data: TESTING_DATASET_NAME,
          }),
          evaluate((input) => ({ foo: `second:${input.input}` }), {
            data: TESTING_DATASET_NAME,
          }),
        ],
        {
          evaluators: [
            // Old format evaluator
            (runs, example) => ({
              key: "old_format",
              scores: Object.fromEntries(
                runs.map((run) => [
                  run.id,
                  // Only the second experiment's output matches this pattern.
                  run.outputs?.foo === `second:${example.inputs.input}` ? 1 : 0,
                ])
              ),
            }),
          ],
        }
      );
      expect(pairwise.results.length).toEqual(2);
      expect(pairwise.results[0].key).toBe("old_format");
      // Second run in each pair should have score of 1
      expect(Object.values(pairwise.results[0].scores)).toEqual([0, 1]);
    });
    // Object-parameter ({ runs, inputs, outputs }) comparative evaluators —
    // the new signature — must also be supported.
    test("new format evaluator", async () => {
      // Scores 1 for the run whose output matches the second experiment's
      // formatting, 0 otherwise.
      const matchesSecondEvaluator: _ComparativeEvaluator = ({
        runs,
        inputs,
        outputs,
      }: {
        runs?: Run[];
        inputs?: Record<string, any>;
        outputs?: Record<string, any>[];
      }) => ({
        key: "new_format",
        scores: Object.fromEntries(
          // Add null checks for the optional parameters
          runs?.map((run, i) => [
            run.id,
            outputs?.[i]?.foo === `second:${inputs?.input}` ? 1 : 0,
          ]) ?? []
        ),
      });
      const pairwise = await evaluateComparative(
        [
          evaluate((input) => ({ foo: `first:${input.input}` }), {
            data: TESTING_DATASET_NAME,
          }),
          evaluate((input) => ({ foo: `second:${input.input}` }), {
            data: TESTING_DATASET_NAME,
          }),
        ],
        {
          evaluators: [matchesSecondEvaluator],
        }
      );
      expect(pairwise.results.length).toEqual(2);
      expect(pairwise.results[0].key).toBe("new_format");
      // Second run in each pair should have score of 1
      expect(Object.values(pairwise.results[0].scores)).toEqual([0, 1]);
    });
    // Legacy positional and new object-style evaluators may be mixed in one
    // `evaluators` array; each produces its own result per example.
    test("mixed old and new format evaluators", async () => {
      const matchesSecondEvaluator: _ComparativeEvaluator = ({
        runs,
        inputs,
        outputs,
      }: {
        runs?: Run[];
        inputs?: Record<string, any>;
        outputs?: Record<string, any>[];
      }) => ({
        key: "new_format",
        scores: Object.fromEntries(
          runs?.map((run, i) => [
            run.id,
            outputs?.[i]?.foo === `second:${inputs?.input}` ? 1 : 0,
          ]) ?? []
        ),
      });
      const pairwise = await evaluateComparative(
        [
          evaluate((input) => ({ foo: `first:${input.input}` }), {
            data: TESTING_DATASET_NAME,
          }),
          evaluate((input) => ({ foo: `second:${input.input}` }), {
            data: TESTING_DATASET_NAME,
          }),
        ],
        {
          evaluators: [
            // Old format
            (runs, example) => ({
              key: "old_format",
              scores: Object.fromEntries(
                runs.map((run) => [
                  run.id,
                  run.outputs?.foo === `second:${example.inputs.input}` ? 1 : 0,
                ])
              ),
            }),
            // New format
            matchesSecondEvaluator,
          ],
        }
      );
      expect(pairwise.results.length).toEqual(4); // 2 examples × 2 evaluators
      expect(pairwise.results.map((r) => r.key)).toContain("old_format");
      expect(pairwise.results.map((r) => r.key)).toContain("new_format");
      // Each evaluator should score the second run as 1
      pairwise.results.forEach((result) => {
        expect(Object.values(result.scores)).toEqual([0, 1]);
      });
    });
});
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/client.int.test.ts | import { Dataset, Example, Run, TracerSession } from "../schemas.js";
import {
FunctionMessage,
HumanMessage,
SystemMessage,
} from "@langchain/core/messages";
import { Client } from "../client.js";
import { v4 as uuidv4 } from "uuid";
import {
createRunsFactory,
deleteDataset,
deleteProject,
toArray,
waitUntil,
} from "./utils.js";
import { ChatPromptTemplate, PromptTemplate } from "@langchain/core/prompts";
import { ChatOpenAI } from "@langchain/openai";
import { RunnableSequence } from "@langchain/core/runnables";
import { load } from "langchain/load";
import { _getFetchImplementation } from "../singletons/fetch.js";
type CheckOutputsType = boolean | ((run: Run) => boolean);

/**
 * Polls the API until the given run is readable and, optionally, until its
 * outputs are present (checkOutputs === true) or satisfy a predicate
 * (checkOutputs is a function). Retries every 5s for up to 210s.
 */
async function waitUntilRunFound(
  client: Client,
  runId: string,
  checkOutputs: CheckOutputsType = false
) {
  const condition = async () => {
    try {
      const run = await client.readRun(runId);
      if (!checkOutputs) {
        // No output check requested: the run being readable is enough.
        return true;
      }
      const hasOutputs = run.outputs !== null && run.outputs !== undefined;
      return typeof checkOutputs === "function"
        ? hasOutputs && checkOutputs(run)
        : hasOutputs;
    } catch (e) {
      // Not found yet (or transient error): keep polling.
      return false;
    }
  };
  return waitUntil(condition, 210_000, 5_000);
}
// Test Dataset Creation, List, Read, Delete + upload CSV
// Test Example Creation, List, Read, Update, Delete
test.concurrent("Test LangSmith Client Dataset CRD", async () => {
  const client = new Client({ autoBatchTracing: false });
  // Upload a 2-row CSV: col1/col3 become example inputs, col2/col4 outputs.
  const csvContent = `col1,col2,col3,col4\nval1,val2,val3,val4`;
  const blobData = new Blob([Buffer.from(csvContent)]);
  const description = "Test Dataset";
  const inputKeys = ["col1", "col3"];
  const outputKeys = ["col2", "col4"];
  const fileName = "__some_file.int.csv";
  // Clean up any leftover dataset from a previous run.
  await deleteDataset(client, fileName);
  const newDataset = await client.uploadCsv({
    csvFile: blobData,
    fileName: fileName,
    description,
    inputKeys,
    outputKeys,
  });
  expect(newDataset).toHaveProperty("id");
  expect(newDataset.description).toBe(description);
  // Reading by id should be stable across calls.
  const dataset = await client.readDataset({ datasetId: newDataset.id });
  const datasetId = dataset.id;
  const dataset2 = await client.readDataset({ datasetId });
  expect(dataset.id).toBe(dataset2.id);
  const datasets = await toArray(
    client.listDatasets({ datasetName: fileName })
  );
  expect(datasets.length).toBeGreaterThan(0);
  expect(datasets.map((d) => d.id)).toContain(datasetId);
  // Create a manual example (on top of the 1 uploaded via CSV) with a split.
  const example = await client.createExample(
    { col1: "addedExampleCol1" },
    { col2: "addedExampleCol2" },
    { datasetId: newDataset.id, split: "my_split" }
  );
  const exampleValue = await client.readExample(example.id);
  expect(exampleValue.inputs.col1).toBe("addedExampleCol1");
  expect(exampleValue.outputs?.col2).toBe("addedExampleCol2");
  const examples = await toArray(
    client.listExamples({ datasetId: newDataset.id })
  );
  expect(examples.length).toBe(2);
  expect(examples.map((e) => e.id)).toContain(example.id);
  // Split filtering should only return the manually-created example.
  const _examples = await toArray(
    client.listExamples({ datasetId: newDataset.id, splits: ["my_split"] })
  );
  expect(_examples.length).toBe(1);
  expect(_examples.map((e) => e.id)).toContain(example.id);
  // Update accepts split as an array...
  await client.updateExample(example.id, {
    inputs: { col1: "updatedExampleCol1" },
    outputs: { col2: "updatedExampleCol2" },
    split: ["my_split2"],
  });
  // Says 'example updated' or something similar
  const newExampleValue = await client.readExample(example.id);
  expect(newExampleValue.inputs.col1).toBe("updatedExampleCol1");
  expect(newExampleValue.metadata?.dataset_split).toStrictEqual(["my_split2"]);
  // ...or as a single string (normalized to an array in metadata).
  await client.updateExample(example.id, {
    inputs: { col1: "updatedExampleCol3" },
    outputs: { col2: "updatedExampleCol4" },
    split: "my_split3",
  });
  // Says 'example updated' or something similar
  const newExampleValue2 = await client.readExample(example.id);
  expect(newExampleValue2.inputs.col1).toBe("updatedExampleCol3");
  expect(newExampleValue2.metadata?.dataset_split).toStrictEqual(["my_split3"]);
  const newExample = await client.createExample(
    { col1: "newAddedExampleCol1" },
    { col2: "newAddedExampleCol2" },
    { datasetId: newDataset.id }
  );
  const newExampleValue_ = await client.readExample(newExample.id);
  expect(newExampleValue_.inputs.col1).toBe("newAddedExampleCol1");
  expect(newExampleValue_.outputs?.col2).toBe("newAddedExampleCol2");
  // Bulk update of several examples in one call.
  await client.updateExamples([
    {
      id: newExample.id,
      inputs: { col1: "newUpdatedExampleCol1" },
      outputs: { col2: "newUpdatedExampleCol2" },
      metadata: { foo: "baz" },
    },
    {
      id: example.id,
      inputs: { col1: "newNewUpdatedExampleCol" },
      outputs: { col2: "newNewUpdatedExampleCol2" },
      metadata: { foo: "qux" },
    },
  ]);
  const updatedExample = await client.readExample(newExample.id);
  expect(updatedExample.inputs.col1).toBe("newUpdatedExampleCol1");
  expect(updatedExample.outputs?.col2).toBe("newUpdatedExampleCol2");
  expect(updatedExample.metadata?.foo).toBe("baz");
  const updatedExample2 = await client.readExample(example.id);
  expect(updatedExample2.inputs.col1).toBe("newNewUpdatedExampleCol");
  expect(updatedExample2.outputs?.col2).toBe("newNewUpdatedExampleCol2");
  expect(updatedExample2.metadata?.foo).toBe("qux");
  // Delete one example; the CSV row + newExample remain.
  await client.deleteExample(example.id);
  const examples2 = await toArray(
    client.listExamples({ datasetId: newDataset.id })
  );
  expect(examples2.length).toBe(2);
  await client.deleteDataset({ datasetId });
  // Finally exercise plain createDataset/deleteDataset.
  const rawDataset = await client.createDataset(fileName, {
    description: "Test Dataset",
  });
  await client.deleteDataset({ datasetId: rawDataset.id });
});
// Creates an "llm" dataset with metadata, adds an example, then verifies
// the data type round-trips and the dataset is findable via metadata filter.
test.concurrent(
  "test create dataset",
  async () => {
    const langchainClient = new Client({ autoBatchTracing: false });
    const datasetName = "__test_create_dataset JS";
    const datasets = await toArray(
      langchainClient.listDatasets({ datasetName })
    );
    // BUGFIX: the original used `datasets.map(async ...)` without awaiting
    // the returned promises, so the deletions could still be in flight when
    // createDataset below ran, racing into a duplicate-name conflict.
    await Promise.all(
      datasets.map(async (dataset: Dataset) => {
        if (dataset.name === datasetName) {
          await langchainClient.deleteDataset({ datasetName });
        }
      })
    );
    const dataset = await langchainClient.createDataset(datasetName, {
      dataType: "llm",
      metadata: { key: "valuefoo" },
    });
    await langchainClient.createExample(
      { input: "hello world" },
      { output: "hi there" },
      {
        datasetId: dataset.id,
      }
    );
    const loadedDataset = await langchainClient.readDataset({ datasetName });
    expect(loadedDataset.data_type).toEqual("llm");
    // The unique metadata value should identify exactly this dataset.
    const datasetsByMetadata = await toArray(
      langchainClient.listDatasets({ metadata: { key: "valuefoo" } })
    );
    expect(datasetsByMetadata.length).toEqual(1);
    expect(datasetsByMetadata.map((d) => d.id)).toContain(dataset.id);
    await langchainClient.deleteDataset({ datasetName });
  },
  180_000
);
// Creates a run, shares it publicly, verifies the share link is fetchable
// and round-trips, then unshares and confirms the link is gone.
test.concurrent(
  "Test share and unshare run",
  async () => {
    const langchainClient = new Client({ autoBatchTracing: false });
    // Create a new run
    const runId = uuidv4();
    await langchainClient.createRun({
      name: "Test run",
      inputs: { input: "hello world" },
      run_type: "chain",
      id: runId,
    });
    await langchainClient.awaitPendingTraceBatches?.();
    await waitUntilRunFound(langchainClient, runId);
    const sharedUrl = await langchainClient.shareRun(runId);
    // The public URL must be reachable without auth.
    const response = await _getFetchImplementation()(sharedUrl);
    expect(response.status).toEqual(200);
    expect(await langchainClient.readRunSharedLink(runId)).toEqual(sharedUrl);
    await langchainClient.unshareRun(runId);
    // After unsharing, no link should remain.
    const sharedLink = await langchainClient.readRunSharedLink(runId);
    expect(sharedLink).toBe(undefined);
  },
  180_000
);
// Verifies listDatasets filtering by dataset ids and by dataset name.
test.concurrent(
  "Test list datasets",
  async () => {
    const langchainClient = new Client({ autoBatchTracing: false });
    const datasetName1 = "___TEST dataset1 JS";
    const datasetName2 = "___TEST dataset2 JS";
    await deleteDataset(langchainClient, datasetName1);
    await deleteDataset(langchainClient, datasetName2);
    // Create two new datasets
    const dataset1 = await langchainClient.createDataset(datasetName1, {
      dataType: "llm",
    });
    const dataset2 = await langchainClient.createDataset(datasetName2, {
      dataType: "kv",
    });
    // List datasets by ID
    const datasetsById: Dataset[] = [];
    const datasetsByIdIterable = langchainClient.listDatasets({
      datasetIds: [dataset1.id, dataset2.id],
    });
    for await (const dataset of datasetsByIdIterable) {
      datasetsById.push(dataset);
    }
    expect(datasetsById).toHaveLength(2);
    expect(datasetsById.map((dataset) => dataset.id)).toContain(dataset1.id);
    expect(datasetsById.map((dataset) => dataset.id)).toContain(dataset2.id);
    // List datasets by name
    const datasetsByNameIterable = langchainClient.listDatasets({
      datasetName: datasetName1,
    });
    const datasetsByName = [];
    for await (const dataset of datasetsByNameIterable) {
      datasetsByName.push(dataset);
    }
    expect(datasetsByName).toHaveLength(1);
    expect(datasetsByName.map((dataset) => dataset.id)).toContain(dataset1.id);
    await langchainClient.deleteDataset({ datasetId: dataset1.id });
    await langchainClient.deleteDataset({ datasetId: dataset2.id });
  },
  180_000
);
// Creates two runs and attaches feedback to the first, citing the second
// as the feedback's source run.
test.concurrent(
  "Test create feedback with source run",
  async () => {
    const langchainClient = new Client({ autoBatchTracing: false });
    const projectName = "__test_create_feedback_with_source_run JS";
    await deleteProject(langchainClient, projectName);
    const runId = uuidv4();
    await langchainClient.createRun({
      id: runId,
      project_name: projectName,
      name: "test_run",
      run_type: "llm",
      inputs: { prompt: "hello world" },
      outputs: { generation: "hi there" },
      start_time: new Date().getTime(),
      end_time: new Date().getTime(),
    });
    const runId2 = uuidv4();
    await langchainClient.createRun({
      id: runId2,
      project_name: projectName,
      name: "test_run_2",
      run_type: "llm",
      inputs: { prompt: "hello world 2" },
      outputs: { generation: "hi there 2" },
      start_time: new Date().getTime(),
      end_time: new Date().getTime(),
    });
    // Feedback on runId whose provenance is runId2.
    await langchainClient.createFeedback(runId, "test_feedback", {
      score: 0.5,
      sourceRunId: runId2,
      feedbackSourceType: "app",
    });
  },
  180_000
);
// With hideInputs/hideOutputs set on the client, created (and updated) runs
// should be persisted with empty inputs/outputs.
test.concurrent(
  "Test create run with masked inputs/outputs",
  async () => {
    const langchainClient = new Client({
      hideInputs: true,
      hideOutputs: true,
      autoBatchTracing: false,
    });
    const projectName = "__test_create_run_with_masked_inputs_outputs JS";
    await deleteProject(langchainClient, projectName);
    const runId = uuidv4();
    await langchainClient.createRun({
      id: runId,
      project_name: projectName,
      name: "test_run",
      run_type: "llm",
      inputs: { prompt: "hello world" },
      outputs: { generation: "hi there" },
      start_time: new Date().getTime(),
      end_time: new Date().getTime(),
    });
    const runId2 = uuidv4();
    await langchainClient.createRun({
      id: runId2,
      project_name: projectName,
      name: "test_run_2",
      run_type: "llm",
      inputs: { messages: "hello world 2" },
      start_time: new Date().getTime(),
    });
    // Masking must also apply to updateRun payloads.
    await langchainClient.updateRun(runId2, {
      outputs: { generation: "hi there 2" },
      end_time: new Date().getTime(),
    });
    await waitUntilRunFound(langchainClient, runId, false);
    const run1 = await langchainClient.readRun(runId);
    expect(Object.keys(run1.inputs ?? {})).toHaveLength(0);
    expect(Object.keys(run1.outputs ?? {})).toHaveLength(0);
    await waitUntilRunFound(langchainClient, runId2, false);
    const run2 = await langchainClient.readRun(runId2);
    expect(Object.keys(run2.inputs ?? {})).toHaveLength(0);
    expect(Object.keys(run2.outputs ?? {})).toHaveLength(0);
  },
  240_000
);
// Verifies that LANGCHAIN_* env vars flow into run metadata: revision id
// comes from LANGCHAIN_REVISION_ID unless explicitly passed, extra
// LANGCHAIN_ fields are copied, and key/secret-like vars are excluded.
test.concurrent(
  "Test create run with revision id",
  async () => {
    const langchainClient = new Client({ autoBatchTracing: false });
    // NOTE(review): these env vars are mutated and never restored, so they
    // may leak into concurrently running tests — confirm this is intended.
    // eslint-disable-next-line no-process-env
    process.env.LANGCHAIN_REVISION_ID = "test_revision_id";
    // eslint-disable-next-line no-process-env
    process.env.LANGCHAIN_OTHER_FIELD = "test_other_field";
    // eslint-disable-next-line no-process-env
    process.env.LANGCHAIN_OTHER_KEY = "test_other_key";
    const projectName = "__test_create_run_with_revision_id JS";
    await deleteProject(langchainClient, projectName);
    const runId = uuidv4();
    await langchainClient.createRun({
      id: runId,
      project_name: projectName,
      name: "test_run_with_revision",
      run_type: "llm",
      inputs: { prompt: "hello world" },
      outputs: { generation: "hi there" },
      start_time: new Date().getTime(),
      end_time: new Date().getTime(),
    });
    const runId2 = uuidv4();
    // Explicit revision_id should override the env var.
    await langchainClient.createRun({
      id: runId2,
      project_name: projectName,
      name: "test_run_2_with_revision",
      run_type: "llm",
      inputs: { messages: "hello world 2" },
      start_time: new Date().getTime(),
      revision_id: "different_revision_id",
    });
    await waitUntilRunFound(
      langchainClient,
      runId,
      (run: Run | undefined) => Object.keys(run?.outputs || {}).length !== 0
    );
    const run1 = await langchainClient.readRun(runId);
    expect(run1.extra?.metadata?.revision_id).toEqual("test_revision_id");
    expect(run1.extra?.metadata.LANGCHAIN_OTHER_FIELD).toEqual(
      "test_other_field"
    );
    // Vars ending in _KEY (and API keys) must never be recorded.
    expect(run1.extra?.metadata.LANGCHAIN_OTHER_KEY).toBeUndefined();
    expect(run1.extra?.metadata).not.toHaveProperty("LANGCHAIN_API_KEY");
    await waitUntilRunFound(langchainClient, runId2);
    const run2 = await langchainClient.readRun(runId2);
    expect(run2.extra?.metadata?.revision_id).toEqual("different_revision_id");
    expect(run2.extra?.metadata.LANGCHAIN_OTHER_FIELD).toEqual(
      "test_other_field"
    );
    expect(run2.extra?.metadata.LANGCHAIN_OTHER_KEY).toBeUndefined();
    expect(run2.extra?.metadata).not.toHaveProperty("LANGCHAIN_API_KEY");
  },
  180_000
);
describe("createChatExample", () => {
  // LangChain message objects passed to createChatExample should be stored
  // in their serialized { type, data } form.
  it("should convert LangChainBaseMessage objects to examples", async () => {
    const langchainClient = new Client({
      autoBatchTracing: false,
      // Test the fetch options option
      fetchOptions: { cache: "no-store" },
    });
    const datasetName = "__createChatExample-test-dataset JS";
    await deleteDataset(langchainClient, datasetName);
    const dataset = await langchainClient.createDataset(datasetName);
    const input = [new HumanMessage({ content: "Hello, world!" })];
    // Output is a function-call message (empty content, kwargs carry the call).
    const generation = new FunctionMessage({
      name: "foo",
      content: "",
      additional_kwargs: {
        function_call: { arguments: "args", name: "foo" },
      },
    });
    const options = { datasetId: dataset.id };
    // Create the example from messages
    await langchainClient.createChatExample(input, generation, options);
    // Read the example
    const examples = [];
    for await (const example of langchainClient.listExamples({
      datasetId: dataset.id,
    })) {
      examples.push(example);
    }
    expect(examples.length).toBe(1);
    // Messages round-trip as serialized { type, data } records.
    expect(examples[0].inputs).toEqual({
      input: [
        {
          type: "human",
          data: { content: "Hello, world!" },
        },
      ],
    });
    expect(examples[0].outputs).toEqual({
      output: {
        type: "function",
        data: {
          content: "",
          additional_kwargs: {
            function_call: { arguments: "args", name: "foo" },
          },
        },
      },
    });
    // Delete dataset
    await langchainClient.deleteDataset({ datasetId: dataset.id });
  }, 180_000);
});
// Creates a run and checks that getRunUrl produces a URL embedding its id.
test.concurrent(
  "Test getRunUrl with run",
  async () => {
    const client = new Client({ autoBatchTracing: false });
    const runId = uuidv4();
    const run: Run = {
      id: runId,
      name: "foo",
      run_type: "llm",
      inputs: { input: "hello world" },
      outputs: { output: "hi there" },
    };
    await client.createRun({ project_name: "foo", ...run });
    // Wait until the run is queryable and its outputs have landed.
    const outputsArrived = (found: Run | undefined) =>
      Object.keys(found?.outputs || {}).length !== 0;
    await waitUntilRunFound(client, runId, outputsArrived);
    const url = await client.getRunUrl({
      run,
      projectOpts: { projectName: "foo" },
    });
    expect(url).toContain(runId);
  },
  180_000
);
// Exercises the full example lifecycle: create/read/update single examples,
// bulk createExamples with metadata and splits, versioned listing and diffs,
// metadata/filter/split queries, and pagination via limit/offset.
test.concurrent(
  "Examples CRUD",
  async () => {
    const client = new Client({ autoBatchTracing: false });
    const datasetName = "__test_examples_crud JS" + Date.now();
    await deleteDataset(client, datasetName);
    const dataset = await client.createDataset(datasetName);
    const example = await client.createExample(
      { input: "hello world" },
      { output: "hi there" },
      {
        datasetId: dataset.id,
        metadata: { key: "value" },
      }
    );
    const exampleValue = await client.readExample(example.id);
    // Remember the dataset version before bulk inserts, for asOf/diff below.
    const initialVersion = exampleValue.modified_at;
    expect(exampleValue.inputs.input).toEqual("hello world");
    expect(exampleValue?.outputs?.output).toEqual("hi there");
    expect(exampleValue?.metadata?.key).toEqual("value");
    // Update the example by modifying the metadata
    await client.updateExample(example.id, {
      metadata: { key: "new value" },
    });
    const updatedExampleValue = await client.readExample(example.id);
    expect(updatedExampleValue?.metadata?.key).toEqual("new value");
    // Create multiple
    await client.createExamples({
      inputs: [
        { input: "hello world 1" },
        { input: "hello world 2" },
        { input: "hello world 3" },
      ],
      outputs: [
        { output: "hi there 1" },
        { output: "hi there 2" },
        { output: "hi there 3" },
      ],
      metadata: [{ key: "value 1" }, { key: "value 2" }, { key: "value 3" }],
      splits: ["train", "test", ["train", "validation"]],
      datasetId: dataset.id,
    });
    // Listing as-of the initial version excludes the bulk-created examples.
    const initialExamplesList = await toArray(
      client.listExamples({ datasetId: dataset.id, asOf: initialVersion })
    );
    expect(initialExamplesList.length).toEqual(1);
    const examplesList = await toArray(
      client.listExamples({ datasetId: dataset.id })
    );
    expect(examplesList.length).toEqual(4);
    // Pagination: limit, offset, and both combined.
    const examplesListLimited = await toArray(
      client.listExamples({ datasetId: dataset.id, limit: 2 })
    );
    expect(examplesListLimited.length).toEqual(2);
    const examplesListOffset = await toArray(
      client.listExamples({ datasetId: dataset.id, offset: 2 })
    );
    expect(examplesListOffset.length).toEqual(2);
    const examplesListLimitedOffset = await toArray(
      client.listExamples({ datasetId: dataset.id, limit: 1, offset: 2 })
    );
    expect(examplesListLimitedOffset.length).toEqual(1);
    await client.deleteExample(example.id);
    const examplesList2 = await toArray(
      client.listExamples({ datasetId: dataset.id })
    );
    expect(examplesList2.length).toEqual(3);
    // Diff between the initial version and latest: 3 added, 1 removed.
    const datasetDiff = await client.diffDatasetVersions({
      datasetId: dataset.id,
      fromVersion: initialVersion,
      toVersion: "latest",
    });
    expect(datasetDiff.examples_added.length).toEqual(3);
    expect(datasetDiff.examples_modified.length).toEqual(0);
    expect(datasetDiff.examples_removed.length).toEqual(1);
    // verify the example inputs, outputs, and metadata
    const example1 = examplesList2.find(
      (e) => e.inputs.input === "hello world 1"
    );
    expect(example1?.outputs?.output).toEqual("hi there 1");
    expect(example1?.metadata?.key).toEqual("value 1");
    expect(example1?.metadata?.dataset_split).toEqual(["train"]);
    const example2 = examplesList2.find(
      (e) => e.inputs.input === "hello world 2"
    );
    expect(example2?.outputs?.output).toEqual("hi there 2");
    expect(example2?.metadata?.key).toEqual("value 2");
    expect(example2?.metadata?.dataset_split).toEqual(["test"]);
    const example3 = examplesList2.find(
      (e) => e.inputs.input === "hello world 3"
    );
    expect(example3?.outputs?.output).toEqual("hi there 3");
    expect(example3?.metadata?.key).toEqual("value 3");
    expect(example3?.metadata?.dataset_split).toContain("train");
    expect(example3?.metadata?.dataset_split).toContain("validation");
    // One more example, used to exercise metadata and filter queries below.
    await client.createExample(
      { input: "hello world" },
      { output: "hi there" },
      {
        datasetId: dataset.id,
        metadata: { foo: "bar", baz: "qux" },
      }
    );
    // Exact metadata match on a single key.
    let examplesList3 = await toArray(
      client.listExamples({ datasetId: dataset.id, metadata: { foo: "bar" } })
    );
    expect(examplesList3.length).toEqual(1);
    expect(examplesList3[0].metadata?.foo).toEqual("bar");
    expect(examplesList3[0].metadata?.baz).toEqual("qux");
    // Same key, wrong value: no matches.
    examplesList3 = await toArray(
      client.listExamples({ datasetId: dataset.id, metadata: { foo: "qux" } })
    );
    expect(examplesList3.length).toEqual(0);
    examplesList3 = await toArray(
      client.listExamples({ datasetId: dataset.id, metadata: { baz: "qux" } })
    );
    expect(examplesList3.length).toEqual(1);
    expect(examplesList3[0].metadata?.foo).toEqual("bar");
    expect(examplesList3[0].metadata?.baz).toEqual("qux");
    // Multiple metadata keys are ANDed together.
    examplesList3 = await toArray(
      client.listExamples({
        datasetId: dataset.id,
        metadata: { foo: "bar", baz: "qux" },
      })
    );
    expect(examplesList3.length).toEqual(1);
    expect(examplesList3[0].metadata?.foo).toEqual("bar");
    expect(examplesList3[0].metadata?.baz).toEqual("qux");
    // Filter DSL: exists() on a metadata key.
    examplesList3 = await toArray(
      client.listExamples({
        datasetId: dataset.id,
        filter: 'exists(metadata, "baz")',
      })
    );
    expect(examplesList3.length).toEqual(1);
    expect(examplesList3[0].metadata?.foo).toEqual("bar");
    expect(examplesList3[0].metadata?.baz).toEqual("qux");
    // Filter DSL: has() with a JSON fragment.
    examplesList3 = await toArray(
      client.listExamples({
        datasetId: dataset.id,
        filter: 'has("metadata", \'{"foo": "bar"}\')',
      })
    );
    expect(examplesList3.length).toEqual(1);
    expect(examplesList3[0].metadata?.foo).toEqual("bar");
    expect(examplesList3[0].metadata?.baz).toEqual("qux");
    examplesList3 = await toArray(
      client.listExamples({
        datasetId: dataset.id,
        filter: 'exists(metadata, "bazzz")',
      })
    );
    expect(examplesList3.length).toEqual(0);
    // Split queries: multiple splits are unioned.
    examplesList3 = await toArray(
      client.listExamples({
        datasetId: dataset.id,
        splits: ["train"],
      })
    );
    expect(examplesList3.length).toEqual(2);
    examplesList3 = await toArray(
      client.listExamples({
        datasetId: dataset.id,
        splits: ["test"],
      })
    );
    expect(examplesList3.length).toEqual(1);
    examplesList3 = await toArray(
      client.listExamples({
        datasetId: dataset.id,
        splits: ["train", "test"],
      })
    );
    expect(examplesList3.length).toEqual(3);
    await client.deleteDataset({ datasetId: dataset.id });
  },
  180_000
);
// Verifies that listRuns honors its `limit` argument: creates 10 runs in a
// fresh project and checks that iteration yields exactly `limit` runs.
test.concurrent("list runs limit arg works", async () => {
  const client = new Client();
  const projectName = `test-limit-runs-${uuidv4().substring(0, 4)}`;
  const limit = 6;
  // delete the project just in case
  if (await client.hasProject({ projectName })) {
    await client.deleteProject({ projectName });
  }
  try {
    const runsArr: Array<Run> = [];
    // create a fresh project with 10 runs --default amount created by createRunsFactory
    await client.createProject({ projectName });
    await Promise.all(
      createRunsFactory(projectName).map(async (payload) => {
        if (!payload.id) payload.id = uuidv4();
        await client.createRun(payload);
        await waitUntilRunFound(client, payload.id);
      })
    );
    let iters = 0;
    for await (const run of client.listRuns({ limit, projectName })) {
      expect(run).toBeDefined();
      runsArr.push(run);
      iters += 1;
      // Fail fast if the iterator yields more than `limit` runs.
      if (iters > limit) {
        throw new Error(
          `More runs returned than expected.\nExpected: ${limit}\nReceived: ${iters}`
        );
      }
    }
    expect(runsArr.length).toBe(limit);
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
  } catch (e: any) {
    // Only the over-limit error is fatal; other failures are logged.
    // NOTE(review): swallowing non-limit errors looks deliberate (tolerating
    // flaky backend state) — confirm this is intended.
    if (e.message.startsWith("More runs returned than expected.")) {
      throw e;
    } else {
      console.error(e);
    }
  } finally {
    // Always clean up the temporary project.
    if (await client.hasProject({ projectName })) {
      await client.deleteProject({ projectName });
    }
  }
});
// Smoke test: getRunStats returns something for the default project.
test.concurrent("Test run stats", async () => {
  const client = new Client();
  const stats = await client.getRunStats({
    projectNames: ["default"],
    runType: "llm",
  });
  expect(stats).toBeDefined();
});
// Creating a project with a name that already exists should raise a
// LangSmithConflictError rather than a generic failure.
test("Test createProject raises LangSmithConflictError on duplicate name", async () => {
  const client = new Client();
  const projectName = `test_project_${uuidv4()}`;
  try {
    // Create the project for the first time
    await client.createProject({ projectName });
    // Attempt to create the project with the same name again
    await expect(client.createProject({ projectName })).rejects.toThrow(
      expect.objectContaining({
        name: "LangSmithConflictError",
      })
    );
  } finally {
    try {
      // Clean up: delete the project
      if (await client.hasProject({ projectName })) {
        await client.deleteProject({ projectName });
      }
    } catch (e) {
      // Everyone has those days.
    }
  }
});
// Pushes three prompts sharing one unique uid, then verifies listPrompts
// query filtering (public + by uid) and sortField ordering by updated_at.
test("Test list prompts", async () => {
  const client = new Client();
  const uid = uuidv4();
  // All three prompts use the same mustache chat template; build it once
  // instead of repeating the literal (the original duplicated it 3x).
  const makeTemplate = () =>
    ChatPromptTemplate.fromMessages(
      [
        new SystemMessage({ content: "System message" }),
        new HumanMessage({ content: "{{question}}" }),
      ],
      { templateFormat: "mustache" }
    );
  // push 3 prompts
  const promptName1 = `test_prompt_${uid}__0`;
  const promptName2 = `test_prompt_${uid}__1`;
  const promptName3 = `test_prompt_${uid}__2`;
  await client.pushPrompt(promptName1, {
    object: makeTemplate(),
    isPublic: true,
  });
  await client.pushPrompt(promptName2, { object: makeTemplate() });
  await client.pushPrompt(promptName3, { object: makeTemplate() });
  try {
    // expect at least one of the prompts to have promptName1
    const response = client.listPrompts({ isPublic: true, query: uid });
    let found = false;
    expect(response).toBeDefined();
    for await (const prompt of response) {
      expect(prompt).toBeDefined();
      if (prompt.repo_handle === promptName1) {
        found = true;
      }
    }
    expect(found).toBe(true);
    // expect the prompts to be sorted by updated_at (descending)
    const response2 = client.listPrompts({
      sortField: "updated_at",
      query: uid,
    });
    expect(response2).toBeDefined();
    let lastUpdatedAt: number | undefined;
    for await (const prompt of response2) {
      expect(prompt.updated_at).toBeDefined();
      const currentUpdatedAt = new Date(prompt.updated_at).getTime();
      if (lastUpdatedAt !== undefined) {
        expect(currentUpdatedAt).toBeLessThanOrEqual(lastUpdatedAt);
      }
      lastUpdatedAt = currentUpdatedAt;
    }
    expect(lastUpdatedAt).toBeDefined();
  } finally {
    // Clean up — sibling tests delete their prompts; the original version
    // of this test leaked all three.
    await client.deletePrompt(promptName1);
    await client.deletePrompt(promptName2);
    await client.deletePrompt(promptName3);
  }
});
test("Test get prompt", async () => {
const client = new Client();
const promptName = `test_prompt_${uuidv4().slice(0, 8)}`;
const promptTemplate = ChatPromptTemplate.fromMessages(
[
new SystemMessage({ content: "System message" }),
new HumanMessage({ content: "{{question}}" }),
],
{ templateFormat: "mustache" }
);
const url = await client.pushPrompt(promptName, { object: promptTemplate });
expect(url).toBeDefined();
const prompt = await client.getPrompt(promptName);
expect(prompt).toBeDefined();
expect(prompt?.repo_handle).toBe(promptName);
await client.deletePrompt(promptName);
});
// promptExists should be false for an unknown handle and true after a push.
test("Test prompt exists", async () => {
  const client = new Client();
  const missingName = `non_existent_${uuidv4().slice(0, 8)}`;
  expect(await client.promptExists(missingName)).toBe(false);
  const presentName = `existent_${uuidv4().slice(0, 8)}`;
  const template = ChatPromptTemplate.fromMessages(
    [
      new SystemMessage({ content: "System message" }),
      new HumanMessage({ content: "{{question}}" }),
    ],
    { templateFormat: "mustache" }
  );
  await client.pushPrompt(presentName, { object: template });
  expect(await client.promptExists(presentName)).toBe(true);
  await client.deletePrompt(presentName);
});
// Patch a prompt's mutable fields and verify each one round-trips.
test("Test update prompt", async () => {
  const client = new Client();
  const handle = `test_update_prompt_${uuidv4().slice(0, 8)}`;
  await client.pushPrompt(handle, {
    object: ChatPromptTemplate.fromMessages(
      [
        new SystemMessage({ content: "System message" }),
        new HumanMessage({ content: "{{question}}" }),
      ],
      { templateFormat: "mustache" }
    ),
  });
  const patchResult = await client.updatePrompt(handle, {
    description: "Updated description",
    isPublic: true,
    tags: ["test", "update"],
  });
  expect(patchResult).toBeDefined();
  const refreshed = await client.getPrompt(handle);
  expect(refreshed?.description).toBe("Updated description");
  expect(refreshed?.is_public).toBe(true);
  expect(refreshed?.tags).toEqual(expect.arrayContaining(["test", "update"]));
  await client.deletePrompt(handle);
});
// Deleting a prompt should flip promptExists from true to false.
test("Test delete prompt", async () => {
  const client = new Client();
  const handle = `test_delete_prompt_${uuidv4().slice(0, 8)}`;
  const template = ChatPromptTemplate.fromMessages(
    [
      new SystemMessage({ content: "System message" }),
      new HumanMessage({ content: "{{question}}" }),
    ],
    { templateFormat: "mustache" }
  );
  await client.pushPrompt(handle, { object: template });
  expect(await client.promptExists(handle)).toBe(true);
  await client.deletePrompt(handle);
  expect(await client.promptExists(handle)).toBe(false);
});
// Filtering listProjects on a unique metadata value should return exactly
// the project created with that metadata.
test("test listing projects by metadata", async () => {
  const client = new Client();
  const uid = uuidv4();
  const projectName = `my_metadata_project_${uid}`;
  await client.createProject({
    projectName: projectName,
    metadata: {
      foobar: uid,
      baz: "barfooqux",
    },
  });
  let matched: TracerSession | null = null;
  for await (const project of client.listProjects({
    metadata: { foobar: uid },
  })) {
    matched = project;
  }
  expect(matched?.name).toEqual(projectName);
  await client.deleteProject({ projectName: projectName });
});
// Push a prompt, then commit a revised template and check the commit URL
// references the prompt it was made against.
test("Test create commit", async () => {
  const client = new Client();
  const handle = `test_create_commit_${uuidv4().slice(0, 8)}`;
  const initial = ChatPromptTemplate.fromMessages(
    [
      new SystemMessage({ content: "System message" }),
      new HumanMessage({ content: "{{question}}" }),
    ],
    { templateFormat: "mustache" }
  );
  await client.pushPrompt(handle, { object: initial });
  const revised = ChatPromptTemplate.fromMessages(
    [
      new SystemMessage({ content: "System message" }),
      new HumanMessage({ content: "My question is: {{question}}" }),
    ],
    { templateFormat: "mustache" }
  );
  const commitUrl = await client.createCommit(handle, revised);
  expect(commitUrl).toBeDefined();
  expect(commitUrl).toContain(handle);
  await client.deletePrompt(handle);
});
// Liking bumps num_likes to 1; unliking drops it back to 0.
test("Test like and unlike prompt", async () => {
  const client = new Client();
  const handle = `test_like_prompt_${uuidv4().slice(0, 8)}`;
  await client.pushPrompt(handle, {
    object: ChatPromptTemplate.fromMessages(
      [
        new SystemMessage({ content: "System message" }),
        new HumanMessage({ content: "{{question}}" }),
      ],
      { templateFormat: "mustache" }
    ),
  });
  await client.likePrompt(handle);
  expect((await client.getPrompt(handle))?.num_likes).toBe(1);
  await client.unlikePrompt(handle);
  expect((await client.getPrompt(handle))?.num_likes).toBe(0);
  await client.deletePrompt(handle);
});
// The latest commit of a pushed prompt should be retrievable and point at
// the prompt's repo handle.
test("Test pull prompt commit", async () => {
  const client = new Client();
  const handle = `test_pull_commit_${uuidv4().slice(0, 8)}`;
  const template = ChatPromptTemplate.fromMessages(
    [
      new SystemMessage({ content: "System message" }),
      new HumanMessage({ content: "{{question}}" }),
    ],
    { templateFormat: "mustache" }
  );
  await client.pushPrompt(handle, { object: template });
  const commit = await client.pullPromptCommit(handle);
  expect(commit).toBeDefined();
  expect(commit.repo).toBe(handle);
  await client.deletePrompt(handle);
});
// Pushing twice (first with full options, then manifest-only) should
// succeed, and the prompt's metadata from the first push should persist.
test("Test push and pull prompt", async () => {
  const client = new Client();
  const promptName = `test_push_pull_${uuidv4().slice(0, 8)}`;
  const template = ChatPromptTemplate.fromMessages(
    [
      new SystemMessage({ content: "System message" }),
      new HumanMessage({ content: "{{question}}" }),
    ],
    { templateFormat: "mustache" }
  );
  // A second, revised template used for the options-free follow-up push.
  const template2 = ChatPromptTemplate.fromMessages(
    [
      new SystemMessage({ content: "System message" }),
      new HumanMessage({ content: "My question is: {{question}}" }),
    ],
    { templateFormat: "mustache" }
  );
  await client.pushPrompt(promptName, {
    object: template,
    description: "Test description",
    readme: "Test readme",
    tags: ["test", "tag"],
  });
  // test you can push an updated manifest without any other options
  await client.pushPrompt(promptName, {
    object: template2,
  });
  const pulledPrompt = await client._pullPrompt(promptName);
  expect(pulledPrompt).toBeDefined();
  // Metadata from the first push should survive the manifest-only update.
  const promptInfo = await client.getPrompt(promptName);
  expect(promptInfo?.description).toBe("Test description");
  expect(promptInfo?.readme).toBe("Test readme");
  expect(promptInfo?.tags).toEqual(expect.arrayContaining(["test", "tag"]));
  expect(promptInfo?.is_public).toBe(false);
  await client.deletePrompt(promptName);
});
// Pulling with includeModel should return a manifest that deserializes
// (via langchain `load`) into a RunnableSequence of prompt | model.
test("Test pull prompt include model", async () => {
  const client = new Client();
  const model = new ChatOpenAI({});
  const promptTemplate = PromptTemplate.fromTemplate(
    "Tell me a joke about {topic}"
  );
  const promptWithModel = promptTemplate.pipe(model);
  const promptName = `test_prompt_with_model_${uuidv4().slice(0, 8)}`;
  await client.pushPrompt(promptName, { object: promptWithModel });
  const pulledPrompt = await client._pullPrompt(promptName, {
    includeModel: true,
  });
  const rs: RunnableSequence = await load(pulledPrompt);
  expect(rs).toBeDefined();
  expect(rs).toBeInstanceOf(RunnableSequence);
  await client.deletePrompt(promptName);
});
// Examples of a publicly shared dataset should be listable via its token.
test("list shared examples can list shared examples", async () => {
  const client = new Client();
  // Share token of the known-public multiverse math dataset.
  const shareToken = "cce9c8a9-761a-4756-b159-58ed2640e274";
  const shared = await client.listSharedExamples(shareToken);
  expect(shared.length).toBeGreaterThan(0);
});
// Cloning a public dataset should create a local copy with its examples;
// the copy is removed again in the finally block.
test("clonePublicDataset method can clone a dataset", async () => {
  const client = new Client();
  const datasetName = "multiverse_math_public_testing";
  const publicUrl =
    "https://beta.smith.langchain.com/public/cce9c8a9-761a-4756-b159-58ed2640e274/d";
  try {
    await client.clonePublicDataset(publicUrl, { datasetName });
    expect(await client.hasDataset({ datasetName })).toBe(true);
    const examples: Example[] = [];
    for await (const ex of client.listExamples({ datasetName })) {
      examples.push(ex);
    }
    expect(examples.length).toBeGreaterThan(0);
  } finally {
    try {
      // Attempt to remove the newly created dataset if successful.
      await client.deleteDataset({ datasetName });
    } catch (_) {
      // no-op if failure
    }
  }
});
// End-to-end CRUD test for annotation queues: create, read, list, attach a
// run, update the queue, fetch the queued run, and clean up.
// Fix: the queue deletion in `finally` is now best-effort — previously, if
// queue creation failed, `deleteAnnotationQueue` threw in the finally block
// and masked the original setup error (the dataset-cleanup pattern elsewhere
// in this file already guards cleanup the same way).
test("annotationqueue crud", async () => {
  const client = new Client();
  const queueName = `test-queue-${uuidv4().substring(0, 8)}`;
  const projectName = `test-project-${uuidv4().substring(0, 8)}`;
  const queueId = uuidv4();
  try {
    // 1. Create an annotation queue
    const queue = await client.createAnnotationQueue({
      name: queueName,
      description: "Initial description",
      queueId,
    });
    expect(queue).toBeDefined();
    expect(queue.name).toBe(queueName);
    // 1a. Get the annotation queue
    const fetchedQueue = await client.readAnnotationQueue(queue.id);
    expect(fetchedQueue).toBeDefined();
    expect(fetchedQueue.name).toBe(queueName);
    // 1b. List annotation queues and check nameContains
    const listedQueues = await toArray(
      client.listAnnotationQueues({ nameContains: queueName })
    );
    expect(listedQueues.length).toBeGreaterThan(0);
    expect(listedQueues.some((q) => q.id === queue.id)).toBe(true);
    // 2. Create a run in a random project
    await client.createProject({ projectName });
    const runId = uuidv4();
    await client.createRun({
      id: runId,
      name: "Test Run",
      run_type: "chain",
      inputs: { foo: "bar" },
      outputs: { baz: "qux" },
      project_name: projectName,
    });
    // Wait for run to be found in the db; readRun throws while the run has
    // not landed yet, so errors are swallowed and the poll retried.
    const maxWaitTime = 30000; // 30 seconds
    const startTime = Date.now();
    let foundRun = null;
    while (Date.now() - startTime < maxWaitTime) {
      try {
        foundRun = await client.readRun(runId);
        if (foundRun) break;
      } catch (error) {
        // If run is not found, getRun might throw an error
        // We'll ignore it and keep trying
      }
      await new Promise((resolve) => setTimeout(resolve, 1000)); // Wait for 1 second before trying again
    }
    if (!foundRun) {
      throw new Error(
        `Run with ID ${runId} not found after ${maxWaitTime / 1000} seconds`
      );
    }
    // 3. Add the run to the annotation queue
    await client.addRunsToAnnotationQueue(fetchedQueue.id, [runId]);
    // 4. Update the annotation queue description and check that it is updated
    const newDescription = "Updated description";
    await client.updateAnnotationQueue(queue.id, {
      name: queueName,
      description: newDescription,
    });
    const updatedQueue = await client.readAnnotationQueue(queue.id);
    expect(updatedQueue.description).toBe(newDescription);
    // Get the run from the annotation queue
    const run = await client.getRunFromAnnotationQueue(queueId, 0);
    expect(run).toBeDefined();
    expect(run.id).toBe(runId);
    expect(run.name).toBe("Test Run");
    expect(run.run_type).toBe("chain");
    expect(run.inputs).toEqual({ foo: "bar" });
    expect(run.outputs).toEqual({ baz: "qux" });
  } finally {
    // 6. Delete the annotation queue (best-effort: a cleanup failure must
    // not mask an error thrown in the body above)
    try {
      await client.deleteAnnotationQueue(queueId);
    } catch (_) {
      // no-op if failure
    }
    // Clean up the project
    if (await client.hasProject({ projectName })) {
      await client.deleteProject({ projectName });
    }
  }
});
/* eslint-disable @typescript-eslint/no-explicit-any */
import { v4 as uuidv4 } from "uuid";
import { OpenAI } from "openai";
// eslint-disable-next-line import/no-extraneous-dependencies
import { FakeStreamingLLM } from "@langchain/core/utils/testing";
import { Client } from "../client.js";
import {
getCurrentRunTree,
isTraceableFunction,
traceable,
} from "../traceable.js";
import { RunTree } from "../run_trees.js";
import { BaseRun } from "../schemas.js";
import { expect } from "@jest/globals";
import { jest } from "@jest/globals";
// Best-effort removal of a project: deletes it only if it exists, and
// swallows any error (e.g. "not found") so test cleanup never fails.
async function deleteProject(langsmithClient: Client, projectName: string) {
  try {
    // Probe first; readProject throws when the project does not exist.
    await langsmithClient.readProject({ projectName });
    await langsmithClient.deleteProject({ projectName });
  } catch {
    // Intentionally ignored — cleanup is best-effort.
  }
}
/**
 * Polls `condition` roughly every `interval` ms until it resolves truthy,
 * or throws an Error("Timeout") once `timeout` ms have elapsed.
 */
async function waitUntil(
  condition: () => Promise<boolean>,
  timeout: number,
  interval: number
): Promise<void> {
  const deadline = Date.now() + timeout;
  while (Date.now() < deadline) {
    const satisfied = await condition();
    if (satisfied) return;
    await new Promise((resolve) => setTimeout(resolve, interval));
  }
  throw new Error("Timeout");
}
// Polls (30s total, every 5s) until the run is readable via the API; when
// `checkOutputs` is set, additionally waits for a non-empty outputs payload.
async function waitUntilRunFound(
  client: Client,
  runId: string,
  checkOutputs = false
) {
  return waitUntil(
    async () => {
      let run;
      try {
        run = await client.readRun(runId);
      } catch (e) {
        // Not yet visible — keep polling.
        return false;
      }
      if (!checkOutputs) {
        return true;
      }
      const outputs = run.outputs;
      return outputs != null && Object.keys(outputs).length !== 0;
    },
    30_000,
    5_000
  );
}
// Verifies that a synchronous throw inside a traceable-wrapped function is
// recorded: the on_end callback receives the finished run with its error
// string, and the stored run ends with status "error".
test.concurrent("Test traceable wrapper with error thrown", async () => {
  const langsmithClient = new Client({
    callerOptions: { maxRetries: 0 },
  });
  const runId = uuidv4();
  const projectName = "__test_traceable_wrapper";
  let collectedRun: BaseRun | null = null;
  // Captures the finished run tree via the traceable on_end hook.
  const _getRun = (r: RunTree): void => {
    collectedRun = r;
  };
  const addValueTraceable = traceable(
    (_: string, __: number) => {
      throw new Error("I am bad");
    },
    {
      name: "add_value",
      project_name: projectName,
      client: langsmithClient,
      id: runId,
      on_end: _getRun,
      tracingEnabled: true,
    }
  );
  expect(isTraceableFunction(addValueTraceable)).toBe(true);
  try {
    expect(await addValueTraceable("testing", 9)).toBe("testing9");
  } catch (e: any) {
    expect(e.message).toEqual("I am bad");
  }
  expect(collectedRun).not.toBeNull();
  expect(collectedRun!.error).toEqual("Error: I am bad");
  await waitUntilRunFound(langsmithClient, runId);
  const storedRun = await langsmithClient.readRun(runId);
  expect(storedRun.id).toEqual(runId);
  expect(storedRun.status).toEqual("error");
  expect(storedRun.error).toEqual("Error: I am bad");
});
// Same as the synchronous error test, but the wrapped function is async;
// additionally checks that positional args are captured as run inputs.
test.concurrent("Test traceable wrapper with async error thrown", async () => {
  const langsmithClient = new Client({
    callerOptions: { maxRetries: 0 },
  });
  const runId = uuidv4();
  const projectName = "__test_traceable_wrapper";
  let collectedRun: BaseRun | null = null;
  const _getRun = (r: RunTree): void => {
    collectedRun = r;
  };
  const addValueTraceable = traceable(
    async (_: string, __: number) => {
      throw new Error("I am bad");
    },
    {
      name: "add_value",
      project_name: projectName,
      client: langsmithClient,
      id: runId,
      on_end: _getRun,
      tracingEnabled: true,
    }
  );
  expect(isTraceableFunction(addValueTraceable)).toBe(true);
  try {
    expect(await addValueTraceable("testing", 9)).toBe("testing9");
  } catch (e: any) {
    expect(e.message).toEqual("I am bad");
  }
  expect(collectedRun).not.toBeNull();
  expect(collectedRun!.error).toEqual("Error: I am bad");
  // Positional arguments are recorded under the "args" key.
  expect(collectedRun!.inputs).toEqual({ args: ["testing", 9] });
  await waitUntilRunFound(langsmithClient, runId);
  const storedRun = await langsmithClient.readRun(runId);
  expect(storedRun.id).toEqual(runId);
  expect(storedRun.status).toEqual("error");
  expect(storedRun.error).toEqual("Error: I am bad");
});
// Core traceable-wrapper integration test: verifies a simple wrapped
// function, nested traceables, an explicit RunTree first argument,
// streaming iterables, and overloaded call signatures.
test.concurrent(
  "Test traceable wrapper",
  async () => {
    const langsmithClient = new Client({
      callerOptions: { maxRetries: 0 },
    });
    const runId = uuidv4();
    const projectName = "__test_traceable_wrapper";
    let collectedRun: BaseRun | null = null;
    const _getRun = (r: RunTree): void => {
      collectedRun = r;
    };
    const addValueTraceable = traceable(
      (a: string, b: number) => {
        return a + b;
      },
      {
        name: "add_value",
        project_name: projectName,
        client: langsmithClient,
        id: runId,
        on_end: _getRun,
        tracingEnabled: true,
      }
    );
    expect(await addValueTraceable("testing", 9)).toBe("testing9");
    expect(isTraceableFunction(addValueTraceable)).toBe(true);
    expect(collectedRun).not.toBeNull();
    expect(collectedRun!.outputs).toEqual({ outputs: "testing9" });
    await waitUntilRunFound(langsmithClient, runId, true);
    const storedRun = await langsmithClient.readRun(runId);
    expect(storedRun.id).toEqual(runId);
    const runId2 = uuidv4();
    // Nested traceable: calls made from inside another traceable should
    // attach as child runs of the caller's run tree.
    const nestedAddValueTraceable = traceable(
      (a: string, b: number) => {
        return a + b;
      },
      {
        name: "nested_add_value",
        project_name: projectName,
        client: langsmithClient,
      }
    );
    const entryTraceable = traceable(
      async (complex: { value: string }) => {
        const result = await nestedAddValueTraceable(complex.value, 1);
        const result2 = await nestedAddValueTraceable(result, 2);
        // Passing a RunTree as the first argument overrides the implicit
        // parent and roots this call under the provided tree instead.
        await nestedAddValueTraceable(
          new RunTree({
            name: "root_nested_add_value",
            project_name: projectName,
            client: langsmithClient,
          }),
          result,
          2
        );
        return nestedAddValueTraceable(result2, 3);
      },
      {
        name: "run_with_nesting",
        project_name: projectName,
        client: langsmithClient,
        id: runId2,
      }
    );
    expect(await entryTraceable({ value: "testing" })).toBe("testing123");
    expect(isTraceableFunction(entryTraceable)).toBe(true);
    await waitUntilRunFound(langsmithClient, runId2, true);
    const storedRun2 = await langsmithClient.readRun(runId2);
    expect(storedRun2.id).toEqual(runId2);
    const runId3 = uuidv4();
    const llm = new FakeStreamingLLM({ sleep: 0 });
    collectedRun = null;
    // Wrapping a streaming method: chunks must pass through unchanged and
    // the run must still be recorded once the stream is fully consumed.
    const iterableTraceable = traceable(llm.stream.bind(llm), {
      name: "iterable_traceable",
      project_name: projectName,
      client: langsmithClient,
      id: runId3,
      on_end: (r: RunTree): void => {
        collectedRun = r;
      },
      tracingEnabled: true,
    });
    expect(isTraceableFunction(iterableTraceable)).toBe(true);
    const chunks = [];
    for await (const chunk of await iterableTraceable("Hello there")) {
      chunks.push(chunk);
    }
    expect(chunks.join("")).toBe("Hello there");
    expect(collectedRun).not.toBeNull();
    expect(collectedRun!.outputs).not.toBeNull();
    await waitUntilRunFound(langsmithClient, runId3, true);
    const storedRun3 = await langsmithClient.readRun(runId3);
    expect(storedRun3.id).toEqual(runId3);
    await deleteProject(langsmithClient, projectName);
    // Overloaded signatures should survive wrapping: both the positional
    // and the config-object call forms stay callable and typed.
    async function overload(a: string, b: number): Promise<string>;
    async function overload(config: { a: string; b: number }): Promise<string>;
    async function overload(
      ...args: [a: string, b: number] | [config: { a: string; b: number }]
    ): Promise<string> {
      if (args.length === 1) {
        return args[0].a + args[0].b;
      }
      return args[0] + args[1];
    }
    const wrappedOverload = traceable(overload, {
      name: "wrapped_overload",
      project_name: projectName,
      client: langsmithClient,
    });
    expect(await wrappedOverload("testing", 123)).toBe("testing123");
    expect(await wrappedOverload({ a: "testing", b: 456 })).toBe("testing456");
    expect(isTraceableFunction(wrappedOverload)).toBe(true);
  },
  180_000
);
// getCurrentRunTree must throw when called outside a traceable, and inside
// nested traceables must return the active run, with parent linkage visible
// in dotted_order.
test.concurrent("Test get run tree method", async () => {
  const langsmithClient = new Client({
    callerOptions: { maxRetries: 0 },
  });
  // Called outside a traceable function
  expect(() => getCurrentRunTree()).toThrowError();
  const runId = uuidv4();
  const projectName = "__test_traceable_wrapper";
  const nestedAddValueTraceable = traceable(
    (a: string, b: number) => {
      const runTree = getCurrentRunTree();
      expect(runTree.id).toBeDefined();
      // The child run has its own id, but its dotted_order must contain
      // the parent run's id as a prefix segment.
      expect(runTree.id).not.toEqual(runId);
      expect(runTree.dotted_order.includes(`${runId}.`)).toBe(true);
      return a + b;
    },
    {
      name: "nested_add_value",
      project_name: projectName,
      client: langsmithClient,
    }
  );
  const addValueTraceable = traceable(
    (a: string, b: number) => {
      const runTree = getCurrentRunTree();
      expect(runTree.id).toBe(runId);
      return nestedAddValueTraceable(a, b);
    },
    {
      name: "add_value",
      project_name: projectName,
      client: langsmithClient,
      id: runId,
    }
  );
  expect(await addValueTraceable("testing", 9)).toBe("testing9");
});
// The aggregator option must fold streamed OpenAI chunks into a single
// value that becomes the recorded run output.
test.concurrent("Test traceable wrapper with aggregator", async () => {
  const langsmithClient = new Client({
    callerOptions: { maxRetries: 0 },
  });
  const openai = new OpenAI();
  const runId = uuidv4();
  const projectName = "__test_traceable_wrapper_aggregator";
  let collectedRun: BaseRun | null = null;
  const _getRun = (r: RunTree): void => {
    collectedRun = r;
  };
  let tracedOutput;
  const iterableTraceable = traceable(
    openai.chat.completions.create.bind(openai.chat.completions),
    {
      name: "openai_traceable",
      project_name: projectName,
      client: langsmithClient,
      id: runId,
      // Concatenates the streamed delta contents into one string.
      aggregator: (chunks) => {
        tracedOutput = chunks
          .map((chunk) => chunk?.choices[0]?.delta?.content ?? "")
          .join("");
        return tracedOutput;
      },
      on_end: _getRun,
      tracingEnabled: true,
    }
  );
  expect(isTraceableFunction(iterableTraceable)).toBe(true);
  const chunks = [];
  for await (const chunk of await iterableTraceable({
    messages: [{ content: "Hello there", role: "user" }],
    model: "gpt-3.5-turbo",
    stream: true,
  })) {
    chunks.push(chunk);
    // @ts-expect-error Should have typechecking on streamed output
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    const _test = chunk.invalidProp;
  }
  expect(typeof tracedOutput).toEqual("string");
  expect(collectedRun).not.toBeNull();
  expect(collectedRun!.outputs).toEqual({ outputs: tracedOutput });
  await waitUntilRunFound(langsmithClient, runId, true);
  const storedRun3 = await langsmithClient.readRun(runId);
  expect(storedRun3.id).toEqual(runId);
});
// A traceable async generator consumed to completion records a successful
// run whose output is the aggregated chunk string.
test.concurrent("Test async generator success", async () => {
  const langsmithClient = new Client({
    callerOptions: { maxRetries: 0 },
  });
  const runId = uuidv4();
  const projectName = "__test_traceable_wrapper_aggregator";
  let collectedRun: BaseRun | null = null;
  const _getRun = (r: RunTree): void => {
    collectedRun = r;
  };
  async function* giveMeNumbers() {
    for (let i = 0; i < 5; i++) {
      yield i;
    }
  }
  const iterableTraceable = traceable(giveMeNumbers, {
    name: "i_traceable",
    project_name: projectName,
    client: langsmithClient,
    id: runId,
    aggregator: (chunks) => {
      return chunks.join(" ");
    },
    on_end: _getRun,
    tracingEnabled: true,
  });
  expect(isTraceableFunction(iterableTraceable)).toBe(true);
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  for await (const _ of iterableTraceable()) {
    // Pass
  }
  expect(collectedRun).not.toBeNull();
  expect(collectedRun!.outputs).toEqual({ outputs: "0 1 2 3 4" });
  await waitUntilRunFound(langsmithClient, runId);
  const storedRun3 = await langsmithClient.readRun(runId);
  expect(storedRun3.id).toEqual(runId);
  expect(storedRun3.status).toEqual("success");
  expect(storedRun3.outputs).toEqual({ outputs: "0 1 2 3 4" });
  expect(storedRun3.error).toBeFalsy();
});
// When a traceable async generator throws mid-stream, the run must record
// the error status AND preserve the chunks yielded before the throw.
test.concurrent("Test async generator throws error", async () => {
  const langsmithClient = new Client({
    callerOptions: { maxRetries: 0 },
  });
  const runId = uuidv4();
  const projectName = "__test_traceable_wrapper_aggregator";
  let collectedRun: BaseRun | null = null;
  const _getRun = (r: RunTree): void => {
    collectedRun = r;
  };
  async function* giveMeNumbers() {
    for (let i = 0; i < 5; i++) {
      yield i;
      // Throws after yielding 0, 1, 2.
      if (i == 2) {
        throw new Error("I am bad");
      }
    }
  }
  const iterableTraceable = traceable(giveMeNumbers, {
    name: "i_traceable",
    project_name: projectName,
    client: langsmithClient,
    id: runId,
    aggregator: (chunks) => {
      return chunks.join(" ");
    },
    on_end: _getRun,
    tracingEnabled: true,
  });
  expect(isTraceableFunction(iterableTraceable)).toBe(true);
  try {
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    for await (const _ of iterableTraceable()) {
      // Pass
    }
  } catch (err: any) {
    expect(err.message).toEqual("I am bad");
  }
  expect(collectedRun).not.toBeNull();
  expect(collectedRun!.outputs).toEqual({ outputs: "0 1 2" });
  await waitUntilRunFound(langsmithClient, runId);
  const storedRun3 = await langsmithClient.readRun(runId);
  expect(storedRun3.id).toEqual(runId);
  expect(storedRun3.status).toEqual("error");
  expect(storedRun3.outputs).toEqual({ outputs: "0 1 2" });
  expect(storedRun3.error).toEqual("Error: I am bad");
});
// Breaking out of iteration early must still finish the run: it is marked
// as a "Cancelled" error with whatever chunks were consumed as outputs.
test.concurrent("Test async generator break finishes run", async () => {
  const langsmithClient = new Client({
    callerOptions: { maxRetries: 0 },
  });
  const runId = uuidv4();
  const projectName = "__test_traceable_wrapper_aggregator";
  let collectedRun: BaseRun | null = null;
  const _getRun = (r: RunTree): void => {
    collectedRun = r;
  };
  async function* giveMeNumbers() {
    for (let i = 0; i < 5; i++) {
      yield i;
    }
  }
  const iterableTraceable = traceable(giveMeNumbers, {
    name: "i_traceable",
    project_name: projectName,
    client: langsmithClient,
    id: runId,
    aggregator: (chunks) => {
      return chunks.join(" ");
    },
    on_end: _getRun,
    tracingEnabled: true,
  });
  expect(isTraceableFunction(iterableTraceable)).toBe(true);
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  for await (const _ of iterableTraceable()) {
    break;
  }
  expect(collectedRun).not.toBeNull();
  expect(collectedRun!.outputs).toEqual({ outputs: "0" });
  expect(collectedRun!.id).toEqual(runId);
  await waitUntilRunFound(langsmithClient, runId);
  const storedRun3 = await langsmithClient.readRun(runId);
  expect(storedRun3.id).toEqual(runId);
  expect(storedRun3.status).toEqual("error");
  expect(storedRun3.outputs).toEqual({ outputs: "0" });
  expect(storedRun3.error).toEqual("Cancelled");
});
// Traceable can wrap an async function that *returns* an async generator
// (rather than being one); consuming the awaited iterator to completion
// records a successful run with the aggregated output.
// Fix: renamed from "Test async generator success", which duplicated the
// title of the direct-generator test above and made reports ambiguous.
test.concurrent("Test promise-wrapped async generator success", async () => {
  const langsmithClient = new Client({
    callerOptions: { maxRetries: 0 },
  });
  const runId = uuidv4();
  const projectName = "__test_traceable_wrapper_aggregator";
  let collectedRun: BaseRun | null = null;
  const _getRun = (r: RunTree): void => {
    collectedRun = r;
  };
  async function giveMeGiveMeNumbers() {
    async function* giveMeNumbers() {
      for (let i = 0; i < 5; i++) {
        yield i;
      }
    }
    return giveMeNumbers();
  }
  const iterableTraceable = traceable(giveMeGiveMeNumbers, {
    name: "i_traceable",
    project_name: projectName,
    client: langsmithClient,
    id: runId,
    aggregator: (chunks) => {
      return chunks.join(" ");
    },
    on_end: _getRun,
    tracingEnabled: true,
  });
  expect(isTraceableFunction(iterableTraceable)).toBe(true);
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  for await (const _ of await iterableTraceable()) {
    // Pass
  }
  expect(collectedRun).not.toBeNull();
  expect(collectedRun!.outputs).toEqual({ outputs: "0 1 2 3 4" });
  expect(collectedRun!.id).toEqual(runId);
  await waitUntilRunFound(langsmithClient, runId);
  const storedRun3 = await langsmithClient.readRun(runId);
  expect(storedRun3.id).toEqual(runId);
  expect(storedRun3.status).toEqual("success");
  expect(storedRun3.outputs).toEqual({ outputs: "0 1 2 3 4" });
  expect(storedRun3.error).toBeFalsy();
});
// An error thrown partway through a promise-returned async generator must
// mark the run as "error" while preserving the chunks yielded beforehand.
// Fix: the previous title ("Test promise for async generator success")
// claimed success, but the test's assertions expect an error status.
test.concurrent("Test promise for async generator throws error", async () => {
  const langsmithClient = new Client({
    callerOptions: { maxRetries: 0 },
  });
  const runId = uuidv4();
  const projectName = "__test_traceable_wrapper_aggregator";
  let collectedRun: BaseRun | null = null;
  async function giveMeGiveMeNumbers() {
    async function* giveMeNumbers() {
      for (let i = 0; i < 5; i++) {
        yield i;
        // Throws after yielding 0, 1, 2.
        if (i == 2) {
          throw new Error("I am bad");
        }
      }
    }
    return giveMeNumbers();
  }
  const iterableTraceable = traceable(giveMeGiveMeNumbers, {
    name: "i_traceable",
    project_name: projectName,
    client: langsmithClient,
    id: runId,
    aggregator: (chunks) => {
      return chunks.join(" ");
    },
    on_end: (r: RunTree): void => {
      collectedRun = r;
    },
    tracingEnabled: true,
  });
  expect(isTraceableFunction(iterableTraceable)).toBe(true);
  try {
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    for await (const _ of await iterableTraceable()) {
      // Pass
    }
  } catch (err: any) {
    expect(err.message).toEqual("I am bad");
  }
  expect(collectedRun).not.toBeNull();
  expect(collectedRun!.outputs).toEqual({ outputs: "0 1 2" });
  expect(collectedRun!.id).toEqual(runId);
  await waitUntilRunFound(langsmithClient, runId);
  const storedRun3 = await langsmithClient.readRun(runId);
  expect(storedRun3.id).toEqual(runId);
  expect(storedRun3.status).toEqual("error");
  expect(storedRun3.outputs).toEqual({ outputs: "0 1 2" });
  expect(storedRun3.error).toEqual("Error: I am bad");
});
// Breaking out of a promise-returned async generator must still finalize
// the run (status "error" / cancelled) with the consumed chunks as output.
test.concurrent(
  "Test promise for async generator break finishes run",
  async () => {
    const langsmithClient = new Client({
      callerOptions: { maxRetries: 0 },
    });
    const runId = uuidv4();
    const projectName = "__test_traceable_wrapper_aggregator";
    let collectedRun: BaseRun | null = null;
    const _getRun = (r: RunTree): void => {
      collectedRun = r;
    };
    async function giveMeGiveMeNumbers() {
      async function* giveMeNumbers() {
        for (let i = 0; i < 5; i++) {
          yield i;
        }
      }
      return giveMeNumbers();
    }
    const iterableTraceable = traceable(giveMeGiveMeNumbers, {
      name: "i_traceable",
      project_name: projectName,
      client: langsmithClient,
      id: runId,
      aggregator: (chunks) => {
        return chunks.join(" ");
      },
      on_end: _getRun,
      tracingEnabled: true,
    });
    expect(isTraceableFunction(iterableTraceable)).toBe(true);
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    for await (const _ of await iterableTraceable()) {
      break;
    }
    expect(collectedRun).not.toBeNull();
    expect(collectedRun!.outputs).toEqual({ outputs: "0" });
    expect(collectedRun!.id).toEqual(runId);
    await waitUntilRunFound(langsmithClient, runId);
    const storedRun3 = await langsmithClient.readRun(runId);
    expect(storedRun3.id).toEqual(runId);
    expect(storedRun3.status).toEqual("error");
  }
);
// Verifies extractAttachments and processInputs hooks: attachments are
// stripped out of the traced inputs and sent via the multipart ingest
// endpoint, while processInputs can rewrite the remaining input payload.
test.concurrent(
  "Test upload attachments and process inputs.",
  async () => {
    const langsmithClient = new Client({
      callerOptions: { maxRetries: 0 },
    });
    const runId = uuidv4();
    const projectName = "__test_traceable_wrapper_attachments_and_inputs";
    const testAttachment1 = new Uint8Array([1, 2, 3, 4]);
    const testAttachment2 = new Uint8Array([5, 6, 7, 8]);
    const testAttachment3 = new ArrayBuffer(4);
    new Uint8Array(testAttachment3).set([13, 14, 15, 16]);
    const traceableWithAttachmentsAndInputs = traceable(
      (
        val: number,
        text: string,
        extra: string,
        attachment: Uint8Array,
        attachment2: ArrayBuffer
      ) =>
        `Processed: ${val}, ${text}, ${extra}, ${attachment.length}, ${attachment2.byteLength}`,
      {
        name: "attachment_and_input_test",
        project_name: projectName,
        client: langsmithClient,
        id: runId,
        // Splits the call args into (attachments, remaining inputs).
        extractAttachments: (
          val: number,
          text: string,
          extra: string,
          attachment: Uint8Array,
          attachment2: ArrayBuffer
        ) => [
          {
            test1bin: ["application/octet-stream", testAttachment1],
            test2bin: ["application/octet-stream", testAttachment2],
            inputbin: ["application/octet-stream", attachment],
            input2bin: [
              "application/octet-stream",
              new Uint8Array(attachment2),
            ],
          },
          { val, text, extra },
        ],
        // Runs after extraction: attachments must already be gone, and the
        // returned object replaces the traced inputs.
        processInputs: (inputs) => {
          expect(inputs).not.toHaveProperty("attachment");
          expect(inputs).not.toHaveProperty("attachment2");
          return {
            ...inputs,
            processed_val: (inputs.val as number) * 2,
            processed_text: (inputs.text as string).toUpperCase(),
          };
        },
        tracingEnabled: true,
      }
    );
    const multipartIngestRunsSpy = jest.spyOn(
      langsmithClient,
      "multipartIngestRuns"
    );
    await traceableWithAttachmentsAndInputs(
      42,
      "test input",
      "extra data",
      new Uint8Array([9, 10, 11, 12]),
      testAttachment3
    );
    await langsmithClient.awaitPendingTraceBatches();
    expect(multipartIngestRunsSpy).toHaveBeenCalled();
    const callArgs = multipartIngestRunsSpy.mock.calls[0][0];
    expect(callArgs.runCreates).toBeDefined();
    expect(callArgs.runCreates?.length).toBe(1);
    const runCreate = callArgs.runCreates?.[0];
    expect(runCreate?.id).toBe(runId);
    expect(runCreate?.attachments).toBeDefined();
    expect(runCreate?.attachments?.["test1bin"]).toEqual([
      "application/octet-stream",
      testAttachment1,
    ]);
    expect(runCreate?.attachments?.["test2bin"]).toEqual([
      "application/octet-stream",
      testAttachment2,
    ]);
    expect(runCreate?.attachments?.["inputbin"]).toEqual([
      "application/octet-stream",
      new Uint8Array([9, 10, 11, 12]),
    ]);
    expect(runCreate?.attachments?.["input2bin"]).toEqual([
      "application/octet-stream",
      new Uint8Array([13, 14, 15, 16]),
    ]);
    await waitUntilRunFound(langsmithClient, runId);
    const storedRun = await langsmithClient.readRun(runId);
    expect(storedRun.id).toEqual(runId);
    expect(storedRun.inputs).toEqual({
      val: 42,
      text: "test input",
      extra: "extra data",
      processed_val: 84,
      processed_text: "TEST INPUT",
    });
    expect(storedRun.outputs).toEqual({
      outputs: "Processed: 42, test input, extra data, 4, 4",
    });
    multipartIngestRunsSpy.mockRestore();
  },
  60000
);
import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
import {
generateText,
streamText,
generateObject,
streamObject,
tool,
LanguageModelV1StreamPart,
} from "ai";
import { z } from "zod";
import { AISDKExporter } from "../vercel.js";
import { traceable } from "../traceable.js";
import { toArray } from "./utils.js";
import { mockClient } from "./utils/mock_client.js";
import { convertArrayToReadableStream, MockLanguageModelV1 } from "ai/test";
import { getAssumedTreeFromCalls } from "./utils/tree.js";
// Shared mock LangSmith client (callSpy records ingest calls) plus an OTEL
// tracer provider that exports spans through the AISDKExporter under test.
const { client, callSpy } = mockClient();
const provider = new NodeTracerProvider();
provider.addSpanProcessor(
  new BatchSpanProcessor(new AISDKExporter({ client }))
);
provider.register();
/**
 * Jest asymmetric matcher for LangSmith `dotted_order` strings: matches when
 * the string has exactly `depth` dot-separated segments and the last three
 * digits before the trailing "Z" of the final segment equal `ns`.
 */
class ExecutionOrderSame {
  // Marks this object as a Jest asymmetric matcher.
  $$typeof = Symbol.for("jest.asymmetricMatcher");

  private expectedDepth: number;
  private expectedNs: string;

  constructor(depth: number, ns: string) {
    this.expectedDepth = depth;
    this.expectedNs = ns;
  }

  asymmetricMatch(other: unknown) {
    // eslint-disable-next-line no-instanceof/no-instanceof
    if (typeof other !== "string" && !(other instanceof String)) {
      return false;
    }
    const parts = other.split(".");
    if (parts.length !== this.expectedDepth) return false;
    const tail = parts[parts.length - 1];
    if (!tail) return false;
    // Take everything before the first "Z", then its final three characters.
    const zIndex = tail.indexOf("Z");
    const stamp = zIndex === -1 ? tail : tail.slice(0, zIndex);
    return stamp.slice(-3) === this.expectedNs;
  }

  toString() {
    return "ExecutionOrderSame";
  }

  getExpectedType() {
    return "string";
  }

  toAsymmetricMatcher() {
    return `ExecutionOrderSame<${this.expectedDepth}, ${this.expectedNs}>`;
  }
}
// Subclass of the AI SDK mock model that counts generate/stream invocations
// so multi-step tool flows can branch on which step they are in.
class MockMultiStepLanguageModelV1 extends MockLanguageModelV1 {
  // -1 means "not invoked yet"; incremented before each delegated call.
  generateStep = -1;
  streamStep = -1;
  constructor(...args: ConstructorParameters<typeof MockLanguageModelV1>) {
    super(...args);
    // Wrap the configured doGenerate/doStream so every invocation bumps the
    // corresponding step counter before delegating to the original.
    const oldDoGenerate = this.doGenerate;
    this.doGenerate = async (...args) => {
      this.generateStep += 1;
      return await oldDoGenerate(...args);
    };
    const oldDoStream = this.doStream;
    this.doStream = async (...args) => {
      this.streamStep += 1;
      return await oldDoStream(...args);
    };
  }
}
// Reset the recorded client calls between tests; flush the OTEL provider
// when the suite finishes.
beforeEach(() => callSpy.mockClear());
afterAll(async () => await provider.shutdown());
// generateText with a tool call: step 0 returns a tool call, step 1 the
// final text. Expects a named root run plus two model calls and one tool
// execution, mapped into the LangSmith run-tree shape.
test("generateText", async () => {
  const model = new MockMultiStepLanguageModelV1({
    doGenerate: async () => {
      if (model.generateStep === 0) {
        return {
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: "stop",
          usage: { promptTokens: 10, completionTokens: 20 },
          toolCalls: [
            {
              toolCallType: "function",
              toolName: "listOrders",
              toolCallId: "tool-id",
              args: JSON.stringify({ userId: "123" }),
            },
          ],
        };
      }
      return {
        rawCall: { rawPrompt: null, rawSettings: {} },
        finishReason: "stop",
        usage: { promptTokens: 10, completionTokens: 20 },
        text: `Hello, world!`,
      };
    },
  });
  await generateText({
    model,
    messages: [
      {
        role: "user",
        content: "What are my orders? My user ID is 123",
      },
    ],
    tools: {
      listOrders: tool({
        description: "list all orders",
        parameters: z.object({ userId: z.string() }),
        execute: async ({ userId }) =>
          `User ${userId} has the following orders: 1`,
      }),
    },
    experimental_telemetry: AISDKExporter.getSettings({
      isEnabled: true,
      runName: "generateText",
      functionId: "functionId",
      metadata: { userId: "123", language: "english" },
    }),
    maxSteps: 10,
  });
  await provider.forceFlush();
  expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
    nodes: [
      "generateText:0",
      "mock-provider:1",
      "listOrders:2",
      "mock-provider:3",
    ],
    edges: [
      ["generateText:0", "mock-provider:1"],
      ["generateText:0", "listOrders:2"],
      ["generateText:0", "mock-provider:3"],
    ],
    data: {
      "generateText:0": {
        name: "generateText",
        inputs: {
          messages: [
            {
              type: "human",
              data: { content: "What are my orders? My user ID is 123" },
            },
          ],
        },
        outputs: {
          llm_output: {
            type: "ai",
            data: { content: "Hello, world!" },
            token_usage: { completion_tokens: 20, prompt_tokens: 10 },
          },
        },
        extra: {
          metadata: {
            functionId: "functionId",
            userId: "123",
            language: "english",
          },
        },
        dotted_order: new ExecutionOrderSame(1, "000"),
      },
      "mock-provider:1": {
        inputs: {
          messages: [
            {
              type: "human",
              data: {
                content: [
                  {
                    type: "text",
                    text: "What are my orders? My user ID is 123",
                  },
                ],
              },
            },
          ],
        },
        outputs: {
          llm_output: {
            type: "ai",
            data: {
              content: [
                {
                  type: "tool_use",
                  name: "listOrders",
                  id: "tool-id",
                  input: { userId: "123" },
                },
              ],
              additional_kwargs: {
                tool_calls: [
                  {
                    id: "tool-id",
                    type: "function",
                    function: {
                      name: "listOrders",
                      id: "tool-id",
                      arguments: '{"userId":"123"}',
                    },
                  },
                ],
              },
            },
            token_usage: { completion_tokens: 20, prompt_tokens: 10 },
          },
        },
        dotted_order: new ExecutionOrderSame(2, "000"),
      },
      "listOrders:2": {
        inputs: { userId: "123" },
        outputs: { output: "User 123 has the following orders: 1" },
        dotted_order: new ExecutionOrderSame(2, "001"),
      },
      "mock-provider:3": {
        inputs: {
          messages: [
            {
              type: "human",
              data: {
                content: [
                  {
                    type: "text",
                    text: "What are my orders? My user ID is 123",
                  },
                ],
              },
            },
            {
              type: "ai",
              data: {
                content: [
                  {
                    type: "tool_use",
                    name: "listOrders",
                    id: "tool-id",
                    input: { userId: "123" },
                  },
                ],
                additional_kwargs: {
                  tool_calls: [
                    {
                      id: "tool-id",
                      type: "function",
                      function: {
                        name: "listOrders",
                        id: "tool-id",
                        arguments: '{"userId":"123"}',
                      },
                    },
                  ],
                },
              },
            },
            {
              type: "tool",
              data: {
                content: '"User 123 has the following orders: 1"',
                name: "listOrders",
                tool_call_id: "tool-id",
              },
            },
          ],
        },
        outputs: {
          llm_output: {
            type: "ai",
            data: { content: "Hello, world!" },
            token_usage: { completion_tokens: 20, prompt_tokens: 10 },
          },
        },
        dotted_order: new ExecutionOrderSame(2, "002"),
      },
    },
  });
});
// streamText with a tool call: step 0 streams a tool call, step 1 streams
// the final text deltas. No runName is given, so the root node takes the
// provider name instead of a custom one.
test("streamText", async () => {
  const model = new MockMultiStepLanguageModelV1({
    doStream: async () => {
      if (model.streamStep === 0) {
        return {
          stream: convertArrayToReadableStream([
            {
              type: "tool-call",
              toolCallType: "function",
              toolName: "listOrders",
              toolCallId: "tool-id",
              args: JSON.stringify({ userId: "123" }),
            },
            {
              type: "finish",
              finishReason: "stop",
              logprobs: undefined,
              usage: { completionTokens: 10, promptTokens: 3 },
            },
          ] satisfies LanguageModelV1StreamPart[]),
          rawCall: { rawPrompt: null, rawSettings: {} },
        };
      }
      return {
        stream: convertArrayToReadableStream([
          { type: "text-delta", textDelta: "Hello" },
          { type: "text-delta", textDelta: ", " },
          { type: "text-delta", textDelta: `world!` },
          {
            type: "finish",
            finishReason: "stop",
            logprobs: undefined,
            usage: { completionTokens: 10, promptTokens: 3 },
          },
        ]),
        rawCall: { rawPrompt: null, rawSettings: {} },
      };
    },
  });
  const result = await streamText({
    model,
    messages: [
      {
        role: "user",
        content: "What are my orders? My user ID is 123",
      },
    ],
    tools: {
      listOrders: tool({
        description: "list all orders",
        parameters: z.object({ userId: z.string() }),
        execute: async ({ userId }) =>
          `User ${userId} has the following orders: 1`,
      }),
    },
    experimental_telemetry: AISDKExporter.getSettings({
      isEnabled: true,
      functionId: "functionId",
      metadata: { userId: "123", language: "english" },
    }),
    maxSteps: 10,
  });
  // Drain the stream so all spans are emitted before flushing.
  await toArray(result.fullStream);
  await provider.forceFlush();
  const actual = getAssumedTreeFromCalls(callSpy.mock.calls);
  expect(actual).toMatchObject({
    nodes: [
      "mock-provider:0",
      "mock-provider:1",
      "listOrders:2",
      "mock-provider:3",
    ],
    edges: [
      ["mock-provider:0", "mock-provider:1"],
      ["mock-provider:0", "listOrders:2"],
      ["mock-provider:0", "mock-provider:3"],
    ],
    data: {
      "mock-provider:0": {
        inputs: {
          messages: [
            {
              type: "human",
              data: { content: "What are my orders? My user ID is 123" },
            },
          ],
        },
        outputs: {
          llm_output: {
            type: "ai",
            data: { content: "Hello, world!" },
            token_usage: { completion_tokens: 20, prompt_tokens: 6 },
          },
        },
        extra: {
          metadata: {
            functionId: "functionId",
            userId: "123",
            language: "english",
          },
        },
        dotted_order: new ExecutionOrderSame(1, "000"),
      },
      "mock-provider:1": {
        inputs: {
          messages: [
            {
              type: "human",
              data: {
                content: [
                  {
                    type: "text",
                    text: "What are my orders? My user ID is 123",
                  },
                ],
              },
            },
          ],
        },
        outputs: {
          llm_output: {
            type: "ai",
            data: {
              content: [
                {
                  type: "tool_use",
                  name: "listOrders",
                  id: "tool-id",
                  input: { userId: "123" },
                },
              ],
              additional_kwargs: {
                tool_calls: [
                  {
                    id: "tool-id",
                    type: "function",
                    function: {
                      name: "listOrders",
                      id: "tool-id",
                      arguments: '{"userId":"123"}',
                    },
                  },
                ],
              },
            },
            token_usage: { completion_tokens: 10, prompt_tokens: 3 },
          },
        },
        dotted_order: new ExecutionOrderSame(2, "000"),
      },
      "listOrders:2": {
        inputs: { userId: "123" },
        outputs: { output: "User 123 has the following orders: 1" },
        dotted_order: new ExecutionOrderSame(2, "001"),
      },
      "mock-provider:3": {
        inputs: {
          messages: [
            {
              type: "human",
              data: {
                content: [
                  {
                    type: "text",
                    text: "What are my orders? My user ID is 123",
                  },
                ],
              },
            },
            {
              type: "ai",
              data: {
                content: [
                  {
                    type: "tool_use",
                    name: "listOrders",
                    id: "tool-id",
                    input: { userId: "123" },
                  },
                ],
                additional_kwargs: {
                  tool_calls: [
                    {
                      id: "tool-id",
                      type: "function",
                      function: {
                        name: "listOrders",
                        id: "tool-id",
                        arguments: '{"userId":"123"}',
                      },
                    },
                  ],
                },
              },
            },
            {
              type: "tool",
              data: {
                content: '"User 123 has the following orders: 1"',
                name: "listOrders",
                tool_call_id: "tool-id",
              },
            },
          ],
        },
        outputs: {
          llm_output: {
            type: "ai",
            data: { content: "Hello, world!" },
            token_usage: { completion_tokens: 10, prompt_tokens: 3 },
          },
        },
        dotted_order: new ExecutionOrderSame(2, "002"),
      },
    },
  });
});
// generateObject in "tool" object-generation mode: the model answers with a
// json tool call; expects a root span wrapping a single provider call, both
// carrying the parsed object as output.
test("generateObject", async () => {
  const model = new MockMultiStepLanguageModelV1({
    doGenerate: async () => ({
      rawCall: { rawPrompt: null, rawSettings: {} },
      finishReason: "stop",
      usage: { promptTokens: 10, completionTokens: 20 },
      toolCalls: [
        {
          toolCallType: "function",
          toolName: "json",
          toolCallId: "tool-id",
          args: JSON.stringify({
            weather: { city: "Prague", unit: "celsius" },
          }),
        },
      ],
    }),
    defaultObjectGenerationMode: "tool",
  });
  await generateObject({
    model,
    schema: z.object({
      weather: z.object({
        city: z.string(),
        unit: z.union([z.literal("celsius"), z.literal("fahrenheit")]),
      }),
    }),
    prompt: "What's the weather in Prague?",
    experimental_telemetry: AISDKExporter.getSettings({
      isEnabled: true,
      functionId: "functionId",
      metadata: { userId: "123", language: "english" },
    }),
  });
  await provider.forceFlush();
  const actual = getAssumedTreeFromCalls(callSpy.mock.calls);
  expect(actual).toMatchObject({
    nodes: ["mock-provider:0", "mock-provider:1"],
    edges: [["mock-provider:0", "mock-provider:1"]],
    data: {
      "mock-provider:0": {
        inputs: {
          input: { prompt: "What's the weather in Prague?" },
        },
        outputs: {
          output: { weather: { city: "Prague", unit: "celsius" } },
          llm_output: {
            token_usage: { completion_tokens: 20, prompt_tokens: 10 },
          },
        },
        dotted_order: new ExecutionOrderSame(1, "000"),
      },
      "mock-provider:1": {
        inputs: {
          messages: [
            {
              type: "human",
              data: {
                content: [
                  { type: "text", text: "What's the weather in Prague?" },
                ],
              },
            },
          ],
        },
        outputs: {
          output: { weather: { city: "Prague", unit: "celsius" } },
          llm_output: {
            token_usage: { completion_tokens: 20, prompt_tokens: 10 },
          },
        },
        extra: {
          metadata: {
            functionId: "functionId",
            userId: "123",
            language: "english",
          },
        },
        dotted_order: new ExecutionOrderSame(2, "000"),
      },
    },
  });
});
test("streamObject", async () => {
const model = new MockMultiStepLanguageModelV1({
doGenerate: async () => ({
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: "stop",
usage: { promptTokens: 10, completionTokens: 20 },
toolCalls: [
{
toolCallType: "function",
toolName: "json",
toolCallId: "tool-id",
args: JSON.stringify({
weather: { city: "Prague", unit: "celsius" },
}),
},
],
}),
doStream: async () => {
return {
stream: convertArrayToReadableStream([
{
type: "tool-call-delta",
toolCallType: "function",
toolName: "json",
toolCallId: "tool-id",
argsTextDelta: JSON.stringify({
weather: { city: "Prague", unit: "celsius" },
}),
},
{
type: "finish",
finishReason: "stop",
logprobs: undefined,
usage: { completionTokens: 10, promptTokens: 3 },
},
] satisfies LanguageModelV1StreamPart[]),
rawCall: { rawPrompt: null, rawSettings: {} },
};
},
defaultObjectGenerationMode: "tool",
});
const result = await streamObject({
model,
schema: z.object({
weather: z.object({
city: z.string(),
unit: z.union([z.literal("celsius"), z.literal("fahrenheit")]),
}),
}),
prompt: "What's the weather in Prague?",
experimental_telemetry: AISDKExporter.getSettings({
isEnabled: true,
functionId: "functionId",
metadata: { userId: "123", language: "english" },
}),
});
await toArray(result.partialObjectStream);
await provider.forceFlush();
const actual = getAssumedTreeFromCalls(callSpy.mock.calls);
expect(actual).toMatchObject({
nodes: ["mock-provider:0", "mock-provider:1"],
edges: [["mock-provider:0", "mock-provider:1"]],
data: {
"mock-provider:0": {
inputs: {
input: { prompt: "What's the weather in Prague?" },
},
outputs: {
output: { weather: { city: "Prague", unit: "celsius" } },
llm_output: {
token_usage: { completion_tokens: 10, prompt_tokens: 3 },
},
},
extra: {
metadata: {
functionId: "functionId",
userId: "123",
language: "english",
},
},
dotted_order: new ExecutionOrderSame(1, "000"),
},
"mock-provider:1": {
inputs: {
messages: [
{
type: "human",
data: {
content: [
{ type: "text", text: "What's the weather in Prague?" },
],
},
},
],
},
outputs: {
output: { weather: { city: "Prague", unit: "celsius" } },
llm_output: {
token_usage: { completion_tokens: 10, prompt_tokens: 3 },
},
},
dotted_order: new ExecutionOrderSame(2, "000"),
},
},
});
});
test("traceable", async () => {
const model = new MockMultiStepLanguageModelV1({
doGenerate: async () => {
if (model.generateStep === 0) {
return {
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: "stop",
usage: { promptTokens: 10, completionTokens: 20 },
toolCalls: [
{
toolCallType: "function",
toolName: "listOrders",
toolCallId: "tool-id",
args: JSON.stringify({ userId: "123" }),
},
],
};
}
return {
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: "stop",
usage: { promptTokens: 10, completionTokens: 20 },
text: `Hello, world!`,
};
},
});
const wrappedText = traceable(
async (content: string) => {
const { text } = await generateText({
model,
messages: [{ role: "user", content }],
tools: {
listOrders: tool({
description: "list all orders",
parameters: z.object({ userId: z.string() }),
execute: async ({ userId }) =>
`User ${userId} has the following orders: 1`,
}),
},
experimental_telemetry: AISDKExporter.getSettings({
isEnabled: true,
runName: "generateText",
functionId: "functionId",
metadata: { userId: "123", language: "english" },
}),
maxSteps: 10,
});
return { text };
},
{ name: "wrappedText", client, tracingEnabled: true }
);
await wrappedText("What are my orders? My user ID is 123");
await provider.forceFlush();
const actual = getAssumedTreeFromCalls(callSpy.mock.calls);
expect(actual).toMatchObject({
nodes: [
"wrappedText:0",
"generateText:1",
"mock-provider:2",
"listOrders:3",
"mock-provider:4",
],
edges: [
["wrappedText:0", "generateText:1"],
["generateText:1", "mock-provider:2"],
["generateText:1", "listOrders:3"],
["generateText:1", "mock-provider:4"],
],
data: {
"wrappedText:0": {
inputs: {
input: "What are my orders? My user ID is 123",
},
outputs: {
text: "Hello, world!",
},
dotted_order: new ExecutionOrderSame(1, "001"),
},
"generateText:1": {
name: "generateText",
extra: {
metadata: {
functionId: "functionId",
userId: "123",
language: "english",
},
},
inputs: {
messages: [
{
type: "human",
data: { content: "What are my orders? My user ID is 123" },
},
],
},
outputs: {
llm_output: {
type: "ai",
data: { content: "Hello, world!" },
token_usage: { completion_tokens: 20, prompt_tokens: 10 },
},
},
dotted_order: new ExecutionOrderSame(2, "000"),
},
"mock-provider:2": {
inputs: {
messages: [
{
type: "human",
data: {
content: [
{
type: "text",
text: "What are my orders? My user ID is 123",
},
],
},
},
],
},
outputs: {
llm_output: {
type: "ai",
data: {
content: [
{
type: "tool_use",
name: "listOrders",
id: "tool-id",
input: { userId: "123" },
},
],
additional_kwargs: {
tool_calls: [
{
id: "tool-id",
type: "function",
function: {
name: "listOrders",
id: "tool-id",
arguments: '{"userId":"123"}',
},
},
],
},
},
token_usage: { completion_tokens: 20, prompt_tokens: 10 },
},
},
dotted_order: new ExecutionOrderSame(3, "000"),
},
"listOrders:3": {
inputs: { userId: "123" },
outputs: { output: "User 123 has the following orders: 1" },
dotted_order: new ExecutionOrderSame(3, "001"),
},
"mock-provider:4": {
inputs: {
messages: [
{
type: "human",
data: {
content: [
{
type: "text",
text: "What are my orders? My user ID is 123",
},
],
},
},
{
type: "ai",
data: {
content: [
{
type: "tool_use",
name: "listOrders",
id: "tool-id",
input: { userId: "123" },
},
],
additional_kwargs: {
tool_calls: [
{
id: "tool-id",
type: "function",
function: {
name: "listOrders",
id: "tool-id",
arguments: '{"userId":"123"}',
},
},
],
},
},
},
{
type: "tool",
data: {
content: '"User 123 has the following orders: 1"',
name: "listOrders",
tool_call_id: "tool-id",
},
},
],
},
outputs: {
llm_output: {
type: "ai",
data: { content: "Hello, world!" },
token_usage: { completion_tokens: 20, prompt_tokens: 10 },
},
},
dotted_order: new ExecutionOrderSame(3, "002"),
},
},
});
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/batch_client.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable prefer-const */
import { jest } from "@jest/globals";
import { v4 as uuidv4 } from "uuid";
import { Client, mergeRuntimeEnvIntoRunCreate } from "../client.js";
import { convertToDottedOrderFormat } from "../run_trees.js";
import { _getFetchImplementation } from "../singletons/fetch.js";
import { RunCreate } from "../schemas.js";
/**
 * Reconstructs a `{ post, patch }` ingest payload from a mocked request body.
 *
 * Plain string bodies (the `batch` endpoint) are parsed as JSON directly.
 * ArrayBuffer bodies (the `multipart` endpoint) are decoded and split on the
 * multipart boundary; each part's `Content-Disposition` field name — shaped
 * like `method.id` or `method.id.field` — decides where its content lands.
 */
const parseMockRequestBody = async (body: string | ArrayBuffer) => {
  if (typeof body === "string") {
    return JSON.parse(body);
  }
  const raw = new TextDecoder().decode(body);
  // The first line of the multipart payload is the boundary marker.
  const delimiter = raw.split("\r\n")[0].trim();
  // Everything between the first and final boundary is an individual part.
  const segments = raw.split(delimiter).slice(1, -1);
  const fields: [string, any][] = segments.map((segment) => {
    const [rawHeaders, ...bodyChunks] = segment.trim().split("\r\n\r\n");
    const partContent = bodyChunks.join("\r\n\r\n");
    // The field name lives in the Content-Disposition header.
    const fieldName = rawHeaders.match(/name="([^"]+)"/)?.[1] ?? "";
    return [fieldName, partContent.trim()];
  });
  const result: any = {
    post: [],
    patch: [],
  };
  for (const [fieldName, rawValue] of fields) {
    const [method, id, type] = fieldName.split(".");
    // Content may be JSON or an opaque string; keep it verbatim on failure.
    let value: any;
    try {
      value = JSON.parse(rawValue);
    } catch {
      value = rawValue;
    }
    if (!(method in result)) {
      throw new Error(`${method} must be "post" or "patch"`);
    }
    if (!type) {
      // `method.id` parts carry a whole run payload.
      result[method as keyof typeof result].push(value);
    } else {
      // `method.id.field` parts attach one field to an already-seen run.
      for (const run of result[method]) {
        if (run.id === id) {
          run[type] = value;
        }
      }
    }
  }
  return result;
};
// Every test in the suite below runs once per ingest transport: the JSON
// "batch" endpoint and the ArrayBuffer-bodied "multipart" endpoint.
// prettier-ignore
const ENDPOINT_TYPES = [
  "batch",
  "multipart",
];
describe.each(ENDPOINT_TYPES)(
"Batch client tracing with %s endpoint",
(endpointType) => {
const extraBatchIngestConfig =
endpointType === "batch"
? {}
: {
use_multipart_endpoint: true,
};
const expectedTraceURL =
endpointType === "batch"
? "https://api.smith.langchain.com/runs/batch"
: "https://api.smith.langchain.com/runs/multipart";
it("should create a batched run with the given input", async () => {
const client = new Client({
apiKey: "test-api-key",
autoBatchTracing: true,
});
const callSpy = jest
.spyOn((client as any).batchIngestCaller, "call")
.mockResolvedValue({
ok: true,
text: () => "",
});
jest.spyOn(client as any, "_getServerInfo").mockImplementation(() => {
return {
version: "foo",
batch_ingest_config: { ...extraBatchIngestConfig },
};
});
const projectName = "__test_batch";
const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
await client.createRun({
id: runId,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world" },
trace_id: runId,
dotted_order: dottedOrder,
});
await new Promise((resolve) => setTimeout(resolve, 300));
const calledRequestParam: any = callSpy.mock.calls[0][2];
expect(await parseMockRequestBody(calledRequestParam?.body)).toEqual({
post: [
expect.objectContaining({
id: runId,
run_type: "llm",
inputs: {
text: "hello world",
},
trace_id: runId,
dotted_order: dottedOrder,
}),
],
patch: [],
});
expect(callSpy).toHaveBeenCalledWith(
_getFetchImplementation(),
expectedTraceURL,
expect.objectContaining({
body: expect.any(endpointType === "batch" ? String : ArrayBuffer),
})
);
});
it("should not throw an error if fetch fails for batch requests", async () => {
const client = new Client({
apiKey: "test-api-key",
autoBatchTracing: true,
});
jest.spyOn((client as any).caller, "call").mockImplementation(() => {
throw new Error("Totally expected mock error");
});
jest
.spyOn((client as any).batchIngestCaller, "call")
.mockImplementation(() => {
throw new Error("Totally expected mock error");
});
jest.spyOn(client as any, "_getServerInfo").mockImplementation(() => {
return {
version: "foo",
batch_ingest_config: { ...extraBatchIngestConfig },
};
});
const projectName = "__test_batch";
const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
await client.createRun({
id: runId,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world" },
trace_id: runId,
dotted_order: dottedOrder,
});
await new Promise((resolve) => setTimeout(resolve, 300));
});
it("Create + update batching should merge into a single call", async () => {
const client = new Client({
apiKey: "test-api-key",
autoBatchTracing: true,
});
const callSpy = jest
.spyOn((client as any).batchIngestCaller, "call")
.mockResolvedValue({
ok: true,
text: () => "",
});
jest.spyOn(client as any, "_getServerInfo").mockImplementation(() => {
return {
version: "foo",
batch_ingest_config: { ...extraBatchIngestConfig },
};
});
const projectName = "__test_batch";
const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
await client.createRun({
id: runId,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world" },
trace_id: runId,
dotted_order: dottedOrder,
});
const endTime = Math.floor(new Date().getTime() / 1000);
await client.updateRun(runId, {
outputs: { output: ["Hi"] },
dotted_order: dottedOrder,
trace_id: runId,
end_time: endTime,
});
await client.awaitPendingTraceBatches();
const calledRequestParam: any = callSpy.mock.calls[0][2];
expect(await parseMockRequestBody(calledRequestParam?.body)).toEqual({
post: [
expect.objectContaining({
id: runId,
run_type: "llm",
inputs: {
text: "hello world",
},
outputs: {
output: ["Hi"],
},
end_time: endTime,
trace_id: runId,
dotted_order: dottedOrder,
}),
],
patch: [],
});
expect(callSpy).toHaveBeenCalledWith(
_getFetchImplementation(),
expectedTraceURL,
expect.objectContaining({
body: expect.any(endpointType === "batch" ? String : ArrayBuffer),
})
);
});
it("server info fetch should retry even if initial call fails", async () => {
const client = new Client({
apiKey: "test-api-key",
autoBatchTracing: true,
});
const callSpy = jest
.spyOn((client as any).batchIngestCaller, "call")
.mockResolvedValue({
ok: true,
text: () => "",
});
let serverInfoFailedOnce = false;
jest.spyOn(client as any, "_getServerInfo").mockImplementationOnce(() => {
serverInfoFailedOnce = true;
throw new Error("[MOCK] Connection error.");
});
jest.spyOn(client as any, "_getServerInfo").mockImplementation(() => {
return {
version: "foo",
batch_ingest_config: { ...extraBatchIngestConfig },
};
});
const projectName = "__test_batch";
const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
await client.createRun({
id: runId,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world" },
trace_id: runId,
dotted_order: dottedOrder,
});
const endTime = Math.floor(new Date().getTime() / 1000);
await client.updateRun(runId, {
outputs: { output: ["Hi"] },
dotted_order: dottedOrder,
trace_id: runId,
end_time: endTime,
});
await client.awaitPendingTraceBatches();
expect(serverInfoFailedOnce).toBe(true);
const calledRequestParam: any = callSpy.mock.calls[0][2];
expect(await parseMockRequestBody(calledRequestParam?.body)).toEqual({
post: [
expect.objectContaining({
id: runId,
run_type: "llm",
inputs: {
text: "hello world",
},
outputs: {
output: ["Hi"],
},
end_time: endTime,
trace_id: runId,
dotted_order: dottedOrder,
}),
],
patch: [],
});
expect(callSpy).toHaveBeenCalledWith(
_getFetchImplementation(),
expectedTraceURL,
expect.objectContaining({
body: expect.any(endpointType === "batch" ? String : ArrayBuffer),
})
);
});
it("should immediately trigger a batch on root run end if blockOnRootRunFinalization is set", async () => {
const client = new Client({
apiKey: "test-api-key",
autoBatchTracing: true,
blockOnRootRunFinalization: true,
});
const callSpy = jest
.spyOn((client as any).batchIngestCaller, "call")
.mockResolvedValue({
ok: true,
text: () => "",
});
jest.spyOn(client as any, "_getServerInfo").mockImplementation(() => {
return {
version: "foo",
batch_ingest_config: { ...extraBatchIngestConfig },
};
});
const projectName = "__test_batch";
const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
await client.createRun({
id: runId,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world" },
trace_id: runId,
dotted_order: dottedOrder,
});
// Wait for first batch to send
await new Promise((resolve) => setTimeout(resolve, 300));
const endTime = Math.floor(new Date().getTime() / 1000);
// A root run finishing triggers the second batch
await client.updateRun(runId, {
outputs: { output: ["Hi"] },
dotted_order: dottedOrder,
trace_id: runId,
end_time: endTime,
});
const runId2 = uuidv4();
const dottedOrder2 = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId2
);
// Will send in a third batch, even though it's triggered around the same time as the update
await client.createRun({
id: runId2,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world 2" },
trace_id: runId2,
dotted_order: dottedOrder2,
});
await new Promise((resolve) => setTimeout(resolve, 300));
const calledRequestParam: any = callSpy.mock.calls[0][2];
const calledRequestParam2: any = callSpy.mock.calls[1][2];
const calledRequestParam3: any = callSpy.mock.calls[2][2];
expect(await parseMockRequestBody(calledRequestParam?.body)).toEqual({
post: [
expect.objectContaining({
id: runId,
run_type: "llm",
inputs: {
text: "hello world",
},
trace_id: runId,
dotted_order: dottedOrder,
}),
],
patch: [],
});
expect(await parseMockRequestBody(calledRequestParam2?.body)).toEqual({
post: [],
patch: [
expect.objectContaining({
id: runId,
dotted_order: dottedOrder,
trace_id: runId,
end_time: endTime,
outputs: {
output: ["Hi"],
},
}),
],
});
expect(await parseMockRequestBody(calledRequestParam3?.body)).toEqual({
post: [
expect.objectContaining({
id: runId2,
run_type: "llm",
inputs: {
text: "hello world 2",
},
trace_id: runId2,
dotted_order: dottedOrder2,
}),
],
patch: [],
});
});
it("should not trigger a batch on root run end and instead batch call with previous batch if blockOnRootRunFinalization is false", async () => {
const client = new Client({
apiKey: "test-api-key",
autoBatchTracing: true,
blockOnRootRunFinalization: false,
});
const callSpy = jest
.spyOn((client as any).batchIngestCaller, "call")
.mockResolvedValue({
ok: true,
text: () => "",
});
jest.spyOn(client as any, "_getServerInfo").mockImplementation(() => {
return {
version: "foo",
batch_ingest_config: { ...extraBatchIngestConfig },
};
});
const projectName = "__test_batch";
const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
await client.createRun({
id: runId,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world" },
trace_id: runId,
dotted_order: dottedOrder,
});
expect((client as any).autoBatchQueue.items.length).toBe(1);
// Wait for first batch to send
await new Promise((resolve) => setTimeout(resolve, 300));
expect((client as any).autoBatchQueue.items.length).toBe(0);
const endTime = Math.floor(new Date().getTime() / 1000);
// Start the the second batch
await client.updateRun(runId, {
outputs: { output: ["Hi"] },
dotted_order: dottedOrder,
trace_id: runId,
end_time: endTime,
});
const runId2 = uuidv4();
const dottedOrder2 = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId2
);
// Should aggregate on the second batch
await client.createRun({
id: runId2,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world 2" },
trace_id: runId2,
dotted_order: dottedOrder2,
});
// 2 runs in the queue
expect((client as any).autoBatchQueue.items.length).toBe(2);
await client.awaitPendingTraceBatches();
expect((client as any).autoBatchQueue.items.length).toBe(0);
expect(callSpy.mock.calls.length).toEqual(2);
const calledRequestParam: any = callSpy.mock.calls[0][2];
const calledRequestParam2: any = callSpy.mock.calls[1][2];
expect(await parseMockRequestBody(calledRequestParam?.body)).toEqual({
post: [
expect.objectContaining({
id: runId,
run_type: "llm",
inputs: {
text: "hello world",
},
trace_id: runId,
dotted_order: dottedOrder,
}),
],
patch: [],
});
expect(await parseMockRequestBody(calledRequestParam2?.body)).toEqual({
post: [
expect.objectContaining({
id: runId2,
run_type: "llm",
inputs: {
text: "hello world 2",
},
trace_id: runId2,
dotted_order: dottedOrder2,
}),
],
patch: [
expect.objectContaining({
id: runId,
dotted_order: dottedOrder,
trace_id: runId,
end_time: endTime,
outputs: {
output: ["Hi"],
},
}),
],
});
});
it("should send traces above the batch size and see even batches", async () => {
const client = new Client({
apiKey: "test-api-key",
batchSizeBytesLimit: 10000,
autoBatchTracing: true,
});
const callSpy = jest
.spyOn((client as any).batchIngestCaller, "call")
.mockResolvedValue({
ok: true,
text: () => "",
});
jest.spyOn(client as any, "_getServerInfo").mockImplementation(() => {
return {
version: "foo",
batch_ingest_config: { ...extraBatchIngestConfig },
};
});
const projectName = "__test_batch";
const runIds = await Promise.all(
[...Array(15)].map(async (_, i) => {
const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
const params = mergeRuntimeEnvIntoRunCreate({
id: runId,
project_name: projectName,
name: "test_run " + i,
run_type: "llm",
inputs: { text: "hello world " + i },
trace_id: runId,
dotted_order: dottedOrder,
} as RunCreate);
// Allow some extra space for other request properties
const mockRunSize = 950;
const padCount = mockRunSize - JSON.stringify(params).length;
params.inputs.text = params.inputs.text + "x".repeat(padCount);
await client.createRun(params);
return runId;
})
);
await new Promise((resolve) => setTimeout(resolve, 10));
const calledRequestParam: any = callSpy.mock.calls[0][2];
const calledRequestParam2: any = callSpy.mock.calls[1][2];
const firstBatchBody = await parseMockRequestBody(
calledRequestParam?.body
);
const secondBatchBody = await parseMockRequestBody(
calledRequestParam2?.body
);
const initialBatchBody =
firstBatchBody.post.length === 10 ? firstBatchBody : secondBatchBody;
const followupBatchBody =
firstBatchBody.post.length === 10 ? secondBatchBody : firstBatchBody;
// Queue should drain as soon as size limit is reached,
// sending both batches
expect(initialBatchBody).toEqual({
post: runIds.slice(0, 10).map((runId, i) =>
expect.objectContaining({
id: runId,
run_type: "llm",
inputs: {
text: expect.stringContaining("hello world " + i),
},
trace_id: runId,
})
),
patch: [],
});
expect(followupBatchBody).toEqual({
post: runIds.slice(10).map((runId, i) =>
expect.objectContaining({
id: runId,
run_type: "llm",
inputs: {
text: expect.stringContaining("hello world " + (i + 10)),
},
trace_id: runId,
})
),
patch: [],
});
});
it("a very low batch size limit should be equivalent to single calls", async () => {
const client = new Client({
apiKey: "test-api-key",
batchSizeBytesLimit: 1,
autoBatchTracing: true,
});
const callSpy = jest
.spyOn((client as any).batchIngestCaller, "call")
.mockResolvedValue({
ok: true,
text: () => "",
});
jest.spyOn(client as any, "_getServerInfo").mockImplementation(() => {
return {
version: "foo",
batch_ingest_config: {
...extraBatchIngestConfig,
},
};
});
const projectName = "__test_batch";
const runIds = await Promise.all(
[...Array(4)].map(async (_, i) => {
const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
await client.createRun({
id: runId,
project_name: projectName,
name: "test_run " + i,
run_type: "llm",
inputs: { text: "hello world " + i },
trace_id: runId,
dotted_order: dottedOrder,
});
return runId;
})
);
await new Promise((resolve) => setTimeout(resolve, 300));
expect(callSpy.mock.calls.length).toEqual(4);
const calledRequestParam: any = callSpy.mock.calls[0][2];
const calledRequestParam2: any = callSpy.mock.calls[1][2];
const calledRequestParam3: any = callSpy.mock.calls[2][2];
const calledRequestParam4: any = callSpy.mock.calls[3][2];
// Queue should drain as soon as byte size limit of 1 is reached,
// sending each call individually
expect(await parseMockRequestBody(calledRequestParam?.body)).toEqual({
post: [
expect.objectContaining({
id: runIds[0],
run_type: "llm",
inputs: {
text: "hello world 0",
},
trace_id: runIds[0],
}),
],
patch: [],
});
expect(await parseMockRequestBody(calledRequestParam2?.body)).toEqual({
post: [
expect.objectContaining({
id: runIds[1],
run_type: "llm",
inputs: {
text: "hello world 1",
},
trace_id: runIds[1],
}),
],
patch: [],
});
expect(await parseMockRequestBody(calledRequestParam3?.body)).toEqual({
post: [
expect.objectContaining({
id: runIds[2],
run_type: "llm",
inputs: {
text: "hello world 2",
},
trace_id: runIds[2],
}),
],
patch: [],
});
expect(await parseMockRequestBody(calledRequestParam4?.body)).toEqual({
post: [
expect.objectContaining({
id: runIds[3],
run_type: "llm",
inputs: {
text: "hello world 3",
},
trace_id: runIds[3],
}),
],
patch: [],
});
});
it("Use batch endpoint if info call fails", async () => {
const client = new Client({
apiKey: "test-api-key",
autoBatchTracing: true,
});
const callSpy = jest
.spyOn((client as any).batchIngestCaller, "call")
.mockResolvedValue({
ok: true,
text: () => "",
});
jest.spyOn(client as any, "_getServerInfo").mockImplementation(() => {
throw new Error("Totally expected mock error");
});
const projectName = "__test_batch";
const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
await client.createRun({
id: runId,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: { text: "hello world" },
trace_id: runId,
dotted_order: dottedOrder,
});
await client.awaitPendingTraceBatches();
const calledRequestParam: any = callSpy.mock.calls[0][2];
expect(
await parseMockRequestBody(calledRequestParam?.body)
).toMatchObject({
post: [
{
id: runId,
session_name: projectName,
extra: expect.anything(),
start_time: expect.any(Number),
name: "test_run",
run_type: "llm",
inputs: { text: "hello world" },
trace_id: runId,
dotted_order: dottedOrder,
},
],
patch: [],
});
expect(callSpy).toHaveBeenCalledWith(
_getFetchImplementation(),
"https://api.smith.langchain.com/runs/batch",
expect.objectContaining({
body: expect.any(String),
})
);
});
it("Should handle circular values", async () => {
const client = new Client({
apiKey: "test-api-key",
autoBatchTracing: true,
});
const callSpy = jest
.spyOn((client as any).batchIngestCaller, "call")
.mockResolvedValue({
ok: true,
text: () => "",
});
jest.spyOn(client as any, "_getServerInfo").mockImplementation(() => {
return {
version: "foo",
batch_ingest_config: { ...extraBatchIngestConfig },
};
});
const projectName = "__test_batch";
const a: Record<string, any> = {};
const b: Record<string, any> = {};
a.b = b;
b.a = a;
const runId = uuidv4();
const dottedOrder = convertToDottedOrderFormat(
new Date().getTime() / 1000,
runId
);
await client.createRun({
id: runId,
project_name: projectName,
name: "test_run",
run_type: "llm",
inputs: a,
trace_id: runId,
dotted_order: dottedOrder,
});
const endTime = Math.floor(new Date().getTime() / 1000);
await client.updateRun(runId, {
outputs: b,
dotted_order: dottedOrder,
trace_id: runId,
end_time: endTime,
});
await client.awaitPendingTraceBatches();
const calledRequestParam: any = callSpy.mock.calls[0][2];
expect(await parseMockRequestBody(calledRequestParam?.body)).toEqual({
post: [
expect.objectContaining({
id: runId,
run_type: "llm",
inputs: {
b: {
a: {
result: "[Circular]",
},
},
},
outputs: {
a:
// Stringification happens at a different level
endpointType === "batch"
? {
result: "[Circular]",
}
: {
b: {
result: "[Circular]",
},
},
},
end_time: endTime,
trace_id: runId,
dotted_order: dottedOrder,
}),
],
patch: [],
});
expect(callSpy).toHaveBeenCalledWith(
_getFetchImplementation(),
expectedTraceURL,
expect.objectContaining({
body: expect.any(endpointType === "batch" ? String : ArrayBuffer),
})
);
});
}
);
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/traceable.test.ts | import { jest } from "@jest/globals";
import { RunTree, RunTreeConfig } from "../run_trees.js";
import { _LC_CONTEXT_VARIABLES_KEY } from "../singletons/constants.js";
import { ROOT, traceable, withRunTree } from "../traceable.js";
import { getAssumedTreeFromCalls } from "./utils/tree.js";
import { mockClient } from "./utils/mock_client.js";
import { Client, overrideFetchImplementation } from "../index.js";
import { AsyncLocalStorageProviderSingleton } from "../singletons/traceable.js";
import { KVMap } from "../schemas.js";
test("basic traceable implementation", async () => {
const { client, callSpy } = mockClient();
const llm = traceable(
async function* llm(input: string) {
const response = input.repeat(2).split("");
for (const char of response) {
yield char;
}
},
{ client, tracingEnabled: true }
);
// eslint-disable-next-line @typescript-eslint/no-unused-vars
for await (const _ of llm("Hello world")) {
// pass
}
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["llm:0"],
edges: [],
});
});
test("404s should only log, not throw an error", async () => {
const overriddenFetch = jest.fn(() =>
Promise.resolve({
ok: false,
status: 404,
statusText: "Expected test error",
json: () => Promise.resolve({}),
text: () => Promise.resolve("Expected test error."),
})
);
overrideFetchImplementation(overriddenFetch);
const client = new Client({
apiUrl: "https://foobar.notreal",
autoBatchTracing: false,
});
const llm = traceable(
async function* llm(input: string) {
const response = input.repeat(2).split("");
for (const char of response) {
yield char;
}
},
{ client, tracingEnabled: true }
);
// eslint-disable-next-line @typescript-eslint/no-unused-vars
for await (const _ of llm("Hello world")) {
// pass
}
expect(overriddenFetch).toHaveBeenCalled();
});
test("nested traceable implementation", async () => {
const { client, callSpy } = mockClient();
const llm = traceable(async function llm(input: string) {
return input.repeat(2);
});
const str = traceable(async function* str(input: string) {
const response = input.split("").reverse();
for (const char of response) {
yield char;
}
});
const chain = traceable(
async function chain(input: string) {
const question = await llm(input);
let answer = "";
for await (const char of str(question)) {
answer += char;
}
return { question, answer };
},
{ client, tracingEnabled: true }
);
const result = await chain("Hello world");
expect(result).toEqual({
question: "Hello worldHello world",
answer: "dlrow olleHdlrow olleH",
});
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["chain:0", "llm:1", "str:2"],
edges: [
["chain:0", "llm:1"],
["chain:0", "str:2"],
],
});
});
test("nested traceable passes through LangChain context vars", (done) => {
// LangChain context variables stored under _LC_CONTEXT_VARIABLES_KEY in the
// AsyncLocalStorage store must remain visible inside every traceable frame
// (root, async child, async-generator child) and after the chain resolves.
// Uses the done-callback style because the body runs inside alsInstance.run.
const alsInstance = AsyncLocalStorageProviderSingleton.getInstance();
alsInstance.run(
{
[_LC_CONTEXT_VARIABLES_KEY]: { foo: "bar" },
} as any,
// eslint-disable-next-line @typescript-eslint/no-misused-promises
async () => {
try {
expect(
(alsInstance.getStore() as any)?.[_LC_CONTEXT_VARIABLES_KEY]?.foo
).toEqual("bar");
const { client, callSpy } = mockClient();
const llm = traceable(async function llm(input: string) {
// Context var must survive traceable's own ALS frame swap.
expect(
(alsInstance.getStore() as any)?.[_LC_CONTEXT_VARIABLES_KEY]?.foo
).toEqual("bar");
return input.repeat(2);
});
const str = traceable(async function* str(input: string) {
const response = input.split("").reverse();
for (const char of response) {
yield char;
}
// Also checked after the generator has yielded everything.
expect(
(alsInstance.getStore() as any)?.[_LC_CONTEXT_VARIABLES_KEY]?.foo
).toEqual("bar");
});
const chain = traceable(
async function chain(input: string) {
expect(
(alsInstance.getStore() as any)?.[_LC_CONTEXT_VARIABLES_KEY]?.foo
).toEqual("bar");
const question = await llm(input);
let answer = "";
for await (const char of str(question)) {
answer += char;
}
return { question, answer };
},
{ client, tracingEnabled: true }
);
const result = await chain("Hello world");
expect(result).toEqual({
question: "Hello worldHello world",
answer: "dlrow olleHdlrow olleH",
});
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["chain:0", "llm:1", "str:2"],
edges: [
["chain:0", "llm:1"],
["chain:0", "str:2"],
],
});
// And the var is still intact once tracing is fully unwound.
expect(
(alsInstance.getStore() as any)?.[_LC_CONTEXT_VARIABLES_KEY]?.foo
).toEqual("bar");
done();
} catch (e) {
done(e);
}
}
);
});
test("trace circular input and output objects", async () => {
// Serialization of traced inputs/outputs must not infinite-loop on cyclic
// object graphs: the cycle is cut and logged as { result: "[Circular]" },
// while acyclic siblings (normalParam) are logged verbatim.
const { client, callSpy } = mockClient();
const a: Record<string, any> = {};
const b: Record<string, any> = {};
a.b = b;
b.a = a;
const llm = traceable(
async function foo(_: Record<string, any>) {
return a;
},
{ client, tracingEnabled: true }
);
// `a` appears twice (a, a2) to check repeated references are each handled.
const input = {
a,
a2: a,
normalParam: {
test: true,
},
};
await llm(input);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["foo:0"],
edges: [],
data: {
"foo:0": {
inputs: {
a: {
b: {
a: {
result: "[Circular]",
},
},
},
a2: {
b: {
a: {
result: "[Circular]",
},
},
},
normalParam: {
test: true,
},
},
outputs: {
b: {
a: {
result: "[Circular]",
},
},
},
},
},
});
});
test("passing run tree manually", async () => {
// Explicit run-tree propagation: the first argument of a traceable can be a
// RunTree (here seeded with ROOT at the top), and passing it down manually
// produces the same parent/child chain as implicit ALS propagation.
const { client, callSpy } = mockClient();
const child = traceable(
async (runTree: RunTree, depth = 0): Promise<number> => {
// Recursion depth 0 -> 1 -> 2 gives a three-deep child chain per call.
if (depth < 2) {
return child(runTree, depth + 1);
}
return 3;
},
{ name: "child" }
);
const parent = traceable(
async function parent(runTree: RunTree) {
const first = await child(runTree);
const second = await child(runTree);
return first + second;
},
{ client, tracingEnabled: true }
);
await parent(ROOT);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: [
"parent:0",
"child:1",
"child:2",
"child:3",
"child:4",
"child:5",
"child:6",
],
edges: [
["parent:0", "child:1"],
["child:1", "child:2"],
["child:2", "child:3"],
["parent:0", "child:4"],
["child:4", "child:5"],
["child:5", "child:6"],
],
});
});
// Distributed tracing: a RunTree created out-of-band (e.g. from another
// service) is handed to withRunTree so subsequent traceable calls nest under
// it rather than starting a new trace.
describe("distributed tracing", () => {
it("default", async () => {
const { client, callSpy } = mockClient();
const child = traceable(
async (depth = 0): Promise<number> => {
if (depth < 2) return child(depth + 1);
return 3;
},
{ name: "child" }
);
const parent = traceable(async function parent() {
const first = await child();
const second = await child();
return first + second;
});
const clientRunTree = new RunTree({
name: "client",
client,
tracingEnabled: true,
});
await clientRunTree.postRun();
// do nothing with the client run tree
await clientRunTree.patchRun();
// parent() runs inside withRunTree, so it becomes a child of client:0.
const response = await withRunTree(clientRunTree, () => parent());
expect(response).toBe(6);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: [
"client:0",
"parent:1",
"child:2",
"child:3",
"child:4",
"child:5",
"child:6",
"child:7",
],
edges: [
["client:0", "parent:1"],
["parent:1", "child:2"],
["child:2", "child:3"],
["child:3", "child:4"],
["parent:1", "child:5"],
["child:5", "child:6"],
["child:6", "child:7"],
],
});
});
it("sync function", async () => {
// Same tree, but the withRunTree callback is synchronous: the promise it
// kicks off (captured in promiseOutside) still nests under client:0 even
// though it is awaited only after withRunTree has returned.
const { client, callSpy } = mockClient();
const child = traceable(
async (depth = 0): Promise<number> => {
if (depth < 2) return child(depth + 1);
return 3;
},
{ name: "child" }
);
const parent = traceable(async function parent() {
const first = await child();
const second = await child();
return first + second;
});
const clientRunTree = new RunTree({
name: "client",
client,
tracingEnabled: true,
});
await clientRunTree.postRun();
await clientRunTree.patchRun();
let promiseOutside: Promise<unknown> = Promise.resolve();
const response = await withRunTree(clientRunTree, () => {
promiseOutside = parent();
});
expect(response).toBeUndefined();
await promiseOutside;
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: [
"client:0",
"parent:1",
"child:2",
"child:3",
"child:4",
"child:5",
"child:6",
"child:7",
],
edges: [
["client:0", "parent:1"],
["parent:1", "child:2"],
["child:2", "child:3"],
["child:3", "child:4"],
["parent:1", "child:5"],
["child:5", "child:6"],
["child:6", "child:7"],
],
});
});
});
// Traceable wrapping of async generators (and async functions resolving to
// async iterables): yielded values are aggregated into outputs.outputs, errors
// mid-stream are recorded on the run, and early consumer `break` is logged as
// error: "Cancelled".
describe("async generators", () => {
test("success", async () => {
const { client, callSpy } = mockClient();
const iterableTraceable = traceable(
async function* giveMeNumbers() {
for (let i = 0; i < 5; i++) {
yield i;
}
},
{ client, tracingEnabled: true }
);
const numbers: number[] = [];
for await (const num of iterableTraceable()) {
numbers.push(num);
}
expect(numbers).toEqual([0, 1, 2, 3, 4]);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["giveMeNumbers:0"],
edges: [],
data: {
"giveMeNumbers:0": {
outputs: { outputs: [0, 1, 2, 3, 4] },
},
},
});
});
test("error", async () => {
// A throw mid-iteration records the error AND the values yielded so far.
const { client, callSpy } = mockClient();
const throwTraceable = traceable(
async function* () {
for (let i = 0; i < 5; i++) {
yield i;
if (i === 2) throw new Error("I am bad");
}
},
{ name: "throwTraceable", client, tracingEnabled: true }
);
await expect(async () => {
// eslint-disable-next-line @typescript-eslint/no-unused-vars
for await (const _ of throwTraceable()) {
// pass
}
}).rejects.toThrow("I am bad");
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["throwTraceable:0"],
edges: [],
data: {
"throwTraceable:0": {
error: "Error: I am bad",
outputs: { outputs: [0, 1, 2] },
},
},
});
});
test("break", async () => {
// Consumer breaking out of for-await closes the generator; the run is
// marked "Cancelled" with only the consumed values in outputs.
const { client, callSpy } = mockClient();
const iterableTraceable = traceable(
async function* giveMeNumbers() {
for (let i = 0; i < 5; i++) {
yield i;
}
},
{ client, tracingEnabled: true }
);
// eslint-disable-next-line @typescript-eslint/no-unused-vars
for await (const _ of iterableTraceable()) {
break;
}
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["giveMeNumbers:0"],
edges: [],
data: {
"giveMeNumbers:0": {
outputs: { outputs: [0] },
error: "Cancelled",
},
},
});
});
// https://github.com/nodejs/node/issues/42237
test("nested invocation", async () => {
const { client, callSpy } = mockClient();
const child = traceable(
async function* child() {
for (let i = 0; i < 5; i++) yield i;
},
{ name: "child", client, tracingEnabled: true }
);
const parent = traceable(
async function* parent() {
for await (const num of child()) yield num;
for await (const num of child()) yield 4 - num;
},
{ name: "parent", client, tracingEnabled: true }
);
const numbers: number[] = [];
for await (const num of parent()) {
numbers.push(num);
}
expect(numbers).toEqual([0, 1, 2, 3, 4, 4, 3, 2, 1, 0]);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["parent:0", "child:1", "child:2"],
edges: [
["parent:0", "child:1"],
["parent:0", "child:2"],
],
});
});
test("in promise success", async () => {
// An async function that RESOLVES to an async generator is traced the
// same as a direct async generator.
const { client, callSpy } = mockClient();
async function giveMeGiveMeNumbers() {
async function* giveMeNumbers() {
for (let i = 0; i < 5; i++) {
yield i;
}
}
return giveMeNumbers();
}
const it = traceable(giveMeGiveMeNumbers, { client, tracingEnabled: true });
const numbers: number[] = [];
for await (const num of await it()) {
numbers.push(num);
}
expect(numbers).toEqual([0, 1, 2, 3, 4]);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["giveMeGiveMeNumbers:0"],
edges: [],
data: {
"giveMeGiveMeNumbers:0": {
outputs: { outputs: [0, 1, 2, 3, 4] },
},
},
});
});
test("in promise error", async () => {
const { client, callSpy } = mockClient();
async function giveMeGiveMeNumbers() {
async function* giveMeNumbers() {
for (let i = 0; i < 5; i++) {
yield i;
if (i === 2) throw new Error("I am bad");
}
}
return giveMeNumbers();
}
const it = traceable(giveMeGiveMeNumbers, { client, tracingEnabled: true });
await expect(async () => {
// eslint-disable-next-line @typescript-eslint/no-unused-vars
for await (const _ of await it()) {
// pass
}
}).rejects.toThrow("I am bad");
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["giveMeGiveMeNumbers:0"],
edges: [],
data: {
"giveMeGiveMeNumbers:0": {
error: "Error: I am bad",
outputs: { outputs: [0, 1, 2] },
},
},
});
});
test("in promise break", async () => {
const { client, callSpy } = mockClient();
async function giveMeGiveMeNumbers() {
async function* giveMeNumbers() {
for (let i = 0; i < 5; i++) {
yield i;
}
}
return giveMeNumbers();
}
const it = traceable(giveMeGiveMeNumbers, { client, tracingEnabled: true });
// eslint-disable-next-line @typescript-eslint/no-unused-vars
for await (const _ of await it()) {
break;
}
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["giveMeGiveMeNumbers:0"],
edges: [],
data: {
"giveMeGiveMeNumbers:0": {
outputs: { outputs: [0] },
error: "Cancelled",
},
},
});
});
// https://github.com/nodejs/node/issues/42237
test("in promise nested invocation", async () => {
const { client, callSpy } = mockClient();
const child = traceable(async function child() {
async function* child() {
for (let i = 0; i < 5; i++) yield i;
}
return child();
});
async function parent() {
async function* parent() {
for await (const num of await child()) yield num;
for await (const num of await child()) yield 4 - num;
}
return parent();
}
const it = traceable(parent, { client, tracingEnabled: true });
const numbers: number[] = [];
for await (const num of await it()) {
numbers.push(num);
}
expect(numbers).toEqual([0, 1, 2, 3, 4, 4, 3, 2, 1, 0]);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["parent:0", "child:1", "child:2"],
edges: [
["parent:0", "child:1"],
["parent:0", "child:2"],
],
});
});
test("readable stream", async () => {
// A returned ReadableStream is treated as an async iterable for output
// aggregation purposes.
const { client, callSpy } = mockClient();
const stream = traceable(
async function stream() {
const readStream = new ReadableStream({
async pull(controller) {
for (let i = 0; i < 5; i++) {
controller.enqueue(i);
}
controller.close();
},
});
return readStream;
},
{ client, tracingEnabled: true }
);
const numbers: number[] = [];
for await (const num of (await stream()) as unknown as AsyncGenerator<number>) {
numbers.push(num);
}
expect(numbers).toEqual([0, 1, 2, 3, 4]);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["stream:0"],
edges: [],
data: {
"stream:0": {
outputs: { outputs: [0, 1, 2, 3, 4] },
},
},
});
});
test("iterable with props", async () => {
// An object that is async-iterable but also carries extra properties must
// keep those properties accessible on the traced return value.
const { client, callSpy } = mockClient();
const iterableTraceable = traceable(
function iterableWithProps() {
return {
*[Symbol.asyncIterator]() {
yield 0;
},
prop: "value",
};
},
{
client,
tracingEnabled: true,
}
);
const numbers: number[] = [];
const iterableWithProps = await iterableTraceable();
for await (const num of iterableWithProps) {
numbers.push(num);
}
expect(numbers).toEqual([0]);
expect(iterableWithProps.prop).toBe("value");
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["iterableWithProps:0"],
edges: [],
data: {
"iterableWithProps:0": {
outputs: { outputs: [0] },
},
},
});
});
});
// "Deferred" inputs are arguments whose values are not known at call time
// (generators, streams, promises). Tracing must log the MATERIALIZED values
// in `inputs` while still handing the live object to the wrapped function.
describe("deferred input", () => {
test("generator", async () => {
const { client, callSpy } = mockClient();
const parrotStream = traceable(
async function* parrotStream(input: Generator<string>) {
for (const token of input) {
yield token;
}
},
{ client, tracingEnabled: true }
);
const inputGenerator = function* () {
for (const token of "Hello world".split(" ")) {
yield token;
}
};
const tokens: string[] = [];
for await (const token of parrotStream(inputGenerator())) {
tokens.push(token);
}
expect(tokens).toEqual(["Hello", "world"]);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["parrotStream:0"],
edges: [],
data: {
"parrotStream:0": {
// Logged input is the consumed token list, not the generator object.
inputs: { input: ["Hello", "world"] },
outputs: { outputs: ["Hello", "world"] },
},
},
});
});
test("async generator", async () => {
const { client, callSpy } = mockClient();
const inputStream = async function* inputStream() {
for (const token of "Hello world".split(" ")) {
yield token;
}
};
const parrotStream = traceable(
async function* parrotStream(input: AsyncGenerator<string>) {
for await (const token of input) {
yield token;
}
},
{ client, tracingEnabled: true }
);
const tokens: string[] = [];
for await (const token of parrotStream(inputStream())) {
tokens.push(token);
}
expect(tokens).toEqual(["Hello", "world"]);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["parrotStream:0"],
edges: [],
data: {
"parrotStream:0": {
inputs: { input: ["Hello", "world"] },
outputs: { outputs: ["Hello", "world"] },
},
},
});
});
test("readable stream", async () => {
const { client, callSpy } = mockClient();
const parrotStream = traceable(
async function* parrotStream(input: ReadableStream<string>) {
for await (const token of input as unknown as AsyncGenerator<string>) {
yield token;
}
},
{ client, tracingEnabled: true }
);
const readStream = new ReadableStream({
async start(controller) {
for (const token of "Hello world".split(" ")) {
controller.enqueue(token);
}
controller.close();
},
});
const tokens: string[] = [];
for await (const token of parrotStream(readStream)) {
tokens.push(token);
}
expect(tokens).toEqual(["Hello", "world"]);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["parrotStream:0"],
edges: [],
data: {
"parrotStream:0": {
inputs: { input: ["Hello", "world"] },
outputs: { outputs: ["Hello", "world"] },
},
},
});
});
test("readable stream reader", async () => {
// Consuming via getReader() instead of for-await must still work: tracing
// must not lock or replace the stream in a way that breaks getReader().
const { client, callSpy } = mockClient();
const parrotStream = traceable(
async function* parrotStream(input: ReadableStream<string>) {
const reader = input.getReader();
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
yield value;
}
} finally {
reader.releaseLock();
}
},
{ client, tracingEnabled: true }
);
const readStream = new ReadableStream({
async start(controller) {
for (const token of "Hello world".split(" ")) {
controller.enqueue(token);
}
controller.close();
},
});
const tokens: string[] = [];
for await (const token of parrotStream(readStream)) {
tokens.push(token);
}
expect(tokens).toEqual(["Hello", "world"]);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["parrotStream:0"],
edges: [],
data: {
"parrotStream:0": {
inputs: { input: ["Hello", "world"] },
outputs: { outputs: ["Hello", "world"] },
},
},
});
});
test("promise", async () => {
// The wrapped function must receive a genuine Promise (checked inside),
// while the log records the resolved value.
const { client, callSpy } = mockClient();
const parrotStream = traceable(
async function* parrotStream(input: Promise<string[]>) {
// eslint-disable-next-line no-instanceof/no-instanceof
if (!(input instanceof Promise)) {
throw new Error("Input must be a promise");
}
for (const token of await input) {
yield token;
}
},
{ client, tracingEnabled: true }
);
const tokens: string[] = [];
for await (const token of parrotStream(
Promise.resolve(["Hello", "world"])
)) {
tokens.push(token);
}
expect(tokens).toEqual(["Hello", "world"]);
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["parrotStream:0"],
edges: [],
data: {
"parrotStream:0": {
inputs: { input: ["Hello", "world"] },
outputs: { outputs: ["Hello", "world"] },
},
},
});
});
test("promise rejection", async () => {
// A rejected promise input surfaces as an error-shaped logged input plus
// the run-level error, and the rejection propagates to the caller.
const { client, callSpy } = mockClient();
const parrotStream = traceable(
async function parrotStream(input: Promise<string[]>) {
return await input;
},
{ client, tracingEnabled: true }
);
await expect(async () => {
await parrotStream(Promise.reject(new Error("Rejected!")));
}).rejects.toThrow("Rejected!");
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["parrotStream:0"],
edges: [],
data: {
"parrotStream:0": {
inputs: { input: { error: {} } },
error: "Error: Rejected!",
},
},
});
});
test("promise rejection, callback handling", async () => {
// Same as above but the rejection flows through .then() chaining rather
// than await — both paths must be handled identically.
const { client, callSpy } = mockClient();
const parrotStream = traceable(
async function parrotStream(input: Promise<string[]>) {
return input.then((value) => value);
},
{ client, tracingEnabled: true }
);
await expect(async () => {
await parrotStream(Promise.reject(new Error("Rejected!")));
}).rejects.toThrow("Rejected!");
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["parrotStream:0"],
edges: [],
data: {
"parrotStream:0": {
inputs: { input: { error: {} } },
error: "Error: Rejected!",
},
},
});
});
});
// Tracing of SYNC generator functions: the traceable resolves to an iterator
// whose yields (and final `return` value, when present) are aggregated into
// outputs.outputs.
describe("generator", () => {
// Drains a sync iterator, collecting every value INCLUDING the value
// attached to the final { done: true } result (the generator's return).
function gatherAll(iterator: Iterator<unknown>) {
const chunks: unknown[] = [];
// eslint-disable-next-line no-constant-condition
while (true) {
const next = iterator.next();
chunks.push(next.value);
if (next.done) break;
}
return chunks;
}
test("yield", async () => {
const { client, callSpy } = mockClient();
function* generator() {
for (let i = 0; i < 3; ++i) yield i;
}
const traced = traceable(generator, { client, tracingEnabled: true });
// Traced iterator must produce the same sequence as the raw generator.
expect(gatherAll(await traced())).toEqual(gatherAll(generator()));
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["generator:0"],
edges: [],
data: {
"generator:0": {
outputs: { outputs: [0, 1, 2] },
},
},
});
});
test("with return", async () => {
const { client, callSpy } = mockClient();
function* generator() {
for (let i = 0; i < 3; ++i) yield i;
return 3;
}
const traced = traceable(generator, { client, tracingEnabled: true });
expect(gatherAll(await traced())).toEqual(gatherAll(generator()));
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["generator:0"],
edges: [],
// Final return value (3) is included alongside the yields.
data: { "generator:0": { outputs: { outputs: [0, 1, 2, 3] } } },
});
});
test("nested", async () => {
// An untraced inner generator contributes to the outer run's outputs only.
const { client, callSpy } = mockClient();
function* generator() {
function* child() {
for (let i = 0; i < 3; ++i) yield i;
}
for (let i = 0; i < 2; ++i) {
for (const num of child()) yield num;
}
return 3;
}
const traced = traceable(generator, { client, tracingEnabled: true });
expect(gatherAll(await traced())).toEqual(gatherAll(generator()));
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["generator:0"],
edges: [],
data: {
"generator:0": {
outputs: { outputs: [0, 1, 2, 0, 1, 2, 3] },
},
},
});
});
});
test("metadata", async () => {
// Metadata supplied in the traceable config must surface on the logged run
// under extra.metadata, alongside the function's output.
const { client, callSpy } = mockClient();
const traceConfig = {
client,
name: "main",
metadata: { customValue: "hello" },
tracingEnabled: true,
};
const main = traceable(async (): Promise<number> => 42, traceConfig);
await main();
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["main:0"],
edges: [],
data: {
"main:0": {
extra: { metadata: { customValue: "hello" } },
outputs: { outputs: 42 },
},
},
});
});
test("argsConfigPath", async () => {
// argsConfigPath points at an argument property ([1, "langsmithExtra"] =
// second argument's `langsmithExtra` key) holding per-call RunTreeConfig
// overrides. Those overrides (name, tags, metadata) apply to the run and
// are STRIPPED from the logged inputs.
const { client, callSpy } = mockClient();
const main = traceable(
async (
value: number,
options: {
suffix: string;
langsmithExtra?: Partial<RunTreeConfig>;
}
): Promise<string> => `${value}${options.suffix}`,
{
client,
name: "main",
argsConfigPath: [1, "langsmithExtra"],
tracingEnabled: true,
}
);
await main(1, {
suffix: "hello",
langsmithExtra: {
name: "renamed",
tags: ["tag1", "tag2"],
metadata: { customValue: "hello" },
},
});
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
// Run name comes from the per-call override, not the config default.
nodes: ["renamed:0"],
edges: [],
data: {
"renamed:0": {
extra: { metadata: { customValue: "hello" } },
tags: ["tag1", "tag2"],
inputs: {
// langsmithExtra has been removed from the logged args.
args: [1, { suffix: "hello" }],
},
outputs: { outputs: "1hello" },
},
},
});
});
test("traceable continues execution when client throws error", async () => {
// A client whose createRun/updateRun always reject must not break the
// traced function itself: tracing failures are swallowed, the wrapped
// function still runs, and both client methods are still attempted.
const rejectingCall = () =>
jest
.fn()
.mockRejectedValue(new Error("Expected test client error") as never);
const errorClient = {
createRun: rejectingCall(),
updateRun: rejectingCall(),
};
const doubler = traceable(
async (value: number): Promise<number> => value * 2,
{
client: errorClient as unknown as Client,
name: "errorTest",
tracingEnabled: true,
}
);
expect(await doubler(5)).toBe(10);
expect(errorClient.createRun).toHaveBeenCalled();
expect(errorClient.updateRun).toHaveBeenCalled();
});
test("traceable with processInputs", async () => {
// processInputs redacts/transforms what gets LOGGED; the wrapped function
// must still receive the original, unredacted inputs.
const { client, callSpy } = mockClient();
const processInputs = jest.fn((inputs: Readonly<KVMap>) => {
return { ...inputs, password: "****" };
});
const func = traceable(
async function func(input: { username: string; password: string }) {
// The function should receive the original inputs
expect(input.password).toBe("secret");
return `Welcome, ${input.username}`;
},
{
client,
tracingEnabled: true,
processInputs,
}
);
await func({ username: "user1", password: "secret" });
// processInputs is called with the raw inputs, pre-redaction.
expect(processInputs).toHaveBeenCalledWith({
username: "user1",
password: "secret",
});
// Verify that the logged inputs have the password masked
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["func:0"],
edges: [],
data: {
"func:0": {
inputs: {
username: "user1",
password: "****",
},
outputs: { outputs: "Welcome, user1" },
},
},
});
});
test("traceable with processOutputs", async () => {
// processOutputs rewrites what gets LOGGED; the caller must still receive
// the function's original return value.
const { client, callSpy } = mockClient();
const processOutputs = jest.fn((_outputs: Readonly<KVMap>) => {
return { outputs: "Modified Output" };
});
const func = traceable(
async function func(input: string) {
return `Original Output for ${input}`;
},
{
client,
tracingEnabled: true,
processOutputs,
}
);
const result = await func("test");
// Scalar returns are wrapped as { outputs: value } before processing.
expect(processOutputs).toHaveBeenCalledWith({
outputs: "Original Output for test",
});
expect(result).toBe("Original Output for test");
// Verify that the tracing data shows the modified output
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["func:0"],
edges: [],
data: {
"func:0": {
inputs: { input: "test" },
outputs: { outputs: "Modified Output" },
},
},
});
});
test("traceable with processInputs throwing error does not affect invocation", async () => {
// An exception inside processInputs must be swallowed: the wrapped function
// still runs with its original inputs, and the raw inputs are logged.
const { client, callSpy } = mockClient();
const processInputs = jest.fn((_inputs: Readonly<KVMap>) => {
throw new Error("totally expected test processInputs error");
});
const func = traceable(
async function func(input: { username: string }) {
// Still invoked with the ORIGINAL inputs despite processInputs throwing
// (the assertions below prove it runs and returns normally).
return `Hello, ${input.username}`;
},
{
client,
tracingEnabled: true,
processInputs,
}
);
const result = await func({ username: "user1" });
expect(processInputs).toHaveBeenCalledWith({ username: "user1" });
expect(result).toBe("Hello, user1");
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["func:0"],
edges: [],
data: {
"func:0": {
// Falls back to the unprocessed inputs when processInputs fails.
inputs: { username: "user1" },
outputs: { outputs: "Hello, user1" },
},
},
});
});
test("traceable with processOutputs throwing error does not affect invocation", async () => {
// An exception inside processOutputs must be swallowed: the caller still
// gets the real return value, and the unprocessed outputs are logged.
const { client, callSpy } = mockClient();
const processOutputs = jest.fn((_outputs: Readonly<KVMap>) => {
// Fixed copy-paste: this mock previously claimed to be a
// "processInputs" error even though it exercises processOutputs.
throw new Error("totally expected test processOutputs error");
});
const func = traceable(
async function func(input: string) {
return `Original Output for ${input}`;
},
{
client,
tracingEnabled: true,
processOutputs,
}
);
const result = await func("test");
expect(processOutputs).toHaveBeenCalledWith({
outputs: "Original Output for test",
});
expect(result).toBe("Original Output for test");
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["func:0"],
edges: [],
data: {
"func:0": {
inputs: { input: "test" },
// Falls back to the unprocessed outputs when processOutputs fails.
outputs: { outputs: "Original Output for test" },
},
},
});
});
test("traceable async generator with processOutputs", async () => {
// For async generators, processOutputs receives the aggregated yield list
// ({ outputs: [...] }); the consumer still iterates the original values.
const { client, callSpy } = mockClient();
const processOutputs = jest.fn((outputs: Readonly<KVMap>) => {
return { outputs: outputs.outputs.map((output: number) => output * 2) };
});
const func = traceable(
async function* func() {
for (let i = 1; i <= 3; i++) {
yield i;
}
},
{
client,
tracingEnabled: true,
processOutputs,
}
);
const results: number[] = [];
for await (const value of func()) {
results.push(value);
}
expect(results).toEqual([1, 2, 3]); // Original values
expect(processOutputs).toHaveBeenCalledWith({ outputs: [1, 2, 3] });
// Tracing data should reflect the processed outputs
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["func:0"],
edges: [],
data: {
"func:0": {
outputs: { outputs: [2, 4, 6] }, // Processed outputs
},
},
});
});
test("traceable function returning object with async iterable and processOutputs", async () => {
// __finalTracedIteratorKey names the property on the return object holding
// the stream to aggregate; processOutputs then transforms that aggregate
// for logging while the consumer sees the original values and siblings.
const { client, callSpy } = mockClient();
const processOutputs = jest.fn((outputs: Readonly<KVMap>) => {
return { outputs: outputs.outputs.map((output: number) => output * 2) };
});
const func = traceable(
async function func() {
return {
data: "some data",
stream: (async function* () {
for (let i = 1; i <= 3; i++) {
yield i;
}
})(),
};
},
{
client,
tracingEnabled: true,
processOutputs,
__finalTracedIteratorKey: "stream",
}
);
const result = await func();
// Non-stream properties remain directly readable.
expect(result.data).toBe("some data");
const results: number[] = [];
for await (const value of result.stream) {
results.push(value);
}
expect(results).toEqual([1, 2, 3]);
expect(processOutputs).toHaveBeenCalledWith({ outputs: [1, 2, 3] });
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["func:0"],
edges: [],
data: {
"func:0": {
outputs: { outputs: [2, 4, 6] },
},
},
});
});
test("traceable generator function with processOutputs", async () => {
// Same processOutputs contract for SYNC generators: the aggregated yields
// are transformed for logging, the consumer iterates the originals.
const { client, callSpy } = mockClient();
const processOutputs = jest.fn((outputs: Readonly<KVMap>) => {
return { outputs: outputs.outputs.map((output: number) => output * 2) };
});
function* func() {
for (let i = 1; i <= 3; i++) {
yield i;
}
}
const tracedFunc = traceable(func, {
client,
tracingEnabled: true,
processOutputs,
});
const results: number[] = [];
for (const value of await tracedFunc()) {
results.push(value);
}
expect(results).toEqual([1, 2, 3]);
expect(processOutputs).toHaveBeenCalledWith({ outputs: [1, 2, 3] });
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["func:0"],
edges: [],
data: {
"func:0": {
outputs: { outputs: [2, 4, 6] },
},
},
});
});
test("traceable with complex outputs", async () => {
// When the function returns an object, processOutputs receives that object
// as-is (no { outputs: ... } wrapping) and can selectively redact fields;
// the caller still receives the full, unredacted object.
const { client, callSpy } = mockClient();
const processOutputs = jest.fn((outputs: Readonly<KVMap>) => {
return { data: "****", output: outputs.output, nested: outputs.nested };
});
const func = traceable(
async function func(input: string) {
return {
data: "some sensitive data",
output: `Original Output for ${input}`,
nested: {
key: "value",
nestedOutput: `Nested Output for ${input}`,
},
};
},
{
client,
tracingEnabled: true,
processOutputs,
}
);
const result = await func("test");
expect(result).toEqual({
data: "some sensitive data",
output: "Original Output for test",
nested: {
key: "value",
nestedOutput: "Nested Output for test",
},
});
expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
nodes: ["func:0"],
edges: [],
data: {
"func:0": {
inputs: { input: "test" },
outputs: {
// Only `data` was redacted; passthrough fields logged verbatim.
data: "****",
output: "Original Output for test",
nested: {
key: "value",
nestedOutput: "Nested Output for test",
},
},
},
},
});
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/run_trees.int.test.ts | import { Client } from "../client.js";
import * as uuid from "uuid";
import {
RunTree,
RunTreeConfig,
convertToDottedOrderFormat,
} from "../run_trees.js";
import {
toArray,
waitUntil,
pollRunsUntilCount,
sanitizePresignedUrls,
} from "./utils.js";
test.concurrent(
"Test post and patch run",
async () => {
// Integration test (hits a live LangSmith backend): builds a five-run tree
// (parent -> llm/chain/tool children, chain -> grandchild), posts and
// patches each run, then verifies the persisted hierarchy and dotted-order
// trace listing before deleting the project.
const projectName = `__test_run_tree_js ${uuid.v4()}`;
const langchainClient = new Client({ timeout_ms: 30_000 });
const parentRunConfig: RunTreeConfig = {
name: "parent_run",
run_type: "chain",
inputs: { text: "hello world" },
project_name: projectName,
serialized: {},
client: langchainClient,
};
const parent_run = new RunTree(parentRunConfig);
// A root run is its own trace: trace_id == id, dotted_order is its own
// (start_time, id) segment.
expect(parent_run.trace_id).toEqual(parent_run.id);
expect(parent_run.dotted_order).toEqual(
convertToDottedOrderFormat(parent_run.start_time, parent_run.id)
);
await parent_run.postRun();
const child_llm_run = parent_run.createChild({
name: "child_run",
run_type: "llm",
inputs: { text: "hello world" },
});
// Children append their own segment to the parent's dotted order and
// inherit the parent's trace_id.
expect(child_llm_run.dotted_order).toEqual(
parent_run.dotted_order +
"." +
convertToDottedOrderFormat(
child_llm_run.start_time,
child_llm_run.id,
2
)
);
expect(child_llm_run.trace_id).toEqual(parent_run.trace_id);
await child_llm_run.postRun();
// NOTE(review): createChild is used without `await` above but with `await`
// below — harmless if it is synchronous, but inconsistent; confirm.
const child_chain_run = await parent_run.createChild({
name: "child_chain_run",
run_type: "chain",
inputs: { text: "hello world" },
});
await child_chain_run.postRun();
const grandchild_chain_run = await child_chain_run.createChild({
name: "grandchild_chain_run",
run_type: "chain",
inputs: { text: "hello world" },
});
await grandchild_chain_run.postRun();
await grandchild_chain_run.end({ output: ["Hi"] });
await grandchild_chain_run.patchRun();
// child_chain_run ends in an error state (no outputs, error string).
await child_chain_run.end(undefined, "AN ERROR");
await child_chain_run.patchRun();
const child_tool_run = await parent_run.createChild({
name: "child_tool_run",
run_type: "tool",
inputs: { text: "hello world" },
});
await child_tool_run.postRun();
await child_tool_run.end({ output: ["Hi"] });
await child_tool_run.patchRun();
await child_llm_run.end({ prompts: ["hello world"] });
await child_llm_run.patchRun();
await parent_run.end({ output: ["Hi"] });
await parent_run.patchRun();
// Ingestion is async server-side; poll until all 5 runs are visible.
await waitUntil(
async () => {
try {
const runs = await toArray(langchainClient.listRuns({ projectName }));
return runs.length === 5;
} catch (e) {
return false;
}
},
30_000, // Wait up to 30 seconds
3000 // every 3 second
);
const runs = await toArray(langchainClient.listRuns({ projectName }));
expect(runs.length).toEqual(5);
const runMap = new Map(runs.map((run) => [run.name, run]));
expect(runMap.get("child_run")?.parent_run_id).toEqual(
runMap.get("parent_run")?.id
);
expect(runMap.get("child_chain_run")?.parent_run_id).toEqual(
runMap.get("parent_run")?.id
);
expect(runMap.get("grandchild_chain_run")?.parent_run_id).toEqual(
runMap.get("child_chain_run")?.id
);
expect(runMap.get("child_tool_run")?.parent_run_id).toEqual(
runMap.get("parent_run")?.id
);
expect(runMap.get("parent_run")?.parent_run_id).toBeNull();
// Listing by traceId must eventually return the same 5 runs.
await waitUntil(
async () => {
try {
const runs_ = await toArray(
langchainClient.listRuns({ traceId: runs[0].trace_id })
);
return runs_.length === 5;
} catch (e) {
return false;
}
},
30_000, // Wait up to 30 seconds
3000 // every 3 second
);
const traceRunsIter = langchainClient.listRuns({
traceId: runs[0].trace_id,
});
const traceRuns = await toArray(traceRunsIter);
expect(traceRuns.length).toEqual(5);
// Sort by dotted order and assert runs lists are equal
// (presigned URLs are stripped first since they differ per request).
const sortedRuns = sanitizePresignedUrls(
runs.sort((a, b) =>
(a?.dotted_order ?? "").localeCompare(b?.dotted_order ?? "")
)
);
const sortedTraceRuns = sanitizePresignedUrls(
traceRuns.sort((a, b) =>
(a?.dotted_order ?? "").localeCompare(b?.dotted_order ?? "")
)
);
expect(sortedRuns).toEqual(sortedTraceRuns);
await langchainClient.deleteProject({ projectName });
},
120_000
);
test.concurrent(
"Test list runs multi project",
async () => {
// Integration test: listRuns accepts an ARRAY of project names and returns
// runs from all of them. Uses fixed project names, so stale projects from
// prior runs are deleted up front and again in the finally block.
const projectNames = [
"__My JS Tracer Project - test_list_runs_multi_project",
"__My JS Tracer Project - test_list_runs_multi_project2",
];
try {
const langchainClient = new Client({ timeout_ms: 30000 });
for (const project of projectNames) {
if (await langchainClient.hasProject({ projectName: project })) {
await langchainClient.deleteProject({ projectName: project });
}
}
const parentRunConfig: RunTreeConfig = {
name: "parent_run",
inputs: { text: "hello world" },
project_name: projectNames[0],
client: langchainClient,
};
const parent_run = new RunTree(parentRunConfig);
await parent_run.postRun();
await parent_run.end({ output: "Completed: foo" });
await parent_run.patchRun();
const parentRunConfig2: RunTreeConfig = {
name: "parent_run",
inputs: { text: "hello world" },
project_name: projectNames[1],
client: langchainClient,
};
const parent_run2 = new RunTree(parentRunConfig2);
await parent_run2.postRun();
await parent_run2.end({ output: "Completed: foo" });
await parent_run2.patchRun();
await pollRunsUntilCount(langchainClient, projectNames[0], 1);
await pollRunsUntilCount(langchainClient, projectNames[1], 1);
const runsIter = langchainClient.listRuns({
projectName: projectNames,
});
const runs = await toArray(runsIter);
expect(runs.length).toBe(2);
expect(
runs.every((run) => run?.outputs?.["output"] === "Completed: foo")
).toBe(true);
// Each run must belong to a distinct project (session).
expect(runs[0].session_id).not.toBe(runs[1].session_id);
} finally {
// Best-effort cleanup regardless of test outcome.
const langchainClient = new Client();
for (const project of projectNames) {
if (await langchainClient.hasProject({ projectName: project })) {
try {
await langchainClient.deleteProject({ projectName: project });
} catch (e) {
// Pass
}
}
}
}
},
120_000
);
// Verifies that metadata passed to RunTree.end() is persisted onto the
// stored run's `extra` payload.
test.concurrent(
  "Test end() write to metadata",
  async () => {
    const runId = uuid.v4();
    const projectName = `__test_end_metadata_run_tree_js ${runId}`;
    const langchainClient = new Client({ timeout_ms: 30_000 });
    const parentRunConfig: RunTreeConfig = {
      name: "parent_run",
      id: runId,
      run_type: "chain",
      project_name: projectName,
      client: langchainClient,
    };
    const parentRun = new RunTree(parentRunConfig);
    // end() is called before postRun() so the post carries the final state.
    await parentRun.end({ output: ["Hi"] }, undefined, undefined, {
      final_metadata: runId,
    });
    await parentRun.postRun();
    await pollRunsUntilCount(langchainClient, projectName, 1);
    const runs = await toArray(langchainClient.listRuns({ id: [runId] }));
    expect(runs.length).toEqual(1);
    // Fixed: the original `expect(runs[0].extra);` invoked no matcher, so it
    // asserted nothing and could never fail. Assert the extra payload is
    // actually present on the stored run.
    expect(runs[0].extra).toBeDefined();
    await langchainClient.deleteProject({ projectName });
  },
  120_000
);
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/experiment_manager.int.test.ts | import { _ExperimentManager } from "../evaluation/_runner.js";
import { Client } from "../index.js";
import { v4 as uuidv4 } from "uuid";
// Unique per-process dataset name so concurrent CI runs do not collide.
const TESTING_DATASET_NAME = `test_experiment_manager_${uuidv4()}`;
// Create the shared test dataset (two input/output example pairs) once for
// the whole suite; skipped if a previous run already created it.
beforeAll(async () => {
  const client = new Client();
  if (!(await client.hasDataset({ datasetName: TESTING_DATASET_NAME }))) {
    await client.createDataset(TESTING_DATASET_NAME, {
      // Fixed typo: was "For testing pruposes".
      description: "For testing purposes",
    });
    await client.createExamples({
      inputs: [{ input: 1 }, { input: 2 }],
      outputs: [{ output: 2 }, { output: 3 }],
      datasetName: TESTING_DATASET_NAME,
    });
  }
});
afterAll(async () => {
const client = new Client();
await client.deleteDataset({ datasetName: TESTING_DATASET_NAME });
});
describe("experiment manager", () => {
  test("can recover from collisions", async () => {
    const client = new Client();
    const ds = await client.readDataset({ datasetName: TESTING_DATASET_NAME });
    // Fixed: the constructor is synchronous; the original `await new ...`
    // awaited a non-promise, which was a misleading no-op.
    const manager = new _ExperimentManager({
      data: TESTING_DATASET_NAME,
      client,
      numRepetitions: 1,
    });
    const experimentName = manager._experimentName;
    // Pre-create a project with the generated name to force a collision.
    await client.createProject({
      projectName: experimentName,
      referenceDatasetId: ds.id,
    });
    await manager.start();
    // The manager should detect the collision and pick a fresh name.
    expect(manager._experimentName).not.toEqual(experimentName);
  });
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/wrapped_openai.int.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable no-process-env */
import { jest } from "@jest/globals";
import { OpenAI } from "openai";
import { wrapOpenAI } from "../wrappers/index.js";
import { Client } from "../client.js";
import { mockClient } from "./utils/mock_client.js";
import { getAssumedTreeFromCalls } from "./utils/tree.js";
import { zodResponseFormat } from "openai/helpers/zod";
import { z } from "zod";
import { UsageMetadata } from "../schemas.js";
import fs from "fs";
// Type-level check: the wrapped client must be assignable back to a plain
// OpenAI binding; the runtime assertion is just a placeholder.
test("wrapOpenAI should return type compatible with OpenAI", async () => {
  let clientUnderTest = new OpenAI();
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  clientUnderTest = wrapOpenAI(clientUnderTest);
  expect(true).toBe(true);
});
// End-to-end check that wrapOpenAI proxies chat.completions.create
// faithfully for invoke, .asResponse(), and streaming (including an early
// break), and that every call the stubbed LangSmith client records is a
// POST or PATCH.
test.concurrent("chat.completions", async () => {
  const client = new Client({ autoBatchTracing: false });
  // Stub network calls to the LangSmith backend and record them on the spy.
  const callSpy = jest
    .spyOn((client as any).caller, "call")
    .mockResolvedValue({ ok: true, text: () => "" });
  const originalClient = new OpenAI();
  const patchedClient = wrapOpenAI(new OpenAI(), { client });
  // invoke
  const original = await originalClient.chat.completions.create({
    messages: [{ role: "user", content: `Say 'foo'` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
  });
  const patched = await patchedClient.chat.completions.create({
    messages: [{ role: "user", content: `Say 'foo'` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
  });
  // Wrapped and unwrapped clients must produce the same choices.
  expect(patched.choices).toEqual(original.choices);
  // .asResponse() on the wrapped client must still expose the raw response.
  const response = await patchedClient.chat.completions
    .create({
      messages: [{ role: "user", content: `Say 'foo'` }],
      temperature: 0,
      seed: 42,
      model: "gpt-3.5-turbo",
    })
    .asResponse();
  expect(response.ok).toBe(true);
  // stream
  const originalStream = await originalClient.chat.completions.create({
    messages: [{ role: "user", content: `Say 'foo'` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
    stream: true,
  });
  const originalChoices: unknown[] = [];
  for await (const chunk of originalStream) {
    originalChoices.push(chunk.choices);
  }
  const patchedStream = await patchedClient.chat.completions.create({
    messages: [{ role: "user", content: `Say 'foo'` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
    stream: true,
  });
  const patchedChoices: unknown[] = [];
  for await (const chunk of patchedStream) {
    patchedChoices.push(chunk.choices);
    // @ts-expect-error Should type check streamed output
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    const _test = chunk.invalidPrompt;
  }
  expect(patchedChoices).toEqual(originalChoices);
  expect(callSpy.mock.calls.length).toBeGreaterThanOrEqual(1);
  for (const call of callSpy.mock.calls) {
    expect(["POST", "PATCH"]).toContain((call[2] as any)["method"]);
  }
  callSpy.mockClear();
  // Breaking out of the stream early must still flush tracing calls.
  const patchedStreamToBreak = await patchedClient.chat.completions.create({
    messages: [{ role: "user", content: `Say 'hello world hello again'` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
    stream: true,
  });
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  for await (const _ of patchedStreamToBreak) {
    console.log(_);
    break;
  }
  expect(callSpy.mock.calls.length).toBeGreaterThanOrEqual(1);
  for (const call of callSpy.mock.calls) {
    expect(["POST", "PATCH"]).toContain((call[2] as any)["method"]);
  }
  callSpy.mockClear();
  // Per-call langsmithExtra metadata must not affect the streamed output.
  const patchedStreamWithMetadata = await patchedClient.chat.completions.create(
    {
      messages: [{ role: "user", content: `Say 'foo'` }],
      temperature: 0,
      seed: 42,
      model: "gpt-3.5-turbo",
      stream: true,
    },
    {
      langsmithExtra: {
        metadata: {
          thing1: "thing2",
        },
      },
    }
  );
  const patchedChoices2: unknown[] = [];
  for await (const chunk of patchedStreamWithMetadata) {
    patchedChoices2.push(chunk.choices);
    // @ts-expect-error Should type check streamed output
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    const _test = chunk.invalidPrompt;
  }
  expect(patchedChoices2).toEqual(originalChoices);
  expect(callSpy.mock.calls.length).toBeGreaterThanOrEqual(1);
  for (const call of callSpy.mock.calls) {
    expect(["POST", "PATCH"]).toContain((call[2] as any)["method"]);
  }
  callSpy.mockClear();
});
// Same wrapped-vs-unwrapped parity checks as the chat.completions test, but
// exercising tool calling for both invoke and streaming, and verifying that
// per-call langsmithExtra metadata is forwarded to the traced run.
test.concurrent("chat completions with tool calling", async () => {
  const client = new Client({ autoBatchTracing: false });
  const callSpy = jest
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    .spyOn((client as any).caller, "call")
    .mockResolvedValue({ ok: true, text: () => "" });
  const originalClient = new OpenAI();
  const patchedClient = wrapOpenAI(new OpenAI(), { client });
  // Tool-call ids are server-generated and differ between requests, so strip
  // them before comparing choices. Handles both the non-streaming shape
  // (Choice[]) and the streaming shape (Choice[][]). Mutates in place.
  const removeToolCallId = (
    choices:
      | OpenAI.ChatCompletion.Choice[]
      | OpenAI.ChatCompletionChunk.Choice[][]
  ) => {
    if (Array.isArray(choices[0])) {
      return (choices as OpenAI.ChatCompletionChunk.Choice[][]).map(
        (choices) => {
          return choices.map((choice) => {
            choice.delta.tool_calls = choice.delta.tool_calls?.map(
              (toolCall) => {
                const { id, ...rest } = toolCall;
                return rest;
              }
            ) as any;
            return choice;
          });
        }
      );
    } else {
      return (choices as OpenAI.ChatCompletion.Choice[]).map((choice) => {
        choice.message.tool_calls = choice.message.tool_calls?.map(
          (toolCall) => {
            const { id, ...rest } = toolCall;
            return rest;
          }
        ) as any;
        return choice;
      });
    }
  };
  const toolDefinition = [
    {
      type: "function" as const,
      function: {
        name: "get_current_weather",
        description: "Get the current weather in a given location",
        parameters: {
          type: "object",
          properties: {
            location: {
              type: "string",
              description: "The city and only the city, e.g. San Francisco",
            },
          },
          required: ["location"],
        },
      },
    },
  ];
  // invoke
  const original = await originalClient.chat.completions.create({
    messages: [{ role: "user", content: `What is the current weather in SF?` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
    tools: toolDefinition,
    tool_choice: {
      type: "function",
      function: { name: "get_current_weather" },
    },
  });
  const patched = await patchedClient.chat.completions.create({
    messages: [{ role: "user", content: `What is the current weather in SF?` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
    tools: toolDefinition,
    tool_choice: {
      type: "function",
      function: { name: "get_current_weather" },
    },
  });
  expect(removeToolCallId(patched.choices)).toEqual(
    removeToolCallId(original.choices)
  );
  // stream
  const originalStream = await originalClient.chat.completions.create({
    messages: [{ role: "user", content: `What is the current weather in SF?` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
    tools: toolDefinition,
    tool_choice: {
      type: "function",
      function: { name: "get_current_weather" },
    },
    stream: true,
  });
  const originalChoices: any[] = [];
  for await (const chunk of originalStream) {
    originalChoices.push(chunk.choices);
  }
  const patchedStream = await patchedClient.chat.completions.create({
    messages: [{ role: "user", content: `What is the current weather in SF?` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
    tools: toolDefinition,
    tool_choice: {
      type: "function",
      function: { name: "get_current_weather" },
    },
    stream: true,
  });
  const patchedChoices: any[] = [];
  for await (const chunk of patchedStream) {
    patchedChoices.push(chunk.choices);
    // @ts-expect-error Should type check streamed output
    const _test = chunk.invalidPrompt;
  }
  expect(removeToolCallId(patchedChoices)).toEqual(
    removeToolCallId(originalChoices)
  );
  expect(callSpy.mock.calls.length).toBeGreaterThanOrEqual(1);
  for (const call of callSpy.mock.calls) {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    expect(["POST", "PATCH"]).toContain((call[2] as any)["method"]);
  }
  callSpy.mockClear();
  // Repeat the streaming call with langsmithExtra metadata attached.
  const patchedStream2 = await patchedClient.chat.completions.create(
    {
      messages: [
        { role: "user", content: `What is the current weather in SF?` },
      ],
      temperature: 0,
      seed: 42,
      model: "gpt-3.5-turbo",
      tools: toolDefinition,
      tool_choice: {
        type: "function",
        function: { name: "get_current_weather" },
      },
      stream: true,
    },
    {
      langsmithExtra: {
        metadata: {
          thing1: "thing2",
        },
      },
    }
  );
  const patchedChoices2: any[] = [];
  for await (const chunk of patchedStream2) {
    patchedChoices2.push(chunk.choices);
    // @ts-expect-error Should type check streamed output
    const _test = chunk.invalidPrompt;
  }
  expect(removeToolCallId(patchedChoices2)).toEqual(
    removeToolCallId(originalChoices)
  );
  expect(callSpy.mock.calls.length).toBeGreaterThanOrEqual(1);
  for (const call of callSpy.mock.calls) {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    expect(["POST", "PATCH"]).toContain((call[2] as any)["method"]);
    // The custom metadata plus the ls_* invocation params must appear on
    // every traced request body.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    expect(JSON.parse((call[2] as any).body).extra.metadata).toEqual({
      thing1: "thing2",
      ls_model_name: "gpt-3.5-turbo",
      ls_model_type: "chat",
      ls_provider: "openai",
      ls_temperature: 0,
    });
  }
  callSpy.mockClear();
});
// Parity checks for the legacy (non-chat) completions endpoint: invoke and
// streaming output of the wrapped client must match the unwrapped client,
// and traced calls must be POST/PATCH only.
test.concurrent("completions", async () => {
  const client = new Client({ autoBatchTracing: false });
  const callSpy = jest
    .spyOn((client as any).caller, "call")
    .mockResolvedValue({ ok: true, text: () => "" });
  const originalClient = new OpenAI();
  const patchedClient = wrapOpenAI(new OpenAI(), { client });
  const prompt = `Say 'Hi I'm ChatGPT' then stop.`;
  // invoke
  const original = await originalClient.completions.create({
    prompt,
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo-instruct",
  });
  const patched = await patchedClient.completions.create({
    prompt,
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo-instruct",
  });
  expect(patched.choices).toEqual(original.choices);
  // stream
  const originalStream = await originalClient.completions.create({
    prompt,
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo-instruct",
    stream: true,
  });
  const originalChoices: unknown[] = [];
  for await (const chunk of originalStream) {
    originalChoices.push(chunk.choices);
    // @ts-expect-error Should type check streamed output
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    const _test = chunk.invalidPrompt;
  }
  const patchedStream = await patchedClient.completions.create({
    prompt,
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo-instruct",
    stream: true,
  });
  const patchedChoices: unknown[] = [];
  for await (const chunk of patchedStream) {
    patchedChoices.push(chunk.choices);
    // @ts-expect-error Should type check streamed output
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    const _test = chunk.invalidPrompt;
  }
  expect(patchedChoices).toEqual(originalChoices);
  expect(callSpy.mock.calls.length).toBeGreaterThanOrEqual(1);
  for (const call of callSpy.mock.calls) {
    expect(["POST", "PATCH"]).toContain((call[2] as any)["method"]);
  }
  // Repeat the streaming call with langsmithExtra metadata attached.
  const patchedStream2 = await patchedClient.completions.create(
    {
      prompt,
      temperature: 0,
      seed: 42,
      model: "gpt-3.5-turbo-instruct",
      stream: true,
    },
    {
      langsmithExtra: {
        metadata: {
          thing1: "thing2",
        },
      },
    }
  );
  const patchedChoices2: unknown[] = [];
  for await (const chunk of patchedStream2) {
    patchedChoices2.push(chunk.choices);
    // @ts-expect-error Should type check streamed output
    const _test = chunk.invalidPrompt;
  }
  expect(patchedChoices2).toEqual(originalChoices);
  expect(callSpy.mock.calls.length).toBeGreaterThanOrEqual(1);
  for (const call of callSpy.mock.calls) {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    expect(["POST", "PATCH"]).toContain((call[2] as any)["method"]);
  }
});
// Manually-run smoke test: project name and metadata supplied at wrap time
// should apply to all traced calls. Skipped in CI (hits the real API).
test.skip("with initialization time config", async () => {
  const patchedClient = wrapOpenAI(new OpenAI(), {
    project_name: "alternate_project",
    metadata: {
      customKey: "customVal",
    },
  });
  const stream = await patchedClient.chat.completions.create({
    messages: [{ role: "user", content: `What is the current weather in SF?` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
    stream: true,
  });
  const collectedChoices: unknown[] = [];
  for await (const chunk of stream) {
    collectedChoices.push(chunk.choices);
    // @ts-expect-error Should type check streamed output
    const _test = chunk.invalidPrompt;
  }
  console.log(collectedChoices);
});
// Manually-run smoke test: with tracing env vars unset, the wrapped client
// should still pass calls through untraced. Skipped in CI.
test.skip("no tracing with env var unset", async () => {
  // Fixed: assigning `undefined` to a process.env key coerces it to the
  // string "undefined" (env values are always strings), which does NOT
  // disable tracing checks. `delete` actually unsets the variables.
  delete process.env.LANGCHAIN_TRACING_V2;
  delete process.env.LANGSMITH_TRACING_V2;
  const patchedClient = wrapOpenAI(new OpenAI());
  const patched = await patchedClient.chat.completions.create({
    messages: [{ role: "user", content: `Say 'bazqux'` }],
    temperature: 0,
    seed: 42,
    model: "gpt-3.5-turbo",
  });
  expect(patched).toBeDefined();
  console.log(patched);
});
// Double-wrapping one client must be rejected to avoid duplicate tracing.
test("wrapping same instance", async () => {
  const alreadyWrapped = wrapOpenAI(new OpenAI());
  const rewrap = () => wrapOpenAI(alreadyWrapped);
  expect(rewrap).toThrowError(
    "This instance of OpenAI client has been already wrapped once."
  );
});
// Verifies that per-call langsmithExtra name/metadata overrides end up on
// the traced runs, for both invoke and streaming, using a fully mocked
// LangSmith client.
test("chat.concurrent extra name", async () => {
  const { client, callSpy } = mockClient();
  const openai = wrapOpenAI(new OpenAI(), {
    client,
  });
  await openai.chat.completions.create(
    {
      messages: [{ role: "user", content: `Say 'red'` }],
      temperature: 0,
      seed: 42,
      model: "gpt-3.5-turbo",
    },
    { langsmithExtra: { name: "red", metadata: { customKey: "red" } } }
  );
  const stream = await openai.chat.completions.create(
    {
      messages: [{ role: "user", content: `Say 'green'` }],
      temperature: 0,
      seed: 42,
      model: "gpt-3.5-turbo",
      stream: true,
    },
    { langsmithExtra: { name: "green", metadata: { customKey: "green" } } }
  );
  // Drain the stream so the traced run is finalized.
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  for await (const _ of stream) {
    // pass
  }
  // Two sibling root runs, named by their langsmithExtra overrides.
  expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
    nodes: ["red:0", "green:1"],
    edges: [],
    data: {
      "red:0": {
        name: "red",
        extra: { metadata: { customKey: "red" } },
        outputs: {
          choices: [
            { index: 0, message: { role: "assistant", content: "Red" } },
          ],
        },
      },
      "green:1": {
        name: "green",
        extra: { metadata: { customKey: "green" } },
        outputs: {
          choices: [
            { index: 0, message: { role: "assistant", content: "Green" } },
          ],
        },
      },
    },
  });
});
// Verifies the structured-output helper (beta.chat.completions.parse) is
// traced like a normal chat call, with the ls_* invocation params recorded
// in the run metadata.
test.concurrent("beta.chat.completions.parse", async () => {
  const { client, callSpy } = mockClient();
  const openai = wrapOpenAI(new OpenAI(), {
    client,
  });
  await openai.beta.chat.completions.parse({
    model: "gpt-4o-mini",
    temperature: 0,
    messages: [
      {
        role: "user",
        content: "I am Jacob",
      },
    ],
    response_format: zodResponseFormat(
      z.object({
        name: z.string(),
      }),
      "name"
    ),
  });
  for (const call of callSpy.mock.calls) {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    expect(["POST", "PATCH"]).toContain((call[2] as any)["method"]);
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    expect(JSON.parse((call[2] as any).body).extra.metadata).toEqual({
      ls_model_name: "gpt-4o-mini",
      ls_model_type: "chat",
      ls_provider: "openai",
      ls_temperature: 0,
    });
  }
  callSpy.mockClear();
});
// Parameter matrix for the "Usage Metadata Tests" suite below. Each case
// describes a chat.completions request, whether the traced run is expected
// to carry usage_metadata, and (for reasoning models) whether reasoning
// token details must be present as well.
const usageMetadataTestCases = [
  {
    description: "stream",
    params: {
      model: "gpt-4o-mini",
      messages: [{ role: "user", content: "howdy" }],
      stream: true,
      // include_usage makes OpenAI emit a final usage chunk on the stream.
      stream_options: { include_usage: true },
    },
    expectUsageMetadata: true,
  },
  {
    description: "stream no usage",
    params: {
      model: "gpt-4o-mini",
      messages: [{ role: "user", content: "howdy" }],
      stream: true,
    },
    expectUsageMetadata: false,
  },
  {
    description: "default",
    params: {
      model: "gpt-4o-mini",
      messages: [{ role: "user", content: "howdy" }],
    },
    expectUsageMetadata: true,
  },
  {
    description: "reasoning",
    params: {
      model: "o1-mini",
      messages: [
        {
          role: "user",
          content:
            "Write a bash script that takes a matrix represented as a string with format '[1,2],[3,4],[5,6]' and prints the transpose in the same format.",
        },
      ],
    },
    expectUsageMetadata: true,
    checkReasoningTokens: true,
  },
];
// For each case in usageMetadataTestCases: run the request through the
// wrapped client, capture the usage_metadata written to the traced run, and
// compare it token-for-token against the usage OpenAI itself reported.
describe("Usage Metadata Tests", () => {
  usageMetadataTestCases.forEach(
    ({ description, params, expectUsageMetadata, checkReasoningTokens }) => {
      it(`should handle ${description}`, async () => {
        const { client, callSpy } = mockClient();
        const openai = wrapOpenAI(new OpenAI(), {
          tracingEnabled: true,
          client,
        });
        const requestParams = { ...params };
        let oaiUsage: OpenAI.CompletionUsage | undefined;
        if (requestParams.stream) {
          const stream = await openai.chat.completions.create(
            requestParams as OpenAI.ChatCompletionCreateParamsStreaming
          );
          // Usage (if requested) arrives on the final stream chunk.
          for await (const chunk of stream) {
            if (expectUsageMetadata && chunk.usage) {
              oaiUsage = chunk.usage;
            }
          }
        } else {
          const res = await openai.chat.completions.create(
            requestParams as OpenAI.ChatCompletionCreateParams
          );
          oaiUsage = (res as OpenAI.ChatCompletion).usage;
        }
        // Scan the traced request bodies for the recorded usage_metadata.
        let usageMetadata: UsageMetadata | undefined;
        const requestBodies: any = {};
        for (const call of callSpy.mock.calls) {
          const request = call[2] as any;
          const requestBody = JSON.parse(request.body);
          if (request.method === "POST") {
            requestBodies["post"] = [requestBody];
          }
          if (request.method === "PATCH") {
            requestBodies["patch"] = [requestBody];
          }
          if (requestBody.outputs && requestBody.outputs.usage_metadata) {
            usageMetadata = requestBody.outputs.usage_metadata;
            break;
          }
        }
        if (expectUsageMetadata) {
          expect(usageMetadata).not.toBeUndefined();
          expect(usageMetadata).not.toBeNull();
          expect(oaiUsage).not.toBeUndefined();
          expect(oaiUsage).not.toBeNull();
          expect(usageMetadata!.input_tokens).toEqual(oaiUsage!.prompt_tokens);
          expect(usageMetadata!.output_tokens).toEqual(
            oaiUsage!.completion_tokens
          );
          expect(usageMetadata!.total_tokens).toEqual(oaiUsage!.total_tokens);
          if (checkReasoningTokens) {
            expect(usageMetadata!.output_token_details).not.toBeUndefined();
            expect(
              usageMetadata!.output_token_details!.reasoning
            ).not.toBeUndefined();
            expect(usageMetadata!.output_token_details!.reasoning).toEqual(
              oaiUsage!.completion_tokens_details?.reasoning_tokens
            );
          }
        } else {
          expect(usageMetadata).toBeUndefined();
          expect(oaiUsage).toBeUndefined();
        }
        // Optionally dump the captured request bodies as fixtures for the
        // cross-SDK token-counting tests.
        if (process.env.WRITE_TOKEN_COUNTING_TEST_DATA === "1") {
          fs.writeFileSync(
            `${__dirname}/test_data/langsmith_js_wrap_openai_${description.replace(
              " ",
              "_"
            )}.json`,
            JSON.stringify(requestBodies, null, 2)
          );
        }
        callSpy.mockClear();
      });
    }
  );
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/utils.ts | import { Client } from "../client.js";
import { v4 as uuidv4 } from "uuid";
// eslint-disable-next-line import/no-extraneous-dependencies
import { faker } from "@faker-js/faker";
import { RunCreate } from "../schemas.js";
/**
 * Drain an async iterable into a plain array.
 */
export async function toArray<T>(iterable: AsyncIterable<T>): Promise<T[]> {
  const collected: T[] = [];
  for await (const element of iterable) {
    collected.push(element);
  }
  return collected;
}
/**
 * Poll `condition` every `interval` ms until it resolves true or `timeout`
 * ms have elapsed. Errors thrown by the condition count as "not yet".
 *
 * @throws Error tagged with `prefix` (when given) and the elapsed time.
 */
export async function waitUntil(
  condition: () => Promise<boolean>,
  timeout: number,
  interval: number,
  prefix?: string
): Promise<void> {
  const startedAt = Date.now();
  while (Date.now() - startedAt < timeout) {
    let satisfied = false;
    try {
      satisfied = await condition();
    } catch {
      // Treat condition errors as "condition not yet satisfied".
    }
    if (satisfied) {
      return;
    }
    await new Promise((resolve) => setTimeout(resolve, interval));
  }
  const elapsed = Date.now() - startedAt;
  throw new Error(
    [prefix, `Timeout after ${elapsed / 1000}s`].filter(Boolean).join(": ")
  );
}
/**
 * Poll `listRuns` for `projectName` until exactly `count` runs are visible,
 * failing after `timeout` ms (default 120s). Polls every 5 seconds.
 */
export async function pollRunsUntilCount(
  client: Client,
  projectName: string,
  count: number,
  timeout?: number
): Promise<void> {
  const hasExpectedCount = async () => {
    try {
      const runs = await toArray(client.listRuns({ projectName }));
      return runs.length === count;
    } catch (e) {
      // Listing can fail while the project is still materializing.
      return false;
    }
  };
  await waitUntil(
    hasExpectedCount,
    timeout ?? 120_000, // Wait up to 120 seconds
    5000 // every 5 seconds
  );
}
/**
 * Delete `projectName` if it exists; all errors are silently ignored so
 * tests can use this as best-effort cleanup.
 */
export async function deleteProject(
  langchainClient: Client,
  projectName: string
) {
  try {
    // readProject throws when the project is absent, skipping the delete.
    await langchainClient.readProject({ projectName });
  } catch (e) {
    return;
  }
  try {
    await langchainClient.deleteProject({ projectName });
  } catch (e) {
    // Pass
  }
}
/**
 * Delete the dataset named `datasetName` if it exists; all errors are
 * silently ignored so tests can use this as best-effort cleanup.
 */
export async function deleteDataset(
  langchainClient: Client,
  datasetName: string
) {
  try {
    const { id } = await langchainClient.readDataset({ datasetName });
    await langchainClient.deleteDataset({ datasetId: id });
  } catch (e) {
    // Pass
  }
}
/**
 * Wait (up to 30s, polling every 5s) until `runId` is readable via the API,
 * optionally also requiring that the run has non-empty outputs.
 */
export async function waitUntilRunFound(
  client: Client,
  runId: string,
  checkOutputs = false
) {
  const runIsReady = async () => {
    try {
      const run = await client.readRun(runId);
      if (!checkOutputs) {
        return true;
      }
      const outputs = run.outputs;
      return outputs != null && Object.keys(outputs).length !== 0;
    } catch (e) {
      return false;
    }
  };
  return waitUntil(runIsReady, 30_000, 5_000, `Waiting for run "${runId}"`);
}
/**
 * Wait (up to 10s, polling every 5s) until `projectName` is readable via
 * the API.
 */
export async function waitUntilProjectFound(
  client: Client,
  projectName: string
) {
  const projectExists = async () => {
    try {
      await client.readProject({ projectName });
      return true;
    } catch (e) {
      return false;
    }
  };
  return waitUntil(
    projectExists,
    10_000,
    5_000,
    `Waiting for project "${projectName}"`
  );
}
/**
 * Deep-copy `payload`, replacing the Signature/Expires query params of every
 * `presigned_url` value with fixed placeholders so payloads containing
 * time-sensitive presigned URLs can be compared for equality.
 */
export function sanitizePresignedUrls(payload: unknown) {
  const reviver = (key: string, value: unknown) => {
    if (key !== "presigned_url") {
      return value;
    }
    try {
      const parsed = new URL(value as string);
      parsed.searchParams.set("Signature", "[SIGNATURE]");
      parsed.searchParams.set("Expires", "[EXPIRES]");
      return parsed.toString();
    } catch {
      // Not a parseable URL; leave the value untouched.
      return value;
    }
  };
  return JSON.parse(JSON.stringify(payload), reviver);
}
/**
 * Build `count` randomized `RunCreate` payloads targeting `projectName`.
 *
 * @param projectName Project the generated runs belong to.
 * @param count Number of runs to create (default: 10).
 * @returns List of `RunCreate` objects with faker-generated names and Q/A.
 */
export function createRunsFactory(
  projectName: string,
  count = 10
): Array<RunCreate> {
  const runs: Array<RunCreate> = [];
  for (let idx = 0; idx < count; idx += 1) {
    runs.push({
      id: uuidv4(),
      name: `${idx}-${faker.lorem.words()}`,
      run_type: faker.helpers.arrayElement([
        "tool",
        "chain",
        "llm",
        "retriever",
      ]),
      inputs: {
        question: faker.lorem.sentence(),
      },
      outputs: {
        answer: faker.lorem.sentence(),
      },
      project_name: projectName,
    });
  }
  return runs;
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/few_shot.int.test.ts | import { KVMap, ExampleSearch } from "../schemas.js";
import { Client } from "../index.js";
import { v4 as uuidv4 } from "uuid";
// Unique per-process dataset name so concurrent CI runs do not collide.
const TESTING_DATASET_NAME = `test_dataset_few_shot_js_${uuidv4()}`;
// End-to-end test for dataset indexing + similarExamples few-shot search,
// including metadata filtering.
test("few shot search", async () => {
  const client = new Client();
  // JSON schema used to validate example inputs on the dataset.
  const schema: KVMap = {
    type: "object",
    properties: {
      name: { type: "string" },
    },
    required: ["name"],
    additionalProperties: false,
  };
  const has_dataset = await client.hasDataset({
    datasetName: TESTING_DATASET_NAME,
  });
  // Idiom fix: compare booleans directly instead of `=== true`.
  if (has_dataset) {
    await client.deleteDataset({ datasetName: TESTING_DATASET_NAME });
  }
  const dataset = await client.createDataset(TESTING_DATASET_NAME, {
    // Fixed typo: was "For testing purposed."
    description:
      "For testing purposes. Is created & deleted for each test run.",
    inputsSchema: schema,
  });
  // create examples
  const res = await client.createExamples({
    inputs: [{ name: "foo" }, { name: "bar" }],
    outputs: [{ output: 2 }, { output: 3 }],
    metadata: [{ somekey: "somevalue" }, { somekey: "someothervalue" }],
    datasetName: TESTING_DATASET_NAME,
  });
  if (res.length !== 2) {
    throw new Error("Failed to create examples");
  }
  await client.indexDataset({ datasetId: dataset.id });
  // Indexing is asynchronous server-side; poll for up to ~10 seconds until
  // both examples become searchable.
  let i = 0;
  let examples: ExampleSearch[] = [];
  while (i < 10) {
    examples = await client.similarExamples(
      { name: "foo" },
      dataset.id,
      // specify limit of 5 so you return all examples
      5
    );
    if (examples.length === 2) {
      break;
    }
    // sleep for one second
    await new Promise((r) => setTimeout(r, 1000));
    i++;
  }
  expect(examples.length).toBe(2);
  // The query {name: "foo"} should rank the identical example first.
  expect(examples[0].inputs).toEqual({ name: "foo" });
  expect(examples[1].inputs).toEqual({ name: "bar" });
  // Metadata filters restrict the candidate pool before similarity ranking.
  const filtered_examples = await client.similarExamples(
    { name: "foo" },
    dataset.id,
    1,
    {
      filter: "eq(metadata.somekey, 'somevalue')",
    }
  );
  expect(filtered_examples.length).toBe(1);
  expect(filtered_examples[0].inputs).toEqual({ name: "foo" });
});
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/tests/traceable_langchain.test.ts | import { traceable } from "../traceable.js";
import { getAssumedTreeFromCalls } from "./utils/tree.js";
import { mockClient } from "./utils/mock_client.js";
import { FakeChatModel } from "@langchain/core/utils/testing";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain";
import { BaseMessage, HumanMessage } from "@langchain/core/messages";
import { awaitAllCallbacks } from "@langchain/core/callbacks/promises";
import { RunnableTraceable, getLangchainCallbacks } from "../langchain.js";
import { RunnableLambda } from "@langchain/core/runnables";
// Hands tracing off from a traceable-wrapped function to a LangChain chain
// via getLangchainCallbacks(), then asserts the exact run tree (nodes and
// parent/child edges) recorded by the mocked client for invoke, stream, and
// batch execution.
describe("to langchain", () => {
  const llm = new FakeChatModel({});
  const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([
    ["human", "{text}"],
  ]);
  const parser = new StringOutputParser();
  // FakeChatModel echoes its input, so the chain output equals the input.
  const chain = prompt.pipe(llm).pipe(parser);
  test("invoke", async () => {
    const { client, callSpy } = mockClient();
    const main = traceable(
      async (input: { text: string }) => {
        return chain.invoke(input, {
          callbacks: await getLangchainCallbacks(),
        });
      },
      {
        name: "main",
        client,
        tracingEnabled: true,
        tags: ["welcome"],
        metadata: { hello: "world" },
      }
    );
    const result = await main({ text: "Hello world" });
    expect(result).toEqual("Hello world");
    // The LangChain runs must be nested beneath the traceable root run.
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: [
        "main:0",
        "RunnableSequence:1",
        "ChatPromptTemplate:2",
        "FakeChatModel:3",
        "StrOutputParser:4",
      ],
      edges: [
        ["main:0", "RunnableSequence:1"],
        ["RunnableSequence:1", "ChatPromptTemplate:2"],
        ["RunnableSequence:1", "FakeChatModel:3"],
        ["RunnableSequence:1", "StrOutputParser:4"],
      ],
      data: {
        "main:0": {
          inputs: { text: "Hello world" },
          outputs: { outputs: "Hello world" },
          tags: ["welcome"],
          extra: { metadata: { hello: "world" } },
        },
      },
    });
  });
  test("stream", async () => {
    const { client, callSpy } = mockClient();
    const main = traceable(
      async function* main(input: { text: string }) {
        for await (const token of await chain.stream(input, {
          callbacks: await getLangchainCallbacks(),
        })) {
          yield token;
        }
      },
      { client, tracingEnabled: true }
    );
    const result = [];
    for await (const token of main({ text: "Hello world" })) {
      result.push(token);
    }
    expect(result).toEqual(["Hello world"]);
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: [
        "main:0",
        "RunnableSequence:1",
        "ChatPromptTemplate:2",
        "FakeChatModel:3",
        "StrOutputParser:4",
      ],
      edges: [
        ["main:0", "RunnableSequence:1"],
        ["RunnableSequence:1", "ChatPromptTemplate:2"],
        ["RunnableSequence:1", "FakeChatModel:3"],
        ["RunnableSequence:1", "StrOutputParser:4"],
      ],
    });
  });
  test("batch", async () => {
    const { client, callSpy } = mockClient();
    const main = traceable(
      async (input: { texts: string[] }) => {
        return chain.batch(
          input.texts.map((text) => ({ text })),
          { callbacks: await getLangchainCallbacks() }
        );
      },
      { name: "main", client, tracingEnabled: true }
    );
    const result = await main({ texts: ["Hello world", "Who are you?"] });
    // Callbacks run in the background; flush them before asserting.
    await awaitAllCallbacks();
    expect(result).toEqual(["Hello world", "Who are you?"]);
    // Each batch item produces its own chain subtree under the same root.
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: [
        "main:0",
        "RunnableSequence:1",
        "RunnableSequence:2",
        "ChatPromptTemplate:3",
        "ChatPromptTemplate:4",
        "FakeChatModel:5",
        "FakeChatModel:6",
        "StrOutputParser:7",
        "StrOutputParser:8",
      ],
      edges: [
        ["main:0", "RunnableSequence:1"],
        ["main:0", "RunnableSequence:2"],
        ["RunnableSequence:1", "ChatPromptTemplate:3"],
        ["RunnableSequence:2", "ChatPromptTemplate:4"],
        ["RunnableSequence:1", "FakeChatModel:5"],
        ["RunnableSequence:2", "FakeChatModel:6"],
        ["RunnableSequence:1", "StrOutputParser:7"],
        ["RunnableSequence:2", "StrOutputParser:8"],
      ],
    });
  });
});
// The opposite handoff direction: traceable-wrapped functions embedded into
// LangChain pipelines via RunnableTraceable, traced by LangChainTracer.
// Covers invoke plus the four streamable return shapes a traceable can have
// (array, sync generator, ReadableStream, async generator).
describe("to traceable", () => {
  test("invoke", async () => {
    const { client, callSpy } = mockClient();
    const llm = new FakeChatModel({});
    const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([
      ["human", "{text}"],
    ]);
    const parser = new StringOutputParser();
    const addValueTraceable = traceable(
      (msg: BaseMessage) =>
        new HumanMessage({ content: msg.content + " world" }),
      { name: "add_negligible_value" }
    );
    // The traceable participates in the chain as a regular Runnable step.
    const chain = prompt
      .pipe(llm)
      .pipe(RunnableTraceable.from(addValueTraceable))
      .pipe(parser);
    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
    // @ts-ignore client might be of different type
    const tracer = new LangChainTracer({ client });
    const response = await chain.invoke(
      { text: "Hello" },
      { callbacks: [tracer] }
    );
    // callbacks are backgrounded by default
    await awaitAllCallbacks();
    expect(response).toEqual("Hello world");
    // The traceable shows up as a named child of the chain's root run.
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: [
        "RunnableSequence:0",
        "ChatPromptTemplate:1",
        "FakeChatModel:2",
        "add_negligible_value:3",
        "StrOutputParser:4",
      ],
      edges: [
        ["RunnableSequence:0", "ChatPromptTemplate:1"],
        ["RunnableSequence:0", "FakeChatModel:2"],
        ["RunnableSequence:0", "add_negligible_value:3"],
        ["RunnableSequence:0", "StrOutputParser:4"],
      ],
    });
  });
  test("array stream", async () => {
    const { client, callSpy } = mockClient();
    const source = RunnableTraceable.from(
      traceable(function (input: { text: string }) {
        return input.text.split(" ");
      })
    );
    const tokens: unknown[] = [];
    for await (const chunk of await source.stream(
      { text: "Hello world" },
      // eslint-disable-next-line @typescript-eslint/ban-ts-comment
      // @ts-ignore client might be of different type
      { callbacks: [new LangChainTracer({ client })] }
    )) {
      tokens.push(chunk);
    }
    // A plain array return streams as a single chunk.
    expect(tokens).toEqual([["Hello", "world"]]);
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: ["<lambda>:0"],
      edges: [],
    });
  });
  test("generator stream", async () => {
    const { client, callSpy } = mockClient();
    const source = RunnableTraceable.from(
      traceable(function* (input: { text: string }) {
        const chunks = input.text.split(" ");
        for (const chunk of chunks) {
          yield chunk;
        }
      })
    );
    const tokens: unknown[] = [];
    for await (const chunk of await source.stream(
      { text: "Hello world" },
      // eslint-disable-next-line @typescript-eslint/ban-ts-comment
      // @ts-ignore client might be of different type
      { callbacks: [new LangChainTracer({ client })] }
    )) {
      tokens.push(chunk);
    }
    // Generator returns stream chunk-by-chunk.
    expect(tokens).toEqual(["Hello", "world"]);
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: ["<lambda>:0"],
      edges: [],
    });
  });
  test("readable stream", async () => {
    const { client, callSpy } = mockClient();
    const source = RunnableTraceable.from(
      traceable(async function (input: { text: string }) {
        const readStream = new ReadableStream({
          async pull(controller) {
            for (const item of input.text.split(" ")) {
              controller.enqueue(item);
            }
            controller.close();
          },
        });
        return readStream;
      })
    );
    const tokens: unknown[] = [];
    for await (const chunk of await source.stream(
      { text: "Hello world" },
      // eslint-disable-next-line @typescript-eslint/ban-ts-comment
      // @ts-ignore client might be of different type
      { callbacks: [new LangChainTracer({ client })] }
    )) {
      tokens.push(chunk);
    }
    expect(tokens).toEqual(["Hello", "world"]);
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: ["<lambda>:0"],
      edges: [],
    });
  });
  test("async generator stream", async () => {
    const { client, callSpy } = mockClient();
    const source = RunnableTraceable.from(
      traceable(async function* (input: { text: string }) {
        const chunks = input.text.split(" ");
        for (const chunk of chunks) {
          yield chunk;
        }
      })
    );
    const tokens: unknown[] = [];
    for await (const chunk of await source.stream(
      { text: "Hello world" },
      {
        // eslint-disable-next-line @typescript-eslint/ban-ts-comment
        // @ts-ignore client might be of different type
        callbacks: [new LangChainTracer({ client })],
      }
    )) {
      tokens.push(chunk);
    }
    expect(tokens).toEqual(["Hello", "world"]);
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: ["<lambda>:0"],
      edges: [],
    });
  });
});
// Verifies that handing off tracing from traceable-wrapped functions to LangChain
// runnables via getLangchainCallbacks() produces one connected run tree: the
// traceable "main" root, two "wrappedModel" children, and a full chain subtree
// under each of them.
test("explicit nested", async () => {
  const { client, callSpy } = mockClient();
  const llm = new FakeChatModel({});
  const prompt = ChatPromptTemplate.fromMessages<{ text: string }>([
    ["human", "{text}"],
  ]);
  const parser = new StringOutputParser();
  const chain = prompt.pipe(llm).pipe(parser).withConfig({ runName: "chain" });
  const wrappedModel = new RunnableTraceable({
    func: traceable(
      async (value: { input: string }) => {
        // Bridge the current traceable run tree into LangChain callbacks so the
        // chain's runs are parented under "wrappedModel".
        const callbacks = await getLangchainCallbacks();
        return chain.invoke(
          { text: `Wrapped input: ${value.input}` },
          { callbacks }
        );
      },
      { name: "wrappedModel" }
    ),
  });
  const main = traceable(
    async () => {
      return {
        response: [
          await wrappedModel.invoke(
            { input: "Are you ready?" },
            { callbacks: await getLangchainCallbacks() }
          ),
          await wrappedModel.invoke(
            { input: "I said, Are. You. Ready?" },
            { callbacks: await getLangchainCallbacks() }
          ),
        ],
      };
    },
    { name: "main", client, tracingEnabled: true }
  );
  const result = await main();
  await awaitAllCallbacks();
  expect(result).toEqual({
    response: [
      "Wrapped input: Are you ready?",
      "Wrapped input: I said, Are. You. Ready?",
    ],
  });
  // Both invocations should hang off "main:0", each with its own chain subtree.
  expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
    nodes: [
      "main:0",
      "wrappedModel:1",
      "chain:2",
      "ChatPromptTemplate:3",
      "FakeChatModel:4",
      "StrOutputParser:5",
      "wrappedModel:6",
      "chain:7",
      "ChatPromptTemplate:8",
      "FakeChatModel:9",
      "StrOutputParser:10",
    ],
    edges: [
      ["main:0", "wrappedModel:1"],
      ["wrappedModel:1", "chain:2"],
      ["chain:2", "ChatPromptTemplate:3"],
      ["chain:2", "FakeChatModel:4"],
      ["chain:2", "StrOutputParser:5"],
      ["main:0", "wrappedModel:6"],
      ["wrappedModel:6", "chain:7"],
      ["chain:7", "ChatPromptTemplate:8"],
      ["chain:7", "FakeChatModel:9"],
      ["chain:7", "StrOutputParser:10"],
    ],
  });
});
// skip until the @langchain/core 0.2.17 is out
// These tests cover fully automatic interop — no explicit getLangchainCallbacks()
// handoff — where LangChain runnables and traceable functions nest freely and
// should still form one connected run tree.
describe.skip("automatic tracing", () => {
  it("root langchain", async () => {
    const { callSpy, langChainTracer } = mockClient();
    const lc = RunnableLambda.from(async () => "Hello from LangChain");
    const ls = traceable(() => "Hello from LangSmith", { name: "traceable" });
    // childA: a LangChain lambda that calls both a runnable and a traceable.
    const childA = RunnableLambda.from(async () => {
      const results: string[] = [];
      results.push(await lc.invoke({}));
      results.push(await ls());
      return results.join("\n");
    });
    // childB: a traceable that calls both a runnable and a traceable.
    const childB = traceable(
      async () => [await lc.invoke({}), await ls()].join("\n"),
      { name: "childB" }
    );
    const rootLC = RunnableLambda.from(async () => {
      return [
        await childA.invoke({}, { runName: "childA" }),
        await childB(),
      ].join("\n");
    });
    expect(
      await rootLC.invoke(
        {},
        { callbacks: [langChainTracer], runName: "rootLC" }
      )
    ).toEqual(
      [
        "Hello from LangChain",
        "Hello from LangSmith",
        "Hello from LangChain",
        "Hello from LangSmith",
      ].join("\n")
    );
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: [
        "rootLC:0",
        "childA:1",
        "RunnableLambda:2",
        "traceable:3",
        "childB:4",
        "RunnableLambda:5",
        "traceable:6",
      ],
      edges: [
        ["rootLC:0", "childA:1"],
        ["childA:1", "RunnableLambda:2"],
        ["childA:1", "traceable:3"],
        ["rootLC:0", "childB:4"],
        ["childB:4", "RunnableLambda:5"],
        ["childB:4", "traceable:6"],
      ],
    });
  });
  it("root traceable", async () => {
    // Mirror of "root langchain" but with a traceable at the root instead of a
    // LangChain runnable; the resulting tree shape should be identical.
    const { client, callSpy } = mockClient();
    const lc = RunnableLambda.from(async () => "Hello from LangChain");
    const ls = traceable(() => "Hello from LangSmith", { name: "traceable" });
    const childA = RunnableLambda.from(async () => {
      const results: string[] = [];
      results.push(await lc.invoke({}));
      results.push(await ls());
      return results.join("\n");
    });
    const childB = traceable(
      async () => [await lc.invoke({}), await ls()].join("\n"),
      { name: "childB" }
    );
    const rootLS = traceable(
      async () => {
        return [
          await childA.invoke({}, { runName: "childA" }),
          await childB(),
        ].join("\n");
      },
      { name: "rootLS", client, tracingEnabled: true }
    );
    expect(await rootLS()).toEqual(
      [
        "Hello from LangChain",
        "Hello from LangSmith",
        "Hello from LangChain",
        "Hello from LangSmith",
      ].join("\n")
    );
    expect(getAssumedTreeFromCalls(callSpy.mock.calls)).toMatchObject({
      nodes: [
        "rootLS:0",
        "childA:1",
        "RunnableLambda:2",
        "traceable:3",
        "childB:4",
        "RunnableLambda:5",
        "traceable:6",
      ],
      edges: [
        ["rootLS:0", "childA:1"],
        ["childA:1", "RunnableLambda:2"],
        ["childA:1", "traceable:3"],
        ["rootLS:0", "childB:4"],
        ["childB:4", "RunnableLambda:5"],
        ["childB:4", "traceable:6"],
      ],
    });
  });
});
|
0 | lc_public_repos/langsmith-sdk/js/src/tests | lc_public_repos/langsmith-sdk/js/src/tests/test_data/langsmith_js_wrap_openai_stream_no usage.json | {
"post": [
{
"session_name": "default",
"id": "a9a7fdab-0e16-4153-898a-33a0c1d9f759",
"name": "ChatOpenAI",
"start_time": 1728803136672,
"run_type": "llm",
"extra": {
"metadata": {
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini"
},
"runtime": {
"library": "langsmith",
"runtime": "node",
"sdk": "langsmith-js",
"sdk_version": "0.1.65"
}
},
"serialized": {},
"inputs": {
"model": "gpt-4o-mini",
"messages": [
{
"role": "user",
"content": "howdy"
}
],
"stream": true
},
"child_runs": [],
"trace_id": "a9a7fdab-0e16-4153-898a-33a0c1d9f759",
"dotted_order": "20241013T070536672001Za9a7fdab-0e16-4153-898a-33a0c1d9f759",
"tags": []
}
],
"patch": [
{
"end_time": 1728803137165,
"inputs": {
"model": "gpt-4o-mini",
"messages": [
{
"role": "user",
"content": "howdy"
}
],
"stream": true
},
"outputs": {
"id": "chatcmpl-AHmxU04aGhoVnvv6sBN4AVJ13C0HJ",
"object": "chat.completion.chunk",
"created": 1728803136,
"model": "gpt-4o-mini-2024-07-18",
"system_fingerprint": "fp_8552ec53e1",
"choices": [
{
"index": 0,
"finish_reason": {
"index": 0,
"delta": {},
"logprobs": null,
"finish_reason": "stop"
},
"message": {
"role": "assistant",
"content": "Howdy! How can I assist you today?"
}
}
]
},
"extra": {
"metadata": {
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini"
},
"runtime": {
"library": "langsmith",
"runtime": "node",
"sdk": "langsmith-js",
"sdk_version": "0.1.65"
}
},
"dotted_order": "20241013T070536672001Za9a7fdab-0e16-4153-898a-33a0c1d9f759",
"trace_id": "a9a7fdab-0e16-4153-898a-33a0c1d9f759",
"tags": []
}
]
} |
0 | lc_public_repos/langsmith-sdk/js/src/tests | lc_public_repos/langsmith-sdk/js/src/tests/test_data/langsmith_js_wrap_openai_default.json | {
"post": [
{
"session_name": "default",
"id": "dc34609e-3eeb-459d-bc2a-6fedb01d2e6e",
"name": "ChatOpenAI",
"start_time": 1728803137170,
"run_type": "llm",
"extra": {
"metadata": {
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini"
},
"runtime": {
"library": "langsmith",
"runtime": "node",
"sdk": "langsmith-js",
"sdk_version": "0.1.65"
}
},
"serialized": {},
"inputs": {
"model": "gpt-4o-mini",
"messages": [
{
"role": "user",
"content": "howdy"
}
]
},
"child_runs": [],
"trace_id": "dc34609e-3eeb-459d-bc2a-6fedb01d2e6e",
"dotted_order": "20241013T070537170001Zdc34609e-3eeb-459d-bc2a-6fedb01d2e6e",
"tags": []
}
],
"patch": [
{
"end_time": 1728803138285,
"inputs": {
"model": "gpt-4o-mini",
"messages": [
{
"role": "user",
"content": "howdy"
}
]
},
"outputs": {
"id": "chatcmpl-AHmxWgRAkoZJCaH30D7gz5t1OCc30",
"object": "chat.completion",
"created": 1728803138,
"model": "gpt-4o-mini-2024-07-18",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Howdy! How can I assist you today?",
"refusal": null
},
"logprobs": null,
"finish_reason": "stop"
}
],
"system_fingerprint": "fp_e2bde53e6e",
"usage_metadata": {
"input_tokens": 9,
"output_tokens": 9,
"total_tokens": 18,
"input_token_details": {
"cache_read": 0
},
"output_token_details": {
"reasoning": 0
}
}
},
"extra": {
"metadata": {
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini"
},
"runtime": {
"library": "langsmith",
"runtime": "node",
"sdk": "langsmith-js",
"sdk_version": "0.1.65"
}
},
"dotted_order": "20241013T070537170001Zdc34609e-3eeb-459d-bc2a-6fedb01d2e6e",
"trace_id": "dc34609e-3eeb-459d-bc2a-6fedb01d2e6e",
"tags": []
}
]
} |
0 | lc_public_repos/langsmith-sdk/js/src/tests | lc_public_repos/langsmith-sdk/js/src/tests/test_data/langsmith_js_wrap_openai_stream.json | {
"post": [
{
"session_name": "default",
"id": "33c08c32-435c-4788-b973-b5bee4a31f87",
"name": "ChatOpenAI",
"start_time": 1728803135809,
"run_type": "llm",
"extra": {
"metadata": {
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini"
},
"runtime": {
"library": "langsmith",
"runtime": "node",
"sdk": "langsmith-js",
"sdk_version": "0.1.65"
}
},
"serialized": {},
"inputs": {
"model": "gpt-4o-mini",
"messages": [
{
"role": "user",
"content": "howdy"
}
],
"stream": true,
"stream_options": {
"include_usage": true
}
},
"child_runs": [],
"trace_id": "33c08c32-435c-4788-b973-b5bee4a31f87",
"dotted_order": "20241013T070535809001Z33c08c32-435c-4788-b973-b5bee4a31f87",
"tags": []
}
],
"patch": [
{
"end_time": 1728803136663,
"inputs": {
"model": "gpt-4o-mini",
"messages": [
{
"role": "user",
"content": "howdy"
}
],
"stream": true,
"stream_options": {
"include_usage": true
}
},
"outputs": {
"id": "chatcmpl-AHmxUqVIsivSmVbmtSHGuij1ld5TY",
"object": "chat.completion.chunk",
"created": 1728803136,
"model": "gpt-4o-mini-2024-07-18",
"system_fingerprint": "fp_e2bde53e6e",
"choices": [
{
"index": 0,
"finish_reason": {
"index": 0,
"delta": {},
"logprobs": null,
"finish_reason": "stop"
},
"message": {
"role": "assistant",
"content": "Howdy! How can I assist you today?"
}
}
],
"usage_metadata": {
"input_tokens": 9,
"output_tokens": 9,
"total_tokens": 18,
"input_token_details": {
"cache_read": 0
},
"output_token_details": {
"reasoning": 0
}
}
},
"extra": {
"metadata": {
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "gpt-4o-mini"
},
"runtime": {
"library": "langsmith",
"runtime": "node",
"sdk": "langsmith-js",
"sdk_version": "0.1.65"
}
},
"dotted_order": "20241013T070535809001Z33c08c32-435c-4788-b973-b5bee4a31f87",
"trace_id": "33c08c32-435c-4788-b973-b5bee4a31f87",
"tags": []
}
]
} |
0 | lc_public_repos/langsmith-sdk/js/src/tests | lc_public_repos/langsmith-sdk/js/src/tests/test_data/langsmith_js_wrap_openai_reasoning.json | {
"post": [
{
"session_name": "default",
"id": "e954b8e3-c337-4a05-bf0a-ca2baac3ba48",
"name": "ChatOpenAI",
"start_time": 1728803138291,
"run_type": "llm",
"extra": {
"metadata": {
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "o1-mini"
},
"runtime": {
"library": "langsmith",
"runtime": "node",
"sdk": "langsmith-js",
"sdk_version": "0.1.65"
}
},
"serialized": {},
"inputs": {
"model": "o1-mini",
"messages": [
{
"role": "user",
"content": "Write a bash script that takes a matrix represented as a string with format '[1,2],[3,4],[5,6]' and prints the transpose in the same format."
}
]
},
"child_runs": [],
"trace_id": "e954b8e3-c337-4a05-bf0a-ca2baac3ba48",
"dotted_order": "20241013T070538291001Ze954b8e3-c337-4a05-bf0a-ca2baac3ba48",
"tags": []
}
],
"patch": [
{
"end_time": 1728803148730,
"inputs": {
"model": "o1-mini",
"messages": [
{
"role": "user",
"content": "Write a bash script that takes a matrix represented as a string with format '[1,2],[3,4],[5,6]' and prints the transpose in the same format."
}
]
},
"outputs": {
"id": "chatcmpl-AHmxWXSp7oUeT2kkt7kuhLOmfLKDJ",
"object": "chat.completion",
"created": 1728803138,
"model": "o1-mini-2024-09-12",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Certainly! Below is a Bash script that takes a matrix represented as a string in the format `\"[1,2],[3,4],[5,6]\"` and outputs its transpose in the same format.\n\n### Script: `transpose_matrix.sh`\n\n```bash\n#!/bin/bash\n\n# Check if an argument is provided\nif [ $# -ne 1 ]; then\n echo \"Usage: $0 \\\"[1,2],[3,4],[5,6]\\\"\"\n exit 1\nfi\n\ninput=\"$1\"\n\n# Function to trim leading and trailing brackets\ntrim_brackets() {\n local str=\"$1\"\n str=\"${str#[}\"\n str=\"${str%]}\"\n echo \"$str\"\n}\n\n# Remove outer brackets if present and split the matrix into rows\ntrimmed_input=$(trim_brackets \"$input\")\nIFS=\"],[\" read -r -a rows <<< \"$trimmed_input\"\n\n# Initialize an array of arrays to hold the matrix\ndeclare -a matrix\nnum_cols=0\n\n# Parse each row into the matrix array\nfor row in \"${rows[@]}\"; do\n IFS=',' read -r -a cols <<< \"$row\"\n matrix+=(\"${cols[@]}\")\n if [ \"${#cols[@]}\" -gt \"$num_cols\" ]; then\n num_cols=\"${#cols[@]}\"\n fi\ndone\n\n# Determine the number of rows\nnum_rows=\"${#rows[@]}\"\n\n# Initialize an array to hold the transposed matrix\ndeclare -a transpose\n\n# Build the transpose by iterating over columns and rows\nfor ((c=0; c<num_cols; c++)); do\n temp_row=\"\"\n for ((r=0; r<num_rows; r++)); do\n # Calculate the index in the flattened matrix\n index=$((r * num_cols + c))\n # Append the element if it exists, else append nothing\n if [ $index -lt \"${#matrix[@]}\" ]; then\n temp_row+=\"${matrix[index]},\"\n fi\n done\n # Remove the trailing comma and add brackets\n temp_row=\"[${temp_row%,}]\"\n transpose+=(\"$temp_row\")\ndone\n\n# Join the transposed rows with commas\noutput=$(IFS=, ; echo \"${transpose[*]}\")\n\n# Print the transposed matrix\necho \"$output\"\n```\n\n### How It Works\n\n1. **Input Handling**:\n - The script expects a single argument representing the matrix string.\n - It trims the outer brackets and splits the string into individual rows based on `\"],[\"`.\n\n2. 
**Parsing the Matrix**:\n - Each row is further split by commas to extract individual elements.\n - All elements are stored in a flattened array `matrix`, and the number of columns is determined.\n\n3. **Transposing the Matrix**:\n - The script iterates over each column and then each row to build the transposed matrix.\n - It ensures that even if some rows have fewer elements, it handles them gracefully.\n\n4. **Formatting the Output**:\n - Each transposed row is wrapped in brackets and joined together with commas to match the desired output format.\n\n### Usage\n\n1. **Make the Script Executable**:\n ```bash\n chmod +x transpose_matrix.sh\n ```\n\n2. **Run the Script with a Matrix String**:\n ```bash\n ./transpose_matrix.sh \"[1,2],[3,4],[5,6]\"\n ```\n **Output**:\n ```\n [1,3,5],[2,4,6]\n ```\n\n### Examples\n\n- **Example 1**:\n ```bash\n ./transpose_matrix.sh \"[1,2,3],[4,5,6]\"\n ```\n **Output**:\n ```\n [1,4],[2,5],[3,6]\n ```\n\n- **Example 2**:\n ```bash\n ./transpose_matrix.sh \"[7,8],[9,10],[11,12],[13,14]\"\n ```\n **Output**:\n ```\n [7,9,11,13],[8,10,12,14]\n ```\n\n### Notes\n\n- The script assumes that the input string is well-formed, with each row enclosed in brackets and elements separated by commas.\n- It handles matrices where different rows might have varying numbers of columns by only transposing up to the minimum number of columns present.\n\nFeel free to customize and enhance the script as needed for more complex scenarios or additional validations!",
"refusal": null
},
"finish_reason": "stop"
}
],
"system_fingerprint": "fp_692002f015",
"usage_metadata": {
"input_tokens": 43,
"output_tokens": 1901,
"total_tokens": 1944,
"input_token_details": {
"cache_read": 0
},
"output_token_details": {
"reasoning": 960
}
}
},
"extra": {
"metadata": {
"ls_provider": "openai",
"ls_model_type": "chat",
"ls_model_name": "o1-mini"
},
"runtime": {
"library": "langsmith",
"runtime": "node",
"sdk": "langsmith-js",
"sdk_version": "0.1.65"
}
},
"dotted_order": "20241013T070538291001Ze954b8e3-c337-4a05-bf0a-ca2baac3ba48",
"trace_id": "e954b8e3-c337-4a05-bf0a-ca2baac3ba48",
"tags": []
}
]
} |
0 | lc_public_repos/langsmith-sdk/js/src/tests | lc_public_repos/langsmith-sdk/js/src/tests/utils/tree.ts | import { Run } from "../../schemas.js";
/**
 * Reconstructs an assumed run tree from the raw fetch calls captured by a
 * mocked client. POST /runs calls create/merge nodes (and record parent→child
 * edges); PATCH /runs/:id calls merge updates into existing nodes.
 *
 * Nodes are labeled "name:index", where the index reflects the order in which
 * each run id was first observed, so trees can be compared deterministically.
 */
export function getAssumedTreeFromCalls(calls: unknown[][]) {
  const edges: Array<[string, string]> = [];
  const runsById: Record<string, Run> = {};
  // Run ids in first-seen order; the position doubles as the stable index.
  const seenOrder: string[] = [];

  const stableIndex = (id: string): number => {
    const existing = seenOrder.indexOf(id);
    if (existing >= 0) return existing;
    seenOrder.push(id);
    return seenOrder.length - 1;
  };

  // "name:index" label for a run id (registers the id if not yet seen).
  const label = (id: string): string =>
    `${runsById[id].name}:${stableIndex(id)}`;

  for (const call of calls) {
    // The fetch URL and options are always the last two call arguments.
    const [rawUrl, fetchArgs] = call.slice(-2) as [
      string,
      { method: string; body: string }
    ];
    const request = `${fetchArgs.method} ${new URL(rawUrl).pathname}`;
    const payload: Run = JSON.parse(fetchArgs.body);
    if (request === "POST /runs") {
      stableIndex(payload.id);
      runsById[payload.id] = { ...runsById[payload.id], ...payload };
      if (runsById[payload.id].parent_run_id) {
        // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
        edges.push([runsById[payload.id].parent_run_id!, runsById[payload.id].id]);
      }
    } else if (request.startsWith("PATCH /runs/")) {
      const runId = request.substring("PATCH /runs/".length);
      stableIndex(runId);
      runsById[runId] = { ...runsById[runId], ...payload };
    }
  }

  return {
    nodes: seenOrder.map(label),
    edges: edges.map(([source, target]) => [label(source), label(target)]),
    data: Object.fromEntries(
      Object.entries(runsById).map(([id, value]) => [label(id), value] as const)
    ),
  };
}
|
0 | lc_public_repos/langsmith-sdk/js/src/tests | lc_public_repos/langsmith-sdk/js/src/tests/utils/mock_client.ts | // eslint-disable-next-line import/no-extraneous-dependencies
import { jest } from "@jest/globals";
// eslint-disable-next-line import/no-extraneous-dependencies
import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain";
import { Client } from "../../index.js";
type ClientParams = Exclude<ConstructorParameters<typeof Client>[0], undefined>;
// Builds a LangSmith Client whose network layer is stubbed out: every request
// through the internal caller resolves with an empty OK response and is
// recorded on `callSpy` for later inspection (e.g. by getAssumedTreeFromCalls).
// Batching is disabled so each run produces its own captured call.
export const mockClient = (config?: Omit<ClientParams, "autoBatchTracing">) => {
  const client = new Client({
    ...config,
    apiKey: "MOCK",
    autoBatchTracing: false,
  });
  // `caller` is private on Client, hence the `any` cast to spy on it.
  const callSpy = jest
    .spyOn((client as any).caller, "call")
    .mockResolvedValue({ ok: true, text: () => "" });
  const langChainTracer = new LangChainTracer({
    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
    // @ts-ignore Overriden client
    client,
  });
  return { client, callSpy, langChainTracer };
};
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/wrappers/generic.ts | import type { RunTreeConfig } from "../index.js";
import { traceable } from "../traceable.js";
/**
 * Recursively wraps an SDK object in a Proxy so that every method call is
 * traced via `traceable` (run type "llm") and every nested plain object is
 * wrapped the same way. Run names are dotted paths like "SDK.chat.create".
 * Arrays, Dates, and primitives are returned untouched.
 */
export const _wrapClient = <T extends object>(
  sdk: T,
  runName: string,
  options?: Omit<RunTreeConfig, "name">
): T => {
  return new Proxy(sdk, {
    get(target, propKey, receiver) {
      const value = target[propKey as keyof T];
      const childName = [runName, propKey.toString()].join(".");
      if (typeof value === "function") {
        // Bind to the original target so internal `this` usage keeps working.
        return traceable(value.bind(target), {
          run_type: "llm",
          ...options,
          name: childName,
        });
      }
      const isTraversable =
        value != null &&
        typeof value === "object" &&
        !Array.isArray(value) &&
        // eslint-disable-next-line no-instanceof/no-instanceof
        !(value instanceof Date);
      if (isTraversable) {
        // Descend into namespaces (e.g. client.chat.completions).
        return _wrapClient(value, childName, options);
      }
      return Reflect.get(target, propKey, receiver);
    },
  });
};
// Options accepted by `wrapSDK`: any RunTreeConfig field, plus the legacy
// `runName` alias for `name` (kept for backwards compatibility).
type WrapSDKOptions = Partial<
  RunTreeConfig & {
    /**
     * @deprecated Use `name` instead.
     */
    runName: string;
  }
>;
/**
* Wrap an arbitrary SDK, enabling automatic LangSmith tracing.
* Method signatures are unchanged.
*
* Note that this will wrap and trace ALL SDK methods, not just
* LLM completion methods. If the passed SDK contains other methods,
* we recommend using the wrapped instance for LLM calls only.
* @param sdk An arbitrary SDK instance.
* @param options LangSmith options.
* @returns
*/
export const wrapSDK = <T extends object>(
  sdk: T,
  options?: WrapSDKOptions
): T => {
  // Strip naming fields before forwarding: the run name is passed separately
  // to _wrapClient, which expects options without a `name`.
  let forwarded: WrapSDKOptions | undefined;
  if (options != null) {
    const { name: _name, runName: _runName, ...rest } = options;
    forwarded = rest;
  }
  // Precedence for the root run name: explicit `name`, deprecated `runName`,
  // then the SDK's constructor name.
  return _wrapClient(
    sdk,
    options?.name ?? options?.runName ?? sdk.constructor?.name,
    forwarded
  );
};
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/wrappers/vercel.ts | import type { RunTreeConfig } from "../index.js";
import { traceable } from "../traceable.js";
import { _wrapClient } from "./generic.js";
/**
* Wrap a Vercel AI SDK model, enabling automatic LangSmith tracing.
* After wrapping a model, you can use it with the Vercel AI SDK Core
* methods as normal.
*
* @example
* ```ts
* import { anthropic } from "@ai-sdk/anthropic";
* import { streamText } from "ai";
* import { wrapAISDKModel } from "langsmith/wrappers/vercel";
*
* const anthropicModel = anthropic("claude-3-haiku-20240307");
*
* const modelWithTracing = wrapAISDKModel(anthropicModel);
*
* const { textStream } = await streamText({
* model: modelWithTracing,
* prompt: "Write a vegetarian lasagna recipe for 4 people.",
* });
*
* for await (const chunk of textStream) {
* console.log(chunk);
* }
* ```
* @param model An AI SDK model instance.
* @param options LangSmith options.
* @returns
*/
export const wrapAISDKModel = <T extends object>(
  model: T,
  options?: Partial<RunTreeConfig>
): T => {
  // Duck-type check for the Vercel LanguageModelV1 interface: both doStream
  // and doGenerate must be present and callable.
  if (
    !("doStream" in model) ||
    typeof model.doStream !== "function" ||
    !("doGenerate" in model) ||
    typeof model.doGenerate !== "function"
  ) {
    throw new Error(
      `Received invalid input. This version of wrapAISDKModel only supports Vercel LanguageModelV1 instances.`
    );
  }
  const runName = options?.name ?? model.constructor?.name;
  return new Proxy(model, {
    get(target, propKey, receiver) {
      const originalValue = target[propKey as keyof T];
      if (typeof originalValue === "function") {
        let __finalTracedIteratorKey;
        let aggregator;
        if (propKey === "doStream") {
          // doStream resolves to an object whose `stream` property is the
          // actual async iterable; tell traceable which key to consume.
          __finalTracedIteratorKey = "stream";
          // Fold streamed parts into one output: concatenated text, the last
          // tool call's fields, and usage/finishReason from the finish part.
          aggregator = (chunks: any[]) => {
            return chunks.reduce(
              (aggregated, chunk) => {
                if (chunk.type === "text-delta") {
                  return {
                    ...aggregated,
                    text: aggregated.text + chunk.textDelta,
                  };
                } else if (chunk.type === "tool-call") {
                  return {
                    ...aggregated,
                    ...chunk,
                  };
                } else if (chunk.type === "finish") {
                  return {
                    ...aggregated,
                    usage: chunk.usage,
                    finishReason: chunk.finishReason,
                  };
                } else {
                  return aggregated;
                }
              },
              {
                text: "",
              }
            );
          };
        }
        return traceable(originalValue.bind(target), {
          run_type: "llm",
          name: runName,
          ...options,
          __finalTracedIteratorKey,
          aggregator,
        });
      } else if (
        originalValue != null &&
        !Array.isArray(originalValue) &&
        // eslint-disable-next-line no-instanceof/no-instanceof
        !(originalValue instanceof Date) &&
        typeof originalValue === "object"
      ) {
        // Nested plain objects get the generic recursive wrapping.
        return _wrapClient(
          originalValue,
          [runName, propKey.toString()].join("."),
          options
        );
      } else {
        return Reflect.get(target, propKey, receiver);
      }
    },
  });
};
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/wrappers/index.ts | export * from "./openai.js";
export { wrapSDK } from "./generic.js";
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/wrappers/openai.ts | import { OpenAI } from "openai";
import type { APIPromise } from "openai/core";
import type { RunTreeConfig } from "../index.js";
import { isTraceableFunction, traceable } from "../traceable.js";
import { KVMap } from "../schemas.js";
// Extra leniency around types in case multiple OpenAI SDK versions get installed
// Minimal structural shape of an OpenAI client that wrapOpenAI needs:
// chat/completions `create` methods, plus the optional beta parse endpoint.
type OpenAIType = {
  beta?: {
    chat?: {
      completions?: {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        parse?: (...args: any[]) => any;
      };
    };
  };
  chat: {
    completions: {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      create: (...args: any[]) => any;
    };
  };
  completions: {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    create: (...args: any[]) => any;
  };
};
// Subset of RunTreeConfig callers may pass per-request via `langsmithExtra`.
type ExtraRunTreeConfig = Pick<
  Partial<RunTreeConfig>,
  "name" | "metadata" | "tags"
>;
/**
 * OpenAI client type with `create` overloads widened to accept an optional
 * `langsmithExtra` field in the request options.
 *
 * Fix: the non-streaming chat overload previously declared a return type of
 * `APIPromise<OpenAI.ChatCompletionChunk>`; a non-streaming request resolves
 * to a full `OpenAI.ChatCompletion`, matching the runtime behavior.
 */
type PatchedOpenAIClient<T extends OpenAIType> = T & {
  chat: T["chat"] & {
    completions: T["chat"]["completions"] & {
      create: {
        (
          arg: OpenAI.ChatCompletionCreateParamsStreaming,
          arg2?: OpenAI.RequestOptions & { langsmithExtra?: ExtraRunTreeConfig }
        ): APIPromise<AsyncGenerator<OpenAI.ChatCompletionChunk>>;
      } & {
        (
          arg: OpenAI.ChatCompletionCreateParamsNonStreaming,
          arg2?: OpenAI.RequestOptions & { langsmithExtra?: ExtraRunTreeConfig }
        ): APIPromise<OpenAI.ChatCompletion>;
      };
    };
  };
  completions: T["completions"] & {
    create: {
      (
        arg: OpenAI.CompletionCreateParamsStreaming,
        arg2?: OpenAI.RequestOptions & { langsmithExtra?: ExtraRunTreeConfig }
      ): APIPromise<AsyncGenerator<OpenAI.Completion>>;
    } & {
      (
        arg: OpenAI.CompletionCreateParamsNonStreaming,
        arg2?: OpenAI.RequestOptions & { langsmithExtra?: ExtraRunTreeConfig }
      ): APIPromise<OpenAI.Completion>;
    };
  };
};
/**
 * Merges the streamed choice deltas (all belonging to one choice index) into a
 * single chat-completion-style choice: role, concatenated content, combined
 * function_call / tool_calls, and the final finish_reason.
 *
 * Fixes:
 * - `finish_reason` previously stored the entire matching choice object
 *   (`find(...) || null`) instead of its `finish_reason` string.
 * - Tool-call chunks were grouped by the *choice* index (`c.index`) rather
 *   than the tool call's own `index`, which merged distinct parallel tool
 *   calls together; they are now grouped by `tool_call.index`.
 */
function _combineChatCompletionChoices(
  choices: OpenAI.ChatCompletionChunk.Choice[]
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
): any {
  const reversedChoices = choices.slice().reverse();
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  const message: { [key: string]: any } = {
    role: "assistant",
    content: "",
  };
  // The role typically appears only on one delta; take the last one seen.
  for (const c of reversedChoices) {
    if (c.delta.role) {
      message["role"] = c.delta.role;
      break;
    }
  }
  // Chunks for each tool call, keyed by the tool call's streaming index.
  const toolCalls: {
    [
      key: number
    ]: Partial<OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall>[];
  } = {};
  for (const c of choices) {
    if (c.delta.content) {
      message.content = message.content.concat(c.delta.content);
    }
    if (c.delta.function_call) {
      if (!message.function_call) {
        message.function_call = { name: "", arguments: "" };
      }
      if (c.delta.function_call.name) {
        message.function_call.name += c.delta.function_call.name;
      }
      if (c.delta.function_call.arguments) {
        message.function_call.arguments += c.delta.function_call.arguments;
      }
    }
    if (c.delta.tool_calls) {
      for (const tool_call of c.delta.tool_calls) {
        const toolCallIndex = tool_call.index ?? 0;
        if (!toolCalls[toolCallIndex]) {
          toolCalls[toolCallIndex] = [];
        }
        toolCalls[toolCallIndex].push(tool_call);
      }
    }
  }
  if (Object.keys(toolCalls).length > 0) {
    message.tool_calls = [...Array(Object.keys(toolCalls).length)];
    for (const [index, toolCallChunks] of Object.entries(toolCalls)) {
      const idx = parseInt(index);
      message.tool_calls[idx] = {
        index: idx,
        id: toolCallChunks.find((c) => c.id)?.id || null,
        type: toolCallChunks.find((c) => c.type)?.type || null,
      };
      for (const chunk of toolCallChunks) {
        if (chunk.function) {
          if (!message.tool_calls[idx].function) {
            message.tool_calls[idx].function = {
              name: "",
              arguments: "",
            };
          }
          if (chunk.function.name) {
            message.tool_calls[idx].function.name += chunk.function.name;
          }
          if (chunk.function.arguments) {
            message.tool_calls[idx].function.arguments +=
              chunk.function.arguments;
          }
        }
      }
    }
  }
  return {
    index: choices[0].index,
    // Extract the string itself, not the containing choice object.
    finish_reason: reversedChoices.find((c) => c.finish_reason)?.finish_reason ?? null,
    message: message,
  };
}
/**
 * Aggregates streamed chat completion chunks into a single completion-shaped
 * object: choices are grouped by index and combined, while top-level fields
 * (id, model, usage, ...) are taken from the final chunk.
 *
 * Fix: the final chunk is shallow-copied before its `choices` is replaced, so
 * the caller's chunk array is no longer mutated.
 */
const chatAggregator = (chunks: OpenAI.ChatCompletionChunk[]) => {
  if (!chunks || chunks.length === 0) {
    // Empty stream: synthesize a minimal empty assistant message.
    return { choices: [{ message: { role: "assistant", content: "" } }] };
  }
  const choicesByIndex: {
    [index: number]: OpenAI.ChatCompletionChunk.Choice[];
  } = {};
  for (const chunk of chunks) {
    for (const choice of chunk.choices) {
      if (choicesByIndex[choice.index] === undefined) {
        choicesByIndex[choice.index] = [];
      }
      choicesByIndex[choice.index].push(choice);
    }
  }
  // Copy so we do not mutate the caller's last chunk in place.
  const aggregatedOutput = { ...chunks[chunks.length - 1] };
  aggregatedOutput.choices = Object.values(choicesByIndex).map((choices) =>
    _combineChatCompletionChoices(choices)
  );
  return aggregatedOutput;
};
/**
 * Aggregates streamed (legacy) text completion chunks into one completion:
 * the `text` of each chunk's first choice is concatenated, and top-level
 * fields are taken from the final chunk.
 *
 * Fixes:
 * - The final chunk is shallow-copied before `choices` is replaced, so the
 *   caller's chunk array is no longer mutated.
 * - A chunk with an empty `choices` array no longer throws.
 */
const textAggregator = (
  allChunks: OpenAI.Completions.Completion[]
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
): Record<string, any> => {
  if (allChunks.length === 0) {
    return { choices: [{ text: "" }] };
  }
  const allContent: string[] = [];
  for (const chunk of allChunks) {
    // Guard against chunks with no choices (e.g. usage-only chunks).
    const content = chunk.choices[0]?.text;
    if (content != null) {
      allContent.push(content);
    }
  }
  const content = allContent.join("");
  // Copy so we do not mutate the caller's last chunk in place.
  const aggregatedOutput = { ...allChunks[allChunks.length - 1] };
  aggregatedOutput.choices = [
    { ...aggregatedOutput.choices[0], text: content },
  ];
  return aggregatedOutput;
};
/**
 * Normalizes an OpenAI chat completion for LangSmith: replaces the provider's
 * `usage` object with a `usage_metadata` block (input/output/total tokens plus
 * optional audio/cache/reasoning detail counts). The input object is not
 * mutated; a shallow copy is returned.
 *
 * Fix: detail-token presence checks now use `!= null` instead of `!== null`.
 * When `prompt_tokens_details` / `completion_tokens_details` are absent, the
 * optional-chained values are `undefined`, and the old `!== null` comparison
 * incorrectly produced `{ audio: undefined, cache_read: undefined }` entries
 * (and thus always emitted the detail objects).
 */
function processChatCompletion(outputs: Readonly<KVMap>): KVMap {
  const chatCompletion = outputs as OpenAI.ChatCompletion;
  // copy the original object, minus usage
  const result = { ...chatCompletion } as KVMap;
  const usage = chatCompletion.usage;
  if (usage) {
    const inputTokenDetails = {
      ...(usage.prompt_tokens_details?.audio_tokens != null && {
        audio: usage.prompt_tokens_details?.audio_tokens,
      }),
      ...(usage.prompt_tokens_details?.cached_tokens != null && {
        cache_read: usage.prompt_tokens_details?.cached_tokens,
      }),
    };
    const outputTokenDetails = {
      ...(usage.completion_tokens_details?.audio_tokens != null && {
        audio: usage.completion_tokens_details?.audio_tokens,
      }),
      ...(usage.completion_tokens_details?.reasoning_tokens != null && {
        reasoning: usage.completion_tokens_details?.reasoning_tokens,
      }),
    };
    result.usage_metadata = {
      input_tokens: usage.prompt_tokens ?? 0,
      output_tokens: usage.completion_tokens ?? 0,
      total_tokens: usage.total_tokens ?? 0,
      // Only attach detail objects when at least one detail count is present.
      ...(Object.keys(inputTokenDetails).length > 0 && {
        input_token_details: inputTokenDetails,
      }),
      ...(Object.keys(outputTokenDetails).length > 0 && {
        output_token_details: outputTokenDetails,
      }),
    };
  }
  delete result.usage;
  return result;
}
/**
* Wraps an OpenAI client's completion methods, enabling automatic LangSmith
* tracing. Method signatures are unchanged, with the exception that you can pass
* an additional and optional "langsmithExtra" field within the second parameter.
* @param openai An OpenAI client instance.
* @param options LangSmith options.
* @example
* ```ts
* const patchedStream = await patchedClient.chat.completions.create(
* {
* messages: [{ role: "user", content: `Say 'foo'` }],
* model: "gpt-3.5-turbo",
* stream: true,
* },
* {
* langsmithExtra: {
* metadata: {
* additional_data: "bar",
* },
* },
* },
* );
* ```
*/
export const wrapOpenAI = <T extends OpenAIType>(
  openai: T,
  options?: Partial<RunTreeConfig>
): PatchedOpenAIClient<T> => {
  // Guard against double-wrapping, which would nest runs incorrectly.
  if (
    isTraceableFunction(openai.chat.completions.create) ||
    isTraceableFunction(openai.completions.create)
  ) {
    throw new Error(
      "This instance of OpenAI client has been already wrapped once."
    );
  }
  // Some internal OpenAI methods call each other, so we need to preserve original
  // OpenAI methods.
  const tracedOpenAIClient = { ...openai };
  // beta.chat.completions.parse is optional (newer SDK versions only); wrap it
  // when present with the same chat-completion tracing configuration.
  if (
    openai.beta &&
    openai.beta.chat &&
    openai.beta.chat.completions &&
    typeof openai.beta.chat.completions.parse === "function"
  ) {
    tracedOpenAIClient.beta = {
      ...openai.beta,
      chat: {
        ...openai.beta.chat,
        completions: {
          ...openai.beta.chat.completions,
          parse: traceable(
            openai.beta.chat.completions.parse.bind(
              openai.beta.chat.completions
            ),
            {
              name: "ChatOpenAI",
              run_type: "llm",
              aggregator: chatAggregator,
              // Per-request LangSmith config is read from the second argument's
              // `langsmithExtra` field.
              argsConfigPath: [1, "langsmithExtra"],
              getInvocationParams: (payload: unknown) => {
                if (typeof payload !== "object" || payload == null)
                  return undefined;
                // we can safely do so, as the types are not exported in TSC
                const params = payload as OpenAI.ChatCompletionCreateParams;
                const ls_stop =
                  (typeof params.stop === "string"
                    ? [params.stop]
                    : params.stop) ?? undefined;
                return {
                  ls_provider: "openai",
                  ls_model_type: "chat",
                  ls_model_name: params.model,
                  ls_max_tokens: params.max_tokens ?? undefined,
                  ls_temperature: params.temperature ?? undefined,
                  ls_stop,
                };
              },
              ...options,
            }
          ),
        },
      },
    };
  }
  // Chat completions: streamed chunks are merged by chatAggregator, and the
  // final output's usage is normalized by processChatCompletion.
  tracedOpenAIClient.chat = {
    ...openai.chat,
    completions: {
      ...openai.chat.completions,
      create: traceable(
        openai.chat.completions.create.bind(openai.chat.completions),
        {
          name: "ChatOpenAI",
          run_type: "llm",
          aggregator: chatAggregator,
          argsConfigPath: [1, "langsmithExtra"],
          getInvocationParams: (payload: unknown) => {
            if (typeof payload !== "object" || payload == null)
              return undefined;
            // we can safely do so, as the types are not exported in TSC
            const params = payload as OpenAI.ChatCompletionCreateParams;
            const ls_stop =
              (typeof params.stop === "string" ? [params.stop] : params.stop) ??
              undefined;
            return {
              ls_provider: "openai",
              ls_model_type: "chat",
              ls_model_name: params.model,
              ls_max_tokens: params.max_tokens ?? undefined,
              ls_temperature: params.temperature ?? undefined,
              ls_stop,
            };
          },
          processOutputs: processChatCompletion,
          ...options,
        }
      ),
    },
  };
  // Legacy text completions: streamed chunks are merged by textAggregator.
  tracedOpenAIClient.completions = {
    ...openai.completions,
    create: traceable(openai.completions.create.bind(openai.completions), {
      name: "OpenAI",
      run_type: "llm",
      aggregator: textAggregator,
      argsConfigPath: [1, "langsmithExtra"],
      getInvocationParams: (payload: unknown) => {
        if (typeof payload !== "object" || payload == null) return undefined;
        // we can safely do so, as the types are not exported in TSC
        const params = payload as OpenAI.CompletionCreateParams;
        const ls_stop =
          (typeof params.stop === "string" ? [params.stop] : params.stop) ??
          undefined;
        return {
          ls_provider: "openai",
          ls_model_type: "llm",
          ls_model_name: params.model,
          ls_max_tokens: params.max_tokens ?? undefined,
          ls_temperature: params.temperature ?? undefined,
          ls_stop,
        };
      },
      ...options,
    }),
  };
  return tracedOpenAIClient as PatchedOpenAIClient<T>;
};
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/utils/async_caller.ts | import pRetry from "p-retry";
import PQueueMod from "p-queue";
import { _getFetchImplementation } from "../singletons/fetch.js";
// HTTP statuses for which a retry is pointless: these client errors will not
// succeed on resubmission.
// NOTE(review): 408 (Request Timeout) is deliberately listed as non-retryable
// here — confirm intended.
const STATUS_NO_RETRY = [
  400, // Bad Request
  401, // Unauthorized
  403, // Forbidden
  404, // Not Found
  405, // Method Not Allowed
  406, // Not Acceptable
  407, // Proxy Authentication Required
  408, // Request Timeout
];
// Statuses treated as benign: the retry loop stops without surfacing an error.
const STATUS_IGNORE = [
  409, // Conflict
];
// Hook invoked with the failed Response before another retry attempt is made.
type ResponseCallback = (response?: Response) => Promise<boolean>;
export interface AsyncCallerParams {
  /**
   * The maximum number of concurrent calls that can be made.
   * Defaults to `Infinity`, which means no limit.
   */
  maxConcurrency?: number;
  /**
   * The maximum number of retries that can be made for a single call,
   * with an exponential backoff between each attempt. Defaults to 6.
   */
  maxRetries?: number;
  /** Called with the failed Response on retryable HTTP failures. */
  onFailedResponseHook?: ResponseCallback;
}
export interface AsyncCallerCallOptions {
  /** When aborted, the pending call settles with an "AbortError" rejection. */
  signal?: AbortSignal;
}
/**
* A class that can be used to make async calls with concurrency and retry logic.
*
* This is useful for making calls to any kind of "expensive" external resource,
* be it because it's rate-limited, subject to network issues, etc.
*
* Concurrent calls are limited by the `maxConcurrency` parameter, which defaults
* to `Infinity`. This means that by default, all calls will be made in parallel.
*
* Retries are limited by the `maxRetries` parameter, which defaults to 6. This
* means that by default, each call will be retried up to 6 times, with an
* exponential backoff between each attempt.
*/
export class AsyncCaller {
  protected maxConcurrency: AsyncCallerParams["maxConcurrency"];

  protected maxRetries: AsyncCallerParams["maxRetries"];

  // p-queue instance that enforces `maxConcurrency` across all calls.
  queue: typeof import("p-queue")["default"]["prototype"];

  private onFailedResponseHook?: ResponseCallback;

  constructor(params: AsyncCallerParams) {
    this.maxConcurrency = params.maxConcurrency ?? Infinity;
    this.maxRetries = params.maxRetries ?? 6;

    // p-queue ships different module shapes (CJS vs ESM interop); use
    // whichever export is present.
    if ("default" in PQueueMod) {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      this.queue = new (PQueueMod.default as any)({
        concurrency: this.maxConcurrency,
      });
    } else {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      this.queue = new (PQueueMod as any)({ concurrency: this.maxConcurrency });
    }
    this.onFailedResponseHook = params?.onFailedResponseHook;
  }

  /**
   * Queues `callable(...args)` behind the concurrency limit and retries
   * failures with randomized exponential backoff (up to `maxRetries`).
   * Aborts/timeouts, `ECONNABORTED`, and non-retryable HTTP statuses fail
   * immediately; statuses in STATUS_IGNORE stop retrying silently.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  call<A extends any[], T extends (...args: A) => Promise<any>>(
    callable: T,
    ...args: Parameters<T>
  ): Promise<Awaited<ReturnType<T>>> {
    const onFailedResponseHook = this.onFailedResponseHook;
    return this.queue.add(
      () =>
        pRetry(
          () =>
            callable(...(args as Parameters<T>)).catch((error) => {
              // Normalize non-Error rejections so pRetry can inspect `.message`.
              // eslint-disable-next-line no-instanceof/no-instanceof
              if (error instanceof Error) {
                throw error;
              } else {
                throw new Error(error);
              }
            }),
          {
            async onFailedAttempt(error) {
              // Cancellations and timeouts should fail fast, not retry.
              if (
                error.message.startsWith("Cancel") ||
                error.message.startsWith("TimeoutError") ||
                error.message.startsWith("AbortError")
              ) {
                throw error;
              }
              // eslint-disable-next-line @typescript-eslint/no-explicit-any
              if ((error as any)?.code === "ECONNABORTED") {
                throw error;
              }
              // eslint-disable-next-line @typescript-eslint/no-explicit-any
              const response: Response | undefined = (error as any)?.response;
              const status = response?.status;
              if (status) {
                if (STATUS_NO_RETRY.includes(+status)) {
                  // Client errors will not succeed on retry.
                  throw error;
                } else if (STATUS_IGNORE.includes(+status)) {
                  // e.g. 409 Conflict: abandon quietly without an error.
                  return;
                }
                if (onFailedResponseHook) {
                  await onFailedResponseHook(response);
                }
              }
            },
            // If needed we can change some of the defaults here,
            // but they're quite sensible.
            retries: this.maxRetries,
            randomize: true,
          }
        ),
      { throwOnTimeout: true }
    );
  }

  /**
   * Like {@link call}, but settles early with an "AbortError" rejection when
   * `options.signal` fires.
   * Note this doesn't cancel the underlying request — when available, prefer
   * the signal option of the underlying call.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  callWithOptions<A extends any[], T extends (...args: A) => Promise<any>>(
    options: AsyncCallerCallOptions,
    callable: T,
    ...args: Parameters<T>
  ): Promise<Awaited<ReturnType<T>>> {
    const signal = options.signal;
    if (signal) {
      // Fail fast on an already-aborted signal; previously the abort listener
      // never fired retroactively and the full call was awaited anyway.
      if (signal.aborted) {
        return Promise.reject(new Error("AbortError"));
      }
      let onAbort!: () => void;
      const abortPromise = new Promise<never>((_, reject) => {
        onAbort = () => reject(new Error("AbortError"));
        signal.addEventListener("abort", onAbort, { once: true });
      });
      // Detach the listener once settled; the previous implementation leaked
      // one listener per call on long-lived shared signals.
      return Promise.race([
        this.call<A, T>(callable, ...args),
        abortPromise,
      ]).finally(() => signal.removeEventListener("abort", onAbort));
    }
    return this.call<A, T>(callable, ...args);
  }

  /** fetch wrapper that rejects with the Response itself on non-2xx statuses. */
  fetch(...args: Parameters<typeof fetch>): ReturnType<typeof fetch> {
    return this.call(() =>
      _getFetchImplementation()(...args).then(
        (res: Awaited<ReturnType<typeof fetch>>) =>
          res.ok ? res : Promise.reject(res)
      )
    );
  }
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/utils/atee.ts | export function atee<T>(
iter: AsyncGenerator<T>,
length = 2
): AsyncGenerator<T>[] {
const buffers = Array.from(
{ length },
() => [] as Array<IteratorResult<T> | IteratorReturnResult<T>>
);
return buffers.map(async function* makeIter(buffer) {
while (true) {
if (buffer.length === 0) {
const result = await iter.next();
for (const buffer of buffers) {
buffer.push(result);
}
} else if (buffer[0].done) {
return;
} else {
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
yield buffer.shift()!.value;
}
}
});
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/utils/messages.ts | import { LangChainBaseMessage } from "../schemas.js";
/** Runtime check for a LangChain BaseMessage: anything exposing a `_getType` method. */
export function isLangChainMessage(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  message?: any
): message is LangChainBaseMessage {
  if (message == null) return false;
  return typeof message._getType === "function";
}
// Add index signature to data object
// Serialized message payload: always carries `content`, plus any extra fields
// (e.g. `additional_kwargs`) copied from the source message.
interface ConvertedData {
  content: string;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  [key: string]: any;
}
/**
 * Converts a LangChain message into the `{ type, data }` example shape.
 * `additional_kwargs` is shallow-copied into `data` only when non-empty.
 */
export function convertLangChainMessageToExample(
  message: LangChainBaseMessage
) {
  const data: ConvertedData = { content: message.content };
  const extras = message?.additional_kwargs;
  if (extras && Object.keys(extras).length > 0) {
    data.additional_kwargs = { ...extras };
  }
  const converted: { type: string; data: ConvertedData } = {
    type: message._getType(),
    data,
  };
  return converted;
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/utils/warn.ts | const warnedMessages: Record<string, boolean> = {};
export function warnOnce(message: string): void {
if (!warnedMessages[message]) {
console.warn(message);
warnedMessages[message] = true;
}
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/utils/shuffle.ts | export function shuffle<T extends unknown[]>(array: T): T {
let currentIndex = array.length;
while (currentIndex !== 0) {
const randomIndex = Math.floor(Math.random() * currentIndex);
currentIndex -= 1;
const tmp = array[currentIndex];
array[currentIndex] = array[randomIndex];
array[randomIndex] = tmp;
}
return array;
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/utils/asserts.ts | export function isPromiseMethod(
x: string | symbol
): x is "then" | "catch" | "finally" {
if (x === "then" || x === "catch" || x === "finally") {
return true;
}
return false;
}
/**
 * True only for "plain" key/value objects: object literals and
 * `Object.create(null)` results. Rejects null, primitives, arrays, Maps,
 * class instances, iterables, and anything carrying a `Symbol.toStringTag`.
 */
export function isKVMap(x: unknown): x is Record<string, unknown> {
  if (typeof x !== "object" || x == null) {
    return false;
  }
  // A plain object's prototype is null, Object.prototype, or one step away
  // from null (covers cross-realm Object.prototype).
  const proto = Object.getPrototypeOf(x);
  const hasPlainPrototype =
    proto === null ||
    proto === Object.prototype ||
    Object.getPrototypeOf(proto) === null;
  return hasPlainPrototype && !(Symbol.toStringTag in x) && !(Symbol.iterator in x);
}
/** True when `x` implements the async iteration protocol. */
export const isAsyncIterable = (x: unknown): x is AsyncIterable<unknown> => {
  if (x == null || typeof x !== "object") return false;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  return typeof (x as any)[Symbol.asyncIterator] === "function";
};
/** True when `x` looks like a (sync) iterator: an object with a `next` method. */
export const isIteratorLike = (x: unknown): x is Iterator<unknown> => {
  if (x == null || typeof x !== "object") return false;
  return "next" in x && typeof x.next === "function";
};
// Constructor shared by every `function*` declaration; used for instanceof checks.
const GeneratorFunction = function* () {}.constructor;

/**
 * True when `x` is a generator *function* (declared with `function*`).
 * NOTE(review): despite the `x is Generator` predicate, this matches generator
 * functions, not the generator objects produced by calling one — confirm the
 * predicate type is intended.
 */
export const isGenerator = (x: unknown): x is Generator => {
  if (x == null || typeof x !== "function") return false;
  // eslint-disable-next-line no-instanceof/no-instanceof
  return x instanceof GeneratorFunction;
};
/** True when `x` is promise-like: an object exposing a callable `then`. */
export const isThenable = (x: unknown): x is Promise<unknown> => {
  if (x == null || typeof x !== "object") return false;
  return "then" in x && typeof x.then === "function";
};
/** True when `x` is ReadableStream-like: an object exposing a callable `getReader`. */
export const isReadableStream = (x: unknown): x is ReadableStream => {
  if (x == null || typeof x !== "object") return false;
  return "getReader" in x && typeof x.getReader === "function";
};
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/utils/env.ts | // Inlined from https://github.com/flexdinesh/browser-or-node
import { __version__ } from "../index.js";
declare global {
  // Deno exposes a global `Deno` object; some hosts (e.g. Supabase Edge
  // Functions) provide it without `version`.
  const Deno:
    | {
        version: {
          deno: string;
        };
      }
    | undefined;
}

// Cached result of getEnv(): environment detection runs at most once.
let globalEnv: string;

/** Browser main thread: a `window` with a `document` is present. */
export const isBrowser = () =>
  typeof window !== "undefined" && typeof window.document !== "undefined";

/** Dedicated web worker, detected via the global scope's constructor name. */
export const isWebWorker = () =>
  typeof globalThis === "object" &&
  globalThis.constructor &&
  globalThis.constructor.name === "DedicatedWorkerGlobalScope";

/** jsdom test environment, detected via window.name or the user agent string. */
export const isJsDom = () =>
  (typeof window !== "undefined" && window.name === "nodejs") ||
  (typeof navigator !== "undefined" &&
    (navigator.userAgent.includes("Node.js") ||
      navigator.userAgent.includes("jsdom")));

// Supabase Edge Function provides a `Deno` global object
// without `version` property
export const isDeno = () => typeof Deno !== "undefined";

// Mark not-as-node if in Supabase Edge Function
export const isNode = () =>
  typeof process !== "undefined" &&
  typeof process.versions !== "undefined" &&
  typeof process.versions.node !== "undefined" &&
  !isDeno();

/**
 * Detects the current JS runtime: "browser", "node", "webworker", "jsdom",
 * "deno", or "other". The first result is cached for subsequent calls.
 */
export const getEnv = () => {
  if (!globalEnv) {
    // Ordered probes: the first matching runtime wins.
    const probes: Array<[() => boolean, string]> = [
      [isBrowser, "browser"],
      [isNode, "node"],
      [isWebWorker, "webworker"],
      [isJsDom, "jsdom"],
      [isDeno, "deno"],
    ];
    globalEnv = probes.find(([probe]) => probe())?.[1] ?? "other";
  }
  return globalEnv;
};
/** Describes the SDK build and the runtime it is executing in. */
export type RuntimeEnvironment = {
  library: string;
  libraryVersion?: string;
  sdk: string;
  sdk_version: string;
  runtime: string;
  runtimeVersion?: string;
};

// Memoized: runtime identity and release SHAs do not change mid-process.
let runtimeEnvironment: RuntimeEnvironment | undefined;

/**
 * Returns (and caches) metadata about the current runtime: library/SDK
 * identifiers, the detected runtime name, and any CI/CD release SHAs.
 */
export function getRuntimeEnvironment(): RuntimeEnvironment {
  if (runtimeEnvironment !== undefined) {
    return runtimeEnvironment;
  }
  runtimeEnvironment = {
    library: "langsmith",
    runtime: getEnv(),
    sdk: "langsmith-js",
    sdk_version: __version__,
    ...getShas(),
  };
  return runtimeEnvironment;
}
/**
 * Retrieves the LangChain-specific environment variables from the current runtime environment.
 * Sensitive keys (containing the word "key", "token", or "secret") have their values redacted for security.
 *
 * @returns {Record<string, string>}
 *  - A record of LangChain-specific environment variables.
 */
export function getLangChainEnvVars(): Record<string, string> {
  const allEnvVars = getEnvironmentVariables() || {};
  const envVars: Record<string, string> = {};

  for (const [key, value] of Object.entries(allEnvVars)) {
    if (key.startsWith("LANGCHAIN_") && typeof value === "string") {
      envVars[key] = value;
    }
  }

  for (const key in envVars) {
    if (
      (key.toLowerCase().includes("key") ||
        key.toLowerCase().includes("secret") ||
        key.toLowerCase().includes("token")) &&
      typeof envVars[key] === "string"
    ) {
      const value = envVars[key];
      // Keep only the first/last two characters of longer secrets; fully mask
      // values of 4 characters or fewer. The previous unconditional
      // `slice(0, 2) + "*".repeat(value.length - 4) + slice(-2)` threw a
      // RangeError for lengths < 4 (negative repeat count) and echoed
      // 4-character secrets back entirely unredacted.
      envVars[key] =
        value.length > 4
          ? value.slice(0, 2) + "*".repeat(value.length - 4) + value.slice(-2)
          : "*".repeat(value.length);
    }
  }

  return envVars;
}
/**
 * Retrieves the LangChain-specific metadata from the current runtime environment.
 *
 * @returns {Record<string, string>}
 *  - A record of LangChain-specific metadata environment variables.
 */
export function getLangChainEnvVarsMetadata(): Record<string, string> {
  const allEnvVars = getEnvironmentVariables() || {};
  const envVars: Record<string, string> = {};
  // Configuration / credential variables that are never surfaced as metadata.
  const excluded = new Set([
    "LANGCHAIN_API_KEY",
    "LANGCHAIN_ENDPOINT",
    "LANGCHAIN_TRACING_V2",
    "LANGCHAIN_PROJECT",
    "LANGCHAIN_SESSION",
    "LANGSMITH_API_KEY",
    "LANGSMITH_ENDPOINT",
    "LANGSMITH_TRACING_V2",
    "LANGSMITH_PROJECT",
    "LANGSMITH_SESSION",
  ]);
  const looksSensitive = (key: string) => {
    const lower = key.toLowerCase();
    return (
      lower.includes("key") || lower.includes("secret") || lower.includes("token")
    );
  };

  for (const [key, value] of Object.entries(allEnvVars)) {
    if (typeof value !== "string") continue;
    if (!key.startsWith("LANGCHAIN_") && !key.startsWith("LANGSMITH_")) continue;
    if (excluded.has(key) || looksSensitive(key)) continue;
    if (key === "LANGCHAIN_REVISION_ID") {
      // Normalized name expected by downstream consumers.
      envVars["revision_id"] = value;
    } else {
      envVars[key] = value;
    }
  }
  return envVars;
}
/**
 * Retrieves the environment variables from the current runtime environment.
 *
 * This function is designed to operate in a variety of JS environments,
 * including Node.js, Deno, browsers, etc.
 *
 * @returns {Record<string, string> | undefined}
 *  - A record of environment variables if available.
 *  - `undefined` if the environment does not support or allows access to environment variables.
 */
export function getEnvironmentVariables(): Record<string, string> | undefined {
  try {
    // eslint-disable-next-line no-process-env
    if (typeof process === "undefined" || !process.env) {
      // Browsers and similar sandboxes expose no environment variables.
      return undefined;
    }
    const result: Record<string, string> = {};
    // eslint-disable-next-line no-process-env
    for (const [key, value] of Object.entries(process.env)) {
      // Coerce to string: process.env values are typed string | undefined.
      result[key] = String(value);
    }
    return result;
  } catch (e) {
    // Some runtimes (e.g. locked-down Deno) throw on env access.
    return undefined;
  }
}
/**
 * Reads a single environment variable, returning undefined when unavailable.
 * Certain Deno setups throw when environment variables are accessed
 * (https://github.com/hwchase17/langchainjs/issues/1412), hence the try/catch.
 */
export function getEnvironmentVariable(name: string): string | undefined {
  try {
    if (typeof process === "undefined") {
      return undefined;
    }
    // eslint-disable-next-line no-process-env
    return process.env?.[name];
  } catch (e) {
    return undefined;
  }
}
/** Reads `LANGSMITH_<name>`, falling back to the legacy `LANGCHAIN_<name>`. */
export function getLangSmithEnvironmentVariable(
  name: string
): string | undefined {
  const preferred = getEnvironmentVariable(`LANGSMITH_${name}`);
  return preferred || getEnvironmentVariable(`LANGCHAIN_${name}`);
}
/** Sets an environment variable where the runtime exposes `process`; no-op otherwise. */
export function setEnvironmentVariable(name: string, value: string): void {
  if (typeof process === "undefined") {
    return;
  }
  // eslint-disable-next-line no-process-env
  process.env[name] = value;
}
// Map of CI/CD environment variable name -> commit SHA value.
interface ICommitSHAs {
  [key: string]: string;
}

// Populated on the first getShas() call; env vars do not change mid-process.
let cachedCommitSHAs: ICommitSHAs | undefined;

/**
 * Get the Git commit SHA from common environment variables
 * used by different CI/CD platforms.
 * @returns {ICommitSHAs} Map of env var name to SHA (empty when none found).
 */
export function getShas(): ICommitSHAs {
  if (cachedCommitSHAs !== undefined) {
    return cachedCommitSHAs;
  }
  // One entry per CI/CD platform's conventional commit-SHA variable.
  const common_release_envs = [
    "VERCEL_GIT_COMMIT_SHA",
    "NEXT_PUBLIC_VERCEL_GIT_COMMIT_SHA",
    "COMMIT_REF",
    "RENDER_GIT_COMMIT",
    "CI_COMMIT_SHA",
    "CIRCLE_SHA1",
    "CF_PAGES_COMMIT_SHA",
    "REACT_APP_GIT_SHA",
    "SOURCE_VERSION",
    "GITHUB_SHA",
    "TRAVIS_COMMIT",
    "GIT_COMMIT",
    "BUILD_VCS_NUMBER",
    "bamboo_planRepository_revision",
    "Build.SourceVersion",
    "BITBUCKET_COMMIT",
    "DRONE_COMMIT_SHA",
    "SEMAPHORE_GIT_SHA",
    "BUILDKITE_COMMIT",
  ] as const;

  cachedCommitSHAs = common_release_envs.reduce<ICommitSHAs>((acc, env) => {
    const value = getEnvironmentVariable(env);
    if (value !== undefined) {
      acc[env] = value;
    }
    return acc;
  }, {});
  return cachedCommitSHAs;
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/utils/_uuid.ts | import * as uuid from "uuid";
/**
 * Validates that `str` is a well-formed UUID and returns it unchanged.
 * @throws Error naming `which` (when supplied) for invalid input.
 */
export function assertUuid(str: string, which?: string): string {
  if (uuid.validate(str)) {
    return str;
  }
  throw new Error(
    which !== undefined
      ? `Invalid UUID for ${which}: ${str}`
      : `Invalid UUID: ${str}`
  );
}
|
/**
 * Extracts the stack trace from an error-like value, stripping the leading
 * "Error: message" header that V8 prepends to `stack`.
 * Returns undefined when `e` carries no usable string stack.
 */
function getErrorStackTrace(e: unknown) {
  if (typeof e !== "object" || e == null) return undefined;
  if (!("stack" in e) || typeof e.stack !== "string") return undefined;
  const header = `${e}`;
  let trace = e.stack;
  if (trace.startsWith(header)) {
    trace = trace.slice(header.length);
  }
  if (trace.startsWith("\n")) {
    trace = trace.slice(1);
  }
  return trace;
}

/** Logs the cleaned stack trace of `e` to console.error, when one exists. */
export function printErrorStackTrace(e: unknown) {
  const trace = getErrorStackTrace(e);
  if (trace == null) return;
  console.error(trace);
}
/**
* LangSmithConflictError
*
* Represents an error that occurs when there's a conflict during an operation,
* typically corresponding to HTTP 409 status code responses.
*
* This error is thrown when an attempt to create or modify a resource conflicts
* with the current state of the resource on the server. Common scenarios include:
* - Attempting to create a resource that already exists
* - Trying to update a resource that has been modified by another process
* - Violating a uniqueness constraint in the data
*
* @extends Error
*
* @example
* try {
* await createProject("existingProject");
* } catch (error) {
 * if (error instanceof LangSmithConflictError) {
* console.log("A conflict occurred:", error.message);
* // Handle the conflict, e.g., by suggesting a different project name
* } else {
* // Handle other types of errors
* }
* }
*
 * @property {string} name - Always set to 'LangSmithConflictError' for easy identification
* @property {string} message - Detailed error message including server response
*
* @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/409
*/
export class LangSmithConflictError extends Error {
  constructor(message: string) {
    super(message);
    // Explicit name so logs and `error.name` checks identify the conflict case.
    this.name = "LangSmithConflictError";
  }
}
/**
 * Throws an appropriate error based on the response status and body.
 *
 * @param response - The fetch Response object
 * @param context - Additional context to include in the error message (e.g., operation being performed)
 * @param consume - When true, read the body of OK responses too so the
 *   underlying connection can be released
 *   (https://undici.nodejs.org/#/?id=garbage-collection).
 * @throws {LangSmithConflictError} When the response status is 409
 * @throws {Error} For all other non-ok responses
 */
export async function raiseForStatus(
  response: Response,
  context: string,
  consume?: boolean
): Promise<void> {
  if (response.ok) {
    if (consume) {
      // Drain the body purely to release the connection; value is discarded.
      await response.text();
    }
    return;
  }
  const errorBody = await response.text();
  const fullMessage = `Failed to ${context}. Received status [${response.status}]: ${response.statusText}. Server response: ${errorBody}`;
  if (response.status === 409) {
    throw new LangSmithConflictError(fullMessage);
  }
  throw new Error(fullMessage);
}
|
// Snapshot of git metadata for the current work tree. Each field may be null
// when the corresponding `git` invocation failed or produced no output.
interface GitInfo {
  remoteUrl?: string | null; // URL of the queried remote (default "origin")
  commit?: string | null; // HEAD commit SHA
  branch?: string | null; // current branch (abbreviated ref)
  authorName?: string | null; // author name of the latest commit
  authorEmail?: string | null; // author email of the latest commit
  commitMessage?: string | null; // NOTE(review): declared but not populated by getGitInfo in this file — confirm
  commitTime?: string | null; // latest commit timestamp (unix seconds, git %ct)
  dirty?: boolean | null; // true when `git status --porcelain` reports changes
  tags?: string | null; // output of `git describe --tags --exact-match --always --dirty`
}
/**
 * Dynamically imports node:child_process; throws in runtimes without it
 * (browsers, workers), letting callers degrade gracefully.
 */
async function importChildProcess() {
  const childProcess = await import("child_process");
  return { exec: childProcess.exec };
}
/**
 * Runs `git <command...>` through the supplied exec implementation.
 * Resolves to trimmed stdout on success, or null on any error — never rejects.
 */
const execGit = (
  command: string[],
  exec: (...args: any[]) => any
): Promise<string | null> => {
  return new Promise((resolve) => {
    const handleResult = (error: any, stdout: any) => {
      resolve(error ? null : stdout.trim());
    };
    exec(`git ${command.join(" ")}`, handleResult);
  });
};
/**
 * Collects git metadata (remote URL, HEAD commit, branch, tags, author,
 * dirty flag) for the current working directory.
 * Returns null when child_process cannot be imported (non-Node runtime) or
 * the cwd is not inside a git work tree; individual fields are null when the
 * corresponding git command fails.
 */
export const getGitInfo = async (
  remote = "origin"
): Promise<GitInfo | null> => {
  let exec: (...args: any[]) => any;
  try {
    const execImport = await importChildProcess();
    exec = execImport.exec;
  } catch (e) {
    // no-op: child_process is unavailable outside Node.
    return null;
  }
  const isInsideWorkTree = await execGit(
    ["rev-parse", "--is-inside-work-tree"],
    exec
  );
  if (!isInsideWorkTree) {
    return null;
  }
  // The commands are independent, so they run in parallel; destructuring
  // order below must match the array order exactly.
  const [
    remoteUrl,
    commit,
    commitTime,
    branch,
    tags,
    dirty,
    authorName,
    authorEmail,
  ] = await Promise.all([
    execGit(["remote", "get-url", remote], exec),
    execGit(["rev-parse", "HEAD"], exec),
    execGit(["log", "-1", "--format=%ct"], exec),
    execGit(["rev-parse", "--abbrev-ref", "HEAD"], exec),
    execGit(
      ["describe", "--tags", "--exact-match", "--always", "--dirty"],
      exec
    ),
    // NOTE(review): if `git status` itself fails, execGit yields null and
    // `null !== ""` marks the tree dirty — confirm that is intended.
    execGit(["status", "--porcelain"], exec).then((output) => output !== ""),
    execGit(["log", "-1", "--format=%an"], exec),
    execGit(["log", "-1", "--format=%ae"], exec),
  ]);
  return {
    remoteUrl,
    commit,
    commitTime,
    branch,
    tags,
    dirty,
    authorName,
    authorEmail,
  };
};
/** Resolves the current HEAD commit SHA, or null when git is unavailable. */
export const getDefaultRevisionId = async (): Promise<string | null> => {
  let exec: (...args: any[]) => any;
  try {
    ({ exec } = await importChildProcess());
  } catch (e) {
    // child_process unavailable (non-Node runtime).
    return null;
  }
  const commit = await execGit(["rev-parse", "HEAD"], exec);
  // Treat empty output the same as a failed command.
  return commit || null;
};
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/utils/prompts.ts | import { parse as parseVersion } from "semver";
/**
 * True when `current_version` is at least `target_version` (semver compare).
 * @throws Error when either string is not valid semver.
 */
export function isVersionGreaterOrEqual(
  current_version: string,
  target_version: string
): boolean {
  const current = parseVersion(current_version);
  const target = parseVersion(target_version);
  if (current == null || target == null) {
    throw new Error("Invalid version format.");
  }
  return current.compare(target) >= 0;
}
/**
 * Parses a prompt identifier of the form "[owner/]name[:commit]" into an
 * [owner, name, commit] tuple; owner defaults to "-", commit to "latest".
 * @throws Error for empty, slash-bounded, or over-delimited identifiers.
 */
export function parsePromptIdentifier(
  identifier: string
): [string, string, string] {
  const malformed =
    !identifier ||
    identifier.startsWith("/") ||
    identifier.endsWith("/") ||
    identifier.split("/").length > 2 ||
    identifier.split(":").length > 2;
  if (malformed) {
    throw new Error(`Invalid identifier format: ${identifier}`);
  }

  const [ownerName, commitPart] = identifier.split(":");
  const commit = commitPart || "latest";

  if (!ownerName.includes("/")) {
    if (!ownerName) {
      throw new Error(`Invalid identifier format: ${identifier}`);
    }
    return ["-", ownerName, commit];
  }

  const [owner, name] = ownerName.split("/", 2);
  if (!owner || !name) {
    throw new Error(`Invalid identifier format: ${identifier}`);
  }
  return [owner, name, commit];
}
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/fast-safe-stringify/LICENSE | The MIT License (MIT)
Copyright (c) 2016 David Mark Clements
Copyright (c) 2017 David Mark Clements & Matteo Collina
Copyright (c) 2018 David Mark Clements, Matteo Collina & Ruben Bridgewater
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. |
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/fast-safe-stringify/index.ts | /* eslint-disable */
// @ts-nocheck
// Placeholder emitted where the depth/edge limits were exceeded.
var LIMIT_REPLACE_NODE = "[...]";
// Placeholder object substituted for circular references.
var CIRCULAR_REPLACE_NODE = { result: "[Circular]" };
// Undo log of [parent, key, originalValue(, descriptor)] tuples used to
// restore the input object after stringification temporarily mutates it.
var arr = [];
// Getter-backed values that could not be patched in place; handled lazily by
// replaceGetterValues during JSON.stringify.
var replacerStack = [];
// Effectively unlimited traversal unless the caller overrides.
function defaultOptions() {
  return {
    depthLimit: Number.MAX_SAFE_INTEGER,
    edgesLimit: Number.MAX_SAFE_INTEGER,
  };
}
// Regular stringify
/**
 * Drop-in JSON.stringify replacement that survives circular references:
 * cycles (and over-deep/over-wide nodes) are temporarily replaced with
 * placeholder markers, then the input object is restored afterwards.
 * Returns "[Unserializable]" for values that fail for non-circular reasons
 * (e.g. BigInt).
 */
export function stringify(obj, replacer?, spacer?, options?) {
  try {
    // Fast path: most inputs are not circular.
    return JSON.stringify(obj, replacer, spacer);
  } catch (e: any) {
    // Fall back to more complex stringify if circular reference
    if (!e.message?.includes("Converting circular structure to JSON")) {
      console.warn("[WARNING]: LangSmith received unserializable value.");
      return "[Unserializable]";
    }
    console.warn(
      "[WARNING]: LangSmith received circular JSON. This will decrease tracer performance."
    );
    if (typeof options === "undefined") {
      options = defaultOptions();
    }
    // Mutates `obj` in place, recording undo information in the `arr` log.
    decirc(obj, "", 0, [], undefined, 0, options);
    var res;
    try {
      if (replacerStack.length === 0) {
        res = JSON.stringify(obj, replacer, spacer);
      } else {
        // Some getter-backed values could not be patched in place; substitute
        // them through a wrapping replacer instead.
        res = JSON.stringify(obj, replaceGetterValues(replacer), spacer);
      }
    } catch (_) {
      return JSON.stringify(
        "[unable to serialize, circular reference is too complex to analyze]"
      );
    } finally {
      // Restore the original object graph exactly as it was.
      while (arr.length !== 0) {
        var part = arr.pop();
        if (part.length === 4) {
          Object.defineProperty(part[0], part[1], part[3]);
        } else {
          part[0][part[1]] = part[2];
        }
      }
    }
    return res;
  }
}
/**
 * Swaps `parent[k]` for the placeholder `replace`, recording how to undo it.
 * Getter-backed properties are redefined when configurable; non-configurable
 * getters are deferred to `replacerStack` for substitution inside
 * JSON.stringify instead.
 */
function setReplace(replace, val, k, parent) {
  var propertyDescriptor = Object.getOwnPropertyDescriptor(parent, k);
  if (propertyDescriptor.get !== undefined) {
    if (propertyDescriptor.configurable) {
      Object.defineProperty(parent, k, { value: replace });
      // 4-tuple: descriptor is needed to restore the getter afterwards.
      arr.push([parent, k, val, propertyDescriptor]);
    } else {
      // Cannot patch in place; substitute during stringification.
      replacerStack.push([val, k, replace]);
    }
  } else {
    parent[k] = replace;
    arr.push([parent, k, val]);
  }
}
/**
 * Recursively walks `val`, replacing circular references and nodes beyond the
 * configured depth/edge limits with placeholders (undoably, via setReplace).
 * @param k key of `val` within `parent`
 * @param edgeIndex index of `val` among its parent's children (edges limit)
 * @param stack chain of ancestor objects used for cycle detection
 */
function decirc(val, k, edgeIndex, stack, parent, depth, options) {
  depth += 1;
  var i;
  if (typeof val === "object" && val !== null) {
    // Any ancestor identical to `val` means a circular reference.
    for (i = 0; i < stack.length; i++) {
      if (stack[i] === val) {
        setReplace(CIRCULAR_REPLACE_NODE, val, k, parent);
        return;
      }
    }
    if (
      typeof options.depthLimit !== "undefined" &&
      depth > options.depthLimit
    ) {
      setReplace(LIMIT_REPLACE_NODE, val, k, parent);
      return;
    }
    if (
      typeof options.edgesLimit !== "undefined" &&
      edgeIndex + 1 > options.edgesLimit
    ) {
      setReplace(LIMIT_REPLACE_NODE, val, k, parent);
      return;
    }
    stack.push(val);
    // Optimize for Arrays. Big arrays could kill the performance otherwise!
    if (Array.isArray(val)) {
      for (i = 0; i < val.length; i++) {
        decirc(val[i], i, i, stack, val, depth, options);
      }
    } else {
      var keys = Object.keys(val);
      for (i = 0; i < keys.length; i++) {
        var key = keys[i];
        decirc(val[key], key, i, stack, val, depth, options);
      }
    }
    stack.pop();
  }
}
// Stable-stringify
/** Comparator used to sort object keys deterministically. */
function compareFunction(left, right) {
  if (left < right) {
    return -1;
  }
  if (left > right) {
    return 1;
  }
  return 0;
}
/**
 * Stable variant of `stringify`: object keys are serialized in sorted order.
 * Follows the same mutate/serialize/restore protocol via deterministicDecirc.
 * NOTE(review): not exported or referenced in this view — presumably used
 * elsewhere in the original module; kept intact.
 */
function deterministicStringify(obj, replacer, spacer, options) {
  if (typeof options === "undefined") {
    options = defaultOptions();
  }
  // A top-level plain object is rebuilt with sorted keys; otherwise the
  // original object (mutated in place) is serialized.
  var tmp = deterministicDecirc(obj, "", 0, [], undefined, 0, options) || obj;
  var res;
  try {
    if (replacerStack.length === 0) {
      res = JSON.stringify(tmp, replacer, spacer);
    } else {
      // Substitute values from non-configurable getters during serialization.
      res = JSON.stringify(tmp, replaceGetterValues(replacer), spacer);
    }
  } catch (_) {
    return JSON.stringify(
      "[unable to serialize, circular reference is too complex to analyze]"
    );
  } finally {
    // Ensure that we restore the object as it was.
    while (arr.length !== 0) {
      var part = arr.pop();
      if (part.length === 4) {
        Object.defineProperty(part[0], part[1], part[3]);
      } else {
        part[0][part[1]] = part[2];
      }
    }
  }
  return res;
}
/**
 * decirc twin used by deterministicStringify: besides cycle/limit
 * replacement, each plain object is rebuilt with sorted keys (the original is
 * recorded in the `arr` undo log so it can be restored afterwards).
 * Returns the sorted rebuild of a parentless (top-level) object; otherwise
 * mutates `parent[k]` in place and returns undefined.
 */
function deterministicDecirc(val, k, edgeIndex, stack, parent, depth, options) {
  depth += 1;
  var i;
  if (typeof val === "object" && val !== null) {
    // Any ancestor identical to `val` means a circular reference.
    for (i = 0; i < stack.length; i++) {
      if (stack[i] === val) {
        setReplace(CIRCULAR_REPLACE_NODE, val, k, parent);
        return;
      }
    }
    try {
      // Values with a custom toJSON serialize themselves; leave them alone.
      if (typeof val.toJSON === "function") {
        return;
      }
    } catch (_) {
      // Accessing toJSON threw (hostile proxy/getter); skip this subtree.
      return;
    }
    if (
      typeof options.depthLimit !== "undefined" &&
      depth > options.depthLimit
    ) {
      setReplace(LIMIT_REPLACE_NODE, val, k, parent);
      return;
    }
    if (
      typeof options.edgesLimit !== "undefined" &&
      edgeIndex + 1 > options.edgesLimit
    ) {
      setReplace(LIMIT_REPLACE_NODE, val, k, parent);
      return;
    }
    stack.push(val);
    // Optimize for Arrays. Big arrays could kill the performance otherwise!
    if (Array.isArray(val)) {
      for (i = 0; i < val.length; i++) {
        deterministicDecirc(val[i], i, i, stack, val, depth, options);
      }
    } else {
      // Create a temporary object in the required way
      var tmp = {};
      var keys = Object.keys(val).sort(compareFunction);
      for (i = 0; i < keys.length; i++) {
        var key = keys[i];
        deterministicDecirc(val[key], key, i, stack, val, depth, options);
        tmp[key] = val[key];
      }
      if (typeof parent !== "undefined") {
        // Swap in the sorted rebuild, logging the original for restoration.
        arr.push([parent, k, val]);
        parent[k] = tmp;
      } else {
        // Top-level object: caller uses the returned sorted copy directly.
        return tmp;
      }
    }
    stack.pop();
  }
}
// wraps replacer function to handle values we couldn't replace
// and mark them as replaced value
/**
 * Builds a JSON.stringify replacer that substitutes placeholder markers for
 * values backed by non-configurable getters (queued in `replacerStack` by
 * setReplace), then delegates to the caller's replacer, if any.
 */
function replaceGetterValues(replacer) {
  replacer =
    typeof replacer !== "undefined"
      ? replacer
      : function (k, v) {
          return v;
        };
  return function (key, val) {
    if (replacerStack.length > 0) {
      for (var i = 0; i < replacerStack.length; i++) {
        var part = replacerStack[i];
        // Match on both key and identity of the original value.
        if (part[1] === key && part[0] === val) {
          val = part[2];
          // Each queued substitution applies exactly once.
          replacerStack.splice(i, 1);
          break;
        }
      }
    }
    return replacer.call(this, key, val);
  };
}
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/castPath.ts | import isKey from "./isKey.js";
import stringToPath from "./stringToPath.js";
/**
 * Casts `value` to a property-path array if it's not one already.
 *
 * @private
 * @param {*} value The value to inspect.
 * @param {Object} [object] The object to query keys on.
 * @returns {Array} Returns the cast property path array.
 */
function castPath(value: any, object: Record<string, any>) {
  if (Array.isArray(value)) {
    return value;
  }
  // A direct key of `object` stays a single-element path; anything else is
  // parsed into its path segments.
  const isDirectKey = isKey(value, object);
  return isDirectKey ? [value] : stringToPath(value);
}
export default castPath;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/LICENSE | The MIT License
Copyright JS Foundation and other contributors <https://js.foundation/>
Based on Underscore.js, copyright Jeremy Ashkenas,
DocumentCloud and Investigative Reporters & Editors <http://underscorejs.org/>
This software consists of voluntary contributions made by many
individuals. For exact contribution history, see the revision history
available at https://github.com/lodash/lodash
The following license applies to all parts of this software except as
documented below:
====
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
====
Copyright and related rights for sample code are waived via CC0. Sample
code is defined as all source code displayed within the prose of the
documentation.
CC0: http://creativecommons.org/publicdomain/zero/1.0/
====
Files located in the node_modules and vendor directories are externally
maintained libraries used by this software which have their own
licenses; we recommend you read them, as their terms may differ from the
terms above. |
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/eq.ts | /**
* Performs a
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* comparison between two values to determine if they are equivalent.
*
* @since 4.0.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if the values are equivalent, else `false`.
* @example
*
* const object = { 'a': 1 }
* const other = { 'a': 1 }
*
* eq(object, object)
* // => true
*
* eq(object, other)
* // => false
*
* eq('a', 'a')
* // => true
*
* eq('a', Object('a'))
* // => false
*
* eq(NaN, NaN)
* // => true
*/
// SameValueZero comparison: strict equality, except that NaN is
// considered equal to itself (`x !== x` holds only for NaN).
function eq(value: any, other: any) {
  if (value === other) {
    return true;
  }
  return value !== value && other !== other;
}
export default eq;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/baseAssignValue.ts | /**
* The base implementation of `assignValue` and `assignMergeValue` without
* value checks.
*
* @private
* @param {Object} object The object to modify.
* @param {string} key The key of the property to assign.
* @param {*} value The value to assign.
*/
/**
 * Bare-bones assignment used by `assignValue`: writes `value` to
 * `object[key]` with no equality checks. The `__proto__` key is defined
 * as an own data property via `Object.defineProperty` so the assignment
 * cannot mutate the object's prototype chain.
 *
 * @private
 * @param {Object} object The object to modify.
 * @param {string} key The key of the property to assign.
 * @param {*} value The value to assign.
 */
function baseAssignValue(object: Record<string, any>, key: string, value: any) {
  if (key !== "__proto__") {
    object[key] = value;
    return;
  }
  Object.defineProperty(object, key, {
    configurable: true,
    enumerable: true,
    value,
    writable: true,
  });
}
export default baseAssignValue;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/isKey.ts | // @ts-nocheck
import isSymbol from "./isSymbol.js";
/** Used to match property names within property paths. */
const reIsDeepProp = /\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/;
const reIsPlainProp = /^\w*$/;
/**
 * Checks if `value` is directly usable as a property name (as opposed to
 * a property path that must be split into segments).
 *
 * @private
 * @param {*} value The value to check.
 * @param {Object} [object] The object to query keys on.
 * @returns {boolean} Returns `true` if `value` is a property name.
 */
function isKey(value, object) {
  if (Array.isArray(value)) {
    return false;
  }
  const kind = typeof value;
  // Numbers, booleans, nullish values and symbols are always direct keys.
  if (
    kind === "number" ||
    kind === "boolean" ||
    value == null ||
    isSymbol(value)
  ) {
    return true;
  }
  // Plain identifiers and strings without any path syntax are direct keys,
  // as are strings that literally exist as keys on `object`.
  if (reIsPlainProp.test(value) || !reIsDeepProp.test(value)) {
    return true;
  }
  return object != null && value in Object(object);
}
export default isKey;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/isIndex.ts | // @ts-nocheck
/** Largest integer exactly representable as a double. */
const MAX_SAFE_INTEGER = 9007199254740991;
/** Matches canonical unsigned-integer strings (no leading zeros). */
const reIsUint = /^(?:0|[1-9]\d*)$/;
/**
 * Checks if `value` is a valid array-like index: a non-negative integer
 * (number, or canonical unsigned-integer string) strictly below `length`.
 *
 * @private
 * @param {*} value The value to check.
 * @param {number} [length=MAX_SAFE_INTEGER] The upper bound (exclusive).
 * @returns {boolean} Returns `true` if `value` is a valid index.
 */
function isIndex(value, length) {
  const bound = length == null ? MAX_SAFE_INTEGER : length;
  if (!bound) {
    return false;
  }
  const kind = typeof value;
  const looksNumeric =
    kind === "number" || (kind !== "symbol" && reIsUint.test(value));
  return looksNumeric && value > -1 && value % 1 === 0 && value < bound;
}
export default isIndex;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/stringToPath.ts | // @ts-nocheck
import memoizeCapped from "./memoizeCapped.js";
const charCodeOfDot = ".".charCodeAt(0);
// Unescapes backslash-escaped characters inside quoted bracket keys.
const reEscapeChar = /\\(\\)?/g;
// Matches one path segment at a time: bare names, bracketed expressions,
// quoted bracket keys, or the empty segment between consecutive dots.
const rePropName = RegExp(
  // Match anything that isn't a dot or bracket.
  "[^.[\\]]+" +
    "|" +
    // Or match property names within brackets.
    "\\[(?:" +
    // Match a non-string expression.
    "([^\"'][^[]*)" +
    "|" +
    // Or match strings (supports escaping characters).
    "([\"'])((?:(?!\\2)[^\\\\]|\\\\.)*?)\\2" +
    ")\\]" +
    "|" +
    // Or match "" as the space between consecutive dots or empty brackets.
    "(?=(?:\\.|\\[\\])(?:\\.|\\[\\]|$))",
  "g"
);
/**
 * Converts `string` to a property path array.
 *
 * Memoized via `memoizeCapped` since path strings are frequently reused.
 *
 * @private
 * @param {string} string The string to convert.
 * @returns {Array} Returns the property path array.
 */
const stringToPath = memoizeCapped((string: string) => {
  const result = [];
  // A leading dot denotes an empty first segment (e.g. ".a" -> ["", "a"]).
  if (string.charCodeAt(0) === charCodeOfDot) {
    result.push("");
  }
  // `replace` is used only to iterate regex matches; its return value is
  // discarded.
  string.replace(rePropName, (match, expression, quote, subString) => {
    let key = match;
    if (quote) {
      // Quoted bracket key: strip escape backslashes.
      key = subString.replace(reEscapeChar, "$1");
    } else if (expression) {
      // Unquoted bracket expression: trim surrounding whitespace.
      key = expression.trim();
    }
    result.push(key);
  });
  return result;
});
export default stringToPath;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/memoizeCapped.ts | // @ts-nocheck
/**
* Creates a function that memoizes the result of `func`. If `resolver` is
* provided, it determines the cache key for storing the result based on the
* arguments provided to the memoized function. By default, the first argument
* provided to the memoized function is used as the map cache key. The `func`
* is invoked with the `this` binding of the memoized function.
*
* **Note:** The cache is exposed as the `cache` property on the memoized
* function. Its creation may be customized by replacing the `memoize.Cache`
* constructor with one whose instances implement the
* [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object)
* method interface of `clear`, `delete`, `get`, `has`, and `set`.
*
* @since 0.1.0
* @category Function
* @param {Function} func The function to have its output memoized.
* @param {Function} [resolver] The function to resolve the cache key.
* @returns {Function} Returns the new memoized function.
* @example
*
* const object = { 'a': 1, 'b': 2 }
* const other = { 'c': 3, 'd': 4 }
*
* const values = memoize(values)
* values(object)
* // => [1, 2]
*
* values(other)
* // => [3, 4]
*
* object.a = 2
* values(object)
* // => [1, 2]
*
* // Modify the result cache.
* values.cache.set(object, ['a', 'b'])
* values(object)
* // => ['a', 'b']
*
* // Replace `memoize.Cache`.
* memoize.Cache = WeakMap
*/
// Returns a memoizing wrapper around `func`. The cache key is the first
// argument, or the result of `resolver` when provided. The cache lives on
// `memoized.cache`; its constructor may be swapped via `memoize.Cache`.
function memoize(func, resolver) {
  if (typeof func !== "function") {
    throw new TypeError("Expected a function");
  }
  if (resolver != null && typeof resolver !== "function") {
    throw new TypeError("Expected a function");
  }
  // `any` lets us hang the `cache` property off the function object.
  const memoized: any = function (...args) {
    const key = resolver ? resolver.apply(this, args) : args[0];
    const cache = memoized.cache;
    if (cache.has(key)) {
      return cache.get(key);
    }
    const result = func.apply(this, args);
    // A `set` implementation may return a fresh cache; keep the old one
    // otherwise.
    memoized.cache = cache.set(key, result) || cache;
    return result;
  };
  memoized.cache = new ((memoize as any).Cache || Map)();
  return memoized;
}
memoize.Cache = Map;
export default memoize;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/assignValue.ts | import baseAssignValue from "./baseAssignValue.js";
import eq from "./eq.js";
/** Used to check objects for own properties. */
const hasOwnProperty = Object.prototype.hasOwnProperty;
/**
 * Assigns `value` to `key` of `object` if the existing value is not
 * equivalent (SameValueZero).
 *
 * Fix: a previous inner guard (`value !== 0 || 1 / value === 1 / objValue`)
 * meant `0`/`-0` could never be assigned to a missing key or over a
 * non-zero value — the upstream `_.set(obj, path, 0)` bug. Since
 * SameValueZero already treats `0` and `-0` as equal (so the signed-zero
 * case lands in the `else` branch below), the guard was unreachable for
 * its intended purpose and has been removed.
 *
 * @private
 * @param {Object} object The object to modify.
 * @param {string} key The key of the property to assign.
 * @param {*} value The value to assign.
 */
function assignValue(object: Record<string, any>, key: string, value: any) {
  const objValue = object[key];
  if (!(hasOwnProperty.call(object, key) && eq(objValue, value))) {
    baseAssignValue(object, key, value);
  } else if (value === undefined && !(key in object)) {
    // Re-assign `undefined` only when the key is genuinely absent.
    baseAssignValue(object, key, value);
  }
}
export default assignValue;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/toKey.ts | // @ts-nocheck
import isSymbol from "./isSymbol.js";
/** Used as references for various `Number` constants. */
const INFINITY = 1 / 0;
/**
 * Converts `value` to a usable property key. Strings and symbols pass
 * through unchanged; other values are stringified, with `-0` mapped to
 * the string "-0" so the sign survives (template stringification loses it).
 *
 * @private
 * @param {*} value The value to inspect.
 * @returns {string|symbol} Returns the key.
 */
function toKey(value) {
  if (typeof value === "string" || isSymbol(value)) {
    return value;
  }
  const text = `${value}`;
  const isNegativeZero = text === "0" && 1 / value === -INFINITY;
  return isNegativeZero ? "-0" : text;
}
export default toKey;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/getTag.ts | // @ts-nocheck
// Cached to avoid repeated prototype lookups and objects shadowing
// `toString`.
const toString = Object.prototype.toString;
/**
 * Gets the `toStringTag` of `value`; `null` and `undefined` are handled
 * explicitly before dispatching through `toString.call`.
 *
 * @private
 * @param {*} value The value to query.
 * @returns {string} Returns the `toStringTag`.
 */
function getTag(value) {
  if (value === undefined) {
    return "[object Undefined]";
  }
  if (value === null) {
    return "[object Null]";
  }
  return toString.call(value);
}
export default getTag;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/set.ts | // @ts-nocheck
import baseSet from "./baseSet.js";
/**
 * Sets the value at `path` of `object`, creating intermediate containers
 * as needed: arrays for missing numeric-index segments, plain objects for
 * all other missing segments. Mutates and returns `object`.
 *
 * Inlined locally (instead of depending on the isolated "lodash.set"
 * package) so known vulnerabilities can be patched in place.
 *
 * @since 3.7.0
 * @category Object
 * @param {Object} object The object to modify.
 * @param {Array|string} path The path of the property to set.
 * @param {*} value The value to set.
 * @returns {Object} Returns `object`.
 * @see has, hasIn, get, unset
 * @example
 *
 * const object = { 'a': [{ 'b': { 'c': 3 } }] }
 * set(object, 'a[0].b.c', 4)         // object.a[0].b.c === 4
 * set(object, ['x', '0', 'y'], 5)    // object.x[0].y === 5
 */
function set(object, path, value) {
  if (object == null) {
    return object;
  }
  return baseSet(object, path, value);
}
export default set;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/isObject.ts | // @ts-nocheck
/**
 * Checks if `value` is of the ECMAScript language type `Object`:
 * plain objects, arrays, functions, regexes, boxed primitives, etc.
 * `null` and primitives return `false`.
 *
 * @since 0.1.0
 * @category Lang
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is an object.
 * @example
 *
 * isObject({})        // => true
 * isObject([1, 2, 3]) // => true
 * isObject(Function)  // => true
 * isObject(null)      // => false
 */
function isObject(value) {
  if (value == null) {
    return false;
  }
  const kind = typeof value;
  return kind === "object" || kind === "function";
}
export default isObject;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/baseSet.ts | // @ts-nocheck
import assignValue from "./assignValue.js";
import castPath from "./castPath.js";
import isIndex from "./isIndex.js";
import isObject from "./isObject.js";
import toKey from "./toKey.js";
/**
 * The base implementation of `set`: walks `path` through `object`,
 * creating intermediate containers as needed (arrays for numeric-index
 * segments, plain objects otherwise), and assigns `value` at the final
 * segment. Mutates and returns `object`.
 *
 * @private
 * @param {Object} object The object to modify.
 * @param {Array|string} path The path of the property to set.
 * @param {*} value The value to set.
 * @param {Function} [customizer] The function to customize path creation.
 * @returns {Object} Returns `object`.
 */
function baseSet(object, path, value, customizer) {
  if (!isObject(object)) {
    return object;
  }
  const segments = castPath(path, object);
  const lastIndex = segments.length - 1;
  let nested = object;
  for (let index = 0; index < segments.length && nested != null; index++) {
    const key = toKey(segments[index]);
    let newValue;
    if (index === lastIndex) {
      newValue = value;
    } else {
      const existing = nested[key];
      newValue = customizer ? customizer(existing, key, nested) : undefined;
      if (newValue === undefined) {
        // Reuse an existing container, otherwise create one whose type
        // matches the next path segment (array for numeric indices).
        if (isObject(existing)) {
          newValue = existing;
        } else {
          newValue = isIndex(segments[index + 1]) ? [] : {};
        }
      }
    }
    assignValue(nested, key, newValue);
    nested = nested[key];
  }
  return object;
}
export default baseSet;
|
0 | lc_public_repos/langsmith-sdk/js/src/utils | lc_public_repos/langsmith-sdk/js/src/utils/lodash/isSymbol.ts | // @ts-nocheck
import getTag from "./getTag.js";
/**
 * Checks whether `value` is a `Symbol` primitive or a boxed Symbol
 * object.
 *
 * @since 4.0.0
 * @category Lang
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is a symbol.
 * @example
 *
 * isSymbol(Symbol.iterator) // => true
 * isSymbol('abc')           // => false
 */
function isSymbol(value) {
  const kind = typeof value;
  if (kind === "symbol") {
    return true;
  }
  return (
    kind === "object" && value != null && getTag(value) === "[object Symbol]"
  );
}
export default isSymbol;
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/singletons/fetch.ts | // Wrap the default fetch call due to issues with illegal invocations
// in some environments:
// https://stackoverflow.com/questions/69876859/why-does-bind-fix-failed-to-execute-fetch-on-window-illegal-invocation-err
// @ts-expect-error Broad typing to support a range of fetch implementations
const DEFAULT_FETCH_IMPLEMENTATION = (...args: any[]) => fetch(...args);
const LANGSMITH_FETCH_IMPLEMENTATION_KEY = Symbol.for(
"ls:fetch_implementation"
);
/**
* Overrides the fetch implementation used for LangSmith calls.
* You should use this if you need to use an implementation of fetch
* other than the default global (e.g. for dealing with proxies).
* @param fetch The new fetch function to use.
*/
export const overrideFetchImplementation = (fetch: (...args: any[]) => any) => {
  // Stored on globalThis so every copy of the SDK in the process sees it.
  const registry = globalThis as any;
  registry[LANGSMITH_FETCH_IMPLEMENTATION_KEY] = fetch;
};
/**
 * Returns the fetch implementation to use for LangSmith calls: the
 * globally registered override when present, otherwise the wrapped
 * default global fetch.
 * @internal
 */
export const _getFetchImplementation: () => (...args: any[]) => any = () => {
  const overridden = (globalThis as any)[LANGSMITH_FETCH_IMPLEMENTATION_KEY];
  return overridden ?? DEFAULT_FETCH_IMPLEMENTATION;
};
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/singletons/types.ts | import { RunTree, RunnableConfigLike } from "../run_trees.js";
import { ROOT } from "./traceable.js";
// Resolves to `Promise<T>` unless T is already a promise or an async
// generator, in which case it is passed through unchanged.
type SmartPromise<T> = T extends AsyncGenerator
  ? T
  : T extends Promise<unknown>
  ? T
  : Promise<T>;
// Given one [Args, Return] pair of a function signature, produces the
// call signatures the traceable wrapper exposes: when the function
// already takes a leading RunTree, only RunTree/ROOT and config variants;
// otherwise the original signature plus RunTree- and config-prefixed ones.
type WrapArgReturnPair<Pair> = Pair extends [
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  infer Args extends any[],
  infer Return
]
  ? Args extends [RunTree, ...infer RestArgs]
    ? {
        (
          runTree: RunTree | typeof ROOT,
          ...args: RestArgs
        ): SmartPromise<Return>;
        (config: RunnableConfigLike, ...args: RestArgs): SmartPromise<Return>;
      }
    : {
        (...args: Args): SmartPromise<Return>;
        (runTree: RunTree, ...rest: Args): SmartPromise<Return>;
        (config: RunnableConfigLike, ...args: Args): SmartPromise<Return>;
      }
  : never;
// Standard distributive-conditional trick: converts a union of types into
// their intersection (used to rebuild overload sets from signature unions).
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type UnionToIntersection<U> = (U extends any ? (x: U) => void : never) extends (
  x: infer I
) => void
  ? I
  : never;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type TraceableFunction<Func extends (...args: any[]) => any> =
  // function overloads are represented as intersections rather than unions
  // matches the behavior introduced in https://github.com/microsoft/TypeScript/pull/54448
  //
  // Each arm below matches functions with exactly 5, 4, 3, 2, then 1
  // call signature(s); every matched overload's [Args, Return] pair is
  // wrapped via WrapArgReturnPair and the resulting signatures are joined
  // back into an overload set with UnionToIntersection.
  (Func extends {
    (...args: infer A1): infer R1;
    (...args: infer A2): infer R2;
    (...args: infer A3): infer R3;
    (...args: infer A4): infer R4;
    (...args: infer A5): infer R5;
  }
    ? UnionToIntersection<
        WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4] | [A5, R5]>
      >
    : Func extends {
        (...args: infer A1): infer R1;
        (...args: infer A2): infer R2;
        (...args: infer A3): infer R3;
        (...args: infer A4): infer R4;
      }
    ? UnionToIntersection<
        WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4]>
      >
    : Func extends {
        (...args: infer A1): infer R1;
        (...args: infer A2): infer R2;
        (...args: infer A3): infer R3;
      }
    ? UnionToIntersection<WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3]>>
    : Func extends {
        (...args: infer A1): infer R1;
        (...args: infer A2): infer R2;
      }
    ? UnionToIntersection<WrapArgReturnPair<[A1, R1] | [A2, R2]>>
    : Func extends {
        (...args: infer A1): infer R1;
      }
    ? UnionToIntersection<WrapArgReturnPair<[A1, R1]>>
    : never) & {
    // Other properties of Func
    [K in keyof Func]: Func[K];
  };
// Convenience alias for the SDK's RunTree class.
export type RunTreeLike = RunTree;
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/singletons/traceable.ts | import { isRunTree, RunTree } from "../run_trees.js";
import { TraceableFunction } from "./types.js";
// Minimal contract for an AsyncLocalStorage-like store that holds the
// current RunTree (or undefined when no trace is active).
interface AsyncLocalStorageInterface {
  getStore: () => RunTree | undefined;
  run: (context: RunTree | undefined, fn: () => void) => void;
}
// No-op fallback used when no real AsyncLocalStorage implementation has
// been registered: nothing is ever stored, and callbacks run without any
// tracing context.
class MockAsyncLocalStorage implements AsyncLocalStorageInterface {
  getStore() {
    return undefined;
  }
  run(_: RunTree | undefined, callback: () => void): void {
    return callback();
  }
}
// Global registry key; Symbol.for lets multiple copies of the SDK loaded
// into one process share the same storage instance.
const TRACING_ALS_KEY = Symbol.for("ls:tracing_async_local_storage");
const mockAsyncLocalStorage = new MockAsyncLocalStorage();
// Indirection over globalThis so an environment-specific
// AsyncLocalStorage implementation can be injected once at startup and
// looked up everywhere else.
class AsyncLocalStorageProvider {
  getInstance(): AsyncLocalStorageInterface {
    // Falls back to the no-op mock when nothing has been registered.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    return (globalThis as any)[TRACING_ALS_KEY] ?? mockAsyncLocalStorage;
  }
  initializeGlobalInstance(instance: AsyncLocalStorageInterface) {
    // First registration wins; subsequent calls are ignored.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    if ((globalThis as any)[TRACING_ALS_KEY] === undefined) {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      (globalThis as any)[TRACING_ALS_KEY] = instance;
    }
  }
}
export const AsyncLocalStorageProviderSingleton =
  new AsyncLocalStorageProvider();
/**
* Return the current run tree from within a traceable-wrapped function.
* Will throw an error if called outside of a traceable function.
*
* @returns The run tree for the given context.
*/
export const getCurrentRunTree = () => {
const runTree = AsyncLocalStorageProviderSingleton.getInstance().getStore();
if (!isRunTree(runTree)) {
throw new Error(
[
"Could not get the current run tree.",
"",
"Please make sure you are calling this method within a traceable function or the tracing is enabled.",
].join("\n")
);
}
return runTree;
};
// Runs `fn` with `runTree` installed as the active async-local tracing
// context, resolving with fn's (possibly awaited) result.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function withRunTree<Fn extends (...args: any[]) => any>(
  runTree: RunTree,
  fn: Fn
): Promise<Awaited<ReturnType<Fn>>> {
  const storage = AsyncLocalStorageProviderSingleton.getInstance();
  return new Promise<Awaited<ReturnType<Fn>>>((resolve, reject) => {
    // `fn` is invoked synchronously inside `run` so the context is active
    // when it starts; the result is normalized to a promise.
    storage.run(runTree, () => {
      void Promise.resolve(fn()).then(resolve, reject);
    });
  });
}
// Sentinel passed in place of a RunTree to start a brand-new root trace.
export const ROOT = Symbol.for("langsmith:traceable:root");
// Type guard: detects functions that have already been wrapped by
// `traceable` (marked with the "langsmith:traceable" property).
export function isTraceableFunction(
  x: unknown
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
): x is TraceableFunction<any> {
  if (typeof x !== "function") {
    return false;
  }
  return "langsmith:traceable" in x;
}
export type { TraceableFunction } from "./types.js";
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/singletons/constants.ts | export const _LC_CONTEXT_VARIABLES_KEY = Symbol.for("lc:context_variables");
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/evaluation/evaluate_comparative.ts | import { v4 as uuid4, validate } from "uuid";
import { Client } from "../index.js";
import {
ComparisonEvaluationResult as ComparisonEvaluationResultRow,
Example,
Run,
} from "../schemas.js";
import { shuffle } from "../utils/shuffle.js";
import { AsyncCaller } from "../utils/async_caller.js";
import { evaluate } from "./index.js";
import pRetry from "p-retry";
import { getCurrentRunTree, traceable } from "../traceable.js";
type ExperimentResults = Awaited<ReturnType<typeof evaluate>>;
// Type guard distinguishing resolved experiment results from experiment
// name/id strings: true when at least one entry is a non-string.
function isExperimentResultsList(
  value: ExperimentResults[] | string[]
): value is ExperimentResults[] {
  return value.some((entry) => typeof entry !== "string");
}
// Resolves an experiment (name/id string, or results from `evaluate`) to
// its tracer project; UUID-shaped values are looked up by project id,
// anything else by project name.
async function loadExperiment(
  client: Client,
  experiment: string | ExperimentResults
) {
  const nameOrId =
    typeof experiment === "string" ? experiment : experiment.experimentName;
  const selector = validate(nameOrId)
    ? { projectId: nameOrId }
    : { projectName: nameOrId };
  return client.readProject(selector);
}
/**
 * Fetches the runs of an experiment and reassembles the parent/child
 * tree. When `loadNested` is false, only top-level runs (execution
 * order 1) are requested. Children are attached to their parent sorted
 * by `dotted_order`; only parentless runs are returned.
 */
async function loadTraces(
  client: Client,
  experiment: string,
  options: { loadNested: boolean }
) {
  const executionOrder = options.loadNested ? undefined : 1;
  const selector = validate(experiment)
    ? { projectId: experiment, executionOrder }
    : { projectName: experiment, executionOrder };
  const childrenByParent: Record<string, Run[]> = {};
  const runById: Record<string, Run> = {};
  const roots: Run[] = [];
  for await (const run of client.listRuns(selector)) {
    runById[run.id] = run;
    if (run.parent_run_id == null) {
      roots.push(run);
    } else {
      (childrenByParent[run.parent_run_id] ??= []).push(run);
    }
  }
  for (const [parentId, children] of Object.entries(childrenByParent)) {
    runById[parentId].child_runs = children.sort((a, b) => {
      if (a.dotted_order == null || b.dotted_order == null) return 0;
      return a.dotted_order.localeCompare(b.dotted_order);
    });
  }
  return roots;
}
/** @deprecated Use ComparativeEvaluatorNew instead: (args: { runs, example, inputs, outputs, referenceOutputs }) => ... */
export type _ComparativeEvaluatorLegacy = (
runs: Run[],
example: Example
) => ComparisonEvaluationResultRow | Promise<ComparisonEvaluationResultRow>;
export type _ComparativeEvaluator = (args: {
runs: Run[];
example: Example;
inputs: Record<string, any>;
outputs: Record<string, any>[];
referenceOutputs?: Record<string, any>;
}) => ComparisonEvaluationResultRow | Promise<ComparisonEvaluationResultRow>;
export type ComparativeEvaluator =
| _ComparativeEvaluatorLegacy
| _ComparativeEvaluator;
export interface EvaluateComparativeOptions {
/**
* A list of evaluators to use for comparative evaluation.
*/
evaluators: Array<ComparativeEvaluator>;
/**
* Randomize the order of outputs for each evaluation
* @default false
*/
randomizeOrder?: boolean;
/**
* The LangSmith client to use.
* @default undefined
*/
client?: Client;
/**
* Metadata to attach to the experiment.
* @default undefined
*/
metadata?: Record<string, unknown>;
/**
* A prefix to use for your experiment name.
* @default undefined
*/
experimentPrefix?: string;
/**
* A free-form description of the experiment.
* @default undefined
*/
description?: string;
/**
* Whether to load all child runs for the experiment.
* @default false
*/
loadNested?: boolean;
/**
* The maximum number of concurrent evaluators to run.
* @default undefined
*/
maxConcurrency?: number;
}
export interface ComparisonEvaluationResults {
experimentName: string;
results: ComparisonEvaluationResultRow[];
}
/**
 * Runs pairwise (comparative) evaluators over two or more existing
 * experiments that share a reference dataset. Each evaluator is invoked
 * once per example that all experiments have in common, and its scores
 * are logged as feedback attached to a newly created comparative
 * experiment.
 *
 * @param experiments Experiment names/ids, or (promises of) results from
 *   prior `evaluate` calls. At least two are required.
 * @param options Evaluators plus client/metadata/concurrency settings.
 * @returns The comparative experiment name and all evaluator result rows.
 * @throws If fewer than 2 experiments or no evaluators are supplied, if
 *   the experiments use different reference datasets, or if they share no
 *   examples.
 */
export async function evaluateComparative(
  experiments:
    | Array<string>
    | Array<Promise<ExperimentResults> | ExperimentResults>,
  options: EvaluateComparativeOptions
): Promise<ComparisonEvaluationResults> {
  if (experiments.length < 2) {
    throw new Error("Comparative evaluation requires at least 2 experiments.");
  }
  if (!options.evaluators.length) {
    throw new Error(
      "At least one evaluator is required for comparative evaluation."
    );
  }
  if (options.maxConcurrency && options.maxConcurrency < 0) {
    throw new Error("maxConcurrency must be a positive number.");
  }
  const client = options.client ?? new Client();
  const resolvedExperiments = await Promise.all(experiments);
  // Resolve each experiment to its tracer project.
  const projects = await (() => {
    if (!isExperimentResultsList(resolvedExperiments)) {
      return Promise.all(
        resolvedExperiments.map((experiment) =>
          loadExperiment(client, experiment)
        )
      );
    }
    // if we know the number of runs beforehand, check if the
    // number of runs in the project matches the expected number of runs
    // (runs may still be flushing server-side, hence the retry loop).
    return Promise.all(
      resolvedExperiments.map((experiment) =>
        pRetry(
          async () => {
            const project = await loadExperiment(client, experiment);
            if (project.run_count !== experiment?.results.length) {
              throw new Error("Experiment is missing runs. Retrying.");
            }
            return project;
          },
          { factor: 2, minTimeout: 1000, retries: 10 }
        )
      )
    );
  })();
  // All experiments must be built on the same reference dataset for the
  // per-example comparison to make sense.
  if (new Set(projects.map((p) => p.reference_dataset_id)).size > 1) {
    throw new Error("All experiments must have the same reference dataset.");
  }
  const referenceDatasetId = projects.at(0)?.reference_dataset_id;
  if (!referenceDatasetId) {
    throw new Error(
      "Reference dataset is required for comparative evaluation."
    );
  }
  if (
    new Set(projects.map((p) => p.extra?.metadata?.dataset_version)).size > 1
  ) {
    console.warn(
      "Detected multiple dataset versions used by experiments, which may lead to inaccurate results."
    );
  }
  const datasetVersion = projects.at(0)?.extra?.metadata?.dataset_version;
  const id = uuid4();
  // Derive a display name: user prefix, or the joined project names, plus
  // a short random suffix for uniqueness.
  const experimentName = (() => {
    if (!options.experimentPrefix) {
      const names = projects
        .map((p) => p.name)
        .filter(Boolean)
        .join(" vs. ");
      return `${names}-${uuid4().slice(0, 4)}`;
    }
    return `${options.experimentPrefix}-${uuid4().slice(0, 4)}`;
  })();
  // TODO: add URL to the comparative experiment
  console.log(`Starting pairwise evaluation of: ${experimentName}`);
  const comparativeExperiment = await client.createComparativeExperiment({
    id,
    name: experimentName,
    experimentIds: projects.map((p) => p.id),
    description: options.description,
    metadata: options.metadata,
    referenceDatasetId: projects.at(0)?.reference_dataset_id,
  });
  // Best-effort: build a UI link to the dataset comparison view.
  const viewUrl = await (async () => {
    const projectId = projects.at(0)?.id ?? projects.at(1)?.id;
    const datasetId = comparativeExperiment?.reference_dataset_id;
    if (projectId && datasetId) {
      const hostUrl = (await client.getProjectUrl({ projectId }))
        .split("/projects/p/")
        .at(0);
      const result = new URL(`${hostUrl}/datasets/${datasetId}/compare`);
      result.searchParams.set(
        "selectedSessions",
        projects.map((p) => p.id).join(",")
      );
      result.searchParams.set(
        "comparativeExperiment",
        comparativeExperiment.id
      );
      return result.toString();
    }
    return null;
  })();
  if (viewUrl != null) {
    console.log(`View results at: ${viewUrl}`);
  }
  const experimentRuns = await Promise.all(
    projects.map((p) =>
      loadTraces(client, p.id, { loadNested: !!options.loadNested })
    )
  );
  // Only examples present in every experiment can be compared; intersect
  // the reference example ids across all run sets.
  let exampleIdsIntersect: Set<string> | undefined;
  for (const runs of experimentRuns) {
    const exampleIdsSet = new Set(
      runs
        .map((r) => r.reference_example_id)
        .filter((x): x is string => x != null)
    );
    if (!exampleIdsIntersect) {
      exampleIdsIntersect = exampleIdsSet;
    } else {
      exampleIdsIntersect = new Set(
        [...exampleIdsIntersect].filter((x) => exampleIdsSet.has(x))
      );
    }
  }
  const exampleIds = [...(exampleIdsIntersect ?? [])];
  if (!exampleIds.length) {
    throw new Error("No examples found in common between experiments.");
  }
  // Fetch example bodies in chunks of 99 ids per request.
  const exampleMap: Record<string, Example> = {};
  for (let start = 0; start < exampleIds.length; start += 99) {
    const exampleIdsChunk = exampleIds.slice(start, start + 99);
    for await (const example of client.listExamples({
      datasetId: referenceDatasetId,
      exampleIds: exampleIdsChunk,
      asOf: datasetVersion,
    })) {
      exampleMap[example.id] = example;
    }
  }
  // Group each experiment's runs by the example they answered.
  const runMapByExampleId: Record<string, Run[]> = {};
  for (const runs of experimentRuns) {
    for (const run of runs) {
      if (
        run.reference_example_id == null ||
        !exampleIds.includes(run.reference_example_id)
      ) {
        continue;
      }
      runMapByExampleId[run.reference_example_id] ??= [];
      runMapByExampleId[run.reference_example_id].push(run);
    }
  }
  const caller = new AsyncCaller({ maxConcurrency: options.maxConcurrency });
  // Invokes one evaluator on one example's runs and logs every returned
  // score as feedback tied to the comparative experiment.
  async function evaluateAndSubmitFeedback(
    runs: Run[],
    example: Example,
    evaluator: ComparativeEvaluator
  ) {
    const expectedRunIds = new Set(runs.map((r) => r.id));
    // Check if evaluator expects an object parameter
    const result =
      evaluator.length === 1
        ? await (evaluator as _ComparativeEvaluator)({
            runs: options.randomizeOrder ? shuffle(runs) : runs,
            example,
            inputs: example.inputs,
            outputs: runs.map((run) => run.outputs || {}),
            referenceOutputs: example.outputs || {},
          })
        : await (evaluator as _ComparativeEvaluatorLegacy)(runs, example);
    for (const [runId, score] of Object.entries(result.scores)) {
      // Validate that the evaluator only returned scores for known run ids.
      if (!expectedRunIds.has(runId)) {
        throw new Error(`Returning an invalid run id ${runId} from evaluator.`);
      }
      await client.createFeedback(runId, result.key, {
        score,
        sourceRunId: result.source_run_id,
        comparativeExperimentId: comparativeExperiment.id,
      });
    }
    return result;
  }
  // Wrap each evaluator in `traceable` so its own execution is traced to
  // the "evaluators" project.
  const tracedEvaluators = options.evaluators.map((evaluator) =>
    traceable(
      async (
        runs: Run[],
        example: Example
      ): Promise<ComparisonEvaluationResultRow> => {
        const evaluatorRun = getCurrentRunTree();
        const result =
          evaluator.length === 1
            ? await (evaluator as _ComparativeEvaluator)({
                runs: options.randomizeOrder ? shuffle(runs) : runs,
                example,
                inputs: example.inputs,
                outputs: runs.map((run) => run.outputs || {}),
                referenceOutputs: example.outputs || {},
              })
            : await (evaluator as _ComparativeEvaluatorLegacy)(runs, example);
        // sanitise the payload before sending to LangSmith
        evaluatorRun.inputs = { runs: runs, example: example };
        evaluatorRun.outputs = result;
        return {
          ...result,
          source_run_id: result.source_run_id ?? evaluatorRun.id,
        };
      },
      {
        project_name: "evaluators",
        name: evaluator.name || "evaluator",
      }
    )
  );
  // Fan out: one task per (example, evaluator) pair, throttled by the
  // AsyncCaller's concurrency limit.
  const promises = Object.entries(runMapByExampleId).flatMap(
    ([exampleId, runs]) => {
      const example = exampleMap[exampleId];
      if (!example) throw new Error(`Example ${exampleId} not found.`);
      return tracedEvaluators.map((evaluator) =>
        caller.call(
          evaluateAndSubmitFeedback,
          runs,
          exampleMap[exampleId],
          evaluator
        )
      );
    }
  );
  const results: ComparisonEvaluationResultRow[] = await Promise.all(promises);
  return { experimentName, results };
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/evaluation/string_evaluator.ts | import { Example, Run, ScoreType, ValueType } from "../schemas.js";
import { EvaluationResult, RunEvaluator } from "./evaluator.js";
export interface GradingFunctionResult {
key?: string;
score?: ScoreType;
value?: ValueType;
comment?: string;
correction?: Record<string, unknown>;
}
export interface GradingFunctionParams {
input: string;
prediction: string;
answer?: string;
}
export interface StringEvaluatorParams {
evaluationName?: string;
inputKey?: string;
predictionKey?: string;
answerKey?: string;
gradingFunction: (
params: GradingFunctionParams
) => Promise<GradingFunctionResult>;
}
/**
 * Run evaluator that extracts string fields from a run's inputs/outputs
 * (and optionally the reference example's outputs) and delegates the
 * actual scoring to a user-supplied grading function.
 */
export class StringEvaluator implements RunEvaluator {
  // Fallback feedback key used when the grading result has no `key`.
  protected evaluationName?: string;
  // Key into `run.inputs` holding the input string.
  protected inputKey: string;
  // Key into `run.outputs` holding the prediction string.
  protected predictionKey: string;
  // Key into `example.outputs` holding the reference answer; an explicit
  // empty string disables answer lookup.
  protected answerKey?: string;
  protected gradingFunction: (
    params: GradingFunctionParams
  ) => Promise<GradingFunctionResult>;
  constructor(params: StringEvaluatorParams) {
    this.evaluationName = params.evaluationName;
    this.inputKey = params.inputKey ?? "input";
    this.predictionKey = params.predictionKey ?? "output";
    // Only `undefined` means "use the default"; other falsy values are
    // taken as given.
    this.answerKey =
      params.answerKey === undefined ? "output" : params.answerKey;
    this.gradingFunction = params.gradingFunction;
  }
  async evaluateRun(run: Run, example?: Example): Promise<EvaluationResult> {
    if (!run.outputs) {
      throw new Error("Run outputs cannot be undefined.");
    }
    const grade = await this.gradingFunction({
      input: run.inputs[this.inputKey],
      prediction: run.outputs[this.predictionKey],
      answer: this.answerKey ? example?.outputs?.[this.answerKey] : null,
    });
    const key = grade.key || this.evaluationName;
    if (!key) {
      throw new Error("Evaluation name cannot be undefined.");
    }
    return {
      key,
      score: grade.score,
      value: grade.value,
      comment: grade.comment,
      correction: grade.correction,
    };
  }
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/evaluation/langchain.ts | // eslint-disable-next-line import/no-extraneous-dependencies
import { type LoadEvaluatorOptions, loadEvaluator } from "langchain/evaluation";
import type { Run, Example } from "../schemas.js";
import { getLangchainCallbacks } from "../langchain.js";
/** Narrow `value` to a primitive that stringifies meaningfully. */
function isStringifiable(
  value: unknown
): value is string | number | boolean | bigint {
  switch (typeof value) {
    case "string":
    case "number":
    case "boolean":
    case "bigint":
      return true;
    default:
      return false;
  }
}
// utility methods for extracting stringified values
// from unknown inputs and records
function getPrimitiveValue(value: unknown) {
  // Direct primitive: stringify as-is.
  if (isStringifiable(value)) return String(value);
  // Single-entry plain object whose lone value is a primitive: unwrap it.
  if (value != null && typeof value === "object" && !Array.isArray(value)) {
    const entries = Object.values(value);
    if (entries.length === 1 && isStringifiable(entries[0])) {
      return String(entries[0]);
    }
  }
  // Anything else has no canonical string form.
  return undefined;
}
/**
 * @deprecated Use `evaluate` instead.
 *
 * This utility function loads a LangChain string evaluator and returns a function
 * which can be used by newer `evaluate` function.
 *
 * @param type Type of string evaluator, one of "criteria" or "labeled_criteria
 * @param options Options for loading the evaluator
 * @returns Evaluator consumable by `evaluate`
 */
export async function getLangchainStringEvaluator(
  type: "criteria" | "labeled_criteria",
  options: LoadEvaluatorOptions & {
    formatEvaluatorInputs?: (
      run: Run,
      example: Example
    ) => { prediction: string; reference?: string; input?: string };
  }
) {
  const evaluator = await loadEvaluator(type, options);
  // Feedback key: a primitive criteria value when extractable, else the type.
  const feedbackKey = getPrimitiveValue(options.criteria) ?? type;
  // Default formatter: pull primitive prediction/reference/input off the
  // run and example; "criteria" evaluators take no reference.
  const defaultFormat = (run: Run, example: Example) => {
    const prediction = getPrimitiveValue(run.outputs);
    if (prediction == null) throw new Error("Missing prediction");
    const input = getPrimitiveValue(example.inputs);
    if (type === "criteria") return { prediction, input };
    const reference = getPrimitiveValue(example.outputs);
    return { prediction, reference, input };
  };
  const format = options.formatEvaluatorInputs ?? defaultFormat;
  return async (run: Run, example: Example) => {
    // Hand the current LangSmith run tree off to LangChain via callbacks.
    const callbacks = await getLangchainCallbacks();
    const score = await evaluator.evaluateStrings(format(run, example), {
      callbacks,
    });
    return { key: feedbackKey, ...score };
  };
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/evaluation/_runner.ts | import { Client, RunTree, RunTreeConfig } from "../index.js";
import { BaseRun, Example, KVMap, Run, TracerSession } from "../schemas.js";
import { traceable } from "../traceable.js";
import { getDefaultRevisionId, getGitInfo } from "../utils/_git.js";
import { assertUuid } from "../utils/_uuid.js";
import { AsyncCaller } from "../utils/async_caller.js";
import { atee } from "../utils/atee.js";
import { getLangChainEnvVarsMetadata } from "../utils/env.js";
import { printErrorStackTrace } from "../utils/error.js";
import { randomName } from "./_random_name.js";
import {
EvaluationResult,
EvaluationResults,
RunEvaluator,
runEvaluator,
} from "./evaluator.js";
import { LangSmithConflictError } from "../utils/error.js";
import { v4 as uuidv4 } from "uuid";
import {
evaluateComparative,
ComparisonEvaluationResults,
ComparativeEvaluator,
} from "./evaluate_comparative.js";
// A standard (non-comparative) evaluation target: a sync or async function
// of the example inputs, or a Runnable-like object exposing `invoke`.
type StandardTargetT<TInput = any, TOutput = KVMap> =
  | ((input: TInput, config?: KVMap) => Promise<TOutput>)
  | ((input: TInput, config?: KVMap) => TOutput)
  | { invoke: (input: TInput, config?: KVMap) => TOutput }
  | { invoke: (input: TInput, config?: KVMap) => Promise<TOutput> };
// A comparative target: a list of existing experiment names/ids, or a list
// of (promises of) ExperimentResults from prior evaluate() calls.
type ComparativeTargetT =
  | Array<string>
  | Array<Promise<ExperimentResults> | ExperimentResults>;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export type TargetT<TInput = any, TOutput = KVMap> =
  | StandardTargetT<TInput, TOutput>
  | ComparativeTargetT;
// Data format: dataset-name, dataset_id, or examples
export type DataT = string | AsyncIterable<Example> | Example[];
// A summary evaluator consumes every run/example pair in the experiment
// and reports aggregate metric(s).
/** @deprecated Use object parameter version instead: (args: { runs, examples, inputs, outputs, referenceOutputs }) => ... */
type DeprecatedSyncSummaryEvaluator = (
  runs: Array<Run>,
  examples: Array<Example>
) => EvaluationResult | EvaluationResults;
/** @deprecated Use object parameter version instead: (args: { runs, examples, inputs, outputs, referenceOutputs }) => ... */
type DeprecatedAsyncSummaryEvaluator = (
  runs: Array<Run>,
  examples: Array<Example>
) => Promise<EvaluationResult | EvaluationResults>;
// Summary evaluator runs over the whole dataset
export type SummaryEvaluatorT =
  | DeprecatedSyncSummaryEvaluator
  | DeprecatedAsyncSummaryEvaluator
  | ((args: {
      runs: Array<Run>;
      examples: Array<Example>;
      inputs: Array<Record<string, any>>;
      outputs: Array<Record<string, any>>;
      referenceOutputs?: Array<Record<string, any>>;
    }) => EvaluationResult | EvaluationResults)
  | ((args: {
      runs: Array<Run>;
      examples: Array<Example>;
      inputs: Array<Record<string, any>>;
      outputs: Array<Record<string, any>>;
      referenceOutputs?: Array<Record<string, any>>;
    }) => Promise<EvaluationResult | EvaluationResults>);
/** @deprecated Use object parameter version instead: (args: { run, example, inputs, outputs, referenceOutputs }) => ... */
type DeprecatedRunEvaluator = RunEvaluator;
/** @deprecated Use object parameter version instead: (args: { run, example, inputs, outputs, referenceOutputs }) => ... */
type DeprecatedFunctionEvaluator = (
  run: Run,
  example?: Example
) => EvaluationResult | EvaluationResults;
/** @deprecated Use object parameter version instead: (args: { run, example, inputs, outputs, referenceOutputs }) => ... */
type DeprecatedAsyncFunctionEvaluator = (
  run: Run,
  example?: Example
) => Promise<EvaluationResult | EvaluationResults>;
// Row-level evaluator: scores a single run against its source example.
export type EvaluatorT =
  | DeprecatedRunEvaluator
  | DeprecatedFunctionEvaluator
  | DeprecatedAsyncFunctionEvaluator
  | ((args: {
      run: Run;
      example: Example;
      inputs: Record<string, any>;
      outputs: Record<string, any>;
      referenceOutputs?: Record<string, any>;
    }) => EvaluationResult | EvaluationResults)
  | ((args: {
      run: Run;
      example: Example;
      inputs: Record<string, any>;
      outputs: Record<string, any>;
      referenceOutputs?: Record<string, any>;
    }) => Promise<EvaluationResult | EvaluationResults>);
/** Result of running the target once: the traced run and its source example. */
interface _ForwardResults {
  run: Run;
  example: Example;
}
/** Internal constructor arguments for _ExperimentManager. */
interface _ExperimentManagerArgs {
  // Dataset name/id or explicit examples to evaluate over.
  data?: DataT;
  // Existing experiment session, or a string name prefix.
  experiment?: TracerSession | string;
  metadata?: KVMap;
  client?: Client;
  // Streams carried over from a previous pipeline stage.
  runs?: AsyncGenerator<Run>;
  evaluationResults?: AsyncGenerator<EvaluationResults>;
  // Deferred summary-feedback generators (see _applySummaryEvaluators).
  summaryResults?: AsyncGenerator<
    (runsArray: Run[]) => AsyncGenerator<EvaluationResults, any, unknown>,
    any,
    unknown
  >;
  examples?: Example[];
  numRepetitions?: number;
  _runsArray?: Run[];
}
/** Options shared by standard and comparative `evaluate` calls. */
type BaseEvaluateOptions = {
  /**
   * Metadata to attach to the experiment.
   * @default undefined
   */
  metadata?: KVMap;
  /**
   * A prefix to provide for your experiment name.
   * @default undefined
   */
  experimentPrefix?: string;
  /**
   * A free-form description of the experiment.
   */
  description?: string;
  /**
   * The maximum number of concurrent evaluations to run.
   * @default undefined
   */
  maxConcurrency?: number;
  /**
   * The LangSmith client to use.
   * @default undefined
   */
  client?: Client;
  /**
   * The number of repetitions to perform. Each example
   * will be run this many times.
   * @default 1
   */
  numRepetitions?: number;
};
/** Options for evaluating a single target over a dataset. */
export interface EvaluateOptions extends BaseEvaluateOptions {
  /**
   * A list of evaluators to run on each example.
   * @default undefined
   */
  evaluators?: Array<EvaluatorT>;
  /**
   * A list of summary evaluators to run on the entire dataset.
   * @default undefined
   */
  summaryEvaluators?: Array<SummaryEvaluatorT>;
  /**
   * The dataset to evaluate on. Can be a dataset name, a list of
   * examples, or a generator of examples.
   */
  data: DataT;
}
/** Options for comparing multiple existing experiments pairwise. */
export interface ComparativeEvaluateOptions extends BaseEvaluateOptions {
  /**
   * A list of evaluators to run on each example.
   */
  evaluators: Array<ComparativeEvaluator>;
  /**
   * Whether to load all child runs for the experiment.
   * @default false
   */
  loadNested?: boolean;
  /**
   * Randomize the order of outputs for each evaluation
   * @default false
   */
  randomizeOrder?: boolean;
}
// Function overloads
/** Compare multiple existing experiments pairwise with comparative evaluators. */
export function evaluate(
  target: ComparativeTargetT,
  options: ComparativeEvaluateOptions
): Promise<ComparisonEvaluationResults>;
/** Run a target over a dataset and score each run with the given evaluators. */
export function evaluate(
  target: StandardTargetT,
  options: EvaluateOptions
): Promise<ExperimentResults>;
// Implementation signature
export function evaluate(
  target: TargetT,
  options: EvaluateOptions | ComparativeEvaluateOptions
): Promise<ExperimentResults | ComparisonEvaluationResults> {
  return _evaluate(target, options);
}
/** A single scored row: the run, its source example, and its evaluator feedback. */
export interface ExperimentResultRow {
  run: Run;
  example: Example;
  evaluationResults: EvaluationResults;
}
/**
 * Manage the execution of experiments.
 *
 * Supports lazily running predictions and evaluations in parallel to facilitate
 * result streaming and early debugging.
 */
export class _ExperimentManager {
  _data?: DataT;
  _runs?: AsyncGenerator<Run>;
  _evaluationResults?: AsyncGenerator<EvaluationResults>;
  // Each yielded function receives the materialized runs array and produces
  // summary-level evaluation results (see _applySummaryEvaluators).
  _summaryResults?: AsyncGenerator<
    (runsArray: Run[]) => AsyncGenerator<EvaluationResults, any, unknown>,
    any,
    unknown
  >;
  _examples?: Example[];
  _numRepetitions?: number;
  _runsArray?: Run[];
  client: Client;
  _experiment?: TracerSession;
  _experimentName: string;
  _metadata: KVMap;
  _description?: string;
  /** Experiment name; throws if neither provided nor started yet. */
  get experimentName(): string {
    if (this._experimentName) {
      return this._experimentName;
    } else {
      throw new Error(
        "Experiment name not provided, and experiment not yet started."
      );
    }
  }
  /**
   * Resolve and cache the examples to evaluate over, duplicating the list
   * `numRepetitions` times when repetitions are requested.
   */
  async getExamples(): Promise<Array<Example>> {
    if (!this._examples) {
      if (!this._data) {
        throw new Error("Data not provided in this experiment.");
      }
      const unresolvedData = _resolveData(this._data, { client: this.client });
      if (!this._examples) {
        this._examples = [];
      }
      const exs = [];
      for await (const example of unresolvedData) {
        exs.push(example);
      }
      if (this._numRepetitions && this._numRepetitions > 0) {
        const repeatedExamples = [];
        for (let i = 0; i < this._numRepetitions; i++) {
          repeatedExamples.push(...exs);
        }
        this.setExamples(repeatedExamples);
      } else {
        this.setExamples(exs);
      }
    }
    return this._examples;
  }
  setExamples(examples: Example[]): void {
    this._examples = examples;
  }
  /** Dataset id backing this experiment (from the session, else the first example). */
  get datasetId(): Promise<string> {
    return this.getExamples().then((examples) => {
      if (examples.length === 0) {
        throw new Error("No examples found in the dataset.");
      }
      if (this._experiment && this._experiment.reference_dataset_id) {
        return this._experiment.reference_dataset_id;
      }
      return examples[0].dataset_id;
    });
  }
  /** Per-example evaluation results; yields empty result sets when none exist. */
  get evaluationResults(): AsyncGenerator<EvaluationResults> {
    if (this._evaluationResults === undefined) {
      return async function* (this: _ExperimentManager) {
        for (const _ of await this.getExamples()) {
          yield { results: [] };
        }
      }.call(this);
    } else {
      return this._evaluationResults;
    }
  }
  /** Stream of predicted runs; requires a prior prediction step. */
  get runs(): AsyncGenerator<Run> {
    if (this._runsArray && this._runsArray.length > 0) {
      throw new Error("Runs already provided as an array.");
    }
    if (this._runs === undefined) {
      throw new Error(
        "Runs not provided in this experiment. Please predict first."
      );
    } else {
      return this._runs;
    }
  }
  constructor(args: _ExperimentManagerArgs) {
    this.client = args.client ?? new Client();
    // Resolve the experiment name: random when absent, uuid-suffixed when a
    // string prefix is given, or taken from an existing session object.
    if (!args.experiment) {
      this._experimentName = randomName();
    } else if (typeof args.experiment === "string") {
      this._experimentName = `${args.experiment}-${uuidv4().slice(0, 8)}`;
    } else {
      if (!args.experiment.name) {
        throw new Error("Experiment must have a name");
      }
      this._experimentName = args.experiment.name;
      this._experiment = args.experiment;
    }
    let metadata = args.metadata || {};
    // Seed revision_id from the environment unless the caller set one.
    if (!("revision_id" in metadata)) {
      metadata = {
        revision_id: getLangChainEnvVarsMetadata().revision_id,
        ...metadata,
      };
    }
    this._metadata = metadata;
    if (args.examples && args.examples.length) {
      this.setExamples(args.examples);
    }
    this._data = args.data;
    if (args._runsArray && args._runsArray.length) {
      this._runsArray = args._runsArray;
    }
    this._runs = args.runs;
    this._evaluationResults = args.evaluationResults;
    this._summaryResults = args.summaryResults;
    this._numRepetitions = args.numRepetitions;
  }
  _getExperiment(): TracerSession {
    if (!this._experiment) {
      throw new Error("Experiment not yet started.");
    }
    return this._experiment;
  }
  /** Merge git info and any existing session metadata into project metadata. */
  async _getExperimentMetadata(): Promise<KVMap> {
    let projectMetadata = this._metadata ?? {};
    const gitInfo = await getGitInfo();
    if (gitInfo) {
      projectMetadata = {
        ...projectMetadata,
        git: gitInfo,
      };
    }
    if (this._experiment) {
      const experimentMetadata: KVMap =
        this._experiment.extra && "metadata" in this._experiment.extra
          ? this._experiment.extra.metadata
          : {};
      projectMetadata = {
        ...experimentMetadata,
        ...projectMetadata,
      };
    }
    return projectMetadata;
  }
  async _createProject(firstExample: Example, projectMetadata: KVMap) {
    // Create the project, updating the experimentName until we find a unique one.
    let project: TracerSession;
    const originalExperimentName = this._experimentName;
    for (let i = 0; i < 10; i++) {
      try {
        project = await this.client.createProject({
          projectName: this._experimentName,
          referenceDatasetId: firstExample.dataset_id,
          metadata: projectMetadata,
          description: this._description,
        });
        return project;
      } catch (e) {
        // Naming collision
        if ((e as LangSmithConflictError)?.name === "LangSmithConflictError") {
          const ent = uuidv4().slice(0, 6);
          this._experimentName = `${originalExperimentName}-${ent}`;
        } else {
          throw e;
        }
      }
    }
    throw new Error(
      "Could not generate a unique experiment name within 10 attempts." +
        " Please try again with a different name."
    );
  }
  /** Return the existing session or lazily create the backing project. */
  async _getProject(firstExample: Example): Promise<TracerSession> {
    let project: TracerSession;
    if (!this._experiment) {
      const projectMetadata = await this._getExperimentMetadata();
      project = await this._createProject(firstExample, projectMetadata);
      this._experiment = project;
    }
    return this._experiment;
  }
  protected async _printExperimentStart(): Promise<void> {
    console.log(`Starting evaluation of experiment: ${this.experimentName}`);
    const firstExample = this._examples?.[0];
    const datasetId = firstExample?.dataset_id;
    if (!datasetId || !this._experiment) return;
    const datasetUrl = await this.client.getDatasetUrl({ datasetId });
    const compareUrl = `${datasetUrl}/compare?selectedSessions=${this._experiment.id}`;
    console.log(`View results at ${compareUrl}`);
  }
  /** Create the backing project (if needed) and return a started manager. */
  async start(): Promise<_ExperimentManager> {
    const examples = await this.getExamples();
    const firstExample = examples[0];
    const project = await this._getProject(firstExample);
    await this._printExperimentStart();
    this._metadata["num_repetitions"] = this._numRepetitions;
    return new _ExperimentManager({
      examples,
      experiment: project,
      metadata: this._metadata,
      client: this.client,
      evaluationResults: this._evaluationResults,
      summaryResults: this._summaryResults,
    });
  }
  /** Run the target over all examples; returns a manager streaming the runs. */
  async withPredictions(
    target: StandardTargetT,
    options?: {
      maxConcurrency?: number;
    }
  ): Promise<_ExperimentManager> {
    const experimentResults = this._predict(target, options);
    return new _ExperimentManager({
      examples: await this.getExamples(),
      experiment: this._experiment,
      metadata: this._metadata,
      client: this.client,
      runs: (async function* (): AsyncGenerator<Run> {
        for await (const pred of experimentResults) {
          yield pred.run;
        }
      })(),
    });
  }
  /** Score each run with the given evaluators; streams runs and results. */
  async withEvaluators(
    evaluators: Array<EvaluatorT | RunEvaluator>,
    options?: {
      maxConcurrency?: number;
    }
  ): Promise<_ExperimentManager> {
    const resolvedEvaluators = _resolveEvaluators(evaluators);
    const experimentResults = this._score(resolvedEvaluators, options);
    // Tee the stream so runs and evaluation results can be consumed separately.
    const [r1, r2] = atee<ExperimentResultRow>(experimentResults);
    return new _ExperimentManager({
      examples: await this.getExamples(),
      experiment: this._experiment,
      metadata: this._metadata,
      client: this.client,
      runs: (async function* (): AsyncGenerator<Run> {
        for await (const result of r1) {
          yield result.run;
        }
      })(),
      evaluationResults:
        (async function* (): AsyncGenerator<EvaluationResults> {
          for await (const result of r2) {
            yield result.evaluationResults;
          }
        })(),
      summaryResults: this._summaryResults,
    });
  }
  /** Attach summary evaluators that run over the whole experiment. */
  async withSummaryEvaluators(
    summaryEvaluators: Array<SummaryEvaluatorT>
  ): Promise<_ExperimentManager> {
    const aggregateFeedbackGen =
      this._applySummaryEvaluators(summaryEvaluators);
    return new _ExperimentManager({
      examples: await this.getExamples(),
      experiment: this._experiment,
      metadata: this._metadata,
      client: this.client,
      runs: this.runs,
      _runsArray: this._runsArray,
      evaluationResults: this._evaluationResults,
      summaryResults: aggregateFeedbackGen,
    });
  }
  /** Pair each run with its example and evaluation results, aligned by index. */
  async *getResults(): AsyncGenerator<ExperimentResultRow> {
    const examples = await this.getExamples();
    const evaluationResults: EvaluationResults[] = [];
    if (!this._runsArray) {
      this._runsArray = [];
      for await (const run of this.runs) {
        this._runsArray.push(run);
      }
    }
    for await (const evaluationResult of this.evaluationResults) {
      evaluationResults.push(evaluationResult);
    }
    for (let i = 0; i < this._runsArray.length; i++) {
      yield {
        run: this._runsArray[i],
        example: examples[i],
        evaluationResults: evaluationResults[i],
      };
    }
  }
  /** Collect all summary-evaluator feedback into a single result set. */
  async getSummaryScores(): Promise<EvaluationResults> {
    if (!this._summaryResults) {
      return { results: [] };
    }
    const results: EvaluationResult[] = [];
    for await (const evaluationResultsGenerator of this._summaryResults) {
      if (typeof evaluationResultsGenerator === "function") {
        // This is because runs array is not available until after this generator
        // is set, so we need to pass it like so.
        for await (const evaluationResults of evaluationResultsGenerator(
          this._runsArray ?? []
        )) {
          results.push(...evaluationResults.results);
        }
      }
    }
    return { results };
  }
  // Private methods
  /**
   * Run the target function or runnable on the examples.
   * @param {StandardTargetT} target The target function or runnable to evaluate.
   * @param options
   * @returns {AsyncGenerator<_ForwardResults>} An async generator of the results.
   */
  async *_predict(
    target: StandardTargetT,
    options?: {
      maxConcurrency?: number;
    }
  ): AsyncGenerator<_ForwardResults> {
    const maxConcurrency = options?.maxConcurrency ?? 0;
    const examples = await this.getExamples();
    if (maxConcurrency === 0) {
      // Sequential execution.
      for (const example of examples) {
        yield await _forward(
          target,
          example,
          this.experimentName,
          this._metadata,
          this.client
        );
      }
    } else {
      // Bounded-concurrency execution via AsyncCaller.
      const caller = new AsyncCaller({
        maxConcurrency,
      });
      const futures: Array<Promise<_ForwardResults>> = [];
      for await (const example of examples) {
        futures.push(
          caller.call(
            _forward,
            target,
            example,
            this.experimentName,
            this._metadata,
            this.client
          )
        );
      }
      for await (const future of futures) {
        yield future;
      }
    }
    // Close out the project.
    await this._end();
  }
  /** Apply every evaluator to one run/example pair and log the feedback. */
  async _runEvaluators(
    evaluators: Array<RunEvaluator>,
    currentResults: ExperimentResultRow,
    fields: {
      client: Client;
    }
  ): Promise<ExperimentResultRow> {
    const { run, example, evaluationResults } = currentResults;
    for (const evaluator of evaluators) {
      try {
        const options = {
          reference_example_id: example.id,
          project_name: "evaluators",
          metadata: {
            example_version: example.modified_at
              ? new Date(example.modified_at).toISOString()
              : new Date(example.created_at).toISOString(),
          },
          client: fields.client,
          tracingEnabled: true,
        };
        const evaluatorResponse = await evaluator.evaluateRun(
          run,
          example,
          options
        );
        evaluationResults.results.push(
          ...(await fields.client.logEvaluationFeedback(evaluatorResponse, run))
        );
      } catch (e) {
        // Evaluator failures are logged but do not abort the experiment.
        console.error(
          `Error running evaluator ${evaluator.evaluateRun.name} on run ${run.id}: ${e}`
        );
        printErrorStackTrace(e);
      }
    }
    return {
      run,
      example,
      evaluationResults,
    };
  }
  /**
   * Run the evaluators on the prediction stream.
   * Expects runs to be available in the manager.
   * (e.g. from a previous prediction step)
   * @param {Array<RunEvaluator>} evaluators
   * @param {number} maxConcurrency
   */
  async *_score(
    evaluators: Array<RunEvaluator>,
    options?: {
      maxConcurrency?: number;
    }
  ): AsyncGenerator<ExperimentResultRow> {
    const { maxConcurrency = 0 } = options || {};
    if (maxConcurrency === 0) {
      for await (const currentResults of this.getResults()) {
        yield this._runEvaluators(evaluators, currentResults, {
          client: this.client,
        });
      }
    } else {
      const caller = new AsyncCaller({
        maxConcurrency,
      });
      const futures: Promise<ExperimentResultRow>[] = [];
      for await (const currentResults of this.getResults()) {
        futures.push(
          caller.call(this._runEvaluators, evaluators, currentResults, {
            client: this.client,
          })
        );
      }
      // Yielded promises are awaited by the async-generator machinery.
      for (const result of futures) {
        yield result;
      }
    }
  }
  /** Wrap summary evaluators and yield a deferred generator of their feedback. */
  async *_applySummaryEvaluators(
    summaryEvaluators: Array<SummaryEvaluatorT>
  ): AsyncGenerator<(runsArray: Run[]) => AsyncGenerator<EvaluationResults>> {
    const projectId = this._getExperiment().id;
    const examples = await this.getExamples();
    // One RunTree options object per evaluator, aligned by index.
    const options = Array.from({ length: summaryEvaluators.length }).map(
      () => ({
        project_name: "evaluators",
        experiment: this.experimentName,
        projectId: projectId,
      })
    );
    const wrappedEvaluators = await wrapSummaryEvaluators(
      summaryEvaluators,
      options
    );
    yield async function* (
      this: _ExperimentManager,
      runsArray: Run[]
    ): AsyncGenerator<EvaluationResults> {
      const aggregateFeedback = [];
      for (const evaluator of wrappedEvaluators) {
        try {
          const summaryEvalResult = await evaluator(runsArray, examples);
          const flattenedResults =
            this.client._selectEvalResults(summaryEvalResult);
          aggregateFeedback.push(...flattenedResults);
          for (const result of flattenedResults) {
            // eslint-disable-next-line @typescript-eslint/no-unused-vars
            const { targetRunId, key, ...feedback } = result;
            const evaluatorInfo = feedback.evaluatorInfo;
            delete feedback.evaluatorInfo;
            // Summary feedback is attached to the project, not a single run.
            await this.client.createFeedback(null, key, {
              ...feedback,
              projectId: projectId,
              sourceInfo: evaluatorInfo,
            });
          }
        } catch (e) {
          console.error(
            `Error running summary evaluator ${
              evaluator.name
            }: ${JSON.stringify(e, null, 2)}`
          );
          printErrorStackTrace(e);
        }
      }
      yield {
        results: aggregateFeedback,
      };
    }.bind(this);
  }
  /** Latest example modification timestamp, adjusted for sub-millisecond precision. */
  async _getDatasetVersion(): Promise<string | undefined> {
    const examples = await this.getExamples();
    const modifiedAt = examples.map((ex) => ex.modified_at);
    // Python might return microseconds, which we need
    // to account for when comparing dates.
    const modifiedAtTime = modifiedAt.map((date) => {
      function getMiliseconds(isoString: string) {
        const time = isoString.split("T").at(1);
        if (!time) return "";
        const regex = /[0-9]{2}:[0-9]{2}:[0-9]{2}.([0-9]+)/;
        const strMiliseconds = time.match(regex)?.[1];
        return strMiliseconds ?? "";
      }
      const jsDate = new Date(date);
      let source = getMiliseconds(date);
      let parsed = getMiliseconds(jsDate.toISOString());
      const length = Math.max(source.length, parsed.length);
      source = source.padEnd(length, "0");
      parsed = parsed.padEnd(length, "0");
      const microseconds =
        (Number.parseInt(source, 10) - Number.parseInt(parsed, 10)) / 1000;
      const time = jsDate.getTime() + microseconds;
      return { date, time };
    });
    if (modifiedAtTime.length === 0) return undefined;
    return modifiedAtTime.reduce(
      (max, current) => (current.time > max.time ? current : max),
      modifiedAtTime[0]
    ).date;
  }
  /** Unique dataset splits covered by the examples, if any are tagged. */
  async _getDatasetSplits(): Promise<string[] | undefined> {
    const examples = await this.getExamples();
    const allSplits = examples.reduce((acc, ex) => {
      if (ex.metadata && ex.metadata.dataset_split) {
        if (Array.isArray(ex.metadata.dataset_split)) {
          ex.metadata.dataset_split.forEach((split) => acc.add(split));
        } else if (typeof ex.metadata.dataset_split === "string") {
          acc.add(ex.metadata.dataset_split);
        }
      }
      return acc;
    }, new Set<string>());
    return allSplits.size ? Array.from(allSplits) : undefined;
  }
  /** Finalize the experiment: stamp dataset metadata and close the project. */
  async _end(): Promise<void> {
    const experiment = this._experiment;
    if (!experiment) {
      throw new Error("Experiment not yet started.");
    }
    const projectMetadata = await this._getExperimentMetadata();
    projectMetadata["dataset_version"] = await this._getDatasetVersion();
    projectMetadata["dataset_splits"] = await this._getDatasetSplits();
    // Update revision_id if not already set
    if (!projectMetadata["revision_id"]) {
      projectMetadata["revision_id"] = await getDefaultRevisionId();
    }
    await this.client.updateProject(experiment.id, {
      endTime: new Date().toISOString(),
      metadata: projectMetadata,
    });
  }
}
/**
 * Represents the results of an evaluate() call.
 * This class provides an iterator interface to iterate over the experiment results
 * as they become available. It also provides methods to access the experiment name,
 * the number of results, and to wait for the results to be processed.
 */
class ExperimentResults implements AsyncIterableIterator<ExperimentResultRow> {
  private manager: _ExperimentManager;
  results: ExperimentResultRow[] = [];
  processedCount = 0;
  summaryResults: EvaluationResults;
  constructor(experimentManager: _ExperimentManager) {
    this.manager = experimentManager;
  }
  /** Name of the underlying experiment. */
  get experimentName(): string {
    return this.manager.experimentName;
  }
  [Symbol.asyncIterator](): AsyncIterableIterator<ExperimentResultRow> {
    return this;
  }
  /** Yield the next buffered result row, or signal completion. */
  async next(): Promise<IteratorResult<ExperimentResultRow>> {
    const hasBuffered = this.processedCount < this.results.length;
    if (!hasBuffered) {
      return Promise.resolve({ value: undefined, done: true });
    }
    const value = this.results[this.processedCount];
    this.processedCount += 1;
    return Promise.resolve({ value, done: false });
  }
  /** Drain the manager's result stream into memory, then compute summary scores. */
  async processData(manager: _ExperimentManager): Promise<void> {
    for await (const row of manager.getResults()) {
      this.results.push(row);
      this.processedCount++;
    }
    this.summaryResults = await manager.getSummaryScores();
  }
  /** Number of rows collected so far. */
  get length(): number {
    return this.results.length;
  }
}
/**
 * Shared implementation behind `evaluate`.
 *
 * Comparative targets (arrays) are dispatched to `evaluateComparative`;
 * otherwise an _ExperimentManager pipeline is assembled (predictions,
 * per-run evaluators, summary evaluators) and fully consumed.
 */
async function _evaluate(
  target: TargetT | AsyncGenerator<Run>,
  fields: (EvaluateOptions | ComparativeEvaluateOptions) & {
    experiment?: TracerSession;
  }
): Promise<ExperimentResults | ComparisonEvaluationResults> {
  // Add check for comparative evaluation
  if (Array.isArray(target)) {
    const comparativeOptions = fields as ComparativeEvaluateOptions;
    if (!comparativeOptions.evaluators) {
      throw new Error("Evaluators are required for comparative evaluation");
    }
    return evaluateComparative(target, {
      evaluators: comparativeOptions.evaluators,
      client: comparativeOptions.client,
      metadata: comparativeOptions.metadata,
      experimentPrefix: comparativeOptions.experimentPrefix,
      description: comparativeOptions.description,
      maxConcurrency: comparativeOptions.maxConcurrency,
      loadNested: comparativeOptions.loadNested ?? false,
      randomizeOrder: comparativeOptions.randomizeOrder ?? false,
    });
  }
  const client = fields.client ?? new Client();
  // A non-callable target is an existing stream of runs to re-evaluate.
  const runs = _isCallable(target) ? null : (target as AsyncGenerator<Run>);
  const standardFields = fields as EvaluateOptions;
  const [experiment_, newRuns] = await _resolveExperiment(
    fields.experiment ?? null,
    runs,
    client
  );
  let manager = await new _ExperimentManager({
    data: Array.isArray(standardFields.data) ? undefined : standardFields.data,
    examples: Array.isArray(standardFields.data)
      ? standardFields.data
      : undefined,
    client,
    metadata: fields.metadata,
    experiment: experiment_ ?? fields.experimentPrefix,
    runs: newRuns ?? undefined,
    numRepetitions: fields.numRepetitions ?? 1,
  }).start();
  if (_isCallable(target)) {
    manager = await manager.withPredictions(target, {
      maxConcurrency: fields.maxConcurrency,
    });
  }
  if (standardFields.evaluators) {
    manager = await manager.withEvaluators(standardFields.evaluators, {
      maxConcurrency: fields.maxConcurrency,
    });
  }
  if (standardFields.summaryEvaluators) {
    manager = await manager.withSummaryEvaluators(
      standardFields.summaryEvaluators
    );
  }
  // Start consuming the results.
  const results = new ExperimentResults(manager);
  await results.processData(manager);
  return results;
}
/**
 * Run the target once on a single example inside a traceable wrapper,
 * capturing the resulting run via the `on_end` callback.
 *
 * Target errors are logged (not rethrown) so one failing example does not
 * abort the experiment; a missing run, however, is fatal since it means
 * tracing is disabled.
 */
async function _forward(
  fn: StandardTargetT,
  example: Example,
  experimentName: string,
  metadata: KVMap,
  client: Client
): Promise<_ForwardResults> {
  let run: BaseRun | null = null;
  // Capture the RunTree created by `traceable` when the call finishes.
  const _getRun = (r: RunTree): void => {
    run = r;
  };
  const options = {
    reference_example_id: example.id,
    on_end: _getRun,
    project_name: experimentName,
    metadata: {
      ...metadata,
      example_version: example.modified_at
        ? new Date(example.modified_at).toISOString()
        : new Date(example.created_at).toISOString(),
    },
    client,
    tracingEnabled: true,
  };
  const wrappedFn =
    "invoke" in fn
      ? traceable(async (inputs) => {
          let langChainCallbacks;
          try {
            // TODO: Deprecate this and rely on interop on 0.2 minor bump.
            const { getLangchainCallbacks } = await import("../langchain.js");
            langChainCallbacks = await getLangchainCallbacks();
          } catch {
            // no-op
          }
          // Issue with retrieving LangChain callbacks, rely on interop
          if (langChainCallbacks === undefined) {
            return await fn.invoke(inputs);
          } else {
            return await fn.invoke(inputs, { callbacks: langChainCallbacks });
          }
        }, options)
      : traceable(fn, options);
  try {
    await wrappedFn(example.inputs);
  } catch (e) {
    console.error(`Error running target function: ${e}`);
    printErrorStackTrace(e);
  }
  if (!run) {
    throw new Error(`Run not created by target function.
This is most likely due to tracing not being enabled.\n
Try setting "LANGSMITH_TRACING=true" in your environment.`);
  }
  return {
    run,
    example,
  };
}
/**
 * Normalize the `data` argument into an example stream: a UUID string is
 * treated as a dataset id, any other string as a dataset name, and an
 * iterable of examples is passed through untouched.
 */
function _resolveData(
  data: DataT,
  options: {
    client: Client;
  }
): AsyncGenerator<Example> {
  if (typeof data === "string") {
    let looksLikeUuid = false;
    try {
      assertUuid(data);
      looksLikeUuid = true;
    } catch {
      looksLikeUuid = false;
    }
    const listOptions = looksLikeUuid
      ? { datasetId: data }
      : { datasetName: data };
    return options.client.listExamples(listOptions) as AsyncGenerator<Example>;
  }
  return data as AsyncGenerator<Example>;
}
/**
 * Wrap summary evaluators in tracing so each invocation shows up as a run,
 * while normalizing both the object-argument and legacy (runs, examples)
 * call signatures.
 *
 * @param evaluators Summary evaluators to wrap.
 * @param optionsArray Per-evaluator RunTree options, aligned by index with
 *   `evaluators`.
 * @returns Wrapped evaluators preserving the (runs, examples) signature.
 */
async function wrapSummaryEvaluators(
  evaluators: SummaryEvaluatorT[],
  optionsArray?: Partial<RunTreeConfig>[]
): Promise<
  Array<DeprecatedAsyncSummaryEvaluator | DeprecatedSyncSummaryEvaluator>
> {
  async function _wrap(
    evaluator: SummaryEvaluatorT,
    index: number
  ): Promise<DeprecatedAsyncSummaryEvaluator | DeprecatedSyncSummaryEvaluator> {
    const evalName = evaluator.name || "BatchEvaluator";
    const wrapperInner = (
      runs: Run[],
      examples: Example[]
    ): Promise<EvaluationResult | EvaluationResults> => {
      const wrapperSuperInner = traceable(
        (
          _runs_: string,
          _examples_: string
        ): Promise<EvaluationResult | EvaluationResults> => {
          // Check if the evaluator expects an object parameter
          if (evaluator.length === 1) {
            const inputs = examples.map((ex) => ex.inputs);
            const outputs = runs.map((run) => run.outputs || {});
            const referenceOutputs = examples.map((ex) => ex.outputs || {});
            return Promise.resolve(
              (
                evaluator as (args: {
                  runs: Run[];
                  examples: Example[];
                  inputs: Record<string, any>[];
                  outputs: Record<string, any>[];
                  referenceOutputs?: Record<string, any>[];
                }) => EvaluationResult | EvaluationResults
              )({
                runs,
                examples,
                inputs,
                outputs,
                referenceOutputs,
              })
            );
          }
          // Otherwise use the traditional (runs, examples) signature
          return Promise.resolve(
            (evaluator as DeprecatedSyncSummaryEvaluator)(runs, examples)
          );
        },
        // BUGFIX: use the options object for THIS evaluator. The previous
        // `{ ...optionsArray, name: evalName }` spread the whole ARRAY into
        // the options object, producing numeric keys and discarding the
        // intended RunTree configuration (project_name, experiment, etc.).
        { ...optionsArray?.[index], name: evalName }
      );
      return Promise.resolve(
        wrapperSuperInner(
          `Runs[] (Length=${runs.length})`,
          `Examples[] (Length=${examples.length})`
        )
      );
    };
    return wrapperInner;
  }
  const results: Array<
    DeprecatedAsyncSummaryEvaluator | DeprecatedSyncSummaryEvaluator
  > = [];
  for (let i = 0; i < evaluators.length; i++) {
    results.push(await _wrap(evaluators[i], i));
  }
  return results;
}
/**
 * Converts a heterogeneous list of evaluator inputs into `RunEvaluator`
 * instances. Objects already implementing `evaluateRun` pass through
 * unchanged; plain functions are wrapped via `runEvaluator`.
 */
function _resolveEvaluators(
  evaluators: Array<EvaluatorT>
): Array<RunEvaluator> {
  return evaluators.map((evaluator) => {
    if ("evaluateRun" in evaluator) {
      return evaluator;
    }
    // todo fix this by porting LangChainStringEvaluator to langsmith sdk
    if (evaluator.name === "LangChainStringEvaluator") {
      throw new Error("Not yet implemented");
    }
    return runEvaluator(evaluator);
  });
}
/**
 * Resolves the experiment session and/or run stream for the experiment
 * manager. At most one of `experiment` / `runs` is expected to be non-null.
 *
 * - With an `experiment`: validates it has a name and returns it, no runs.
 * - With `runs`: tees the stream, peeks at the first run on one copy to look
 *   up its project (the experiment), and returns the untouched copy
 *   alongside the retrieved experiment.
 * - With neither: returns [undefined, undefined].
 */
async function _resolveExperiment(
  experiment: TracerSession | null,
  runs: AsyncGenerator<Run> | null,
  client: Client
): Promise<
  [TracerSession | string | undefined, AsyncGenerator<Run> | undefined]
> {
  // TODO: Remove this, handle outside the manager
  if (experiment !== null) {
    if (!experiment.name) {
      throw new Error("Experiment name must be defined if provided.");
    }
    return [experiment, undefined];
  }
  // If we have runs, that means the experiment was already started.
  if (runs !== null) {
    // Tee the stream so we can peek at the first run without consuming the
    // generator we hand back to the caller.
    const results: AsyncGenerator<Run>[] = [];
    for await (const item of atee<Run>(runs)) {
      results.push(item);
    }
    const [runsClone, runsOriginal] = results;
    const runsCloneIterator = runsClone[Symbol.asyncIterator]();
    // todo: this is `any`. does it work properly?
    const firstRun = await runsCloneIterator
      .next()
      .then((result) => result.value);
    // NOTE(review): reads `firstRun.sessionId` — confirm the Run schema
    // exposes camelCase here; an undefined id would make readProject fail.
    const retrievedExperiment = await client.readProject(firstRun.sessionId);
    if (!retrievedExperiment.name) {
      throw new Error("Experiment name not found for provided runs.");
    }
    return [retrievedExperiment, runsOriginal];
  }
  return [undefined, undefined];
}
/**
 * Returns true when the target is directly invocable: either a plain
 * function, or an object exposing an `invoke` method (e.g. a Runnable).
 */
function _isCallable(
  target: StandardTargetT | AsyncGenerator<Run>
): target is StandardTargetT {
  if (typeof target === "function") {
    return true;
  }
  return "invoke" in target && typeof target.invoke === "function";
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/evaluation/index.ts | // Evaluation methods
export { RunEvaluator, EvaluationResult } from "./evaluator.js";
export {
StringEvaluator,
GradingFunctionParams,
GradingFunctionResult,
} from "./string_evaluator.js";
export {
evaluate,
type EvaluateOptions,
type TargetT,
type DataT,
type SummaryEvaluatorT,
type EvaluatorT,
type ExperimentResultRow,
} from "./_runner.js";
export { evaluateComparative } from "./evaluate_comparative.js";
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/evaluation/evaluator.ts | import {
Example,
FeedbackConfig,
Run,
ScoreType,
ValueType,
} from "../schemas.js";
import { v4 as uuidv4 } from "uuid";
import { TraceableFunction, traceable } from "../traceable.js";
import { RunTreeConfig } from "../run_trees.js";
/**
* Represents a categorical class.
*/
export type Category = {
/**
* The value of the category.
*/
value?: number;
/**
* The label of the category.
*/
label: string;
};
/**
* Represents the result of an evaluation.
*/
export type EvaluationResult = {
/**
* The key associated with the evaluation result.
*/
key: string;
/**
* The score of the evaluation result.
*/
score?: ScoreType;
/**
* The value of the evaluation result.
*/
value?: ValueType;
/**
* A comment associated with the evaluation result.
*/
comment?: string;
/**
* A correction record associated with the evaluation result.
*/
correction?: Record<string, unknown>;
/**
* Information about the evaluator.
*/
evaluatorInfo?: Record<string, unknown>;
/**
* The source run ID of the evaluation result.
* If set, a link to the source run will be available in the UI.
*/
sourceRunId?: string;
/**
* The target run ID of the evaluation result.
* If this is not set, the target run ID is assumed to be
* the root of the trace.
*/
targetRunId?: string;
/**
* The feedback config associated with the evaluation result.
* If set, this will be used to define how a feedback key
* should be interpreted.
*/
feedbackConfig?: FeedbackConfig;
};
/**
* Batch evaluation results, if your evaluator wishes
* to return multiple scores.
*/
export type EvaluationResults = {
/**
* The evaluation results.
*/
results: Array<EvaluationResult>;
};
/**
 * Interface for evaluators that score a run (optionally against a reference
 * example), producing one or more evaluation results.
 */
export interface RunEvaluator {
  evaluateRun(
    run: Run,
    example?: Example,
    options?: Partial<RunTreeConfig>
  ): Promise<EvaluationResult | EvaluationResults>;
}
/**
 * Accepted shapes for a user-supplied evaluator function: either the
 * positional `(run, example)` signature (sync or async, example optional or
 * required), or a single-object signature receiving
 * `{ run, example, inputs, outputs, referenceOutputs }` (sync or async).
 */
export type RunEvaluatorLike =
  | ((
      run: Run,
      example?: Example
    ) => Promise<EvaluationResult | EvaluationResults>)
  | ((run: Run, example?: Example) => EvaluationResult | EvaluationResults)
  | ((
      run: Run,
      example: Example
    ) => Promise<EvaluationResult | EvaluationResults>)
  | ((run: Run, example: Example) => EvaluationResult | EvaluationResults)
  | ((args: {
      run: Run;
      example: Example;
      inputs: Record<string, any>;
      outputs: Record<string, any>;
      referenceOutputs?: Record<string, any>;
    }) => EvaluationResult | EvaluationResults)
  | ((args: {
      run: Run;
      example: Example;
      inputs: Record<string, any>;
      outputs: Record<string, any>;
      referenceOutputs?: Record<string, any>;
    }) => Promise<EvaluationResult | EvaluationResults>);
/**
 * Wraps an evaluator function + implements the RunEvaluator interface.
 *
 * Calls are routed through `traceable` so every evaluator invocation is
 * itself traced (under the "evaluators" project), and loosely-shaped return
 * values are coerced into proper EvaluationResult / EvaluationResults.
 */
export class DynamicRunEvaluator<Func extends (...args: any[]) => any>
  implements RunEvaluator
{
  func: Func;
  constructor(evaluator: Func) {
    // Wrap the user evaluator so it can be invoked with a single
    // `{ langSmithRunAndExample }` payload (see evaluateRun), while the user
    // function still receives object-style args plus the example.
    this.func = ((input: {
      langSmithRunAndExample: { run: Run; example: Example };
    }) => {
      const { run, example } = input.langSmithRunAndExample;
      return evaluator(
        {
          // Spread the run first so the explicit keys below win on collision.
          ...run,
          run,
          example,
          inputs: example?.inputs,
          outputs: run?.outputs,
          referenceOutputs: example?.outputs,
        },
        example
      );
    }) as Func;
  }
  // Structural check for the multi-result shape.
  // NOTE(review): an empty `results` array fails this check and falls
  // through to single-result coercion — confirm that is intended.
  private isEvaluationResults(x: unknown): x is EvaluationResults {
    return (
      typeof x === "object" &&
      x != null &&
      "results" in x &&
      Array.isArray(x.results) &&
      x.results.length > 0
    );
  }
  // Normalizes either return shape into coerced results, stamping
  // `sourceRunId` onto each entry.
  private coerceEvaluationResults(
    results: Record<string, any> | EvaluationResults,
    sourceRunId: string
  ): EvaluationResult | EvaluationResults {
    if (this.isEvaluationResults(results)) {
      return {
        results: results.results.map((r) =>
          this.coerceEvaluationResult(r, sourceRunId, false)
        ),
      };
    }
    return this.coerceEvaluationResult(
      results as Record<string, any>,
      sourceRunId,
      true
    );
  }
  // Coerces a single result object: fills in `sourceRunId`, and (when
  // allowNoKey is set) defaults a missing `key` from the wrapped function's
  // name. NOTE(review): the constructor assigns an anonymous arrow function,
  // so `this.func.name` may be empty here — verify the fallback key.
  private coerceEvaluationResult(
    result: EvaluationResult | Record<string, any>,
    sourceRunId: string,
    allowNoKey = false
  ): EvaluationResult {
    if ("key" in result) {
      if (!result.sourceRunId) {
        result.sourceRunId = sourceRunId;
      }
      return result as EvaluationResult;
    }
    if (!("key" in result)) {
      if (allowNoKey) {
        result["key"] = this.func.name;
      }
    }
    return {
      sourceRunId,
      ...result,
    } as EvaluationResult;
  }
  /**
   * Evaluates a run with an optional example and returns the evaluation result.
   * @param run The run to evaluate.
   * @param example The optional example to use for evaluation.
   * @returns A promise that extracts to the evaluation result.
   */
  async evaluateRun(
    run: Run,
    example?: Example,
    options?: Partial<RunTreeConfig>
  ): Promise<EvaluationResult | EvaluationResults> {
    // Fresh id for the evaluator's own (traced) run; linked from feedback.
    const sourceRunId = uuidv4();
    const metadata: Record<string, any> = {
      targetRunId: run.id,
    };
    if ("session_id" in run) {
      metadata["experiment"] = run.session_id;
    }
    if (typeof this.func !== "function") {
      throw new Error("Target must be runnable function");
    }
    // Trace the evaluator call under the dedicated "evaluators" project.
    const wrappedTraceableFunc: TraceableFunction<Func> = traceable<Func>(
      this.func,
      {
        project_name: "evaluators",
        name: "evaluator",
        id: sourceRunId,
        ...options,
      }
    );
    const result = (await wrappedTraceableFunc(
      // Pass data via `langSmithRunAndExample` key to avoid conflicts with other
      // inputs. This key is extracted in the wrapped function, with `run` and
      // `example` passed to evaluator function as arguments.
      { langSmithRunAndExample: { run, example } },
      { metadata }
    )) as EvaluationResults | Record<string, any>;
    // Check the one required property of EvaluationResult since 'instanceof' is not possible
    if ("key" in result) {
      if (!result.sourceRunId) {
        result.sourceRunId = sourceRunId;
      }
      return result as EvaluationResult;
    }
    if (typeof result !== "object") {
      throw new Error("Evaluator function must return an object.");
    }
    return this.coerceEvaluationResults(result, sourceRunId);
  }
}
/**
 * Wraps an evaluator function in a `DynamicRunEvaluator`, giving it the
 * `RunEvaluator` interface (traced invocation + result coercion).
 */
export function runEvaluator(func: RunEvaluatorLike): RunEvaluator {
  return new DynamicRunEvaluator(func);
}
|
0 | lc_public_repos/langsmith-sdk/js/src | lc_public_repos/langsmith-sdk/js/src/evaluation/_random_name.ts | const adjectives = [
"abandoned",
"aching",
"advanced",
"ample",
"artistic",
"back",
"best",
"bold",
"brief",
"clear",
"cold",
"complicated",
"cooked",
"crazy",
"crushing",
"damp",
"dear",
"definite",
"dependable",
"diligent",
"drab",
"earnest",
"elderly",
"enchanted",
"essential",
"excellent",
"extraneous",
"fixed",
"flowery",
"formal",
"fresh",
"frosty",
"giving",
"glossy",
"healthy",
"helpful",
"impressionable",
"kind",
"large",
"left",
"long",
"loyal",
"mealy",
"memorable",
"monthly",
"new",
"notable",
"only",
"ordinary",
"passionate",
"perfect",
"pertinent",
"proper",
"puzzled",
"reflecting",
"respectful",
"roasted",
"scholarly",
"shiny",
"slight",
"sparkling",
"spotless",
"stupendous",
"sunny",
"tart",
"terrific",
"timely",
"unique",
"upbeat",
"vacant",
"virtual",
"warm",
"weary",
"whispered",
"worthwhile",
"yellow",
];
const nouns = [
"account",
"acknowledgment",
"address",
"advertising",
"airplane",
"animal",
"appointment",
"arrival",
"artist",
"attachment",
"attitude",
"availability",
"backpack",
"bag",
"balance",
"bass",
"bean",
"beauty",
"bibliography",
"bill",
"bite",
"blossom",
"boat",
"book",
"box",
"boy",
"bread",
"bridge",
"broccoli",
"building",
"butter",
"button",
"cabbage",
"cake",
"camera",
"camp",
"candle",
"candy",
"canvas",
"car",
"card",
"carrot",
"cart",
"case",
"cat",
"chain",
"chair",
"chalk",
"chance",
"change",
"channel",
"character",
"charge",
"charm",
"chart",
"check",
"cheek",
"cheese",
"chef",
"cherry",
"chicken",
"child",
"church",
"circle",
"class",
"clay",
"click",
"clock",
"cloth",
"cloud",
"clove",
"club",
"coach",
"coal",
"coast",
"coat",
"cod",
"coffee",
"collar",
"color",
"comb",
"comfort",
"comic",
"committee",
"community",
"company",
"comparison",
"competition",
"condition",
"connection",
"control",
"cook",
"copper",
"copy",
"corn",
"cough",
"country",
"cover",
"crate",
"crayon",
"cream",
"creator",
"crew",
"crown",
"current",
"curtain",
"curve",
"cushion",
"dad",
"daughter",
"day",
"death",
"debt",
"decision",
"deer",
"degree",
"design",
"desire",
"desk",
"detail",
"development",
"digestion",
"dime",
"dinner",
"direction",
"dirt",
"discovery",
"discussion",
"disease",
"disgust",
"distance",
"distribution",
"division",
"doctor",
"dog",
"door",
"drain",
"drawer",
"dress",
"drink",
"driving",
"dust",
"ear",
"earth",
"edge",
"education",
"effect",
"egg",
"end",
"energy",
"engine",
"error",
"event",
"example",
"exchange",
"existence",
"expansion",
"experience",
"expert",
"eye",
"face",
"fact",
"fall",
"family",
"farm",
"father",
"fear",
"feeling",
"field",
"finger",
"fire",
"fish",
"flag",
"flight",
"floor",
"flower",
"fold",
"food",
"football",
"force",
"form",
"frame",
"friend",
"frog",
"fruit",
"fuel",
"furniture",
"game",
"garden",
"gate",
"girl",
"glass",
"glove",
"goat",
"gold",
"government",
"grade",
"grain",
"grass",
"green",
"grip",
"group",
"growth",
"guide",
"guitar",
"hair",
"hall",
"hand",
"harbor",
"harmony",
"hat",
"head",
"health",
"heart",
"heat",
"hill",
"history",
"hobbies",
"hole",
"hope",
"horn",
"horse",
"hospital",
"hour",
"house",
"humor",
"idea",
"impulse",
"income",
"increase",
"industry",
"ink",
"insect",
"instrument",
"insurance",
"interest",
"invention",
"iron",
"island",
"jelly",
"jet",
"jewel",
"join",
"judge",
"juice",
"jump",
"kettle",
"key",
"kick",
"kiss",
"kitten",
"knee",
"knife",
"knowledge",
"land",
"language",
"laugh",
"law",
"lead",
"learning",
"leather",
"leg",
"lettuce",
"level",
"library",
"lift",
"light",
"limit",
"line",
"linen",
"lip",
"liquid",
"list",
"look",
"loss",
"love",
"lunch",
"machine",
"man",
"manager",
"map",
"marble",
"mark",
"market",
"mass",
"match",
"meal",
"measure",
"meat",
"meeting",
"memory",
"metal",
"middle",
"milk",
"mind",
"mine",
"minute",
"mist",
"mitten",
"mom",
"money",
"monkey",
"month",
"moon",
"morning",
"mother",
"motion",
"mountain",
"mouth",
"muscle",
"music",
"nail",
"name",
"nation",
"neck",
"need",
"news",
"night",
"noise",
"note",
"number",
"nut",
"observation",
"offer",
"oil",
"operation",
"opinion",
"orange",
"order",
"organization",
"ornament",
"oven",
"page",
"pail",
"pain",
"paint",
"pan",
"pancake",
"paper",
"parcel",
"parent",
"part",
"passenger",
"paste",
"payment",
"peace",
"pear",
"pen",
"pencil",
"person",
"pest",
"pet",
"picture",
"pie",
"pin",
"pipe",
"pizza",
"place",
"plane",
"plant",
"plastic",
"plate",
"play",
"pleasure",
"plot",
"plough",
"pocket",
"point",
"poison",
"police",
"pollution",
"popcorn",
"porter",
"position",
"pot",
"potato",
"powder",
"power",
"price",
"print",
"process",
"produce",
"product",
"profit",
"property",
"prose",
"protest",
"pull",
"pump",
"punishment",
"purpose",
"push",
"quarter",
"question",
"quiet",
"quill",
"quilt",
"quince",
"rabbit",
"rail",
"rain",
"range",
"rat",
"rate",
"ray",
"reaction",
"reading",
"reason",
"record",
"regret",
"relation",
"religion",
"representative",
"request",
"respect",
"rest",
"reward",
"rhythm",
"rice",
"river",
"road",
"roll",
"room",
"root",
"rose",
"route",
"rub",
"rule",
"run",
"sack",
"sail",
"salt",
"sand",
"scale",
"scarecrow",
"scarf",
"scene",
"scent",
"school",
"science",
"scissors",
"screw",
"sea",
"seat",
"secretary",
"seed",
"selection",
"self",
"sense",
"servant",
"shade",
"shake",
"shame",
"shape",
"sheep",
"sheet",
"shelf",
"ship",
"shirt",
"shock",
"shoe",
"shop",
"show",
"side",
"sign",
"silk",
"sink",
"sister",
"size",
"sky",
"sleep",
"smash",
"smell",
"smile",
"smoke",
"snail",
"snake",
"sneeze",
"snow",
"soap",
"society",
"sock",
"soda",
"sofa",
"son",
"song",
"sort",
"sound",
"soup",
"space",
"spark",
"speed",
"sponge",
"spoon",
"spray",
"spring",
"spy",
"square",
"stamp",
"star",
"start",
"statement",
"station",
"steam",
"steel",
"stem",
"step",
"stew",
"stick",
"stitch",
"stocking",
"stomach",
"stone",
"stop",
"store",
"story",
"stove",
"stranger",
"straw",
"stream",
"street",
"stretch",
"string",
"structure",
"substance",
"sugar",
"suggestion",
"suit",
"summer",
"sun",
"support",
"surprise",
"sweater",
"swim",
"system",
"table",
"tail",
"talk",
"tank",
"taste",
"tax",
"tea",
"teaching",
"team",
"tendency",
"test",
"texture",
"theory",
"thing",
"thought",
"thread",
"throat",
"thumb",
"thunder",
"ticket",
"time",
"tin",
"title",
"toad",
"toe",
"tooth",
"toothpaste",
"touch",
"town",
"toy",
"trade",
"train",
"transport",
"tray",
"treatment",
"tree",
"trick",
"trip",
"trouble",
"trousers",
"truck",
"tub",
"turkey",
"turn",
"twist",
"umbrella",
"uncle",
"underwear",
"unit",
"use",
"vacation",
"value",
"van",
"vase",
"vegetable",
"veil",
"vein",
"verse",
"vessel",
"view",
"visitor",
"voice",
"volcano",
"walk",
"wall",
"war",
"wash",
"waste",
"watch",
"water",
"wave",
"wax",
"way",
"wealth",
"weather",
"week",
"weight",
"wheel",
"whip",
"whistle",
"window",
"wine",
"wing",
"winter",
"wire",
"wish",
"woman",
"wood",
"wool",
"word",
"work",
"worm",
"wound",
"wrist",
"writer",
"yard",
"yoke",
"zebra",
"zinc",
"zipper",
"zone",
];
/**
* Generate a random name.
* @returns {string} A random name.
*/
export function randomName(): string {
const adjective = adjectives[Math.floor(Math.random() * adjectives.length)];
const noun = nouns[Math.floor(Math.random() * nouns.length)];
const number = Math.floor(Math.random() * 100);
return `${adjective}-${noun}-${number}`;
}
|
0 | lc_public_repos/langsmith-sdk/js | lc_public_repos/langsmith-sdk/js/scripts/bump-version.js | import { readFileSync, writeFileSync } from "fs";
import process from "process";

// Version bump script: uses an explicit version from argv[2] when given,
// otherwise increments the patch component of the current version.
const packageJson = JSON.parse(readFileSync("package.json"));

let newVersion;
if (process.argv.length > 2) {
  // Explicit version supplied on the command line.
  newVersion = process.argv[2];
} else {
  const [major, minor, patch] = packageJson.version.split(".");
  newVersion = [major, minor, parseInt(patch) + 1].join(".");
}

console.log(`Bumping version to ${newVersion}`);

// Persist the new version to package.json (pretty-printed, trailing newline).
packageJson.version = newVersion;
writeFileSync("package.json", JSON.stringify(packageJson, null, 2) + "\n");

// Keep the exported __version__ constant in src/index.ts in sync.
const indexFilePath = "src/index.ts";
const updatedIndex = readFileSync(indexFilePath, "utf-8").replace(
  /export const __version__ = "[0-9]+\.[0-9]+\.[0-9]+";/g,
  `export const __version__ = "${newVersion}";`
);
writeFileSync(indexFilePath, updatedIndex);
|
0 | lc_public_repos/langsmith-sdk/js | lc_public_repos/langsmith-sdk/js/scripts/create-entrypoints.js | import * as fs from "fs";
import * as path from "path";
// This lists all the entrypoints for the library. Each key corresponds to an
// importable path, eg. `import { Foo } from "langsmith/client"`.
// The value is the path to the file in `src/` that exports the entrypoint.
// This is used to generate the `exports` field in package.json.
// Order is not important.
const entrypoints = {
client: "client",
run_trees: "run_trees",
traceable: "traceable",
evaluation: "evaluation/index",
"evaluation/langchain": "evaluation/langchain",
schemas: "schemas",
langchain: "langchain",
vercel: "vercel",
wrappers: "wrappers/index",
anonymizer: "anonymizer/index",
"wrappers/openai": "wrappers/openai",
"wrappers/vercel": "wrappers/vercel",
"singletons/traceable": "singletons/traceable",
};
// Read a JSON file, transform its parsed contents with `updateFunction`,
// and write the result back pretty-printed with a trailing newline.
const updateJsonFile = (relativePath, updateFunction) => {
  const parsed = JSON.parse(fs.readFileSync(relativePath).toString());
  const updated = updateFunction(parsed);
  fs.writeFileSync(relativePath, `${JSON.stringify(updated, null, 2)}\n`);
};
// For every entrypoint (plus the root "index"), produce the shim files that
// forward to the compiled output in dist/: a CJS requirer plus ESM and
// declaration-file forwarders.
const generateFiles = () => {
  const allEntrypoints = [...Object.entries(entrypoints), ["index", "index"]];
  const files = allEntrypoints.flatMap(([key, value]) => {
    // Nested keys (e.g. "wrappers/openai") must climb back up to the root.
    const depth = key.split("/").length - 1;
    const prefix = "../".repeat(depth) || "./";
    const compiledPath = `${prefix}dist/${value}.js`;
    return [
      [`${key}.cjs`, `module.exports = require('${prefix}dist/${value}.cjs');`],
      [`${key}.js`, `export * from '${compiledPath}'`],
      [`${key}.d.ts`, `export * from '${compiledPath}'`],
      [`${key}.d.cts`, `export * from '${compiledPath}'`],
    ];
  });
  return Object.fromEntries(files);
};
// Regenerates everything derived from the `entrypoints` map: tsconfig
// typedoc entry points, package.json `exports`/`files`, the shim files
// themselves, and the generated section of .gitignore.
const updateConfig = () => {
  // Update tsconfig.json `typedocOptions.entryPoints` field
  updateJsonFile("./tsconfig.json", (json) => ({
    ...json,
    typedocOptions: {
      ...json.typedocOptions,
      entryPoints: [...Object.keys(entrypoints)].map(
        (key) => `src/${entrypoints[key]}.ts`
      ),
    },
  }));
  const generatedFiles = generateFiles();
  const filenames = Object.keys(generatedFiles);
  // Update package.json `exports` and `files` fields
  updateJsonFile("./package.json", (json) => ({
    ...json,
    exports: Object.assign(
      Object.fromEntries(
        ["index", ...Object.keys(entrypoints)].map((key) => {
          // Conditional exports: type declarations for both module systems,
          // plus ESM and CJS runtime entries.
          let entryPoint = {
            types: {
              import: `./${key}.d.ts`,
              require: `./${key}.d.cts`,
              default: `./${key}.d.ts`,
            },
            import: `./${key}.js`,
            require: `./${key}.cjs`,
          };
          return [key === "index" ? "." : `./${key}`, entryPoint];
        })
      ),
      {
        "./package.json": "./package.json",
      }
    ),
    files: ["dist/", ...filenames],
  }));
  // Write generated files
  Object.entries(generatedFiles).forEach(([filename, content]) => {
    fs.mkdirSync(path.dirname(filename), {
      recursive: true,
    });
    fs.writeFileSync(filename, content);
  });
  // Rewrite the generated block of .gitignore (between the start/end
  // markers) with the current list of shim filenames.
  const gitignore = fs.readFileSync("./.gitignore").toString();
  const lines = gitignore.split("\n");
  const startMarker = "## GENERATED create-entrypoints.js";
  const endMarker = "## END GENERATED create-entrypoints.js";
  const startIdx = lines.findIndex((line) => line.includes(startMarker));
  const endIdx = lines.findIndex((line) => line.includes(endMarker));
  // NOTE(review): if either marker is missing, findIndex returns -1 and the
  // slices silently produce a mangled .gitignore — confirm markers exist.
  const newLines = [
    ...lines.slice(0, startIdx + 1),
    ...filenames.map((fname) => `/${fname}`),
    ...lines.slice(endIdx),
  ];
  fs.writeFileSync("./.gitignore", newLines.join("\n"));
};
// Remove all previously generated shim files, ignoring ones that are
// already absent.
const cleanGenerated = () => {
  for (const fname of Object.keys(generateFiles())) {
    try {
      fs.unlinkSync(fname);
    } catch {
      // ignore error
    }
  }
};

// CLI dispatch: `clean` removes generated files; anything else regenerates.
const command = process.argv[2];
if (command === "clean") {
  cleanGenerated();
} else {
  updateConfig();
}
|
0 | lc_public_repos/langsmith-sdk/js | lc_public_repos/langsmith-sdk/js/scripts/move-cjs-to-dist.js | import { resolve, dirname, parse, format } from "node:path";
import { readdir, readFile, writeFile } from "node:fs/promises";
import { fileURLToPath } from "node:url";
// Resolve a path relative to this script's own directory (not the CWD).
function abs(relativePath) {
  const scriptDir = dirname(fileURLToPath(import.meta.url));
  return resolve(scriptDir, relativePath);
}
// Recursively copies compiled CJS output from `source` into `dest`,
// renaming every .js file to .cjs and rewriting its relative
// `require("./x.js")` calls to `require("./x.cjs")`.
// NOTE(review): assumes the mirrored directory under `dest` already
// exists — there is no mkdir here; confirm the build creates it first.
async function moveAndRename(source, dest) {
  for (const file of await readdir(abs(source), { withFileTypes: true })) {
    if (file.isDirectory()) {
      await moveAndRename(`${source}/${file.name}`, `${dest}/${file.name}`);
    } else if (file.isFile()) {
      const parsed = parse(file.name);
      // Ignore anything that's not a .js file
      if (parsed.ext !== ".js") {
        continue;
      }
      // Rewrite any require statements to use .cjs
      const content = await readFile(abs(`${source}/${file.name}`), "utf8");
      const rewritten = content.replace(/require\("(\..+?).js"\)/g, (_, p1) => {
        return `require("${p1}.cjs")`;
      });
      // Rename the file to .cjs
      const renamed = format({ name: parsed.name, ext: ".cjs" });
      await writeFile(abs(`${dest}/${renamed}`), rewritten, "utf8");
    }
  }
}
// Entry point: mirror ../dist-cjs into ../dist, exiting non-zero on failure.
moveAndRename("../dist-cjs", "../dist").catch((err) => {
  console.error(err);
  process.exit(1);
});
|
0 | lc_public_repos/langsmith-sdk/js | lc_public_repos/langsmith-sdk/js/scripts/check-version.js | import { readFileSync } from "fs";
// Fails the build when the version in package.json and the exported
// __version__ constant in src/index.ts disagree.
const indexFilePath = "src/index.ts";
const packageJson = JSON.parse(readFileSync("package.json"));
const indexFileContent = readFileSync(indexFilePath, "utf-8");
const packageVersion = packageJson.version;
// Guard the regex result: `match` returns null when the constant is missing
// or renamed, and indexing into null throws an unhelpful TypeError.
const versionMatch = indexFileContent.match(
  /__version__\s*=\s*['"]([^'"]+)['"]/
);
if (!versionMatch) {
  throw new Error(`Could not find __version__ in ${indexFilePath}`);
}
const indexVersion = versionMatch[1];
if (packageVersion !== indexVersion) {
  throw new Error(
    `Version mismatch! package.json version: ${packageVersion}, index.ts version: ${indexVersion}`
  );
}
console.log(`Version check passed: ${packageVersion} === ${indexVersion}`);
0 | lc_public_repos/langsmith-sdk/js | lc_public_repos/langsmith-sdk/js/scripts/check-npm-version.js | import { execSync } from 'child_process';
import fs from 'fs';
import { fileURLToPath } from 'url';
import path from 'path';
// Convert the URL to a file path
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// Adjust the path to your package.json as necessary
const packageJsonPath = path.join(__dirname, '../package.json');
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, { encoding: 'utf-8' }));
const { version } = packageJson;
const { name: packageName } = packageJson;

// Compare dotted numeric versions segment by segment. The previous
// `version <= npmVersion` compared strings lexicographically, which
// mis-orders versions like "0.10.0" vs "0.9.0". Returns a negative,
// zero, or positive number like a standard comparator.
function compareVersions(a, b) {
  const aParts = a.split('.').map(Number);
  const bParts = b.split('.').map(Number);
  for (let i = 0; i < Math.max(aParts.length, bParts.length); i++) {
    const diff = (aParts[i] ?? 0) - (bParts[i] ?? 0);
    if (diff !== 0) return diff;
  }
  return 0;
}

try {
  const npmVersion = execSync(`npm view ${packageName} version`, { encoding: 'utf-8' }).trim();
  // Refuse to publish unless the local version is strictly newer.
  if (npmVersion && compareVersions(version, npmVersion) <= 0) {
    console.error(`Current version ${version} is not greater than npm version ${npmVersion}.`);
    process.exit(1); // Exit with error
  } else {
    console.log(`Current version ${version} is greater than npm version ${npmVersion}. Proceeding with publish.`);
  }
} catch (error) {
  console.error('Error checking version:', error);
  process.exit(1); // Exit with error if the check fails
}
|
0 | lc_public_repos/langsmith-sdk | lc_public_repos/langsmith-sdk/vendor/orjson-info.txt | cloned from commit 8ece0d4bec4553934aa5883c15efdc3ccc49515f
then change crate-type to add "rlib" type so it can be used directly from Rust
then apply diff:
diff --git a/Cargo.toml b/Cargo.toml
index 516f4d0..b1b2727 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -65,7 +65,7 @@ itoa = { version = "1", default-features = false }
itoap = { version = "1", default-features = false, features = ["std", "simd"] }
jiff = { version = "^0.1", default-features = false, features = ["alloc"] }
once_cell = { version = "1", default-features = false, features = ["alloc", "race"] }
-pyo3-ffi = { path = "include/pyo3/pyo3-ffi", default-features = false, features = ["extension-module"]}
+pyo3-ffi = { path = "../pyo3/pyo3-ffi", default-features = false, features = ["extension-module"]}
ryu = { version = "1", default-features = false }
serde = { version = "1", default-features = false }
serde_json = { version = "1", default-features = false, features = ["std", "float_roundtrip"] }
@@ -77,7 +77,7 @@ xxhash-rust = { version = "^0.8", default-features = false, features = ["xxh3"]
[build-dependencies]
cc = { version = "1" }
-pyo3-build-config = { path = "include/pyo3/pyo3-build-config" }
+pyo3-build-config = { path = "../pyo3/pyo3-build-config" }
version_check = { version = "0.9" }
[profile.dev]
|
0 | lc_public_repos/langsmith-sdk | lc_public_repos/langsmith-sdk/vendor/pyo3-info.txt | cloned from commit a42e53e888227724928c7f370cad47153a13b329
then apply diff:
diff --git a/pyo3-ffi/src/modsupport.rs b/pyo3-ffi/src/modsupport.rs
index 4a18d30f..6da2795b 100644
--- a/pyo3-ffi/src/modsupport.rs
+++ b/pyo3-ffi/src/modsupport.rs
@@ -36,6 +36,13 @@ extern "C" {
pub fn Py_BuildValue(arg1: *const c_char, ...) -> *mut PyObject;
// skipped Py_VaBuildValue
+ #[cfg(Py_3_13)]
+ pub fn PyModule_Add(
+ module: *mut PyObject,
+ name: *const c_char,
+ value: *mut PyObject,
+ ) -> core::ffi::c_int;
+
#[cfg(Py_3_10)]
#[cfg_attr(PyPy, link_name = "PyPyModule_AddObjectRef")]
pub fn PyModule_AddObjectRef(
diff --git a/pyo3-ffi/src/object.rs b/pyo3-ffi/src/object.rs
index fc3484be..d2fa1930 100644
--- a/pyo3-ffi/src/object.rs
+++ b/pyo3-ffi/src/object.rs
@@ -211,7 +211,7 @@ pub unsafe fn Py_SIZE(ob: *mut PyObject) -> Py_ssize_t {
#[inline(always)]
#[cfg(all(Py_3_12, not(Py_GIL_DISABLED)))]
-unsafe fn _Py_IsImmortal(op: *mut PyObject) -> c_int {
+pub unsafe fn _Py_IsImmortal(op: *mut PyObject) -> c_int {
#[cfg(target_pointer_width = "64")]
{
(((*op).ob_refcnt.ob_refcnt as crate::PY_INT32_T) < 0) as c_int
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/LICENSE-APACHE | Copyright (c) 2017-present PyO3 Project and Contributors. https://github.com/PyO3
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/noxfile.py | from contextlib import contextmanager
import json
import os
import re
import shutil
import subprocess
import sys
import sysconfig
import tempfile
from functools import lru_cache
from glob import glob
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple
import nox
import nox.command
try:
import tomllib as toml
except ImportError:
try:
import toml
except ImportError:
toml = None
# Sessions run when `nox` is invoked with no arguments.
nox.options.sessions = ["test", "clippy", "rustfmt", "ruff", "docs"]
# Repository root: the directory containing this noxfile.
PYO3_DIR = Path(__file__).parent
# Cargo build directory; honours CARGO_TARGET_DIR when set in the environment.
PYO3_TARGET = Path(os.environ.get("CARGO_TARGET_DIR", PYO3_DIR / "target")).absolute()
PYO3_GUIDE_SRC = PYO3_DIR / "guide" / "src"
PYO3_GUIDE_TARGET = PYO3_TARGET / "guide"
PYO3_DOCS_TARGET = PYO3_TARGET / "doc"
# Interpreter versions supported by PyO3; used by the version-limit checks below.
PY_VERSIONS = ("3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13")
PYPY_VERSIONS = ("3.9", "3.10")
# True when the interpreter running nox is a free-threaded (no-GIL) CPython build.
FREE_THREADED_BUILD = bool(sysconfig.get_config_var("Py_GIL_DISABLED"))
@nox.session(venv_backend="none")
def test(session: nox.Session) -> None:
    """Aggregate session: run the Rust test suite, then the Python tests."""
    test_rust(session)
    test_py(session)
@nox.session(name="test-rust", venv_backend="none")
def test_rust(session: nox.Session):
    """Run `cargo test` for each sub-crate, then the main crate under
    several feature combinations (pass `skip-full` to skip the slow ones)."""
    _run_cargo_test(session, package="pyo3-build-config")
    _run_cargo_test(session, package="pyo3-macros-backend")
    _run_cargo_test(session, package="pyo3-macros")
    _run_cargo_test(session, package="pyo3-ffi")
    _run_cargo_test(session)
    # the free-threaded build ignores abi3, so we skip abi3
    # tests to avoid unnecessarily running the tests twice
    if not FREE_THREADED_BUILD:
        _run_cargo_test(session, features="abi3")
    if "skip-full" not in session.posargs:
        _run_cargo_test(session, features="full")
        if not FREE_THREADED_BUILD:
            _run_cargo_test(session, features="abi3 full")
@nox.session(name="test-py", venv_backend="none")
def test_py(session: nox.Session) -> None:
    """Run the Python-side test suites: pytests plus every example project."""
    _run(session, "nox", "-f", "pytests/noxfile.py", external=True)
    # Each example ships its own noxfile; run them all.
    for example in glob("examples/*/noxfile.py"):
        _run(session, "nox", "-f", example, external=True)
    for example in glob("pyo3-ffi/examples/*/noxfile.py"):
        _run(session, "nox", "-f", example, external=True)
@nox.session(venv_backend="none")
def coverage(session: nox.Session) -> None:
    """Run the full test suite under cargo-llvm-cov and emit a coverage report."""
    # Instrumentation env vars must be set before any cargo invocation.
    session.env.update(_get_coverage_env())
    _run_cargo(session, "llvm-cov", "clean", "--workspace")
    test(session)
    generate_coverage_report(session)
@nox.session(name="set-coverage-env", venv_backend="none")
def set_coverage_env(session: nox.Session) -> None:
    """For use in GitHub Actions to set coverage environment variables."""
    # Appending KEY=VALUE lines to $GITHUB_ENV exports them to later workflow steps.
    with open(os.environ["GITHUB_ENV"], "a") as env_file:
        for k, v in _get_coverage_env().items():
            print(f"{k}={v}", file=env_file)
@nox.session(name="generate-coverage-report", venv_backend="none")
def generate_coverage_report(session: nox.Session) -> None:
    """Produce a coverage report from previously collected llvm-cov data.

    Emits codecov JSON by default; pass `lcov` as a positional argument to
    emit an lcov.info file instead.
    """
    wants_lcov = "lcov" in session.posargs
    cov_format = "lcov" if wants_lcov else "codecov"
    output_file = "lcov.info" if wants_lcov else "coverage.json"
    packages = (
        "pyo3",
        "pyo3-build-config",
        "pyo3-macros-backend",
        "pyo3-macros",
        "pyo3-ffi",
    )
    _run_cargo(
        session,
        "llvm-cov",
        *(f"--package={pkg}" for pkg in packages),
        "report",
        f"--{cov_format}",
        "--output-path",
        output_file,
    )
@nox.session(venv_backend="none")
def rustfmt(session: nox.Session):
    """Check formatting of the main workspace and the pyo3-ffi-check workspace."""
    _run_cargo(session, "fmt", "--all", "--check")
    _run_cargo(session, "fmt", _FFI_CHECK, "--all", "--check")
@nox.session(name="ruff")
def ruff(session: nox.Session):
    """Lint and format-check the Python sources with ruff."""
    session.install("ruff")
    _run(session, "ruff", "format", ".", "--check")
    _run(session, "ruff", "check", ".")
@nox.session(name="clippy", venv_backend="none")
def clippy(session: nox.Session) -> bool:
    """Run clippy over the main workspace and the isolated extra workspaces.

    Fails the session if any clippy job reported warnings or errors.
    """
    # Run BOTH jobs unconditionally and fail if either failed. The previous
    # unparenthesised condition `not _clippy(...) and _clippy_additional_...`
    # short-circuited: the additional workspaces were skipped entirely when
    # the main workspace passed, and the error fired only when the main
    # workspace failed but the extra one succeeded.
    main_ok = _clippy(session)
    extra_ok = _clippy_additional_workspaces(session)
    if not (main_ok and extra_ok):
        session.error("one or more jobs failed")
def _clippy(session: nox.Session, *, env: Optional[Dict[str, str]] = None) -> bool:
    """Run `cargo clippy` on the workspace for every feature set.

    Args:
        session: active nox session used to invoke cargo.
        env: environment for the cargo invocations; defaults to ``os.environ``.
            (Annotated as Optional — the previous ``Dict[str, str] = None``
            default was an implicit-Optional violation.)

    Returns:
        True if every feature set passed clippy cleanly.
    """
    success = True
    env = env or os.environ
    for feature_set in _get_feature_sets():
        try:
            _run_cargo(
                session,
                "clippy",
                *feature_set,
                "--all-targets",
                "--workspace",
                "--",
                "--deny=warnings",
                env=env,
            )
        except nox.command.CommandFailed:
            # Keep going so one failing feature set doesn't hide the others.
            success = False
    return success
def _clippy_additional_workspaces(session: nox.Session) -> bool:
    """Run clippy over the isolated benches and ffi-check workspaces.

    Returns True if every job that ran succeeded.
    """
    # pyo3-benches and pyo3-ffi-check are in isolated workspaces so that their
    # dependencies do not interact with MSRV
    success = True
    try:
        _run_cargo(session, "clippy", _BENCHES)
    except Exception:
        success = False
    # Run pyo3-ffi-check only on when not cross-compiling, because it needs to
    # have Python headers to feed to bindgen which gets messy when cross-compiling.
    target = os.environ.get("CARGO_BUILD_TARGET")
    if target is None or _get_rust_default_target() == target:
        try:
            _build_docs_for_ffi_check(session)
            _run_cargo(session, "clippy", _FFI_CHECK, "--workspace", "--all-targets")
        except Exception:
            success = False
    return success
@nox.session(venv_backend="none")
def bench(session: nox.Session) -> bool:
    """Run the Rust benchmarks in the isolated pyo3-benches workspace."""
    _run_cargo(session, "bench", _BENCHES, *session.posargs)
@nox.session()
def codspeed(session: nox.Session) -> bool:
    """Run Rust and Python benchmarks under the CodSpeed harness."""
    # rust benchmarks
    # NOTE(review): os.chdir changes the process working directory for the
    # rest of the session — later steps rely on the second chdir below.
    os.chdir(PYO3_DIR / "pyo3-benches")
    _run_cargo(session, "codspeed", "build")
    _run_cargo(session, "codspeed", "run")
    # python benchmarks
    os.chdir(PYO3_DIR / "pytests")
    session.install(".[dev]", "pytest-codspeed")
    _run(session, "pytest", "--codspeed", external=True)
@nox.session(name="clippy-all", venv_backend="none")
def clippy_all(session: nox.Session) -> None:
    """Run clippy against every supported CPython/PyPy version config."""
    success = True

    def _clippy_with_config(env: Dict[str, str]) -> None:
        # Accumulate failures across all version configs instead of aborting early.
        nonlocal success
        success &= _clippy(session, env=env)

    _for_all_version_configs(session, _clippy_with_config)
    success &= _clippy_additional_workspaces(session)
    if not success:
        session.error("one or more jobs failed")
@nox.session(name="check-all", venv_backend="none")
def check_all(session: nox.Session) -> None:
    """Run `cargo check` for every feature set under every version config."""
    success = True

    def _check(env: Dict[str, str]) -> None:
        # Record failures but keep checking the remaining feature sets.
        nonlocal success
        for feature_set in _get_feature_sets():
            try:
                _run_cargo(
                    session,
                    "check",
                    *feature_set,
                    "--all-targets",
                    "--workspace",
                    env=env,
                )
            except Exception:
                success = False

    _for_all_version_configs(session, _check)
    if not success:
        session.error("one or more jobs failed")
@nox.session(venv_backend="none")
def publish(session: nox.Session) -> None:
    """Publish all crates to crates.io, dependencies first (order matters)."""
    _run_cargo_publish(session, package="pyo3-build-config")
    _run_cargo_publish(session, package="pyo3-macros-backend")
    _run_cargo_publish(session, package="pyo3-macros")
    _run_cargo_publish(session, package="pyo3-ffi")
    _run_cargo_publish(session, package="pyo3")
@nox.session(venv_backend="none")
def contributors(session: nox.Session) -> None:
    """Print the GitHub logins of commit authors between two refs.

    Usage: nox -s contributors -- BASE [HEAD]   (HEAD defaults to "HEAD").
    """
    import requests

    if len(session.posargs) < 1:
        raise Exception("base commit positional argument missing")
    base = session.posargs[0]
    page = 1
    head = "HEAD"
    if len(session.posargs) == 2:
        head = session.posargs[1]
    if len(session.posargs) > 2:
        raise Exception("too many arguments")
    authors = set()
    while True:
        # Page through the GitHub compare API until no `next` link remains.
        resp = requests.get(
            f"https://api.github.com/repos/PyO3/pyo3/compare/{base}...{head}",
            params={"page": page, "per_page": 100},
        )
        body = resp.json()
        if resp.status_code != 200:
            raise Exception(
                f"failed to retrieve commits: {resp.status_code} {body['message']}"
            )
        for commit in body["commits"]:
            try:
                # `author` is null for commits whose email has no GitHub account.
                authors.add(commit["author"]["login"])
            except Exception:
                continue
        if "next" in resp.links:
            page += 1
        else:
            break
    # Case-insensitive sort so output order is stable and human-friendly.
    authors = sorted(list(authors), key=lambda author: author.lower())
    for author in authors:
        print(f"@{author}")
class EmscriptenInfo:
    """Paths and Python version components used by the emscripten sessions.

    NOTE: constructing this class creates the .nox/emscripten build directory
    as a side effect.
    """

    def __init__(self):
        self.emscripten_dir = PYO3_DIR / "emscripten"
        self.builddir = PYO3_DIR / ".nox/emscripten"
        self.builddir.mkdir(exist_ok=True, parents=True)
        # e.g. "3.12.0b1" -> major "3", minor "12", micro "0b1" (split below).
        self.pyversion = sys.version.split()[0]
        self.pymajor, self.pyminor, self.pymicro = self.pyversion.split(".")
        # Separate a pre-release suffix (e.g. "b1") from the numeric micro part.
        self.pymicro, self.pydev = re.match(
            "([0-9]*)([^0-9].*)?", self.pymicro
        ).groups()
        if self.pydev is None:
            self.pydev = ""
        self.pymajorminor = f"{self.pymajor}.{self.pyminor}"
        self.pymajorminormicro = f"{self.pymajorminor}.{self.pymicro}"
@nox.session(name="build-emscripten", venv_backend="none")
def build_emscripten(session: nox.Session):
    """Build CPython for emscripten via the Makefile in emscripten/."""
    info = EmscriptenInfo()
    _run(
        session,
        "make",
        "-C",
        str(info.emscripten_dir),
        f"PYTHON={sys.executable}",
        f"BUILDROOT={info.builddir}",
        f"PYMAJORMINORMICRO={info.pymajorminormicro}",
        f"PYPRERELEASE={info.pydev}",
        external=True,
    )
@nox.session(name="test-emscripten", venv_backend="none")
def test_emscripten(session: nox.Session):
    """Cross-compile and run the test suite against the emscripten CPython build."""
    info = EmscriptenInfo()
    libdir = info.builddir / f"install/Python-{info.pyversion}/lib"
    pythonlibdir = libdir / f"python{info.pymajorminor}"
    target = "wasm32-unknown-emscripten"
    # Run wasm test binaries through the Python-based emscripten runner.
    session.env["CARGO_TARGET_WASM32_UNKNOWN_EMSCRIPTEN_RUNNER"] = "python " + str(
        info.emscripten_dir / "runner.py"
    )
    # Linker flags: bundle the Python stdlib into the wasm filesystem image
    # and link against libpython plus its C dependencies.
    session.env["RUSTFLAGS"] = " ".join(
        [
            f"-L native={libdir}",
            "-C link-arg=--preload-file",
            f"-C link-arg={pythonlibdir}@/lib/python{info.pymajorminor}",
            f"-C link-arg=-lpython{info.pymajorminor}",
            "-C link-arg=-lexpat",
            "-C link-arg=-lmpdec",
            "-C link-arg=-lz",
            "-C link-arg=-lbz2",
            "-C link-arg=-sALLOW_MEMORY_GROWTH=1",
        ]
    )
    session.env["CARGO_BUILD_TARGET"] = target
    session.env["PYO3_CROSS_LIB_DIR"] = pythonlibdir
    _run(session, "rustup", "target", "add", target, "--toolchain", "stable")
    # emsdk_env.sh must be sourced in the same shell that runs cargo.
    _run(
        session,
        "bash",
        "-c",
        f"source {info.builddir/'emsdk/emsdk_env.sh'} && cargo test",
    )
@nox.session(venv_backend="none")
def docs(session: nox.Session) -> None:
    """Build API docs with rustdoc.

    Positional args: `open` to open in a browser, `nightly` for docsrs cfg and
    example scraping, `nightly internal` to also document hidden/private items.
    """
    rustdoc_flags = ["-Dwarnings"]
    toolchain_flags = []
    cargo_flags = []
    if "open" in session.posargs:
        cargo_flags.append("--open")
    if "nightly" in session.posargs:
        rustdoc_flags.append("--cfg docsrs")
        toolchain_flags.append("+nightly")
        cargo_flags.extend(["-Z", "unstable-options", "-Z", "rustdoc-scrape-examples"])
    if "nightly" in session.posargs and "internal" in session.posargs:
        rustdoc_flags.append("--Z unstable-options")
        rustdoc_flags.append("--document-hidden-items")
        rustdoc_flags.extend(("--html-after-content", ".netlify/internal_banner.html"))
        cargo_flags.append("--document-private-items")
    else:
        # Proc-macro crates have no public-facing API docs worth publishing.
        cargo_flags.extend(["--exclude=pyo3-macros", "--exclude=pyo3-macros-backend"])
    # Preserve any RUSTDOCFLAGS already set by the caller.
    rustdoc_flags.append(session.env.get("RUSTDOCFLAGS", ""))
    session.env["RUSTDOCFLAGS"] = " ".join(rustdoc_flags)
    shutil.rmtree(PYO3_DOCS_TARGET, ignore_errors=True)
    _run_cargo(
        session,
        *toolchain_flags,
        "doc",
        "--lib",
        "--no-default-features",
        "--features=full",
        "--no-deps",
        "--workspace",
        *cargo_flags,
    )
@nox.session(name="build-guide", venv_backend="none")
def build_guide(session: nox.Session):
    """Build the user guide with mdbook and copy the licenses alongside it."""
    shutil.rmtree(PYO3_GUIDE_TARGET, ignore_errors=True)
    _run(session, "mdbook", "build", "-d", PYO3_GUIDE_TARGET, "guide", *session.posargs)
    for license in ("LICENSE-APACHE", "LICENSE-MIT"):
        target_file = PYO3_GUIDE_TARGET / license
        target_file.unlink(missing_ok=True)
        shutil.copy(PYO3_DIR / license, target_file)
@nox.session(name="check-guide", venv_backend="none")
def check_guide(session: nox.Session):
    """Link-check the guide and docs with lychee, remapping placeholder URLs
    to the locally built copies so the check needs no published site."""
    # reuse other sessions, but with default args
    posargs = [*session.posargs]
    del session.posargs[:]
    build_guide(session)
    docs(session)
    session.posargs.extend(posargs)
    if toml is None:
        session.error("requires Python 3.11 or `toml` to be installed")
    pyo3_version = toml.loads((PYO3_DIR / "Cargo.toml").read_text())["package"][
        "version"
    ]
    # Map the guide's templated/published URLs onto the local build outputs.
    # %7B%7B is the URL-encoded "{{" of unexpanded mdbook template variables.
    remaps = {
        f"file://{PYO3_GUIDE_SRC}/([^/]*/)*?%7B%7B#PYO3_DOCS_URL}}}}": f"file://{PYO3_DOCS_TARGET}",
        f"https://pyo3.rs/v{pyo3_version}": f"file://{PYO3_GUIDE_TARGET}",
        "https://pyo3.rs/main/": f"file://{PYO3_GUIDE_TARGET}/",
        "https://pyo3.rs/latest/": f"file://{PYO3_GUIDE_TARGET}/",
        "%7B%7B#PYO3_DOCS_VERSION}}": "latest",
    }
    remap_args = []
    for key, value in remaps.items():
        remap_args.extend(("--remap", f"{key} {value}"))
    # check all links in the guide
    _run(
        session,
        "lychee",
        "--include-fragments",
        str(PYO3_GUIDE_SRC),
        *remap_args,
        *session.posargs,
    )
    # check external links in the docs
    # (intra-doc links are checked by rustdoc)
    _run(
        session,
        "lychee",
        str(PYO3_DOCS_TARGET),
        *remap_args,
        f"--exclude=file://{PYO3_DOCS_TARGET}",
        "--exclude=http://www.adobe.com/",
        *session.posargs,
    )
@nox.session(name="format-guide", venv_backend="none")
def format_guide(session: nox.Session):
    """Rustfmt every ```rust code block embedded in the guide's markdown.

    Each block is written to a temp file as a `//!` doc comment, formatted
    with nightly rustfmt, then spliced back into the markdown.
    """
    fence_line = "//! ```\n"
    for path in Path("guide").glob("**/*.md"):
        session.log("Working on %s", path)
        lines = iter(path.read_text().splitlines(True))
        new_lines = []
        for line in lines:
            new_lines.append(line)
            if not re.search("```rust(,.*)?$", line):
                continue
            # Found a code block fence, gobble up its lines and write to temp. file
            # (the inner `for line in lines` below advances the SAME iterator,
            # consuming the block's lines so the outer loop skips them).
            prefix = line[: line.index("```")]
            with tempfile.NamedTemporaryFile("w", delete=False) as file:
                tempname = file.name
                file.write(fence_line)
                for line in lines:
                    if line == prefix + "```\n":
                        break
                    file.write(("//! " + line[len(prefix) :]).rstrip() + "\n")
                file.write(fence_line)
            # Format it (needs nightly rustfmt for `format_code_in_doc_comments`)
            _run(
                session,
                "rustfmt",
                "+nightly",
                "--config",
                "format_code_in_doc_comments=true",
                "--config",
                "reorder_imports=false",
                tempname,
            )
            # Re-read the formatted file, add its lines, and delete it
            with open(tempname, "r") as file:
                for line in file:
                    if line == fence_line:
                        continue
                    # Strip the "//! " doc-comment marker and restore the indent.
                    new_lines.append((prefix + line[4:]).rstrip() + "\n")
            os.unlink(tempname)
            new_lines.append(prefix + "```\n")
        path.write_text("".join(new_lines))
@nox.session(name="address-sanitizer", venv_backend="none")
def address_sanitizer(session: nox.Session):
    """Run the test suite under ASan (requires nightly and -Zbuild-std)."""
    _run_cargo(
        session,
        "+nightly",
        "test",
        "--release",
        # std must be rebuilt with sanitizer instrumentation too.
        "-Zbuild-std",
        f"--target={_get_rust_default_target()}",
        "--",
        "--test-threads=1",
        env={
            "RUSTFLAGS": "-Zsanitizer=address",
            "RUSTDOCFLAGS": "-Zsanitizer=address",
            # CPython itself "leaks" allocations at shutdown; ignore leaks.
            "ASAN_OPTIONS": "detect_leaks=0",
        },
    )
# PR title prefixes (e.g. "docs: ...") that are exempt from the changelog check.
_IGNORE_CHANGELOG_PR_CATEGORIES = (
    "release",
    "docs",
)
@nox.session(name="check-changelog")
def check_changelog(session: nox.Session):
    """CI check: require a newsfragment for the PR unless the title prefix
    or a `CI-skip-changelog` label exempts it."""
    if not _is_github_actions():
        session.error("Can only check changelog on github actions")
    # The PR metadata is read from the GitHub Actions event payload.
    event_path = os.environ["GITHUB_EVENT_PATH"]
    with open(event_path) as event_file:
        event = json.load(event_file)
    for category in _IGNORE_CHANGELOG_PR_CATEGORIES:
        if event["pull_request"]["title"].startswith(f"{category}:"):
            session.skip(f"PR title starts with {category}")
    for label in event["pull_request"]["labels"]:
        if label["name"] == "CI-skip-changelog":
            session.skip("CI-skip-changelog label applied")
    issue_number = event["pull_request"]["number"]
    newsfragments = PYO3_DIR / "newsfragments"
    # A fragment is named "<PR number>.<change type>.md".
    fragments = tuple(
        filter(
            Path.exists,
            (
                newsfragments / f"{issue_number}.{change_type}.md"
                for change_type in ("packaging", "added", "changed", "removed", "fixed")
            ),
        )
    )
    if not fragments:
        session.error(
            "Changelog entry not found, please add one (or more) to the `newsfragments` directory.\n"
            "Alternatively, start the PR title with `docs:` if this PR is a docs-only PR.\n"
            "See https://github.com/PyO3/pyo3/blob/main/Contributing.md#documenting-changes for more information."
        )
    print("Found newsfragments:")
    for fragment in fragments:
        print(fragment.name)
@nox.session(name="set-msrv-package-versions", venv_backend="none")
def set_msrv_package_versions(session: nox.Session):
    """Pin dependencies in every project's Cargo.lock to MSRV-compatible
    minimum versions, then smoke-test resolution with `cargo metadata`."""
    from collections import defaultdict

    if toml is None:
        session.error("requires Python 3.11 or `toml` to be installed")
    # None means the repository root; the rest are example sub-projects.
    projects = (
        None,
        "examples/decorator",
        "examples/maturin-starter",
        "examples/setuptools-rust-starter",
        "examples/word-count",
    )
    min_pkg_versions = {
        "regex": "1.9.6",
        "proptest": "1.2.0",
        "trybuild": "1.0.89",
        "eyre": "0.6.8",
        "allocator-api2": "0.2.10",
        "indexmap": "2.5.0",  # to be compatible with hashbrown 0.14
        "hashbrown": "0.14.5",  # https://github.com/rust-lang/hashbrown/issues/574
    }
    # run cargo update first to ensure that everything is at highest
    # possible version, so that this matches what CI will resolve to.
    for project in projects:
        if project is None:
            _run_cargo(session, "update")
        else:
            _run_cargo(session, "update", f"--manifest-path={project}/Cargo.toml")
    for project in projects:
        lock_file = Path(project or "") / "Cargo.lock"

        def load_pkg_versions():
            # Re-parse the lock file; called again after each pin because
            # `cargo update --precise` rewrites it.
            cargo_lock = toml.loads(lock_file.read_text())
            # Cargo allows to depends on multiple versions of the same package
            pkg_versions = defaultdict(list)
            for pkg in cargo_lock["package"]:
                name = pkg["name"]
                if name not in min_pkg_versions:
                    continue
                pkg_versions[name].append(pkg["version"])
            return pkg_versions

        pkg_versions = load_pkg_versions()
        for pkg_name, min_version in min_pkg_versions.items():
            versions = pkg_versions.get(pkg_name, [])
            for version in versions:
                if version != min_version:
                    pkg_id = pkg_name + ":" + version
                    _run_cargo_set_package_version(
                        session, pkg_id, min_version, project=project
                    )
                    # assume `_run_cargo_set_package_version` has changed something
                    # and re-read `Cargo.lock`
                    pkg_versions = load_pkg_versions()
    # As a smoke test, cargo metadata solves all dependencies, so
    # will break if any crates rely on cargo features not
    # supported on MSRV
    for project in projects:
        if project is None:
            _run_cargo(session, "metadata", silent=True)
        else:
            _run_cargo(
                session,
                "metadata",
                f"--manifest-path={project}/Cargo.toml",
                silent=True,
            )
@nox.session(name="ffi-check")
def ffi_check(session: nox.Session):
    """Run pyo3-ffi-check, which compares pyo3-ffi against the real C headers."""
    _build_docs_for_ffi_check(session)
    _run_cargo(session, "run", _FFI_CHECK)
@nox.session(name="test-version-limits")
def test_version_limits(session: nox.Session):
    """Verify that unsupported interpreter versions fail the build as expected."""
    env = os.environ.copy()
    with _config_file() as config_file:
        env["PYO3_CONFIG_FILE"] = config_file.name
        # Below the minimum supported CPython: must fail to compile.
        assert "3.6" not in PY_VERSIONS
        config_file.set("CPython", "3.6")
        _run_cargo(session, "check", env=env, expect_error=True)
        # Above the maximum supported CPython: must fail without the opt-in.
        assert "3.14" not in PY_VERSIONS
        config_file.set("CPython", "3.14")
        _run_cargo(session, "check", env=env, expect_error=True)
        # 3.14 CPython should build with forward compatibility
        env["PYO3_USE_ABI3_FORWARD_COMPATIBILITY"] = "1"
        _run_cargo(session, "check", env=env)
        assert "3.8" not in PYPY_VERSIONS
        config_file.set("PyPy", "3.8")
        _run_cargo(session, "check", env=env, expect_error=True)
        assert "3.11" not in PYPY_VERSIONS
        config_file.set("PyPy", "3.11")
        _run_cargo(session, "check", env=env, expect_error=True)
@nox.session(name="check-feature-powerset", venv_backend="none")
def check_feature_powerset(session: nox.Session):
    """Run `cargo hack --feature-powerset check` over the crate's features.

    Also validates that the `full` meta-feature covers every feature it
    should. Pass `minimal-versions` to use cargo-minimal-versions instead.
    """
    if toml is None:
        session.error("requires Python 3.11 or `toml` to be installed")
    cargo_toml = toml.loads((PYO3_DIR / "Cargo.toml").read_text())
    # Features deliberately not part of the `full` meta-feature.
    EXCLUDED_FROM_FULL = {
        "nightly",
        "extension-module",
        "full",
        "default",
        "auto-initialize",
        "generate-import-lib",
        "multiple-pymethods",  # Because it's not supported on wasm
    }
    features = cargo_toml["features"]
    full_feature = set(features["full"])
    abi3_features = {feature for feature in features if feature.startswith("abi3")}
    abi3_version_features = abi3_features - {"abi3"}
    # Sanity check: `full` must include every feature not explicitly excluded.
    expected_full_feature = features.keys() - EXCLUDED_FROM_FULL - abi3_features
    uncovered_features = expected_full_feature - full_feature
    if uncovered_features:
        session.error(
            f"some features missing from `full` meta feature: {uncovered_features}"
        )
    experimental_features = {
        feature for feature in features if feature.startswith("experimental-")
    }
    full_without_experimental = full_feature - experimental_features
    if len(experimental_features) >= 2:
        # justification: we always assume that feature within these groups are
        # mutually exclusive to simplify CI
        features_to_group = [
            full_without_experimental,
            experimental_features,
        ]
    elif len(experimental_features) == 1:
        # no need to make an experimental features group
        features_to_group = [full_without_experimental]
    else:
        session.error("no experimental features exist; please simplify the noxfile")
    features_to_skip = [
        *(EXCLUDED_FROM_FULL),
        *abi3_version_features,
    ]
    # deny warnings
    env = os.environ.copy()
    rust_flags = env.get("RUSTFLAGS", "")
    env["RUSTFLAGS"] = f"{rust_flags} -Dwarnings"
    subcommand = "hack"
    if "minimal-versions" in session.posargs:
        subcommand = "minimal-versions"
    comma_join = ",".join
    _run_cargo(
        session,
        subcommand,
        "--feature-powerset",
        '--optional-deps=""',
        f'--skip="{comma_join(features_to_skip)}"',
        *(f"--group-features={comma_join(group)}" for group in features_to_group),
        "check",
        "--all-targets",
        env=env,
    )
@nox.session(name="update-ui-tests", venv_backend="none")
def update_ui_tests(session: nox.Session):
    """Regenerate trybuild UI-test snapshots for every feature combination."""
    # TRYBUILD=overwrite tells trybuild to rewrite the expected-output files.
    env = dict(os.environ, TRYBUILD="overwrite")
    base_command = ("test", "--test", "test_compile_error")
    for feature_args in ((), ("--features=full",), ("--features=abi3,full",)):
        _run_cargo(session, *base_command, *feature_args, env=env)
def _build_docs_for_ffi_check(session: nox.Session) -> None:
    # pyo3-ffi-check needs to scrape docs of pyo3-ffi
    env = os.environ.copy()
    # Pin the Python interpreter used by pyo3's build script.
    env["PYO3_PYTHON"] = sys.executable
    _run_cargo(session, "doc", _FFI_CHECK, "-p", "pyo3-ffi", "--no-deps", env=env)
@lru_cache()
def _get_rust_info() -> Tuple[str, ...]:
    """Return the lines of `rustc -vV` output (cached for the process)."""
    output = _get_output("rustc", "-vV")
    return tuple(output.splitlines())
def _get_rust_version() -> Tuple[int, int, int, List[str]]:
    """Parse the rustc release line into (major, minor, patch, extra).

    NOTE(review): implicitly returns None if no "release:" line is found,
    despite the annotation — callers appear to assume the line exists.
    """
    for line in _get_rust_info():
        if line.startswith(_RELEASE_LINE_START):
            version = line[len(_RELEASE_LINE_START) :].strip()
            # e.g. 1.67.0-beta.2
            (version_number, *extra) = version.split("-", maxsplit=1)
            return (*map(int, version_number.split(".")), extra)
def _get_rust_default_target() -> str:
    """Return the host target triple reported by rustc (e.g. x86_64-unknown-linux-gnu)."""
    for line in _get_rust_info():
        if line.startswith(_HOST_LINE_START):
            return line[len(_HOST_LINE_START) :].strip()
@lru_cache()
def _get_feature_sets() -> Tuple[Tuple[str, ...], ...]:
"""Returns feature sets to use for clippy job"""
cargo_target = os.getenv("CARGO_BUILD_TARGET", "")
if "wasm32-wasi" not in cargo_target:
# multiple-pymethods not supported on wasm
return (
("--no-default-features",),
(
"--no-default-features",
"--features=abi3",
),
("--features=full multiple-pymethods",),
("--features=abi3 full multiple-pymethods",),
)
else:
return (
("--no-default-features",),
(
"--no-default-features",
"--features=abi3",
),
("--features=full",),
("--features=abi3 full",),
)
# Prefixes of the `rustc -vV` output lines parsed by the helpers above.
_RELEASE_LINE_START = "release: "
_HOST_LINE_START = "host: "
def _get_coverage_env() -> Dict[str, str]:
    """Return the env vars `cargo llvm-cov show-env` asks us to set."""
    env = {}
    output = _get_output("cargo", "llvm-cov", "show-env")
    for line in output.strip().splitlines():
        (key, value) = line.split("=", maxsplit=1)
        # Strip single or double quotes from the variable value
        # - quote used by llvm-cov differs between Windows and Linux
        if value and value[0] in ("'", '"'):
            value = value[1:-1]
        env[key] = value
    # Ensure that examples/ and pytests/ all build to the correct target directory to collect
    # coverage artifacts.
    env["CARGO_TARGET_DIR"] = env["CARGO_LLVM_COV_TARGET_DIR"]
    return env
def _run(session: nox.Session, *args: str, **kwargs: Any) -> None:
    """Wrapper for session.run which creates nice groups on GitHub Actions."""
    is_github_actions = _is_github_actions()
    failed = False
    if is_github_actions:
        # Insert ::group:: at the start of nox's command line output
        print("::group::", end="", flush=True, file=sys.stderr)
    try:
        session.run(*args, **kwargs)
    except nox.command.CommandFailed:
        # Remember the failure so it can be reported after the group closes.
        failed = True
        raise
    finally:
        if is_github_actions:
            print("::endgroup::", file=sys.stderr)
            # Defer the error message until after the group to make them easier
            # to find in the log
            if failed:
                command = " ".join(args)
                print(f"::error::`{command}` failed", file=sys.stderr)
def _run_cargo(
    session: nox.Session, *args: str, expect_error: bool = False, **kwargs: Any
) -> None:
    """Run `cargo *args`; with expect_error=True, require it to FAIL (exit 101)."""
    if expect_error:
        if "success_codes" in kwargs:
            raise ValueError("expect_error overrides success_codes")
        # cargo exits with code 101 on compilation failure.
        kwargs["success_codes"] = [101]
    _run(session, "cargo", *args, **kwargs, external=True)
def _run_cargo_test(
    session: nox.Session,
    *,
    package: Optional[str] = None,
    features: Optional[str] = None,
) -> None:
    """Run `cargo test` (optionally via cargo-careful / in release mode,
    controlled by session posargs) for the given package and features."""
    command = ["cargo"]
    if "careful" in session.posargs:
        # do explicit setup so failures in setup can be seen
        _run_cargo(session, "careful", "setup")
        command.append("careful")
    command.extend(("test", "--no-fail-fast"))
    if "release" in session.posargs:
        command.append("--release")
    if package:
        command.append(f"--package={package}")
    if features:
        command.append(f"--features={features}")
    _run(session, *command, external=True)
def _run_cargo_publish(session: nox.Session, *, package: str) -> None:
    """Publish a single crate to crates.io."""
    _run_cargo(session, "publish", f"--package={package}")
def _run_cargo_set_package_version(
    session: nox.Session,
    pkg_id: str,
    version: str,
    *,
    project: Optional[str] = None,
) -> None:
    """Pin `pkg_id` (name or name:version) to exactly `version` in the
    Cargo.lock of `project` (repository root when None)."""
    command = [
        "cargo",
        "update",
        "-p",
        pkg_id,
        "--precise",
        version,
        "--workspace",
    ]
    if project:
        command.append(f"--manifest-path={project}/Cargo.toml")
    _run(session, *command, external=True)
def _get_output(*args: str) -> str:
return subprocess.run(args, capture_output=True, text=True, check=True).stdout
def _for_all_version_configs(
    session: nox.Session, job: Callable[[Dict[str, str]], None]
) -> None:
    """Invoke `job(env)` once per supported CPython and PyPy version, with
    PYO3_CONFIG_FILE pointing at a config file rewritten for each version."""
    env = os.environ.copy()
    with _config_file() as config_file:
        env["PYO3_CONFIG_FILE"] = config_file.name

        def _job_with_config(implementation, version):
            session.log(f"{implementation} {version}")
            # Rewrite the shared config file in place; env stays the same.
            config_file.set(implementation, version)
            job(env)

        for version in PY_VERSIONS:
            _job_with_config("CPython", version)
        for version in PYPY_VERSIONS:
            _job_with_config("PyPy", version)
class _ConfigFile:
def __init__(self, config_file) -> None:
self._config_file = config_file
def set(
self, implementation: str, version: str, build_flags: Iterable[str] = ()
) -> None:
"""Set the contents of this config file to the given implementation and version."""
self._config_file.seek(0)
self._config_file.truncate(0)
self._config_file.write(
f"""\
implementation={implementation}
version={version}
build_flags={','.join(build_flags)}
suppress_build_script_link_lines=true
"""
)
self._config_file.flush()
@property
def name(self) -> str:
return self._config_file.name
@contextmanager
def _config_file() -> Iterator[_ConfigFile]:
    """Creates a temporary config file which can be repeatedly set to different values."""
    # "r+" so the same handle can be truncated and rewritten by _ConfigFile.set.
    with tempfile.NamedTemporaryFile("r+") as config:
        yield _ConfigFile(config)
def _is_github_actions() -> bool:
return "GITHUB_ACTIONS" in os.environ
# Common `cargo --manifest-path` arguments for the auxiliary workspaces
# (benchmarks and the FFI checker) used by the nox sessions above.
_BENCHES = "--manifest-path=pyo3-benches/Cargo.toml"
_FFI_CHECK = "--manifest-path=pyo3-ffi-check/Cargo.toml"
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/Code-of-Conduct.md | # Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/.towncrier.template.md | {% for section_text, section in sections.items() %}{%- if section %}{{section_text}}{% endif -%}
{% if section %}
{% for category in ['packaging', 'added', 'changed', 'removed', 'fixed' ] if category in section %}
### {{ definitions[category]['name'] }}
{% if definitions[category]['showcontent'] %}
{% for text, pull_requests in section[category].items() %}
- {{ text }} {{ pull_requests|join(', ') }}
{% endfor %}
{% else %}
- {{ section[category]['']|join(', ') }}
{% endif %}
{% endfor %}{% else %}No significant changes.{% endif %}{% endfor %}
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/Cargo.toml | [package]
name = "pyo3"
version = "0.23.0-dev"
description = "Bindings to Python interpreter"
authors = ["PyO3 Project and Contributors <https://github.com/PyO3>"]
readme = "README.md"
keywords = ["pyo3", "python", "cpython", "ffi"]
homepage = "https://github.com/pyo3/pyo3"
repository = "https://github.com/pyo3/pyo3"
documentation = "https://docs.rs/crate/pyo3/"
categories = ["api-bindings", "development-tools::ffi"]
license = "MIT OR Apache-2.0"
exclude = ["/.gitignore", ".cargo/config", "/codecov.yml", "/Makefile", "/pyproject.toml", "/noxfile.py", "/.github", "/tests/test_compile_error.rs", "/tests/ui"]
edition = "2021"
rust-version = "1.63"
[dependencies]
cfg-if = "1.0"
libc = "0.2.62"
memoffset = "0.9"
once_cell = "1.13"
# ffi bindings to the python interpreter, split into a separate crate so they can be used independently
pyo3-ffi = { path = "pyo3-ffi", version = "=0.23.0-dev" }
# support crates for macros feature
pyo3-macros = { path = "pyo3-macros", version = "=0.23.0-dev", optional = true }
indoc = { version = "2.0.1", optional = true }
unindent = { version = "0.2.1", optional = true }
# support crate for multiple-pymethods feature
inventory = { version = "0.3.0", optional = true }
# crate integrations that can be added using the eponymous features
anyhow = { version = "1.0.1", optional = true }
chrono = { version = "0.4.25", default-features = false, optional = true }
chrono-tz = { version = ">= 0.10, < 0.11", default-features = false, optional = true }
either = { version = "1.9", optional = true }
eyre = { version = ">= 0.6.8, < 0.7", optional = true }
hashbrown = { version = ">= 0.14.5, < 0.16", optional = true }
indexmap = { version = ">= 2.5.0, < 3", optional = true }
num-bigint = { version = "0.4.2", optional = true }
num-complex = { version = ">= 0.4.6, < 0.5", optional = true }
num-rational = {version = "0.4.1", optional = true }
rust_decimal = { version = "1.15", default-features = false, optional = true }
serde = { version = "1.0", optional = true }
smallvec = { version = "1.0", optional = true }
[target.'cfg(not(target_has_atomic = "64"))'.dependencies]
portable-atomic = "1.0"
[dev-dependencies]
assert_approx_eq = "1.1.0"
chrono = "0.4.25"
chrono-tz = ">= 0.10, < 0.11"
# Required for "and $N others" normalization
trybuild = ">=1.0.70"
proptest = { version = "1.0", default-features = false, features = ["std"] }
send_wrapper = "0.6"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.61"
rayon = "1.6.1"
futures = "0.3.28"
tempfile = "3.12.0"
static_assertions = "1.1.0"
uuid = {version = "1.10.0", features = ["v4"] }
[build-dependencies]
pyo3-build-config = { path = "pyo3-build-config", version = "=0.23.0-dev", features = ["resolve-config"] }
[features]
default = ["macros"]
# Enables support for `async fn` for `#[pyfunction]` and `#[pymethods]`.
experimental-async = ["macros", "pyo3-macros/experimental-async"]
# Enables pyo3::inspect module and additional type information on FromPyObject
# and IntoPy traits
experimental-inspect = []
# Enables macros: #[pyclass], #[pymodule], #[pyfunction] etc.
macros = ["pyo3-macros", "indoc", "unindent"]
# Enables multiple #[pymethods] per #[pyclass]
multiple-pymethods = ["inventory", "pyo3-macros/multiple-pymethods"]
# Use this feature when building an extension module.
# It tells the linker to keep the python symbols unresolved,
# so that the module can also be used with statically linked python interpreters.
extension-module = ["pyo3-ffi/extension-module"]
# Use the Python limited API. See https://www.python.org/dev/peps/pep-0384/ for more.
abi3 = ["pyo3-build-config/abi3", "pyo3-ffi/abi3"]
# With abi3, we can manually set the minimum Python version.
abi3-py37 = ["abi3-py38", "pyo3-build-config/abi3-py37", "pyo3-ffi/abi3-py37"]
abi3-py38 = ["abi3-py39", "pyo3-build-config/abi3-py38", "pyo3-ffi/abi3-py38"]
abi3-py39 = ["abi3-py310", "pyo3-build-config/abi3-py39", "pyo3-ffi/abi3-py39"]
abi3-py310 = ["abi3-py311", "pyo3-build-config/abi3-py310", "pyo3-ffi/abi3-py310"]
abi3-py311 = ["abi3-py312", "pyo3-build-config/abi3-py311", "pyo3-ffi/abi3-py311"]
abi3-py312 = ["abi3", "pyo3-build-config/abi3-py312", "pyo3-ffi/abi3-py312"]
# Automatically generates `python3.dll` import libraries for Windows targets.
generate-import-lib = ["pyo3-ffi/generate-import-lib"]
# Changes `Python::with_gil` to automatically initialize the Python interpreter if needed.
auto-initialize = []
# Enables `Clone`ing references to Python objects `Py<T>` which panics if the GIL is not held.
py-clone = []
# Optimizes PyObject to Vec conversion and so on.
nightly = []
# Activates all additional features
# This is mostly intended for testing purposes - activating *all* of these isn't particularly useful.
full = [
"macros",
# "multiple-pymethods", # Not supported by wasm
"anyhow",
"chrono",
"chrono-tz",
"either",
"experimental-async",
"experimental-inspect",
"eyre",
"hashbrown",
"indexmap",
"num-bigint",
"num-complex",
"num-rational",
"py-clone",
"rust_decimal",
"serde",
"smallvec",
]
[workspace]
members = [
"pyo3-ffi",
"pyo3-build-config",
"pyo3-macros",
"pyo3-macros-backend",
"pytests",
"examples",
]
[package.metadata.docs.rs]
no-default-features = true
features = ["full", "gil-refs"]
rustdoc-args = ["--cfg", "docsrs"]
[workspace.lints.clippy]
checked_conversions = "warn"
dbg_macro = "warn"
explicit_into_iter_loop = "warn"
explicit_iter_loop = "warn"
filter_map_next = "warn"
flat_map_option = "warn"
let_unit_value = "warn"
manual_assert = "warn"
manual_ok_or = "warn"
todo = "warn"
unnecessary_wraps = "warn"
useless_transmute = "warn"
used_underscore_binding = "warn"
[workspace.lints.rust]
elided_lifetimes_in_paths = "warn"
invalid_doc_attributes = "warn"
rust_2018_idioms = { level = "warn", priority = -1 }
rust_2021_prelude_collisions = "warn"
unused_lifetimes = "warn"
[workspace.lints.rustdoc]
broken_intra_doc_links = "warn"
bare_urls = "warn"
[lints]
workspace = true
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file. For help with updating to new
PyO3 versions, please see the [migration guide](https://pyo3.rs/latest/migration.html).
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
To see unreleased changes, please see the [CHANGELOG on the main branch guide](https://pyo3.rs/main/changelog.html).
<!-- towncrier release notes start -->
## [0.22.5] - 2024-10-15
### Fixed
- Fix regression in 0.22.4 of naming collision in `__clear__` slot and `clear` method generated code. [#4619](https://github.com/PyO3/pyo3/pull/4619)
## [0.22.4] - 2024-10-12
### Added
- Add FFI definition `PyWeakref_GetRef` and `compat::PyWeakref_GetRef`. [#4528](https://github.com/PyO3/pyo3/pull/4528)
### Changed
- Deprecate `_borrowed` methods on `PyWeakRef` and `PyWeakrefProxy` (just use the owning forms). [#4590](https://github.com/PyO3/pyo3/pull/4590)
### Fixed
- Revert removal of private FFI function `_PyLong_NumBits` on Python 3.13 and later. [#4450](https://github.com/PyO3/pyo3/pull/4450)
- Fix `__traverse__` functions for base classes not being called by subclasses created with `#[pyclass(extends = ...)]`. [#4563](https://github.com/PyO3/pyo3/pull/4563)
- Fix regression in 0.22.3 failing compiles under `#![forbid(unsafe_code)]`. [#4574](https://github.com/PyO3/pyo3/pull/4574)
- Workaround possible use-after-free in `_borrowed` methods on `PyWeakRef` and `PyWeakrefProxy` by leaking their contents. [#4590](https://github.com/PyO3/pyo3/pull/4590)
- Fix crash calling `PyType_GetSlot` on static types before Python 3.10. [#4599](https://github.com/PyO3/pyo3/pull/4599)
## [0.22.3] - 2024-09-15
### Added
- Add `pyo3::ffi::compat` namespace with compatibility shims for C API functions added in recent versions of Python.
- Add FFI definition `PyDict_GetItemRef` on Python 3.13 and newer, and `compat::PyDict_GetItemRef` for all versions. [#4355](https://github.com/PyO3/pyo3/pull/4355)
- Add FFI definition `PyList_GetItemRef` on Python 3.13 and newer, and `pyo3_ffi::compat::PyList_GetItemRef` for all versions. [#4410](https://github.com/PyO3/pyo3/pull/4410)
- Add FFI definitions `compat::Py_NewRef` and `compat::Py_XNewRef`. [#4445](https://github.com/PyO3/pyo3/pull/4445)
- Add FFI definitions `compat::PyObject_CallNoArgs` and `compat::PyObject_CallMethodNoArgs`. [#4461](https://github.com/PyO3/pyo3/pull/4461)
- Add `GilOnceCell<Py<T>>::clone_ref`. [#4511](https://github.com/PyO3/pyo3/pull/4511)
### Changed
- Improve error messages for `#[pyfunction]` defined inside `#[pymethods]`. [#4349](https://github.com/PyO3/pyo3/pull/4349)
- Improve performance of calls to Python by using the vectorcall calling convention where possible. [#4456](https://github.com/PyO3/pyo3/pull/4456)
- Mention the type name in the exception message when trying to instantiate a class with no constructor defined. [#4481](https://github.com/PyO3/pyo3/pull/4481)
### Removed
- Remove private FFI definition `_Py_PackageContext`. [#4420](https://github.com/PyO3/pyo3/pull/4420)
### Fixed
- Fix compile failure in declarative `#[pymodule]` under presence of `#![no_implicit_prelude]`. [#4328](https://github.com/PyO3/pyo3/pull/4328)
- Fix use of borrowed reference in `PyDict::get_item` (unsafe in free-threaded Python). [#4355](https://github.com/PyO3/pyo3/pull/4355)
- Fix `#[pyclass(eq)]` macro hygiene issues for structs and enums. [#4359](https://github.com/PyO3/pyo3/pull/4359)
- Fix hygiene/span issues of `'#[pyfunction]` and `#[pymethods]` generated code which affected expansion in `macro_rules` context. [#4382](https://github.com/PyO3/pyo3/pull/4382)
- Fix `unsafe_code` lint error in `#[pyclass]` generated code. [#4396](https://github.com/PyO3/pyo3/pull/4396)
- Fix async functions returning a tuple only returning the first element to Python. [#4407](https://github.com/PyO3/pyo3/pull/4407)
- Fix use of borrowed reference in `PyList::get_item` (unsafe in free-threaded Python). [#4410](https://github.com/PyO3/pyo3/pull/4410)
- Correct FFI definition `PyArg_ParseTupleAndKeywords` to take `*const *const c_char` instead of `*mut *mut c_char` on Python 3.13 and up. [#4420](https://github.com/PyO3/pyo3/pull/4420)
- Fix a soundness bug with `PyClassInitializer`: panic if adding subclass to existing instance via `PyClassInitializer::from(Py<BaseClass>).add_subclass(SubClass)`. [#4454](https://github.com/PyO3/pyo3/pull/4454)
- Fix illegal reference counting op inside implementation of `__traverse__` handlers. [#4479](https://github.com/PyO3/pyo3/pull/4479)
## [0.22.2] - 2024-07-17
### Packaging
- Require opt-in to freethreaded Python using the `UNSAFE_PYO3_BUILD_FREE_THREADED=1` environment variable (it is not yet supported by PyO3). [#4327](https://github.com/PyO3/pyo3/pull/4327)
### Changed
- Use FFI function calls for reference counting on all abi3 versions. [#4324](https://github.com/PyO3/pyo3/pull/4324)
- `#[pymodule(...)]` now directly accepts all relevant `#[pyo3(...)]` options. [#4330](https://github.com/PyO3/pyo3/pull/4330)
### Fixed
- Fix compile failure in declarative `#[pymodule]` under presence of `#![no_implicit_prelude]`. [#4328](https://github.com/PyO3/pyo3/pull/4328)
- Fix compile failure due to c-string literals on Rust < 1.79. [#4353](https://github.com/PyO3/pyo3/pull/4353)
## [0.22.1] - 2024-07-06
### Added
- Add `#[pyo3(submodule)]` option for declarative `#[pymodule]`s. [#4301](https://github.com/PyO3/pyo3/pull/4301)
- Implement `PartialEq<bool>` for `Bound<'py, PyBool>`. [#4305](https://github.com/PyO3/pyo3/pull/4305)
### Fixed
- Return `NotImplemented` instead of raising `TypeError` from generated equality method when comparing different types. [#4287](https://github.com/PyO3/pyo3/pull/4287)
- Handle full-path `#[pyo3::prelude::pymodule]` and similar for `#[pyclass]` and `#[pyfunction]` in declarative modules. [#4288](https://github.com/PyO3/pyo3/pull/4288)
- Fix 128-bit int regression on big-endian platforms with Python <3.13. [#4291](https://github.com/PyO3/pyo3/pull/4291)
- Stop generating code that will never be covered with declarative modules. [#4297](https://github.com/PyO3/pyo3/pull/4297)
- Fix invalid deprecation warning for trailing optional on `#[setter]` function. [#4304](https://github.com/PyO3/pyo3/pull/4304)
## [0.22.0] - 2024-06-24
### Packaging
- Update `heck` dependency to 0.5. [#3966](https://github.com/PyO3/pyo3/pull/3966)
- Extend range of supported versions of `chrono-tz` optional dependency to include version 0.10. [#4061](https://github.com/PyO3/pyo3/pull/4061)
- Update MSRV to 1.63. [#4129](https://github.com/PyO3/pyo3/pull/4129)
- Add optional `num-rational` feature to add conversions with Python's `fractions.Fraction`. [#4148](https://github.com/PyO3/pyo3/pull/4148)
- Support Python 3.13. [#4184](https://github.com/PyO3/pyo3/pull/4184)
### Added
- Add `PyWeakref`, `PyWeakrefReference` and `PyWeakrefProxy`. [#3835](https://github.com/PyO3/pyo3/pull/3835)
- Support `#[pyclass]` on enums that have tuple variants. [#4072](https://github.com/PyO3/pyo3/pull/4072)
- Add support for scientific notation in `Decimal` conversion. [#4079](https://github.com/PyO3/pyo3/pull/4079)
- Add `pyo3_disable_reference_pool` conditional compilation flag to avoid the overhead of the global reference pool at the cost of known limitations as explained in the performance section of the guide. [#4095](https://github.com/PyO3/pyo3/pull/4095)
- Add `#[pyo3(constructor = (...))]` to customize the generated constructors for complex enum variants. [#4158](https://github.com/PyO3/pyo3/pull/4158)
- Add `PyType::module`, which always matches Python `__module__`. [#4196](https://github.com/PyO3/pyo3/pull/4196)
- Add `PyType::fully_qualified_name` which matches the "fully qualified name" defined in [PEP 737](https://peps.python.org/pep-0737). [#4196](https://github.com/PyO3/pyo3/pull/4196)
- Add `PyTypeMethods::mro` and `PyTypeMethods::bases`. [#4197](https://github.com/PyO3/pyo3/pull/4197)
- Add `#[pyclass(ord)]` to implement ordering based on `PartialOrd`. [#4202](https://github.com/PyO3/pyo3/pull/4202)
- Implement `ToPyObject` and `IntoPy<PyObject>` for `PyBackedStr` and `PyBackedBytes`. [#4205](https://github.com/PyO3/pyo3/pull/4205)
- Add `#[pyclass(hash)]` option to implement `__hash__` in terms of the `Hash` implementation [#4206](https://github.com/PyO3/pyo3/pull/4206)
- Add `#[pyclass(eq)]` option to generate `__eq__` based on `PartialEq`, and `#[pyclass(eq_int)]` for simple enums to implement equality based on their discriminants. [#4210](https://github.com/PyO3/pyo3/pull/4210)
- Implement `From<Bound<'py, T>>` for `PyClassInitializer<T>`. [#4214](https://github.com/PyO3/pyo3/pull/4214)
- Add `as_super` methods to `PyRef` and `PyRefMut` for accessing the base class by reference. [#4219](https://github.com/PyO3/pyo3/pull/4219)
- Implement `PartialEq<str>` for `Bound<'py, PyString>`. [#4245](https://github.com/PyO3/pyo3/pull/4245)
- Implement `PyModuleMethods::filename` on PyPy. [#4249](https://github.com/PyO3/pyo3/pull/4249)
- Implement `PartialEq<[u8]>` for `Bound<'py, PyBytes>`. [#4250](https://github.com/PyO3/pyo3/pull/4250)
- Add `pyo3_ffi::c_str` macro to create `&'static CStr` on Rust versions which don't have 1.77's `c""` literals. [#4255](https://github.com/PyO3/pyo3/pull/4255)
- Support `bool` conversion with `numpy` 2.0's `numpy.bool` type [#4258](https://github.com/PyO3/pyo3/pull/4258)
- Add `PyAnyMethods::{bitnot, matmul, floor_div, rem, divmod}`. [#4264](https://github.com/PyO3/pyo3/pull/4264)
### Changed
- Change the type of `PySliceIndices::slicelength` and the `length` parameter of `PySlice::indices()`. [#3761](https://github.com/PyO3/pyo3/pull/3761)
- Deprecate implicit default for trailing optional arguments [#4078](https://github.com/PyO3/pyo3/pull/4078)
- `Clone`ing pointers into the Python heap has been moved behind the `py-clone` feature, as it must panic without the GIL being held as a soundness fix. [#4095](https://github.com/PyO3/pyo3/pull/4095)
- Add `#[track_caller]` to all `Py<T>`, `Bound<'py, T>` and `Borrowed<'a, 'py, T>` methods which can panic. [#4098](https://github.com/PyO3/pyo3/pull/4098)
- Change `PyAnyMethods::dir` to be fallible and return `PyResult<Bound<'py, PyList>>` (and similar for `PyAny::dir`). [#4100](https://github.com/PyO3/pyo3/pull/4100)
- The global reference pool (to track pending reference count decrements) is now initialized lazily to avoid the overhead of taking a mutex upon function entry when the functionality is not actually used. [#4178](https://github.com/PyO3/pyo3/pull/4178)
- Emit error messages when using `weakref` or `dict` when compiling for `abi3` for Python older than 3.9. [#4194](https://github.com/PyO3/pyo3/pull/4194)
- Change `PyType::name` to always match Python `__name__`. [#4196](https://github.com/PyO3/pyo3/pull/4196)
- Remove CPython internal ffi call for complex number including: add, sub, mul, div, neg, abs, pow. Added PyAnyMethods::{abs, pos, neg} [#4201](https://github.com/PyO3/pyo3/pull/4201)
- Deprecate implicit integer comparison for simple enums in favor of `#[pyclass(eq_int)]`. [#4210](https://github.com/PyO3/pyo3/pull/4210)
- Set the `module=` attribute of declarative modules' child `#[pymodule]`s and `#[pyclass]`es. [#4213](https://github.com/PyO3/pyo3/pull/4213)
- Set the `module` option for complex enum variants from the value set on the complex enum `module`. [#4228](https://github.com/PyO3/pyo3/pull/4228)
- Respect the Python "limited API" when building for the `abi3` feature on PyPy or GraalPy. [#4237](https://github.com/PyO3/pyo3/pull/4237)
- Optimize code generated by `#[pyo3(get)]` on `#[pyclass]` fields. [#4254](https://github.com/PyO3/pyo3/pull/4254)
- `PyCFunction::new`, `PyCFunction::new_with_keywords` and `PyCFunction::new_closure` now take `&'static CStr` name and doc arguments (previously was `&'static str`). [#4255](https://github.com/PyO3/pyo3/pull/4255)
- The `experimental-declarative-modules` feature is now stabilized and available by default. [#4257](https://github.com/PyO3/pyo3/pull/4257)
### Fixed
- Fix panic when `PYO3_CROSS_LIB_DIR` is set to a missing path. [#4043](https://github.com/PyO3/pyo3/pull/4043)
- Fix a compile error when exporting an exception created with `create_exception!` living in a different Rust module using the `declarative-module` feature. [#4086](https://github.com/PyO3/pyo3/pull/4086)
- Fix FFI definitions of `PY_VECTORCALL_ARGUMENTS_OFFSET` and `PyVectorcall_NARGS` to fix a false-positive assertion. [#4104](https://github.com/PyO3/pyo3/pull/4104)
- Disable `PyUnicode_DATA` on PyPy: not exposed by PyPy. [#4116](https://github.com/PyO3/pyo3/pull/4116)
- Correctly handle `#[pyo3(from_py_with = ...)]` attribute on dunder (`__magic__`) method arguments instead of silently ignoring it. [#4117](https://github.com/PyO3/pyo3/pull/4117)
- Fix a compile error when declaring a standalone function or class method with a Python name that is a Rust keyword. [#4226](https://github.com/PyO3/pyo3/pull/4226)
- Fix declarative modules discarding doc comments on the `mod` node. [#4236](https://github.com/PyO3/pyo3/pull/4236)
- Fix `__dict__` attribute missing for `#[pyclass(dict)]` instances when building for `abi3` on Python 3.9. [#4251](https://github.com/PyO3/pyo3/pull/4251)
## [0.21.2] - 2024-04-16
### Changed
- Deprecate the `PySet::empty()` gil-ref constructor. [#4082](https://github.com/PyO3/pyo3/pull/4082)
### Fixed
- Fix compile error for `async fn` in `#[pymethods]` with a `&self` receiver and more than one additional argument. [#4035](https://github.com/PyO3/pyo3/pull/4035)
- Improve error message for wrong receiver type in `__traverse__`. [#4045](https://github.com/PyO3/pyo3/pull/4045)
- Fix compile error when exporting a `#[pyclass]` living in a different Rust module using the `experimental-declarative-modules` feature. [#4054](https://github.com/PyO3/pyo3/pull/4054)
- Fix `missing_docs` lint triggering on documented `#[pymodule]` functions. [#4067](https://github.com/PyO3/pyo3/pull/4067)
- Fix undefined symbol errors for extension modules on AIX (by linking `libpython`). [#4073](https://github.com/PyO3/pyo3/pull/4073)
## [0.21.1] - 2024-04-01
### Added
- Implement `Send` and `Sync` for `PyBackedStr` and `PyBackedBytes`. [#4007](https://github.com/PyO3/pyo3/pull/4007)
- Implement `Clone`, `Debug`, `PartialEq`, `Eq`, `PartialOrd`, `Ord` and `Hash` implementation for `PyBackedBytes` and `PyBackedStr`, and `Display` for `PyBackedStr`. [#4020](https://github.com/PyO3/pyo3/pull/4020)
- Add `import_exception_bound!` macro to import exception types without generating GIL Ref functionality for them. [#4027](https://github.com/PyO3/pyo3/pull/4027)
### Changed
- Emit deprecation warning for uses of GIL Refs as `#[setter]` function arguments. [#3998](https://github.com/PyO3/pyo3/pull/3998)
- Add `#[inline]` hints on many `Bound` and `Borrowed` methods. [#4024](https://github.com/PyO3/pyo3/pull/4024)
### Fixed
- Handle `#[pyo3(from_py_with = "")]` in `#[setter]` methods [#3995](https://github.com/PyO3/pyo3/pull/3995)
- Allow extraction of `&Bound` in `#[setter]` methods. [#3998](https://github.com/PyO3/pyo3/pull/3998)
- Fix some uncovered code blocks emitted by `#[pymodule]`, `#[pyfunction]` and `#[pyclass]` macros. [#4009](https://github.com/PyO3/pyo3/pull/4009)
- Fix typo in the panic message when a class referenced in `pyo3::import_exception!` does not exist. [#4012](https://github.com/PyO3/pyo3/pull/4012)
- Fix compile error when using an async `#[pymethod]` with a receiver and additional arguments. [#4015](https://github.com/PyO3/pyo3/pull/4015)
## [0.21.0] - 2024-03-25
### Added
- Add support for GraalPy (24.0 and up). [#3247](https://github.com/PyO3/pyo3/pull/3247)
- Add `PyMemoryView` type. [#3514](https://github.com/PyO3/pyo3/pull/3514)
- Allow `async fn` in for `#[pyfunction]` and `#[pymethods]`, with the `experimental-async` feature. [#3540](https://github.com/PyO3/pyo3/pull/3540) [#3588](https://github.com/PyO3/pyo3/pull/3588) [#3599](https://github.com/PyO3/pyo3/pull/3599) [#3931](https://github.com/PyO3/pyo3/pull/3931)
- Implement `PyTypeInfo` for `PyEllipsis`, `PyNone` and `PyNotImplemented`. [#3577](https://github.com/PyO3/pyo3/pull/3577)
- Support `#[pyclass]` on enums that have non-unit variants. [#3582](https://github.com/PyO3/pyo3/pull/3582)
- Support `chrono` feature with `abi3` feature. [#3664](https://github.com/PyO3/pyo3/pull/3664)
- `FromPyObject`, `IntoPy<PyObject>` and `ToPyObject` are implemented on `std::duration::Duration` [#3670](https://github.com/PyO3/pyo3/pull/3670)
- Add `PyString::to_cow`. Add `Py<PyString>::to_str`, `Py<PyString>::to_cow`, and `Py<PyString>::to_string_lossy`, as ways to access Python string data safely beyond the GIL lifetime. [#3677](https://github.com/PyO3/pyo3/pull/3677)
- Add `Bound<T>` and `Borrowed<T>` smart pointers as a new API for accessing Python objects. [#3686](https://github.com/PyO3/pyo3/pull/3686)
- Add `PyNativeType::as_borrowed` to convert "GIL refs" to the new `Bound` smart pointer. [#3692](https://github.com/PyO3/pyo3/pull/3692)
- Add `FromPyObject::extract_bound` method, to migrate `FromPyObject` implementations to the Bound API. [#3706](https://github.com/PyO3/pyo3/pull/3706)
- Add `gil-refs` feature to allow continued use of the deprecated GIL Refs APIs. [#3707](https://github.com/PyO3/pyo3/pull/3707)
- Add methods to `PyAnyMethods` for binary operators (`add`, `sub`, etc.) [#3712](https://github.com/PyO3/pyo3/pull/3712)
- Add `chrono-tz` feature allowing conversion between `chrono_tz::Tz` and `zoneinfo.ZoneInfo` [#3730](https://github.com/PyO3/pyo3/pull/3730)
- Add FFI definition `PyType_GetModuleByDef`. [#3734](https://github.com/PyO3/pyo3/pull/3734)
- Conversion between `std::time::SystemTime` and `datetime.datetime` [#3736](https://github.com/PyO3/pyo3/pull/3736)
- Add `Py::as_any` and `Py::into_any`. [#3785](https://github.com/PyO3/pyo3/pull/3785)
- Add `PyStringMethods::encode_utf8`. [#3801](https://github.com/PyO3/pyo3/pull/3801)
- Add `PyBackedStr` and `PyBackedBytes`, as alternatives to `&str` and `&bytes` where a Python object owns the data. [#3802](https://github.com/PyO3/pyo3/pull/3802) [#3991](https://github.com/PyO3/pyo3/pull/3991)
- Allow `#[pymodule]` macro on Rust `mod` blocks, with the `experimental-declarative-modules` feature. [#3815](https://github.com/PyO3/pyo3/pull/3815)
- Implement `ExactSizeIterator` for `set` and `frozenset` iterators on `abi3` feature. [#3849](https://github.com/PyO3/pyo3/pull/3849)
- Add `Py::drop_ref` to explicitly drop a `Py` and immediately decrease the Python reference count if the GIL is already held. [#3871](https://github.com/PyO3/pyo3/pull/3871)
- Allow `#[pymodule]` macro on single argument functions that take `&Bound<'_, PyModule>`. [#3905](https://github.com/PyO3/pyo3/pull/3905)
- Implement `FromPyObject` for `Cow<str>`. [#3928](https://github.com/PyO3/pyo3/pull/3928)
- Implement `Default` for `GILOnceCell`. [#3971](https://github.com/PyO3/pyo3/pull/3971)
- Add `PyDictMethods::into_mapping`, `PyListMethods::into_sequence` and `PyTupleMethods::into_sequence`. [#3982](https://github.com/PyO3/pyo3/pull/3982)
### Changed
- `PyDict::from_sequence` now takes a single argument of type `&PyAny` (previously took two arguments `Python` and `PyObject`). [#3532](https://github.com/PyO3/pyo3/pull/3532)
- Deprecate `Py::is_ellipsis` and `PyAny::is_ellipsis` in favour of `any.is(py.Ellipsis())`. [#3577](https://github.com/PyO3/pyo3/pull/3577)
- Split some `PyTypeInfo` functionality into new traits `HasPyGilRef` and `PyTypeCheck`. [#3600](https://github.com/PyO3/pyo3/pull/3600)
- Deprecate `PyTryFrom` and `PyTryInto` traits in favor of `any.downcast()` via the `PyTypeCheck` and `PyTypeInfo` traits. [#3601](https://github.com/PyO3/pyo3/pull/3601)
- Allow async methods to accept `&self`/`&mut self` [#3609](https://github.com/PyO3/pyo3/pull/3609)
- `FromPyObject` for set types now also accept `frozenset` objects as input. [#3632](https://github.com/PyO3/pyo3/pull/3632)
- `FromPyObject` for `bool` now also accepts NumPy's `bool_` as input. [#3638](https://github.com/PyO3/pyo3/pull/3638)
- Add `AsRefSource` associated type to `PyNativeType`. [#3653](https://github.com/PyO3/pyo3/pull/3653)
- Rename `.is_true` to `.is_truthy` on `PyAny` and `Py<PyAny>` to clarify that the test is not based on identity with or equality to the True singleton. [#3657](https://github.com/PyO3/pyo3/pull/3657)
- `PyType::name` is now `PyType::qualname` whereas `PyType::name` efficiently accesses the full name which includes the module name. [#3660](https://github.com/PyO3/pyo3/pull/3660)
- The `Iter(A)NextOutput` types are now deprecated and `__(a)next__` can directly return anything which can be converted into Python objects, i.e. awaitables do not need to be wrapped into `IterANextOutput` or `Option` any more. `Option` can still be used as well and returning `None` will trigger the fast path for `__next__`, stopping iteration without having to raise a `StopIteration` exception. [#3661](https://github.com/PyO3/pyo3/pull/3661)
- Implement `FromPyObject` on `chrono::DateTime<Tz>` for all `Tz`, not just `FixedOffset` and `Utc`. [#3663](https://github.com/PyO3/pyo3/pull/3663)
- Add lifetime parameter to `PyTzInfoAccess` trait. For the deprecated gil-ref API, the trait is now implemented for `&'py PyTime` and `&'py PyDateTime` instead of `PyTime` and `PyDate`. [#3679](https://github.com/PyO3/pyo3/pull/3679)
- Calls to `__traverse__` become no-ops for unsendable pyclasses if on the wrong thread, thereby avoiding hard aborts at the cost of potential leakage. [#3689](https://github.com/PyO3/pyo3/pull/3689)
- Include `PyNativeType` in `pyo3::prelude`. [#3692](https://github.com/PyO3/pyo3/pull/3692)
- Improve performance of `extract::<i64>` (and other integer types) by avoiding call to `__index__()` converting the value to an integer for 3.10+. Gives performance improvement of around 30% for successful extraction. [#3742](https://github.com/PyO3/pyo3/pull/3742)
- Relax bound of `FromPyObject` for `Py<T>` to just `T: PyTypeCheck`. [#3776](https://github.com/PyO3/pyo3/pull/3776)
- `PySet` and `PyFrozenSet` iterators now always iterate the equivalent of `iter(set)`. (A "fast path" with no noticeable performance benefit was removed.) [#3849](https://github.com/PyO3/pyo3/pull/3849)
- Move implementations of `FromPyObject` for `&str`, `Cow<str>`, `&[u8]` and `Cow<[u8]>` onto a temporary trait `FromPyObjectBound` when `gil-refs` feature is deactivated. [#3928](https://github.com/PyO3/pyo3/pull/3928)
- Deprecate `GILPool`, `Python::with_pool`, and `Python::new_pool`. [#3947](https://github.com/PyO3/pyo3/pull/3947)
### Removed
- Remove all functionality deprecated in PyO3 0.19. [#3603](https://github.com/PyO3/pyo3/pull/3603)
### Fixed
- Match PyPy 7.3.14 in removing PyPy-only symbol `Py_MAX_NDIMS` in favour of `PyBUF_MAX_NDIM`. [#3757](https://github.com/PyO3/pyo3/pull/3757)
- Fix segmentation fault using `datetime` types when an invalid `datetime` module is on sys.path. [#3818](https://github.com/PyO3/pyo3/pull/3818)
- Fix `non_local_definitions` lint warning triggered by many PyO3 macros. [#3901](https://github.com/PyO3/pyo3/pull/3901)
- Disable `PyCode` and `PyCode_Type` on PyPy: `PyCode_Type` is not exposed by PyPy. [#3934](https://github.com/PyO3/pyo3/pull/3934)
## [0.21.0-beta.0] - 2024-03-10
Prerelease of PyO3 0.21. See [the GitHub diff](https://github.com/pyo3/pyo3/compare/v0.21.0-beta.0...v0.21.0) for what changed between 0.21.0-beta.0 and the final release.
## [0.20.3] - 2024-02-23
### Packaging
- Add `portable-atomic` dependency. [#3619](https://github.com/PyO3/pyo3/pull/3619)
- Check maximum version of Python at build time and for versions not yet supported require opt-in to the `abi3` stable ABI by the environment variable `PYO3_USE_ABI3_FORWARD_COMPATIBILITY=1`. [#3821](https://github.com/PyO3/pyo3/pull/3821)
### Fixed
- Use `portable-atomic` to support platforms without 64-bit atomics. [#3619](https://github.com/PyO3/pyo3/pull/3619)
- Fix compilation failure with `either` feature enabled without `experimental-inspect` enabled. [#3834](https://github.com/PyO3/pyo3/pull/3834)
## [0.20.2] - 2024-01-04
### Packaging
- Pin `pyo3` and `pyo3-ffi` dependencies on `pyo3-build-config` to require the same patch version, i.e. `pyo3` 0.20.2 requires _exactly_ `pyo3-build-config` 0.20.2. [#3721](https://github.com/PyO3/pyo3/pull/3721)
### Fixed
- Fix compile failure when building `pyo3` 0.20.0 with latest `pyo3-build-config` 0.20.X. [#3724](https://github.com/PyO3/pyo3/pull/3724)
- Fix docs.rs build. [#3722](https://github.com/PyO3/pyo3/pull/3722)
## [0.20.1] - 2023-12-30
### Added
- Add optional `either` feature to add conversions for `either::Either<L, R>` sum type. [#3456](https://github.com/PyO3/pyo3/pull/3456)
- Add optional `smallvec` feature to add conversions for `smallvec::SmallVec`. [#3507](https://github.com/PyO3/pyo3/pull/3507)
- Add `take` and `into_inner` methods to `GILOnceCell` [#3556](https://github.com/PyO3/pyo3/pull/3556)
- `#[classmethod]` methods can now also receive `Py<PyType>` as their first argument. [#3587](https://github.com/PyO3/pyo3/pull/3587)
- `#[pyfunction(pass_module)]` can now also receive `Py<PyModule>` as their first argument. [#3587](https://github.com/PyO3/pyo3/pull/3587)
- Add `traverse` method to `GILProtected`. [#3616](https://github.com/PyO3/pyo3/pull/3616)
- Added `abi3-py312` feature [#3687](https://github.com/PyO3/pyo3/pull/3687)
### Fixed
- Fix minimum version specification for optional `chrono` dependency. [#3512](https://github.com/PyO3/pyo3/pull/3512)
- Silenced new `clippy::unnecessary_fallible_conversions` warning when using a `Py<Self>` `self` receiver. [#3564](https://github.com/PyO3/pyo3/pull/3564)
## [0.20.0] - 2023-10-11
### Packaging
- Dual-license PyO3 under either the Apache 2.0 OR the MIT license. This makes the project GPLv2 compatible. [#3108](https://github.com/PyO3/pyo3/pull/3108)
- Update MSRV to Rust 1.56. [#3208](https://github.com/PyO3/pyo3/pull/3208)
- Bump `indoc` dependency to 2.0 and `unindent` dependency to 0.2. [#3237](https://github.com/PyO3/pyo3/pull/3237)
- Bump `syn` dependency to 2.0. [#3239](https://github.com/PyO3/pyo3/pull/3239)
- Drop support for debug builds of Python 3.7. [#3387](https://github.com/PyO3/pyo3/pull/3387)
- Bump `chrono` optional dependency to require 0.4.25 or newer. [#3427](https://github.com/PyO3/pyo3/pull/3427)
- Support Python 3.12. [#3488](https://github.com/PyO3/pyo3/pull/3488)
### Added
- Support `__lt__`, `__le__`, `__eq__`, `__ne__`, `__gt__` and `__ge__` in `#[pymethods]`. [#3203](https://github.com/PyO3/pyo3/pull/3203)
- Add FFI definition `Py_GETENV`. [#3336](https://github.com/PyO3/pyo3/pull/3336)
- Add `as_ptr` and `into_ptr` inherent methods for `Py`, `PyAny`, `PyRef`, and `PyRefMut`. [#3359](https://github.com/PyO3/pyo3/pull/3359)
- Implement `DoubleEndedIterator` for `PyTupleIterator` and `PyListIterator`. [#3366](https://github.com/PyO3/pyo3/pull/3366)
- Add `#[pyclass(rename_all = "...")]` option: this allows renaming all getters and setters of a struct, or all variants of an enum. Available renaming rules are: `"camelCase"`, `"kebab-case"`, `"lowercase"`, `"PascalCase"`, `"SCREAMING-KEBAB-CASE"`, `"SCREAMING_SNAKE_CASE"`, `"snake_case"`, `"UPPERCASE"`. [#3384](https://github.com/PyO3/pyo3/pull/3384)
- Add FFI definitions `PyObject_GC_IsTracked` and `PyObject_GC_IsFinalized` on Python 3.9 and up (PyPy 3.10 and up). [#3403](https://github.com/PyO3/pyo3/pull/3403)
- Add types for `None`, `Ellipsis`, and `NotImplemented`. [#3408](https://github.com/PyO3/pyo3/pull/3408)
- Add FFI definitions for the `Py_mod_multiple_interpreters` constant and its possible values. [#3494](https://github.com/PyO3/pyo3/pull/3494)
- Add FFI definitions for `PyInterpreterConfig` struct, its constants and `Py_NewInterpreterFromConfig`. [#3502](https://github.com/PyO3/pyo3/pull/3502)
### Changed
- Change `PySet::discard` to return `PyResult<bool>` (previously returned nothing). [#3281](https://github.com/PyO3/pyo3/pull/3281)
- Optimize implementation of `IntoPy` for Rust tuples to Python tuples. [#3321](https://github.com/PyO3/pyo3/pull/3321)
- Change `PyDict::get_item` to no longer suppress arbitrary exceptions (the return type is now `PyResult<Option<&PyAny>>` instead of `Option<&PyAny>`), and deprecate `PyDict::get_item_with_error`. [#3330](https://github.com/PyO3/pyo3/pull/3330)
- Deprecate FFI definitions which are deprecated in Python 3.12. [#3336](https://github.com/PyO3/pyo3/pull/3336)
- `AsPyPointer` is now an `unsafe trait`. [#3358](https://github.com/PyO3/pyo3/pull/3358)
- Accept all `os.PathLike` values in implementation of `FromPyObject` for `PathBuf`. [#3374](https://github.com/PyO3/pyo3/pull/3374)
- Add `__builtins__` to globals in `py.run()` and `py.eval()` if they're missing. [#3378](https://github.com/PyO3/pyo3/pull/3378)
- Optimize implementation of `FromPyObject` for `BigInt` and `BigUint`. [#3379](https://github.com/PyO3/pyo3/pull/3379)
- `PyIterator::from_object` and `PyByteArray::from` now take a single argument of type `&PyAny` (previously took two arguments `Python` and `AsPyPointer`). [#3389](https://github.com/PyO3/pyo3/pull/3389)
- Replace `AsPyPointer` with `AsRef<PyAny>` as a bound in the blanket implementation of `From<&T> for PyObject`. [#3391](https://github.com/PyO3/pyo3/pull/3391)
- Replace blanket `impl IntoPy<PyObject> for &T where T: AsPyPointer` with implementations of `impl IntoPy<PyObject>` for `&PyAny`, `&T where T: AsRef<PyAny>`, and `&Py<T>`. [#3393](https://github.com/PyO3/pyo3/pull/3393)
- Preserve `std::io::Error` kind in implementation of `From<std::io::IntoInnerError>` for `PyErr` [#3396](https://github.com/PyO3/pyo3/pull/3396)
- Try to select a relevant `ErrorKind` in implementation of `From<PyErr>` for `OSError` subclass. [#3397](https://github.com/PyO3/pyo3/pull/3397)
- Retrieve the original `PyErr` in implementation of `From<std::io::Error>` for `PyErr` if the `std::io::Error` has been built using a Python exception (previously would create a new exception wrapping the `std::io::Error`). [#3402](https://github.com/PyO3/pyo3/pull/3402)
- `#[pymodule]` will now return the same module object on repeated import by the same Python interpreter, on Python 3.9 and up. [#3446](https://github.com/PyO3/pyo3/pull/3446)
- Truncate leap-seconds and warn when converting `chrono` types to Python `datetime` types (`datetime` cannot represent leap-seconds). [#3458](https://github.com/PyO3/pyo3/pull/3458)
- `Err` returned from `#[pyfunction]` will now have a non-None `__context__` if called from inside a `catch` block. [#3455](https://github.com/PyO3/pyo3/pull/3455)
- Deprecate undocumented `#[__new__]` form of `#[new]` attribute. [#3505](https://github.com/PyO3/pyo3/pull/3505)
### Removed
- Remove all functionality deprecated in PyO3 0.18, including `#[args]` attribute for `#[pymethods]`. [#3232](https://github.com/PyO3/pyo3/pull/3232)
- Remove `IntoPyPointer` trait in favour of `into_ptr` inherent methods. [#3385](https://github.com/PyO3/pyo3/pull/3385)
### Fixed
- Handle exceptions properly in `PySet::discard`. [#3281](https://github.com/PyO3/pyo3/pull/3281)
- The `PyTupleIterator` type returned by `PyTuple::iter` is now public and hence can be named by downstream crates. [#3366](https://github.com/PyO3/pyo3/pull/3366)
- Linking of `PyOS_FSPath` on PyPy. [#3374](https://github.com/PyO3/pyo3/pull/3374)
- Fix memory leak in `PyTypeBuilder::build`. [#3401](https://github.com/PyO3/pyo3/pull/3401)
- Disable removed FFI definitions `_Py_GetAllocatedBlocks`, `_PyObject_GC_Malloc`, and `_PyObject_GC_Calloc` on Python 3.11 and up. [#3403](https://github.com/PyO3/pyo3/pull/3403)
- Fix `ResourceWarning` and crashes related to GC when running with debug builds of CPython. [#3404](https://github.com/PyO3/pyo3/pull/3404)
- Some-wrapping of `Option<T>` default arguments will no longer re-wrap `Some(T)` or expressions evaluating to `None`. [#3461](https://github.com/PyO3/pyo3/pull/3461)
- Fix `IterNextOutput::Return` not returning a value on PyPy. [#3471](https://github.com/PyO3/pyo3/pull/3471)
- Emit compile errors instead of ignoring macro invocations inside `#[pymethods]` blocks. [#3491](https://github.com/PyO3/pyo3/pull/3491)
- Emit error on invalid arguments to `#[new]`, `#[classmethod]`, `#[staticmethod]`, and `#[classattr]`. [#3484](https://github.com/PyO3/pyo3/pull/3484)
- Disable `PyMarshal_WriteObjectToString` through `PyMarshal_ReadObjectFromString` with the `abi3` feature. [#3490](https://github.com/PyO3/pyo3/pull/3490)
- Fix FFI definitions for `_PyFrameEvalFunction` on Python 3.11 and up (it now receives a `_PyInterpreterFrame` opaque struct). [#3500](https://github.com/PyO3/pyo3/pull/3500)
## [0.19.2] - 2023-08-01
### Added
- Add FFI definitions `PyState_AddModule`, `PyState_RemoveModule` and `PyState_FindModule` for PyPy 3.9 and up. [#3295](https://github.com/PyO3/pyo3/pull/3295)
- Add FFI definitions `_PyObject_CallFunction_SizeT` and `_PyObject_CallMethod_SizeT`. [#3297](https://github.com/PyO3/pyo3/pull/3297)
- Add a "performance" section to the guide collecting performance-related tricks and problems. [#3304](https://github.com/PyO3/pyo3/pull/3304)
- Add `PyErr::Display` for all Python versions, and FFI symbol `PyErr_DisplayException` for Python 3.12. [#3334](https://github.com/PyO3/pyo3/pull/3334)
- Add FFI definition `PyType_GetDict()` for Python 3.12. [#3339](https://github.com/PyO3/pyo3/pull/3339)
- Add `PyAny::downcast_exact`. [#3346](https://github.com/PyO3/pyo3/pull/3346)
- Add `PySlice::full()` to construct a full slice (`::`). [#3353](https://github.com/PyO3/pyo3/pull/3353)
### Changed
- Update `PyErr` for 3.12 betas to avoid deprecated ffi methods. [#3306](https://github.com/PyO3/pyo3/pull/3306)
- Update FFI definitions of `object.h` for Python 3.12.0b4. [#3335](https://github.com/PyO3/pyo3/pull/3335)
- Update `pyo3::ffi` struct definitions to be compatible with 3.12.0b4. [#3342](https://github.com/PyO3/pyo3/pull/3342)
- Optimize conversion of `float` to `f64` (and `PyFloat::value`) on non-abi3 builds. [#3345](https://github.com/PyO3/pyo3/pull/3345)
### Fixed
- Fix timezone conversion bug for FixedOffset datetimes that were being incorrectly converted to and from UTC. [#3269](https://github.com/PyO3/pyo3/pull/3269)
- Fix `SystemError` raised in `PyUnicodeDecodeError_Create` on PyPy 3.10. [#3297](https://github.com/PyO3/pyo3/pull/3297)
- Correct FFI definition `Py_EnterRecursiveCall` to return `c_int` (was incorrectly returning `()`). [#3300](https://github.com/PyO3/pyo3/pull/3300)
- Fix case where `PyErr::matches` and `PyErr::is_instance` returned results inconsistent with `PyErr::get_type`. [#3313](https://github.com/PyO3/pyo3/pull/3313)
- Fix loss of panic message in `PanicException` when unwinding after the exception was "normalized". [#3326](https://github.com/PyO3/pyo3/pull/3326)
- Fix `PyErr::from_value` and `PyErr::into_value` losing traceback on conversion. [#3328](https://github.com/PyO3/pyo3/pull/3328)
- Fix reference counting of immortal objects on Python 3.12.0b4. [#3335](https://github.com/PyO3/pyo3/pull/3335)
## [0.19.1] - 2023-07-03
### Packaging
- Extend range of supported versions of `hashbrown` optional dependency to include version 0.14 [#3258](https://github.com/PyO3/pyo3/pull/3258)
- Extend range of supported versions of `indexmap` optional dependency to include version 2. [#3277](https://github.com/PyO3/pyo3/pull/3277)
- Support PyPy 3.10. [#3289](https://github.com/PyO3/pyo3/pull/3289)
### Added
- Add `pyo3::types::PyFrozenSetBuilder` to allow building a `PyFrozenSet` item by item. [#3156](https://github.com/PyO3/pyo3/pull/3156)
- Add support for converting to and from Python's `ipaddress.IPv4Address`/`ipaddress.IPv6Address` and `std::net::IpAddr`. [#3197](https://github.com/PyO3/pyo3/pull/3197)
- Add support for `num-bigint` feature in combination with `abi3`. [#3198](https://github.com/PyO3/pyo3/pull/3198)
- Add `PyErr_GetRaisedException()`, `PyErr_SetRaisedException()` to FFI definitions for Python 3.12 and later. [#3248](https://github.com/PyO3/pyo3/pull/3248)
- Add `Python::with_pool` which is a safer but more limited alternative to `Python::new_pool`. [#3263](https://github.com/PyO3/pyo3/pull/3263)
- Add `PyDict::get_item_with_error` on PyPy. [#3270](https://github.com/PyO3/pyo3/pull/3270)
- Allow `#[new]` methods to return `Py<Self>` in order to return existing instances. [#3287](https://github.com/PyO3/pyo3/pull/3287)
### Fixed
- Fix conversion of classes implementing `__complex__` to `Complex` when using `abi3` or PyPy. [#3185](https://github.com/PyO3/pyo3/pull/3185)
- Stop suppressing unrelated exceptions in `PyAny::hasattr`. [#3271](https://github.com/PyO3/pyo3/pull/3271)
- Fix memory leak when creating `PySet` or `PyFrozenSet` or returning types converted into these internally, e.g. `HashSet` or `BTreeSet`. [#3286](https://github.com/PyO3/pyo3/pull/3286)
## [0.19.0] - 2023-05-31
### Packaging
- Correct dependency on syn to version 1.0.85 instead of the incorrect version 1.0.56. [#3152](https://github.com/PyO3/pyo3/pull/3152)
### Added
- Accept `text_signature` option (and automatically generate signature) for `#[new]` in `#[pymethods]`. [#2980](https://github.com/PyO3/pyo3/pull/2980)
- Add support for converting to and from Python's `decimal.Decimal` and `rust_decimal::Decimal`. [#3016](https://github.com/PyO3/pyo3/pull/3016)
- Add `#[pyo3(from_item_all)]` when deriving `FromPyObject` to specify `get_item` as getter for all fields. [#3120](https://github.com/PyO3/pyo3/pull/3120)
- Add `pyo3::exceptions::PyBaseExceptionGroup` for Python 3.11, and corresponding FFI definition `PyExc_BaseExceptionGroup`. [#3141](https://github.com/PyO3/pyo3/pull/3141)
- Accept `#[new]` with `#[classmethod]` to create a constructor which receives a (subtype's) class/`PyType` as its first argument. [#3157](https://github.com/PyO3/pyo3/pull/3157)
- Add `PyClass::get` and `Py::get` for GIL-independent access to classes with `#[pyclass(frozen)]`. [#3158](https://github.com/PyO3/pyo3/pull/3158)
- Add `PyAny::is_exact_instance` and `PyAny::is_exact_instance_of`. [#3161](https://github.com/PyO3/pyo3/pull/3161)
### Changed
- `PyAny::is_instance_of::<T>(obj)` is now equivalent to `T::is_type_of(obj)`, and now returns `bool` instead of `PyResult<bool>`. [#2881](https://github.com/PyO3/pyo3/pull/2881)
- Deprecate `text_signature` option on `#[pyclass]` structs. [#2980](https://github.com/PyO3/pyo3/pull/2980)
- No longer wrap `anyhow::Error`/`eyre::Report` containing a basic `PyErr` without a chain in a `PyRuntimeError`. [#3004](https://github.com/PyO3/pyo3/pull/3004)
- Change `#[getter]` and `#[setter]` to use a common call "trampoline" to slightly reduce generated code size and compile times. [#3029](https://github.com/PyO3/pyo3/pull/3029)
- Improve default values for str, numbers and bool in automatically-generated `text_signature`. [#3050](https://github.com/PyO3/pyo3/pull/3050)
- Improve default value for `None` in automatically-generated `text_signature`. [#3066](https://github.com/PyO3/pyo3/pull/3066)
- Rename `PySequence::list` and `PySequence::tuple` to `PySequence::to_list` and `PySequence::to_tuple`. (The old names continue to exist as deprecated forms.) [#3111](https://github.com/PyO3/pyo3/pull/3111)
- Extend the lifetime of the GIL token returned by `PyRef::py` and `PyRefMut::py` to match the underlying borrow. [#3131](https://github.com/PyO3/pyo3/pull/3131)
- Safe access to the GIL, for example via `Python::with_gil`, is now locked inside of implementations of the `__traverse__` slot. [#3168](https://github.com/PyO3/pyo3/pull/3168)
### Removed
- Remove all functionality deprecated in PyO3 0.17, most prominently `Python::acquire_gil` is replaced by `Python::with_gil`. [#2981](https://github.com/PyO3/pyo3/pull/2981)
### Fixed
- Correct FFI definitions `PyGetSetDef`, `PyMemberDef`, `PyStructSequence_Field` and `PyStructSequence_Desc` to have `*const c_char` members for `name` and `doc` (not `*mut c_char`). [#3036](https://github.com/PyO3/pyo3/pull/3036)
- Fix panic on `fmt::Display`, instead return `"<unprintable object>"` string and report error via `sys.unraisablehook()` [#3062](https://github.com/PyO3/pyo3/pull/3062)
- Fix a compile error of "temporary value dropped while borrowed" when `#[pyfunction]`s take references into `#[pyclass]`es [#3142](https://github.com/PyO3/pyo3/pull/3142)
- Fix crashes caused by PyO3 applying deferred reference count updates when entering a `__traverse__` implementation. [#3168](https://github.com/PyO3/pyo3/pull/3168)
- Forbid running the `Drop` implementations of unsendable classes on other threads. [#3176](https://github.com/PyO3/pyo3/pull/3176)
- Fix a compile error when `#[pymethods]` items come from somewhere else (for example, as a macro argument) and a custom receiver like `Py<Self>` is used. [#3178](https://github.com/PyO3/pyo3/pull/3178)
## [0.18.3] - 2023-04-13
### Added
- Add `GILProtected<T>` to mediate concurrent access to a value using Python's global interpreter lock (GIL). [#2975](https://github.com/PyO3/pyo3/pull/2975)
- Support `PyASCIIObject` / `PyUnicode` and associated methods on big-endian architectures. [#3015](https://github.com/PyO3/pyo3/pull/3015)
- Add FFI definition `_PyDict_Contains_KnownHash()` for CPython 3.10 and up. [#3088](https://github.com/PyO3/pyo3/pull/3088)
### Fixed
- Fix compile error for `#[pymethods]` and `#[pyfunction]` called "output". [#3022](https://github.com/PyO3/pyo3/pull/3022)
- Fix compile error in generated code for magic methods implemented as a `#[staticmethod]`. [#3055](https://github.com/PyO3/pyo3/pull/3055)
- Fix `is_instance` for `PyDateTime` (would incorrectly check for a `PyDate`). [#3071](https://github.com/PyO3/pyo3/pull/3071)
- Fix upstream deprecation of `PyUnicode_InternImmortal` since Python 3.10. [#3087](https://github.com/PyO3/pyo3/pull/3087)
## [0.18.2] - 2023-03-24
### Packaging
- Disable default features of `chrono` to avoid depending on `time` v0.1.x. [#2939](https://github.com/PyO3/pyo3/pull/2939)
### Added
- Implement `IntoPy<PyObject>`, `ToPyObject` and `FromPyObject` for `Cow<[u8]>` to efficiently handle both `bytes` and `bytearray` objects. [#2899](https://github.com/PyO3/pyo3/pull/2899)
- Implement `IntoPy<PyObject>`, `ToPyObject` and `FromPyObject` for `Cell<T>`. [#3014](https://github.com/PyO3/pyo3/pull/3014)
- Add `PyList::to_tuple()`, as a convenient and efficient conversion from lists to tuples. [#3042](https://github.com/PyO3/pyo3/pull/3042)
- Add `PyTuple::to_list()`, as a convenient and efficient conversion from tuples to lists. [#3044](https://github.com/PyO3/pyo3/pull/3044)
### Changed
- Optimize `PySequence` conversion for `list` and `tuple` inputs. [#2944](https://github.com/PyO3/pyo3/pull/2944)
- Improve exception raised when creating `#[pyclass]` type object fails during module import. [#2947](https://github.com/PyO3/pyo3/pull/2947)
- Optimize `PyMapping` conversion for `dict` inputs. [#2954](https://github.com/PyO3/pyo3/pull/2954)
- Allow `create_exception!` to take a `dotted.module` to place the exception in a submodule. [#2979](https://github.com/PyO3/pyo3/pull/2979)
### Fixed
- Fix a reference counting race condition affecting `PyObject`s cloned in `allow_threads` blocks. [#2952](https://github.com/PyO3/pyo3/pull/2952)
- Fix `clippy::redundant_closure` lint on default arguments in `#[pyo3(signature = (...))]` annotations. [#2990](https://github.com/PyO3/pyo3/pull/2990)
- Fix `non_snake_case` lint on generated code in `#[pyfunction]` macro. [#2993](https://github.com/PyO3/pyo3/pull/2993)
- Fix some FFI definitions for the upcoming PyPy 3.10 release. [#3031](https://github.com/PyO3/pyo3/pull/3031)
## [0.18.1] - 2023-02-07
### Added
- Add `PyErr::write_unraisable()`. [#2889](https://github.com/PyO3/pyo3/pull/2889)
- Add `Python::Ellipsis()` and `PyAny::is_ellipsis()` methods. [#2911](https://github.com/PyO3/pyo3/pull/2911)
- Add `PyDict::update()` and `PyDict::update_if_missing()` methods. [#2912](https://github.com/PyO3/pyo3/pull/2912)
### Changed
- FFI definition `PyIter_Check` on CPython 3.7 is now implemented as `hasattr(type(obj), "__next__")`, which works correctly on all platforms and adds support for `abi3`. [#2914](https://github.com/PyO3/pyo3/pull/2914)
- Warn about unknown config keys in `PYO3_CONFIG_FILE` instead of denying. [#2926](https://github.com/PyO3/pyo3/pull/2926)
### Fixed
- Send errors returned by `__releasebuffer__` to `sys.unraisablehook` rather than causing `SystemError`. [#2886](https://github.com/PyO3/pyo3/pull/2886)
- Fix downcast to `PyIterator` succeeding for Python classes which did not implement `__next__`. [#2914](https://github.com/PyO3/pyo3/pull/2914)
- Fix segfault in `__traverse__` when visiting `None` fields of `Option<T: AsPyPointer>`. [#2921](https://github.com/PyO3/pyo3/pull/2921)
- Fix `#[pymethods(crate = "...")]` option being ignored. [#2923](https://github.com/PyO3/pyo3/pull/2923)
- Link against `pythonXY_d.dll` for debug Python builds on Windows. [#2937](https://github.com/PyO3/pyo3/pull/2937)
## [0.18.0] - 2023-01-17
### Packaging
- Relax `indexmap` optional dependency to allow `>= 1.6, < 2`. [#2849](https://github.com/PyO3/pyo3/pull/2849)
- Relax `hashbrown` optional dependency to allow `>= 0.9, < 0.14`. [#2875](https://github.com/PyO3/pyo3/pull/2875)
- Update `memoffset` dependency to 0.8. [#2875](https://github.com/PyO3/pyo3/pull/2875)
### Added
- Add `GILOnceCell::get_or_try_init` for fallible `GILOnceCell` initialization. [#2398](https://github.com/PyO3/pyo3/pull/2398)
- Add experimental feature `experimental-inspect` with `type_input()` and `type_output()` helpers to get the Python type of any Python-compatible object. [#2490](https://github.com/PyO3/pyo3/pull/2490) [#2882](https://github.com/PyO3/pyo3/pull/2882)
- The `#[pyclass]` macro can now take `get_all` and `set_all` to create getters and setters for every field. [#2692](https://github.com/PyO3/pyo3/pull/2692)
- Add `#[pyo3(signature = (...))]` option for `#[pyfunction]` and `#[pymethods]`. [#2702](https://github.com/PyO3/pyo3/pull/2702)
- `pyo3-build-config`: rebuild when `PYO3_ENVIRONMENT_SIGNATURE` environment variable value changes. [#2727](https://github.com/PyO3/pyo3/pull/2727)
- Add conversions between non-zero int types in `std::num` and Python `int`. [#2730](https://github.com/PyO3/pyo3/pull/2730)
- Add `Py::downcast()` as a companion to `PyAny::downcast()`, as well as `downcast_unchecked()` for both types. [#2734](https://github.com/PyO3/pyo3/pull/2734)
- Add types for all built-in `Warning` classes as well as `PyErr::warn_explicit`. [#2742](https://github.com/PyO3/pyo3/pull/2742)
- Add `abi3-py311` feature. [#2776](https://github.com/PyO3/pyo3/pull/2776)
- Add FFI definition `_PyErr_ChainExceptions()` for CPython. [#2788](https://github.com/PyO3/pyo3/pull/2788)
- Add FFI definitions `PyVectorcall_NARGS` and `PY_VECTORCALL_ARGUMENTS_OFFSET` for PyPy 3.8 and up. [#2811](https://github.com/PyO3/pyo3/pull/2811)
- Add `PyList::get_item_unchecked` for PyPy. [#2827](https://github.com/PyO3/pyo3/pull/2827)
### Changed
- PyO3's macros now emit a much nicer error message if function return values don't implement the required trait(s). [#2664](https://github.com/PyO3/pyo3/pull/2664)
- Use a TypeError, rather than a ValueError, when refusing to treat a str as a Vec. [#2685](https://github.com/PyO3/pyo3/pull/2685)
- Change `PyCFunction::new_closure` to take `name` and `doc` arguments. [#2686](https://github.com/PyO3/pyo3/pull/2686)
- `PyType::is_subclass`, `PyErr::is_instance` and `PyAny::is_instance` now take `&PyAny` instead of `&PyType` arguments, so that they work with objects that pretend to be types using `__subclasscheck__` and `__instancecheck__`. [#2695](https://github.com/PyO3/pyo3/pull/2695)
- Deprecate `#[args]` attribute and passing "args" specification directly to `#[pyfunction]` in favor of the new `#[pyo3(signature = (...))]` option. [#2702](https://github.com/PyO3/pyo3/pull/2702)
- Deprecate required arguments after `Option<T>` arguments to `#[pyfunction]` and `#[pymethods]` without also using `#[pyo3(signature)]` to specify whether the arguments should be required or have defaults. [#2703](https://github.com/PyO3/pyo3/pull/2703)
- Change `#[pyfunction]` and `#[pymethods]` to use a common call "trampoline" to slightly reduce generated code size and compile times. [#2705](https://github.com/PyO3/pyo3/pull/2705)
- `PyAny::cast_as()` and `Py::cast_as()` are now deprecated in favor of `PyAny::downcast()` and the new `Py::downcast()`. [#2734](https://github.com/PyO3/pyo3/pull/2734)
- Relax lifetime bounds on `PyAny::downcast()`. [#2734](https://github.com/PyO3/pyo3/pull/2734)
- Automatically generate `__text_signature__` for all Python functions created using `#[pyfunction]` and `#[pymethods]`. [#2784](https://github.com/PyO3/pyo3/pull/2784)
- Accept any iterator in `PySet::new` and `PyFrozenSet::new`. [#2795](https://github.com/PyO3/pyo3/pull/2795)
- Mixing `#[cfg(...)]` and `#[pyo3(...)]` attributes on `#[pyclass]` struct fields will now work. [#2796](https://github.com/PyO3/pyo3/pull/2796)
- Re-enable `PyFunction` when building for abi3 or PyPy. [#2838](https://github.com/PyO3/pyo3/pull/2838)
- Improve `derive(FromPyObject)` to use `intern!` when applicable for `#[pyo3(item)]`. [#2879](https://github.com/PyO3/pyo3/pull/2879)
### Removed
- Remove the deprecated `pyproto` feature, `#[pyproto]` macro, and all accompanying APIs. [#2587](https://github.com/PyO3/pyo3/pull/2587)
- Remove all functionality deprecated in PyO3 0.16. [#2843](https://github.com/PyO3/pyo3/pull/2843)
### Fixed
- Disable `PyModule::filename` on PyPy. [#2715](https://github.com/PyO3/pyo3/pull/2715)
- `PyCodeObject` is now once again defined with fields on Python 3.7. [#2726](https://github.com/PyO3/pyo3/pull/2726)
- Raise a `TypeError` if `#[new]` pymethods with no arguments receive arguments when called from Python. [#2749](https://github.com/PyO3/pyo3/pull/2749)
- Use the `NOARGS` argument calling convention for methods that have a single `py: Python` argument (as a performance optimization). [#2760](https://github.com/PyO3/pyo3/pull/2760)
- Fix truncation of `isize` values to `c_long` in `PySlice::new`. [#2769](https://github.com/PyO3/pyo3/pull/2769)
- Fix soundness issue with FFI definition `PyUnicodeDecodeError_Create` on PyPy leading to indeterminate behavior (typically a `TypeError`). [#2772](https://github.com/PyO3/pyo3/pull/2772)
- Allow functions taking `**kwargs` to accept keyword arguments which share a name with a positional-only argument (as permitted by PEP 570). [#2800](https://github.com/PyO3/pyo3/pull/2800)
- Fix unresolved symbol for `PyObject_Vectorcall` on PyPy 3.9 and up. [#2811](https://github.com/PyO3/pyo3/pull/2811)
- Fix memory leak in `PyCFunction::new_closure`. [#2842](https://github.com/PyO3/pyo3/pull/2842)
## [0.17.3] - 2022-11-01
### Packaging
- Support Python 3.11. (Previous versions of PyO3 0.17 have been tested against Python 3.11 release candidates and are expected to be compatible, this is the first version tested against Python 3.11.0.) [#2708](https://github.com/PyO3/pyo3/pull/2708)
### Added
- Implemented `ExactSizeIterator` for `PyListIterator`, `PyDictIterator`, `PySetIterator` and `PyFrozenSetIterator`. [#2676](https://github.com/PyO3/pyo3/pull/2676)
### Fixed
- Fix regression of `impl FromPyObject for [T; N]` no longer accepting types passing `PySequence_Check`, e.g. NumPy arrays, since version 0.17.0. This is the same fix that was applied to `impl FromPyObject for Vec<T>` in version 0.17.1, extended to fixed-size arrays. [#2675](https://github.com/PyO3/pyo3/pull/2675)
- Fix UB in `FunctionDescription::extract_arguments_fastcall` due to creating slices from a null pointer. [#2687](https://github.com/PyO3/pyo3/pull/2687)
## [0.17.2] - 2022-10-04
### Packaging
- Added optional `chrono` feature to convert `chrono` types into types in the `datetime` module. [#2612](https://github.com/PyO3/pyo3/pull/2612)
### Added
- Add support for `num-bigint` feature on `PyPy`. [#2626](https://github.com/PyO3/pyo3/pull/2626)
### Fixed
- Correctly implement `__richcmp__` for enums, fixing `__ne__` always returning `True`. [#2622](https://github.com/PyO3/pyo3/pull/2622)
- Fix compile error since 0.17.0 with `Option<&SomePyClass>` argument with a default. [#2630](https://github.com/PyO3/pyo3/pull/2630)
- Fix regression of `impl FromPyObject for Vec<T>` no longer accepting types passing `PySequence_Check`, e.g. NumPy arrays, since 0.17.0. [#2631](https://github.com/PyO3/pyo3/pull/2631)
## [0.17.1] - 2022-08-28
### Fixed
- Fix visibility of `PyDictItems`, `PyDictKeys`, and `PyDictValues` types added in PyO3 0.17.0.
- Fix compile failure when using `#[pyo3(from_py_with = "...")]` attribute on an argument of type `Option<T>`. [#2592](https://github.com/PyO3/pyo3/pull/2592)
- Fix clippy `redundant-closure` lint on `**kwargs` arguments for `#[pyfunction]` and `#[pymethods]`. [#2595](https://github.com/PyO3/pyo3/pull/2595)
## [0.17.0] - 2022-08-23
### Packaging
- Update inventory dependency to `0.3` (the `multiple-pymethods` feature now requires Rust 1.62 for correctness). [#2492](https://github.com/PyO3/pyo3/pull/2492)
### Added
- Add `timezone_utc`. [#1588](https://github.com/PyO3/pyo3/pull/1588)
- Implement `ToPyObject` for `[T; N]`. [#2313](https://github.com/PyO3/pyo3/pull/2313)
- Add `PyDictKeys`, `PyDictValues` and `PyDictItems` Rust types. [#2358](https://github.com/PyO3/pyo3/pull/2358)
- Add `append_to_inittab`. [#2377](https://github.com/PyO3/pyo3/pull/2377)
- Add FFI definition `PyFrame_GetCode`. [#2406](https://github.com/PyO3/pyo3/pull/2406)
- Add `PyCode` and `PyFrame` high level objects. [#2408](https://github.com/PyO3/pyo3/pull/2408)
- Add FFI definitions `Py_fstring_input`, `sendfunc`, and `_PyErr_StackItem`. [#2423](https://github.com/PyO3/pyo3/pull/2423)
- Add `PyDateTime::new_with_fold`, `PyTime::new_with_fold`, `PyTime::get_fold`, and `PyDateTime::get_fold` for PyPy. [#2428](https://github.com/PyO3/pyo3/pull/2428)
- Add `#[pyclass(frozen)]`. [#2448](https://github.com/PyO3/pyo3/pull/2448)
- Accept `#[pyo3(name)]` on enum variants. [#2457](https://github.com/PyO3/pyo3/pull/2457)
- Add `CompareOp::matches` to implement `__richcmp__` as the result of a Rust `std::cmp::Ordering` comparison. [#2460](https://github.com/PyO3/pyo3/pull/2460)
- Add `PySuper` type. [#2486](https://github.com/PyO3/pyo3/pull/2486)
- Support PyPy on Windows with the `generate-import-lib` feature. [#2506](https://github.com/PyO3/pyo3/pull/2506)
- Add FFI definitions `Py_EnterRecursiveCall` and `Py_LeaveRecursiveCall`. [#2511](https://github.com/PyO3/pyo3/pull/2511)
- Add `PyDict::get_item_with_error`. [#2536](https://github.com/PyO3/pyo3/pull/2536)
- Add `#[pyclass(sequence)]` option. [#2567](https://github.com/PyO3/pyo3/pull/2567)
### Changed
- Change datetime constructors taking a `tzinfo` to take `Option<&PyTzInfo>` instead of `Option<&PyObject>`: `PyDateTime::new`, `PyDateTime::new_with_fold`, `PyTime::new`, and `PyTime::new_with_fold`. [#1588](https://github.com/PyO3/pyo3/pull/1588)
- Move `PyTypeObject::type_object` method to the `PyTypeInfo` trait, and deprecate the `PyTypeObject` trait. [#2287](https://github.com/PyO3/pyo3/pull/2287)
- Methods of `Py` and `PyAny` now accept `impl IntoPy<Py<PyString>>` rather than just `&str` to allow use of the `intern!` macro. [#2312](https://github.com/PyO3/pyo3/pull/2312)
- Change the deprecated `pyproto` feature to be opt-in instead of opt-out. [#2322](https://github.com/PyO3/pyo3/pull/2322)
- Emit better error messages when `#[pyfunction]` return types do not implement `IntoPy`. [#2326](https://github.com/PyO3/pyo3/pull/2326)
- Require `T: IntoPy` for `impl<T, const N: usize> IntoPy<PyObject> for [T; N]` instead of `T: ToPyObject`. [#2326](https://github.com/PyO3/pyo3/pull/2326)
- Deprecate the `ToBorrowedObject` trait. [#2333](https://github.com/PyO3/pyo3/pull/2333)
- Iterators over `PySet` and `PyDict` will now panic if the underlying collection is mutated during the iteration. [#2380](https://github.com/PyO3/pyo3/pull/2380)
- Allow `#[classattr]` methods to be fallible. [#2385](https://github.com/PyO3/pyo3/pull/2385)
- Prevent multiple `#[pymethods]` with the same name for a single `#[pyclass]`. [#2399](https://github.com/PyO3/pyo3/pull/2399)
- Fixup `lib_name` when using `PYO3_CONFIG_FILE`. [#2404](https://github.com/PyO3/pyo3/pull/2404)
- Add a message to the `ValueError` raised by the `#[derive(FromPyObject)]` implementation for a tuple struct. [#2414](https://github.com/PyO3/pyo3/pull/2414)
- Allow `#[classattr]` methods to take `Python` argument. [#2456](https://github.com/PyO3/pyo3/pull/2456)
- Rework `PyCapsule` type to resolve soundness issues: [#2485](https://github.com/PyO3/pyo3/pull/2485)
- `PyCapsule::new` and `PyCapsule::new_with_destructor` now take `name: Option<CString>` instead of `&CStr`.
- The destructor `F` in `PyCapsule::new_with_destructor` must now be `Send`.
- `PyCapsule::get_context` deprecated in favor of `PyCapsule::context` which doesn't take a `py: Python<'_>` argument.
- `PyCapsule::set_context` no longer takes a `py: Python<'_>` argument.
- `PyCapsule::name` now returns `PyResult<Option<&CStr>>` instead of `&CStr`.
- `FromPyObject::extract` for `Vec<T>` no longer accepts Python `str` inputs. [#2500](https://github.com/PyO3/pyo3/pull/2500)
- Ensure each `#[pymodule]` is only initialized once. [#2523](https://github.com/PyO3/pyo3/pull/2523)
- `pyo3_build_config::add_extension_module_link_args` now also emits linker arguments for `wasm32-unknown-emscripten`. [#2538](https://github.com/PyO3/pyo3/pull/2538)
- Type checks for `PySequence` and `PyMapping` now require inputs to inherit from (or register with) `collections.abc.Sequence` and `collections.abc.Mapping` respectively. [#2477](https://github.com/PyO3/pyo3/pull/2477)
- Disable `PyFunction` when building for abi3 or PyPy. [#2542](https://github.com/PyO3/pyo3/pull/2542)
- Deprecate `Python::acquire_gil`. [#2549](https://github.com/PyO3/pyo3/pull/2549)
### Removed
- Remove all functionality deprecated in PyO3 0.15. [#2283](https://github.com/PyO3/pyo3/pull/2283)
- Make the `Dict`, `WeakRef` and `BaseNativeType` members of the `PyClass` private implementation details. [#2572](https://github.com/PyO3/pyo3/pull/2572)
### Fixed
- Enable incorrectly disabled FFI definition `PyThreadState_DeleteCurrent`. [#2357](https://github.com/PyO3/pyo3/pull/2357)
- Fix `wrap_pymodule` interactions with name resolution rules: it no longer "sees through" glob imports of `use submodule::*` when `submodule::submodule` is a `#[pymodule]`. [#2363](https://github.com/PyO3/pyo3/pull/2363)
- Correct FFI definition `PyEval_EvalCodeEx` to take `*const *mut PyObject` array arguments instead of `*mut *mut PyObject`. [#2368](https://github.com/PyO3/pyo3/pull/2368)
- Fix "raw-ident" structs (e.g. `#[pyclass] struct r#RawName`) incorrectly having `r#` at the start of the class name created in Python. [#2395](https://github.com/PyO3/pyo3/pull/2395)
- Correct FFI definition `Py_tracefunc` to be `unsafe extern "C" fn` (was previously safe). [#2407](https://github.com/PyO3/pyo3/pull/2407)
- Fix compile failure with `#[pyo3(from_py_with = "...")]` annotations on a field in a `#[derive(FromPyObject)]` struct. [#2414](https://github.com/PyO3/pyo3/pull/2414)
- Fix FFI definitions `_PyDateTime_BaseTime` and `_PyDateTime_BaseDateTime` lacking leading underscores in their names. [#2421](https://github.com/PyO3/pyo3/pull/2421)
- Remove FFI definition `PyArena` on Python 3.10 and up. [#2421](https://github.com/PyO3/pyo3/pull/2421)
- Fix FFI definition `PyCompilerFlags` missing member `cf_feature_version` on Python 3.8 and up. [#2423](https://github.com/PyO3/pyo3/pull/2423)
- Fix FFI definition `PyAsyncMethods` missing member `am_send` on Python 3.10 and up. [#2423](https://github.com/PyO3/pyo3/pull/2423)
- Fix FFI definition `PyGenObject` having multiple incorrect members on various Python versions. [#2423](https://github.com/PyO3/pyo3/pull/2423)
- Fix FFI definition `PySyntaxErrorObject` missing members `end_lineno` and `end_offset` on Python 3.10 and up. [#2423](https://github.com/PyO3/pyo3/pull/2423)
- Fix FFI definition `PyHeapTypeObject` missing member `ht_module` on Python 3.9 and up. [#2423](https://github.com/PyO3/pyo3/pull/2423)
- Fix FFI definition `PyFrameObject` having multiple incorrect members on various Python versions. [#2424](https://github.com/PyO3/pyo3/pull/2424) [#2434](https://github.com/PyO3/pyo3/pull/2434)
- Fix FFI definition `PyTypeObject` missing deprecated field `tp_print` on Python 3.8. [#2428](https://github.com/PyO3/pyo3/pull/2428)
- Fix FFI definitions `PyDateTime_CAPI`, `PyDateTime_Date`, `PyASCIIObject`, `PyBaseExceptionObject`, `PyListObject`, and `PyTypeObject` on PyPy. [#2428](https://github.com/PyO3/pyo3/pull/2428)
- Fix FFI definition `_inittab` field `initfunc` typo'd as `initfun`. [#2431](https://github.com/PyO3/pyo3/pull/2431)
- Fix FFI definitions `_PyDateTime_BaseTime` and `_PyDateTime_BaseDateTime` incorrectly having `fold` member. [#2432](https://github.com/PyO3/pyo3/pull/2432)
- Fix FFI definitions `PyTypeObject`, `PyHeapTypeObject`, and `PyCFunctionObject` having incorrect members on PyPy 3.9. [#2433](https://github.com/PyO3/pyo3/pull/2433)
- Fix FFI definition `PyGetSetDef` to have `*const c_char` for `doc` member (not `*mut c_char`). [#2439](https://github.com/PyO3/pyo3/pull/2439)
- Fix `#[pyo3(from_py_with = "...")]` being ignored for 1-element tuple structs and transparent structs. [#2440](https://github.com/PyO3/pyo3/pull/2440)
- Use `memoffset` to avoid UB when computing `PyCell` layout. [#2450](https://github.com/PyO3/pyo3/pull/2450)
- Fix incorrect enum names being returned by the generated `repr` for enums renamed by `#[pyclass(name = "...")]` [#2457](https://github.com/PyO3/pyo3/pull/2457)
- Fix `PyObject_CallNoArgs` incorrectly being available when building for abi3 on Python 3.9. [#2476](https://github.com/PyO3/pyo3/pull/2476)
- Fix several clippy warnings generated by `#[pyfunction]` arguments. [#2503](https://github.com/PyO3/pyo3/pull/2503)
## [0.16.6] - 2022-08-23
### Changed
- Fix soundness issues with `PyCapsule` type with select workarounds. Users are encouraged to upgrade to PyO3 0.17 at their earliest convenience which contains API breakages which fix the issues in a long-term fashion. [#2522](https://github.com/PyO3/pyo3/pull/2522)
- `PyCapsule::new` and `PyCapsule::new_with_destructor` now take ownership of a copy of the `name` to resolve a possible use-after-free.
- `PyCapsule::name` now returns an empty `CStr` instead of dereferencing a null pointer if the capsule has no name.
- The destructor `F` in `PyCapsule::new_with_destructor` will never be called if the capsule is deleted from a thread other than the one which the capsule was created in (a warning will be emitted).
- Panics during drop of panic payload caught by PyO3 will now abort. [#2544](https://github.com/PyO3/pyo3/pull/2544)
## [0.16.5] - 2022-05-15
### Added
- Add an experimental `generate-import-lib` feature to support auto-generating non-abi3 python import libraries for Windows targets. [#2364](https://github.com/PyO3/pyo3/pull/2364)
- Add FFI definition `Py_ExitStatusException`. [#2374](https://github.com/PyO3/pyo3/pull/2374)
### Changed
- Deprecate experimental `generate-abi3-import-lib` feature in favor of the new `generate-import-lib` feature. [#2364](https://github.com/PyO3/pyo3/pull/2364)
### Fixed
- Added missing `warn_default_encoding` field to `PyConfig` on 3.10+. The previously missing field could result in incorrect behavior or crashes. [#2370](https://github.com/PyO3/pyo3/pull/2370)
- Fixed order of `pathconfig_warnings` and `program_name` fields of `PyConfig` on 3.10+. Previously, the order of the fields was swapped and this could lead to incorrect behavior or crashes. [#2370](https://github.com/PyO3/pyo3/pull/2370)
## [0.16.4] - 2022-04-14
### Added
- Add `PyTzInfoAccess` trait for safe access to time zone information. [#2263](https://github.com/PyO3/pyo3/pull/2263)
- Add an experimental `generate-abi3-import-lib` feature to auto-generate `python3.dll` import libraries for Windows. [#2282](https://github.com/PyO3/pyo3/pull/2282)
- Add FFI definitions for `PyDateTime_BaseTime` and `PyDateTime_BaseDateTime`. [#2294](https://github.com/PyO3/pyo3/pull/2294)
### Changed
- Improved performance of failing calls to `FromPyObject::extract` which is common when functions accept multiple distinct types. [#2279](https://github.com/PyO3/pyo3/pull/2279)
- Default to "m" ABI tag when choosing `libpython` link name for CPython 3.7 on Unix. [#2288](https://github.com/PyO3/pyo3/pull/2288)
- Allow to compile "abi3" extensions without a working build host Python interpreter. [#2293](https://github.com/PyO3/pyo3/pull/2293)
### Fixed
- Crates depending on PyO3 can collect code coverage via LLVM instrumentation using stable Rust. [#2286](https://github.com/PyO3/pyo3/pull/2286)
- Fix segfault when calling FFI methods `PyDateTime_DATE_GET_TZINFO` or `PyDateTime_TIME_GET_TZINFO` on `datetime` or `time` without a tzinfo. [#2289](https://github.com/PyO3/pyo3/pull/2289)
- Fix directory names starting with the letter `n` breaking serialization of the interpreter configuration on Windows since PyO3 0.16.3. [#2299](https://github.com/PyO3/pyo3/pull/2299)
## [0.16.3] - 2022-04-05
### Packaging
- Extend `parking_lot` dependency supported versions to include 0.12. [#2239](https://github.com/PyO3/pyo3/pull/2239)
### Added
- Add methods to `pyo3_build_config::InterpreterConfig` to run Python scripts using the configured executable. [#2092](https://github.com/PyO3/pyo3/pull/2092)
- Add `as_bytes` method to `Py<PyBytes>`. [#2235](https://github.com/PyO3/pyo3/pull/2235)
- Add FFI definitions for `PyType_FromModuleAndSpec`, `PyType_GetModule`, `PyType_GetModuleState` and `PyModule_AddType`. [#2250](https://github.com/PyO3/pyo3/pull/2250)
- Add `pyo3_build_config::cross_compiling_from_to` as a helper to detect when PyO3 is cross-compiling. [#2253](https://github.com/PyO3/pyo3/pull/2253)
- Add `#[pyclass(mapping)]` option to leave sequence slots empty in container implementations. [#2265](https://github.com/PyO3/pyo3/pull/2265)
- Add `PyString::intern` to enable usage of the Python's built-in string interning. [#2268](https://github.com/PyO3/pyo3/pull/2268)
- Add `intern!` macro which can be used to amortize the cost of creating Python strings by storing them inside a `GILOnceCell`. [#2269](https://github.com/PyO3/pyo3/pull/2269)
- Add `PYO3_CROSS_PYTHON_IMPLEMENTATION` environment variable for selecting the default cross Python implementation. [#2272](https://github.com/PyO3/pyo3/pull/2272)
### Changed
- Allow `#[pyo3(crate = "...", text_signature = "...")]` options to be used directly in `#[pyclass(crate = "...", text_signature = "...")]`. [#2234](https://github.com/PyO3/pyo3/pull/2234)
- Make `PYO3_CROSS_LIB_DIR` environment variable optional when cross compiling. [#2241](https://github.com/PyO3/pyo3/pull/2241)
- Mark `METH_FASTCALL` calling convention as limited API on Python 3.10. [#2250](https://github.com/PyO3/pyo3/pull/2250)
- Deprecate `pyo3_build_config::cross_compiling` in favor of `pyo3_build_config::cross_compiling_from_to`. [#2253](https://github.com/PyO3/pyo3/pull/2253)
### Fixed
- Fix `abi3-py310` feature: use Python 3.10 ABI when available instead of silently falling back to the 3.9 ABI. [#2242](https://github.com/PyO3/pyo3/pull/2242)
- Use shared linking mode when cross compiling against a [Framework bundle](https://developer.apple.com/library/archive/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/FrameworkAnatomy.html) for macOS. [#2233](https://github.com/PyO3/pyo3/pull/2233)
- Fix panic during compilation when `PYO3_CROSS_LIB_DIR` is set for some host/target combinations. [#2232](https://github.com/PyO3/pyo3/pull/2232)
- Correct dependency version for `syn` to require minimal patch version 1.0.56. [#2240](https://github.com/PyO3/pyo3/pull/2240)
## [0.16.2] - 2022-03-15
### Packaging
- Warn when modules are imported on PyPy 3.7 versions older than PyPy 7.3.8, as they are known to have binary compatibility issues. [#2217](https://github.com/PyO3/pyo3/pull/2217)
- Ensure build script of `pyo3-ffi` runs before that of `pyo3` to fix cross compilation. [#2224](https://github.com/PyO3/pyo3/pull/2224)
## [0.16.1] - 2022-03-05
### Packaging
- Extend `hashbrown` optional dependency supported versions to include 0.12. [#2197](https://github.com/PyO3/pyo3/pull/2197)
### Fixed
- Fix incorrect platform detection for Windows in `pyo3-build-config`. [#2198](https://github.com/PyO3/pyo3/pull/2198)
- Fix regression from 0.16 preventing cross compiling to aarch64 macOS. [#2201](https://github.com/PyO3/pyo3/pull/2201)
## [0.16.0] - 2022-02-27
### Packaging
- Update MSRV to Rust 1.48. [#2004](https://github.com/PyO3/pyo3/pull/2004)
- Update `indoc` optional dependency to 1.0. [#2004](https://github.com/PyO3/pyo3/pull/2004)
- Drop support for Python 3.6, remove `abi3-py36` feature. [#2006](https://github.com/PyO3/pyo3/pull/2006)
- `pyo3-build-config` no longer enables the `resolve-config` feature by default. [#2008](https://github.com/PyO3/pyo3/pull/2008)
- Update `inventory` optional dependency to 0.2. [#2019](https://github.com/PyO3/pyo3/pull/2019)
- Drop `paste` dependency. [#2081](https://github.com/PyO3/pyo3/pull/2081)
- The bindings found in `pyo3::ffi` are now a re-export of a separate `pyo3-ffi` crate. [#2126](https://github.com/PyO3/pyo3/pull/2126)
- Support PyPy 3.9. [#2143](https://github.com/PyO3/pyo3/pull/2143)
### Added
- Add `PyCapsule` type exposing the [Capsule API](https://docs.python.org/3/c-api/capsule.html#capsules). [#1980](https://github.com/PyO3/pyo3/pull/1980)
- Add `pyo3_build_config::Sysconfigdata` and supporting APIs. [#1996](https://github.com/PyO3/pyo3/pull/1996)
- Add `Py::setattr` method. [#2009](https://github.com/PyO3/pyo3/pull/2009)
- Add `#[pyo3(crate = "some::path")]` option to all attribute macros (except the deprecated `#[pyproto]`). [#2022](https://github.com/PyO3/pyo3/pull/2022)
- Enable `create_exception!` macro to take an optional docstring. [#2027](https://github.com/PyO3/pyo3/pull/2027)
- Enable `#[pyclass]` for fieldless (aka C-like) enums. [#2034](https://github.com/PyO3/pyo3/pull/2034)
- Add buffer magic methods `__getbuffer__` and `__releasebuffer__` to `#[pymethods]`. [#2067](https://github.com/PyO3/pyo3/pull/2067)
- Add support for paths in `wrap_pyfunction` and `wrap_pymodule`. [#2081](https://github.com/PyO3/pyo3/pull/2081)
- Enable `wrap_pyfunction!` to wrap a `#[pyfunction]` implemented in a different Rust module or crate. [#2091](https://github.com/PyO3/pyo3/pull/2091)
- Add `PyAny::contains` method (`in` operator for `PyAny`). [#2115](https://github.com/PyO3/pyo3/pull/2115)
- Add `PyMapping::contains` method (`in` operator for `PyMapping`). [#2133](https://github.com/PyO3/pyo3/pull/2133)
- Add garbage collection magic methods `__traverse__` and `__clear__` to `#[pymethods]`. [#2159](https://github.com/PyO3/pyo3/pull/2159)
- Add support for `from_py_with` on struct tuples and enums to override the default from-Python conversion. [#2181](https://github.com/PyO3/pyo3/pull/2181)
- Add `eq`, `ne`, `lt`, `le`, `gt`, `ge` methods to `PyAny` that wrap `rich_compare`. [#2175](https://github.com/PyO3/pyo3/pull/2175)
- Add `Py::is` and `PyAny::is` methods to check for object identity. [#2183](https://github.com/PyO3/pyo3/pull/2183)
- Add support for the `__getattribute__` magic method. [#2187](https://github.com/PyO3/pyo3/pull/2187)
### Changed
- `PyType::is_subclass`, `PyErr::is_instance` and `PyAny::is_instance` now operate on a run-time type object instead of a type known at compile-time. The old behavior is still available as `PyType::is_subclass_of`, `PyErr::is_instance_of` and `PyAny::is_instance_of`. [#1985](https://github.com/PyO3/pyo3/pull/1985)
- Rename some methods on `PyErr` (the old names are just marked deprecated for now): [#2026](https://github.com/PyO3/pyo3/pull/2026)
- `pytype` -> `get_type`
- `pvalue` -> `value` (and deprecate equivalent `instance`)
- `ptraceback` -> `traceback`
- `from_instance` -> `from_value`
- `into_instance` -> `into_value`
- `PyErr::new_type` now takes an optional docstring and now returns `PyResult<Py<PyType>>` rather than a `ffi::PyTypeObject` pointer. [#2027](https://github.com/PyO3/pyo3/pull/2027)
- Deprecate `PyType::is_instance`; it is inconsistent with other `is_instance` methods in PyO3. Instead of `typ.is_instance(obj)`, use `obj.is_instance(typ)`. [#2031](https://github.com/PyO3/pyo3/pull/2031)
- `__getitem__`, `__setitem__` and `__delitem__` in `#[pymethods]` now implement both a Python mapping and sequence by default. [#2065](https://github.com/PyO3/pyo3/pull/2065)
- Improve performance and error messages for `#[derive(FromPyObject)]` for enums. [#2068](https://github.com/PyO3/pyo3/pull/2068)
- Reduce generated LLVM code size (to improve compile times) for:
- internal `handle_panic` helper [#2074](https://github.com/PyO3/pyo3/pull/2074) [#2158](https://github.com/PyO3/pyo3/pull/2158)
- `#[pyfunction]` and `#[pymethods]` argument extraction [#2075](https://github.com/PyO3/pyo3/pull/2075) [#2085](https://github.com/PyO3/pyo3/pull/2085)
- `#[pyclass]` type object creation [#2076](https://github.com/PyO3/pyo3/pull/2076) [#2081](https://github.com/PyO3/pyo3/pull/2081) [#2157](https://github.com/PyO3/pyo3/pull/2157)
- Respect Rust privacy rules for items wrapped with `wrap_pyfunction` and `wrap_pymodule`. [#2081](https://github.com/PyO3/pyo3/pull/2081)
- Add modulo argument to `__ipow__` magic method. [#2083](https://github.com/PyO3/pyo3/pull/2083)
- Fix FFI definition for `_PyCFunctionFast`. [#2126](https://github.com/PyO3/pyo3/pull/2126)
- `PyDateTimeAPI` and `PyDateTime_TimeZone_UTC` are now unsafe functions instead of statics. [#2126](https://github.com/PyO3/pyo3/pull/2126)
- `PyDateTimeAPI` does not implicitly call `PyDateTime_IMPORT` anymore to reflect the original Python API more closely. Before the first call to `PyDateTime_IMPORT` a null pointer is returned. Therefore before calling any of the following FFI functions `PyDateTime_IMPORT` must be called to avoid undefined behavior: [#2126](https://github.com/PyO3/pyo3/pull/2126)
- `PyDateTime_TimeZone_UTC`
- `PyDate_Check`
- `PyDate_CheckExact`
- `PyDateTime_Check`
- `PyDateTime_CheckExact`
- `PyTime_Check`
- `PyTime_CheckExact`
- `PyDelta_Check`
- `PyDelta_CheckExact`
- `PyTZInfo_Check`
- `PyTZInfo_CheckExact`
- `PyDateTime_FromTimestamp`
- `PyDate_FromTimestamp`
- Deprecate the `gc` option for `pyclass` (e.g. `#[pyclass(gc)]`). Just implement a `__traverse__` `#[pymethod]`. [#2159](https://github.com/PyO3/pyo3/pull/2159)
- The `ml_meth` field of `PyMethodDef` is now represented by the `PyMethodDefPointer` union. [#2166](https://github.com/PyO3/pyo3/pull/2166)
- Deprecate the `#[pyproto]` traits. [#2173](https://github.com/PyO3/pyo3/pull/2173)
### Removed
- Remove all functionality deprecated in PyO3 0.14. [#2007](https://github.com/PyO3/pyo3/pull/2007)
- Remove `Default` impl for `PyMethodDef`. [#2166](https://github.com/PyO3/pyo3/pull/2166)
- Remove `PartialEq` impl for `Py` and `PyAny` (use the new `is` instead). [#2183](https://github.com/PyO3/pyo3/pull/2183)
### Fixed
- Fix undefined symbol for `PyObject_HasAttr` on PyPy. [#2025](https://github.com/PyO3/pyo3/pull/2025)
- Fix memory leak in `PyErr::into_value`. [#2026](https://github.com/PyO3/pyo3/pull/2026)
- Fix clippy warning `needless-option-as-deref` in code generated by `#[pyfunction]` and `#[pymethods]`. [#2040](https://github.com/PyO3/pyo3/pull/2040)
- Fix undefined behavior in `PySlice::indices`. [#2061](https://github.com/PyO3/pyo3/pull/2061)
- Fix the `wrap_pymodule!` macro using the wrong name for a `#[pymodule]` with a `#[pyo3(name = "..")]` attribute. [#2081](https://github.com/PyO3/pyo3/pull/2081)
- Fix magic methods in `#[pymethods]` accepting implementations with the wrong number of arguments. [#2083](https://github.com/PyO3/pyo3/pull/2083)
- Fix panic in `#[pyfunction]` generated code when a required argument following an `Option` was not provided. [#2093](https://github.com/PyO3/pyo3/pull/2093)
- Fixed undefined behavior caused by incorrect `ExactSizeIterator` implementations. [#2124](https://github.com/PyO3/pyo3/pull/2124)
- Fix missing FFI definition `PyCMethod_New` on Python 3.9 and up. [#2143](https://github.com/PyO3/pyo3/pull/2143)
- Add missing FFI definitions `_PyLong_NumBits` and `_PyLong_AsByteArray` on PyPy. [#2146](https://github.com/PyO3/pyo3/pull/2146)
- Fix memory leak in implementation of `AsPyPointer` for `Option<T>`. [#2160](https://github.com/PyO3/pyo3/pull/2160)
- Fix FFI definition of `_PyLong_NumBits` to return `size_t` instead of `c_int`. [#2161](https://github.com/PyO3/pyo3/pull/2161)
- Fix `TypeError` thrown when argument parsing failed missing the originating causes. [#2178](https://github.com/PyO3/pyo3/pull/2178)
## [0.15.2] - 2022-04-14
### Packaging
- Backport of PyPy 3.9 support from PyO3 0.16. [#2262](https://github.com/PyO3/pyo3/pull/2262)
## [0.15.1] - 2021-11-19
### Added
- Add implementations for `Py::as_ref` and `Py::into_ref` for `Py<PySequence>`, `Py<PyIterator>` and `Py<PyMapping>`. [#1682](https://github.com/PyO3/pyo3/pull/1682)
- Add `PyTraceback` type to represent and format Python tracebacks. [#1977](https://github.com/PyO3/pyo3/pull/1977)
### Changed
- `#[classattr]` constants with a known magic method name (which is lowercase) no longer trigger lint warnings expecting constants to be uppercase. [#1969](https://github.com/PyO3/pyo3/pull/1969)
### Fixed
- Fix creating `#[classattr]` by functions with the name of a known magic method. [#1969](https://github.com/PyO3/pyo3/pull/1969)
- Fix use of `catch_unwind` in `allow_threads` which can cause fatal crashes. [#1989](https://github.com/PyO3/pyo3/pull/1989)
- Fix build failure on PyPy when abi3 features are activated. [#1991](https://github.com/PyO3/pyo3/pull/1991)
- Fix mingw platform detection. [#1993](https://github.com/PyO3/pyo3/pull/1993)
- Fix panic in `__get__` implementation when accessing descriptor on type object. [#1997](https://github.com/PyO3/pyo3/pull/1997)
## [0.15.0] - 2021-11-03
### Packaging
- `pyo3`'s `Cargo.toml` now advertises `links = "python"` to inform Cargo that it links against *libpython*. [#1819](https://github.com/PyO3/pyo3/pull/1819)
- Added optional `anyhow` feature to convert `anyhow::Error` into `PyErr`. [#1822](https://github.com/PyO3/pyo3/pull/1822)
- Support Python 3.10. [#1889](https://github.com/PyO3/pyo3/pull/1889)
- Added optional `eyre` feature to convert `eyre::Report` into `PyErr`. [#1893](https://github.com/PyO3/pyo3/pull/1893)
- Support PyPy 3.8. [#1948](https://github.com/PyO3/pyo3/pull/1948)
### Added
- Add `PyList::get_item_unchecked` and `PyTuple::get_item_unchecked` to get items without bounds checks. [#1733](https://github.com/PyO3/pyo3/pull/1733)
- Support `#[doc = include_str!(...)]` attributes on Rust 1.54 and up. [#1746](https://github.com/PyO3/pyo3/issues/1746)
- Add `PyAny::py` as a convenience for `PyNativeType::py`. [#1751](https://github.com/PyO3/pyo3/pull/1751)
- Add implementation of `std::ops::Index<usize>` for `PyList`, `PyTuple` and `PySequence`. [#1825](https://github.com/PyO3/pyo3/pull/1825)
- Add range indexing implementations of `std::ops::Index` for `PyList`, `PyTuple` and `PySequence`. [#1829](https://github.com/PyO3/pyo3/pull/1829)
- Add `PyMapping` type to represent the Python mapping protocol. [#1844](https://github.com/PyO3/pyo3/pull/1844)
- Add commonly-used sequence methods to `PyList` and `PyTuple`. [#1849](https://github.com/PyO3/pyo3/pull/1849)
- Add `as_sequence` methods to `PyList` and `PyTuple`. [#1860](https://github.com/PyO3/pyo3/pull/1860)
- Add support for magic methods in `#[pymethods]`, intended as a replacement for `#[pyproto]`. [#1864](https://github.com/PyO3/pyo3/pull/1864)
- Add `abi3-py310` feature. [#1889](https://github.com/PyO3/pyo3/pull/1889)
- Add `PyCFunction::new_closure` to create a Python function from a Rust closure. [#1901](https://github.com/PyO3/pyo3/pull/1901)
- Add support for positional-only arguments in `#[pyfunction]`. [#1925](https://github.com/PyO3/pyo3/pull/1925)
- Add `PyErr::take` to attempt to fetch a Python exception if present. [#1957](https://github.com/PyO3/pyo3/pull/1957)
### Changed
- `PyList`, `PyTuple` and `PySequence`'s APIs now accepts only `usize` indices instead of `isize`.
[#1733](https://github.com/PyO3/pyo3/pull/1733), [#1802](https://github.com/PyO3/pyo3/pull/1802),
[#1803](https://github.com/PyO3/pyo3/pull/1803)
- `PyList::get_item` and `PyTuple::get_item` now return `PyResult<&PyAny>` instead of panicking. [#1733](https://github.com/PyO3/pyo3/pull/1733)
- `PySequence::in_place_repeat` and `PySequence::in_place_concat` now return `PyResult<&PySequence>` instead of `PyResult<()>`, which is needed in case of immutable sequences such as tuples. [#1803](https://github.com/PyO3/pyo3/pull/1803)
- `PySequence::get_slice` now returns `PyResult<&PySequence>` instead of `PyResult<&PyAny>`. [#1829](https://github.com/PyO3/pyo3/pull/1829)
- Deprecate `PyTuple::split_from`. [#1804](https://github.com/PyO3/pyo3/pull/1804)
- Deprecate `PyTuple::slice`, new method `PyTuple::get_slice` added with `usize` indices. [#1828](https://github.com/PyO3/pyo3/pull/1828)
- Deprecate FFI definitions `PyParser_SimpleParseStringFlags`, `PyParser_SimpleParseStringFlagsFilename`, `PyParser_SimpleParseFileFlags` when building for Python 3.9. [#1830](https://github.com/PyO3/pyo3/pull/1830)
- Mark FFI definitions removed in Python 3.10 `PyParser_ASTFromString`, `PyParser_ASTFromStringObject`, `PyParser_ASTFromFile`, `PyParser_ASTFromFileObject`, `PyParser_SimpleParseStringFlags`, `PyParser_SimpleParseStringFlagsFilename`, `PyParser_SimpleParseFileFlags`, `PyParser_SimpleParseString`, `PyParser_SimpleParseFile`, `Py_SymtableString`, and `Py_SymtableStringObject`. [#1830](https://github.com/PyO3/pyo3/pull/1830)
- `#[pymethods]` now handles magic methods similarly to `#[pyproto]`. In the future, `#[pyproto]` may be deprecated. [#1864](https://github.com/PyO3/pyo3/pull/1864)
- Deprecate FFI definitions `PySys_AddWarnOption`, `PySys_AddWarnOptionUnicode` and `PySys_HasWarnOptions`. [#1887](https://github.com/PyO3/pyo3/pull/1887)
- Deprecate `#[call]` attribute in favor of using `fn __call__`. [#1929](https://github.com/PyO3/pyo3/pull/1929)
- Fix missing FFI definition `_PyImport_FindExtensionObject` on Python 3.10. [#1942](https://github.com/PyO3/pyo3/pull/1942)
- Change `PyErr::fetch` to panic in debug mode if no exception is present. [#1957](https://github.com/PyO3/pyo3/pull/1957)
### Fixed
- Fix building with a conda environment on Windows. [#1873](https://github.com/PyO3/pyo3/pull/1873)
- Fix panic on Python 3.6 when calling `Python::with_gil` with Python initialized but threading not initialized. [#1874](https://github.com/PyO3/pyo3/pull/1874)
- Fix incorrect linking to version-specific DLL instead of `python3.dll` when cross-compiling to Windows with `abi3`. [#1880](https://github.com/PyO3/pyo3/pull/1880)
- Fix FFI definition for `PyTuple_ClearFreeList` incorrectly being present for Python 3.9 and up. [#1887](https://github.com/PyO3/pyo3/pull/1887)
- Fix panic in generated `#[derive(FromPyObject)]` for enums. [#1888](https://github.com/PyO3/pyo3/pull/1888)
- Fix cross-compiling to Python 3.7 builds with the "m" abi flag. [#1908](https://github.com/PyO3/pyo3/pull/1908)
- Fix `__mod__` magic method fallback to `__rmod__`. [#1934](https://github.com/PyO3/pyo3/pull/1934).
- Fix missing FFI definition `_PyImport_FindExtensionObject` on Python 3.10. [#1942](https://github.com/PyO3/pyo3/pull/1942)
## [0.14.5] - 2021-09-05
### Added
- Make `pyo3_build_config::InterpreterConfig` and subfields public. [#1848](https://github.com/PyO3/pyo3/pull/1848)
- Add `resolve-config` feature to the `pyo3-build-config` to control whether its build script does anything. [#1856](https://github.com/PyO3/pyo3/pull/1856)
### Fixed
- Fix 0.14.4 compile regression on `s390x-unknown-linux-gnu` target. [#1850](https://github.com/PyO3/pyo3/pull/1850)
## [0.14.4] - 2021-08-29
### Changed
- Mark `PyString::data` as `unsafe` and disable it and some supporting PyUnicode FFI APIs (which depend on a C bitfield) on big-endian targets. [#1834](https://github.com/PyO3/pyo3/pull/1834)
## [0.14.3] - 2021-08-22
### Added
- Add `PyString::data` to access the raw bytes stored in a Python string. [#1794](https://github.com/PyO3/pyo3/pull/1794)
### Fixed
- Raise `AttributeError` to avoid panic when calling `del` on a `#[setter]` defined class property. [#1779](https://github.com/PyO3/pyo3/pull/1779)
- Restrict FFI definitions `PyGILState_Check` and `Py_tracefunc` to the unlimited API. [#1787](https://github.com/PyO3/pyo3/pull/1787)
- Add missing `_type` field to `PyStatus` struct definition. [#1791](https://github.com/PyO3/pyo3/pull/1791)
- Reduce lower bound `num-complex` optional dependency to support interop with `rust-numpy` and `ndarray` when building with the MSRV of 1.41 [#1799](https://github.com/PyO3/pyo3/pull/1799)
- Fix memory leak in `Python::run_code`. [#1806](https://github.com/PyO3/pyo3/pull/1806)
- Fix memory leak in `PyModule::from_code`. [#1810](https://github.com/PyO3/pyo3/pull/1810)
- Remove use of `pyo3::` in `pyo3::types::datetime` which broke builds using `-Z avoid-dev-deps` [#1811](https://github.com/PyO3/pyo3/pull/1811)
## [0.14.2] - 2021-08-09
### Added
- Add `indexmap` feature to add `ToPyObject`, `IntoPy` and `FromPyObject` implementations for `indexmap::IndexMap`. [#1728](https://github.com/PyO3/pyo3/pull/1728)
- Add `pyo3_build_config::add_extension_module_link_args` to use in build scripts to set linker arguments (for macOS). [#1755](https://github.com/PyO3/pyo3/pull/1755)
- Add `Python::with_gil_unchecked` unsafe variation of `Python::with_gil` to allow obtaining a `Python` in scenarios where `Python::with_gil` would fail. [#1769](https://github.com/PyO3/pyo3/pull/1769)
### Changed
- `PyErr::new` no longer acquires the Python GIL internally. [#1724](https://github.com/PyO3/pyo3/pull/1724)
- Reverted PyO3 0.14.0's use of `cargo:rustc-cdylib-link-arg` in its build script, as Cargo unintentionally allowed crates to pass linker args to downstream crates in this way. Projects supporting macOS may need to restore `.cargo/config.toml` files. [#1755](https://github.com/PyO3/pyo3/pull/1755)
### Fixed
- Fix regression in 0.14.0 rejecting usage of `#[doc(hidden)]` on structs and functions annotated with PyO3 macros. [#1722](https://github.com/PyO3/pyo3/pull/1722)
- Fix regression in 0.14.0 leading to incorrect code coverage being computed for `#[pyfunction]`s. [#1726](https://github.com/PyO3/pyo3/pull/1726)
- Fix incorrect FFI definition of `Py_Buffer` on PyPy. [#1737](https://github.com/PyO3/pyo3/pull/1737)
- Fix incorrect calculation of `dictoffset` on 32-bit Windows. [#1475](https://github.com/PyO3/pyo3/pull/1475)
- Fix regression in 0.13.2 leading to linking to incorrect Python library on Windows "gnu" targets. [#1759](https://github.com/PyO3/pyo3/pull/1759)
- Fix compiler warning: deny trailing semicolons in expression macro. [#1762](https://github.com/PyO3/pyo3/pull/1762)
- Fix incorrect FFI definition of `Py_DecodeLocale`. The 2nd argument is now `*mut Py_ssize_t` instead of `Py_ssize_t`. [#1766](https://github.com/PyO3/pyo3/pull/1766)
## [0.14.1] - 2021-07-04
### Added
- Implement `IntoPy<PyObject>` for `&PathBuf` and `&OsString`. [#1712](https://github.com/PyO3/pyo3/pull/1712)
### Fixed
- Fix crashes on PyPy due to incorrect definitions of `PyList_SET_ITEM`. [#1713](https://github.com/PyO3/pyo3/pull/1713)
## [0.14.0] - 2021-07-03
### Packaging
- Update `num-bigint` optional dependency to 0.4. [#1481](https://github.com/PyO3/pyo3/pull/1481)
- Update `num-complex` optional dependency to 0.4. [#1482](https://github.com/PyO3/pyo3/pull/1482)
- Extend `hashbrown` optional dependency supported versions to include 0.11. [#1496](https://github.com/PyO3/pyo3/pull/1496)
- Support PyPy 3.7. [#1538](https://github.com/PyO3/pyo3/pull/1538)
### Added
- Extend conversions for `[T; N]` to all `N` using const generics (on Rust 1.51 and up). [#1128](https://github.com/PyO3/pyo3/pull/1128)
- Add conversions between `OsStr`/ `OsString` and Python strings. [#1379](https://github.com/PyO3/pyo3/pull/1379)
- Add conversions between `Path`/ `PathBuf` and Python strings (and `pathlib.Path` objects). [#1379](https://github.com/PyO3/pyo3/pull/1379) [#1654](https://github.com/PyO3/pyo3/pull/1654)
- Add a new set of `#[pyo3(...)]` attributes to control various PyO3 macro functionality:
- `#[pyo3(from_py_with = "...")]` function arguments and struct fields to override the default from-Python conversion. [#1411](https://github.com/PyO3/pyo3/pull/1411)
- `#[pyo3(name = "...")]` for setting Python names. [#1567](https://github.com/PyO3/pyo3/pull/1567)
- `#[pyo3(text_signature = "...")]` for setting text signature. [#1658](https://github.com/PyO3/pyo3/pull/1658)
- Add FFI definition `PyCFunction_CheckExact` for Python 3.9 and later. [#1425](https://github.com/PyO3/pyo3/pull/1425)
- Add FFI definition `Py_IS_TYPE`. [#1429](https://github.com/PyO3/pyo3/pull/1429)
- Add FFI definition `_Py_InitializeMain`. [#1473](https://github.com/PyO3/pyo3/pull/1473)
- Add FFI definitions from `cpython/import.h`. [#1475](https://github.com/PyO3/pyo3/pull/1475)
- Add tuple and unit struct support for `#[pyclass]` macro. [#1504](https://github.com/PyO3/pyo3/pull/1504)
- Add FFI definition `PyDateTime_TimeZone_UTC`. [#1572](https://github.com/PyO3/pyo3/pull/1572)
- Add support for `#[pyclass(extends=Exception)]`. [#1591](https://github.com/PyO3/pyo3/pull/1591)
- Add `PyErr::cause` and `PyErr::set_cause`. [#1679](https://github.com/PyO3/pyo3/pull/1679)
- Add FFI definitions from `cpython/pystate.h`. [#1687](https://github.com/PyO3/pyo3/pull/1687/)
- Add `wrap_pyfunction!` macro to `pyo3::prelude`. [#1695](https://github.com/PyO3/pyo3/pull/1695)
### Changed
- Allow only one `#[pymethods]` block per `#[pyclass]` by default, to remove the dependency on `inventory`. Add a `multiple-pymethods` feature to opt-in the original behavior and dependency on `inventory`. [#1457](https://github.com/PyO3/pyo3/pull/1457)
- Change `PyTimeAccess::get_fold` to return a `bool` instead of a `u8`. [#1397](https://github.com/PyO3/pyo3/pull/1397)
- Deprecate FFI definition `PyCFunction_Call` for Python 3.9 and up. [#1425](https://github.com/PyO3/pyo3/pull/1425)
- Deprecate FFI definition `PyModule_GetFilename`. [#1425](https://github.com/PyO3/pyo3/pull/1425)
- The `auto-initialize` feature is no longer enabled by default. [#1443](https://github.com/PyO3/pyo3/pull/1443)
- Change `PyCFunction::new` and `PyCFunction::new_with_keywords` to take `&'static str` arguments rather than implicitly copying (and leaking) them. [#1450](https://github.com/PyO3/pyo3/pull/1450)
- Deprecate `PyModule::call`, `PyModule::call0`, `PyModule::call1` and `PyModule::get`. [#1492](https://github.com/PyO3/pyo3/pull/1492)
- Add length information to `PyBufferError`s raised from `PyBuffer::copy_to_slice` and `PyBuffer::copy_from_slice`. [#1534](https://github.com/PyO3/pyo3/pull/1534)
- Automatically set `-undefined` and `dynamic_lookup` linker arguments on macOS with the `extension-module` feature. [#1539](https://github.com/PyO3/pyo3/pull/1539)
- Deprecate `#[pyproto]` methods which are easier to implement as `#[pymethods]`: [#1560](https://github.com/PyO3/pyo3/pull/1560)
- `PyBasicProtocol::__bytes__` and `PyBasicProtocol::__format__`
- `PyContextProtocol::__enter__` and `PyContextProtocol::__exit__`
- `PyDescrProtocol::__delete__` and `PyDescrProtocol::__set_name__`
- `PyMappingProtocol::__reversed__`
- `PyNumberProtocol::__complex__` and `PyNumberProtocol::__round__`
- `PyAsyncProtocol::__aenter__` and `PyAsyncProtocol::__aexit__`
- Deprecate several attributes in favor of the new `#[pyo3(...)]` options:
- `#[name = "..."]`, replaced by `#[pyo3(name = "...")]` [#1567](https://github.com/PyO3/pyo3/pull/1567)
- `#[pyfn(m, "name")]`, replaced by `#[pyfn(m)] #[pyo3(name = "...")]`. [#1610](https://github.com/PyO3/pyo3/pull/1610)
- `#[pymodule(name)]`, replaced by `#[pymodule] #[pyo3(name = "...")]` [#1650](https://github.com/PyO3/pyo3/pull/1650)
- `#[text_signature = "..."]`, replaced by `#[pyo3(text_signature = "...")]`. [#1658](https://github.com/PyO3/pyo3/pull/1658)
- Reduce LLVM line counts to improve compilation times. [#1604](https://github.com/PyO3/pyo3/pull/1604)
- No longer call `PyEval_InitThreads` in `#[pymodule]` init code. [#1630](https://github.com/PyO3/pyo3/pull/1630)
- Use `METH_FASTCALL` argument passing convention, when possible, to improve `#[pyfunction]` and method performance.
[#1619](https://github.com/PyO3/pyo3/pull/1619), [#1660](https://github.com/PyO3/pyo3/pull/1660)
- Filter sysconfigdata candidates by architecture when cross-compiling. [#1626](https://github.com/PyO3/pyo3/pull/1626)
### Removed
- Remove deprecated exception names `BaseException` etc. [#1426](https://github.com/PyO3/pyo3/pull/1426)
- Remove deprecated methods `Python::is_instance`, `Python::is_subclass`, `Python::release`, `Python::xdecref`, and `Py::from_owned_ptr_or_panic`. [#1426](https://github.com/PyO3/pyo3/pull/1426)
- Remove many FFI definitions which never existed in the Python C-API:
- (previously deprecated) `PyGetSetDef_INIT`, `PyGetSetDef_DICT`, `PyCoro_Check`, `PyCoroWrapper_Check`, and `PyAsyncGen_Check` [#1426](https://github.com/PyO3/pyo3/pull/1426)
- `PyMethodDef_INIT` [#1426](https://github.com/PyO3/pyo3/pull/1426)
- `PyTypeObject_INIT` [#1429](https://github.com/PyO3/pyo3/pull/1429)
- `PyObject_Check`, `PySuper_Check`, and `FreeFunc` [#1438](https://github.com/PyO3/pyo3/pull/1438)
- `PyModuleDef_INIT` [#1630](https://github.com/PyO3/pyo3/pull/1630)
- Remove pyclass implementation details from `PyTypeInfo`:
- `Type`, `DESCRIPTION`, and `FLAGS` [#1456](https://github.com/PyO3/pyo3/pull/1456)
- `BaseType`, `BaseLayout`, `Layout`, `Initializer` [#1596](https://github.com/PyO3/pyo3/pull/1596)
- Remove `PYO3_CROSS_INCLUDE_DIR` environment variable and the associated C header parsing functionality. [#1521](https://github.com/PyO3/pyo3/pull/1521)
- Remove `raw_pycfunction!` macro. [#1619](https://github.com/PyO3/pyo3/pull/1619)
- Remove `PyClassAlloc` trait. [#1657](https://github.com/PyO3/pyo3/pull/1657)
- Remove `PyList::get_parked_item`. [#1664](https://github.com/PyO3/pyo3/pull/1664)
### Fixed
- Remove FFI definition `PyCFunction_ClearFreeList` for Python 3.9 and later. [#1425](https://github.com/PyO3/pyo3/pull/1425)
- `PYO3_CROSS_LIB_DIR` environment variable no longer required when compiling for x86-64 Python from macOS arm64 and reverse. [#1428](https://github.com/PyO3/pyo3/pull/1428)
- Fix FFI definition `_PyEval_RequestCodeExtraIndex`, which took an argument of the wrong type. [#1429](https://github.com/PyO3/pyo3/pull/1429)
- Fix FFI definition `PyIndex_Check` missing with the `abi3` feature. [#1436](https://github.com/PyO3/pyo3/pull/1436)
- Fix incorrect `TypeError` raised when keyword-only argument passed along with a positional argument in `*args`. [#1440](https://github.com/PyO3/pyo3/pull/1440)
- Fix inability to use a named lifetime for `&PyTuple` of `*args` in `#[pyfunction]`. [#1440](https://github.com/PyO3/pyo3/pull/1440)
- Fix use of Python argument for `#[pymethods]` inside macro expansions. [#1505](https://github.com/PyO3/pyo3/pull/1505)
- No longer include `__doc__` in `__all__` generated for `#[pymodule]`. [#1509](https://github.com/PyO3/pyo3/pull/1509)
- Always use cross-compiling configuration if any of the `PYO3_CROSS` family of environment variables are set. [#1514](https://github.com/PyO3/pyo3/pull/1514)
- Support `EnvironmentError`, `IOError`, and `WindowsError` on PyPy. [#1533](https://github.com/PyO3/pyo3/pull/1533)
- Fix unnecessary rebuilds when cycling between `cargo check` and `cargo clippy` in a Python virtualenv. [#1557](https://github.com/PyO3/pyo3/pull/1557)
- Fix segfault when dereferencing `ffi::PyDateTimeAPI` without the GIL. [#1563](https://github.com/PyO3/pyo3/pull/1563)
- Fix memory leak in `FromPyObject` implementations for `u128` and `i128`. [#1638](https://github.com/PyO3/pyo3/pull/1638)
- Fix `#[pyclass(extends=PyDict)]` leaking the dict contents on drop. [#1657](https://github.com/PyO3/pyo3/pull/1657)
- Fix segfault when calling `PyList::get_item` with negative indices. [#1668](https://github.com/PyO3/pyo3/pull/1668)
- Fix FFI definitions of `PyEval_SetProfile`/`PyEval_SetTrace` to take `Option<Py_tracefunc>` parameters. [#1692](https://github.com/PyO3/pyo3/pull/1692)
- Fix `ToPyObject` impl for `HashSet` to accept non-default hashers. [#1702](https://github.com/PyO3/pyo3/pull/1702)
## [0.13.2] - 2021-02-12
### Packaging
- Lower minimum supported Rust version to 1.41. [#1421](https://github.com/PyO3/pyo3/pull/1421)
### Added
- Add unsafe API `with_embedded_python_interpreter` to initialize a Python interpreter, execute a closure, and finalize the interpreter. [#1355](https://github.com/PyO3/pyo3/pull/1355)
- Add `serde` feature which provides implementations of `Serialize` and `Deserialize` for `Py<T>`. [#1366](https://github.com/PyO3/pyo3/pull/1366)
- Add FFI definition `_PyCFunctionFastWithKeywords` on Python 3.7 and up. [#1384](https://github.com/PyO3/pyo3/pull/1384)
- Add `PyDateTime::new_with_fold` method. [#1398](https://github.com/PyO3/pyo3/pull/1398)
- Add `size_hint` impls for `{PyDict,PyList,PySet,PyTuple}Iterator`s. [#1699](https://github.com/PyO3/pyo3/pull/1699)
### Changed
- `prepare_freethreaded_python` will no longer register an `atexit` handler to call `Py_Finalize`. This resolves a number of issues with incompatible C extensions causing crashes at finalization. [#1355](https://github.com/PyO3/pyo3/pull/1355)
- Mark `PyLayout::py_init`, `PyClassDict::clear_dict`, and `opt_to_pyobj` safe, as they do not perform any unsafe operations. [#1404](https://github.com/PyO3/pyo3/pull/1404)
### Fixed
- Fix support for using `r#raw_idents` as argument names in pyfunctions. [#1383](https://github.com/PyO3/pyo3/pull/1383)
- Fix typo in FFI definition for `PyFunction_GetCode` (was incorrectly `PyFunction_Code`). [#1387](https://github.com/PyO3/pyo3/pull/1387)
- Fix FFI definitions `PyMarshal_WriteObjectToString` and `PyMarshal_ReadObjectFromString` as available in limited API. [#1387](https://github.com/PyO3/pyo3/pull/1387)
- Fix FFI definitions `PyListObject` and those from `funcobject.h` as requiring non-limited API. [#1387](https://github.com/PyO3/pyo3/pull/1387)
- Fix unqualified `Result` usage in `pyobject_native_type_base`. [#1402](https://github.com/PyO3/pyo3/pull/1402)
- Fix build on systems where the default Python encoding is not UTF-8. [#1405](https://github.com/PyO3/pyo3/pull/1405)
- Fix build on mingw / MSYS2. [#1423](https://github.com/PyO3/pyo3/pull/1423)
## [0.13.1] - 2021-01-10
### Added
- Add support for `#[pyclass(dict)]` and `#[pyclass(weakref)]` with the `abi3` feature on Python 3.9 and up. [#1342](https://github.com/PyO3/pyo3/pull/1342)
- Add FFI definitions `PyOS_BeforeFork`, `PyOS_AfterFork_Parent`, `PyOS_AfterFork_Child` for Python 3.7 and up. [#1348](https://github.com/PyO3/pyo3/pull/1348)
- Add an `auto-initialize` feature to control whether PyO3 should automatically initialize an embedded Python interpreter. For compatibility this feature is enabled by default in PyO3 0.13.1, but is planned to become opt-in from PyO3 0.14.0. [#1347](https://github.com/PyO3/pyo3/pull/1347)
- Add support for cross-compiling to Windows without needing `PYO3_CROSS_INCLUDE_DIR`. [#1350](https://github.com/PyO3/pyo3/pull/1350)
### Deprecated
- Deprecate FFI definitions `PyEval_CallObjectWithKeywords`, `PyEval_CallObject`, `PyEval_CallFunction`, `PyEval_CallMethod` when building for Python 3.9. [#1338](https://github.com/PyO3/pyo3/pull/1338)
- Deprecate FFI definitions `PyGetSetDef_DICT` and `PyGetSetDef_INIT` which have never been in the Python API. [#1341](https://github.com/PyO3/pyo3/pull/1341)
- Deprecate FFI definitions `PyGen_NeedsFinalizing`, `PyImport_Cleanup` (removed in 3.9), and `PyOS_InitInterrupts` (3.10). [#1348](https://github.com/PyO3/pyo3/pull/1348)
- Deprecate FFI definition `PyOS_AfterFork` for Python 3.7 and up. [#1348](https://github.com/PyO3/pyo3/pull/1348)
- Deprecate FFI definitions `PyCoro_Check`, `PyAsyncGen_Check`, and `PyCoroWrapper_Check`, which have never been in the Python API (for the first two, it is possible to use `PyCoro_CheckExact` and `PyAsyncGen_CheckExact` instead; these are the actual functions provided by the Python API). [#1348](https://github.com/PyO3/pyo3/pull/1348)
- Deprecate FFI definitions for `PyUnicode_FromUnicode`, `PyUnicode_AsUnicode` and `PyUnicode_AsUnicodeAndSize`, which will be removed from 3.12 and up due to [PEP 623](https://www.python.org/dev/peps/pep-0623/). [#1370](https://github.com/PyO3/pyo3/pull/1370)
### Removed
- Remove FFI definition `PyFrame_ClearFreeList` when building for Python 3.9. [#1341](https://github.com/PyO3/pyo3/pull/1341)
- Remove FFI definition `_PyDict_Contains` when building for Python 3.10. [#1341](https://github.com/PyO3/pyo3/pull/1341)
- Remove FFI definitions `PyGen_NeedsFinalizing` and `PyImport_Cleanup` (for 3.9 and up), and `PyOS_InitInterrupts` (3.10). [#1348](https://github.com/PyO3/pyo3/pull/1348)
### Fixed
- Stop including `Py_TRACE_REFS` config setting automatically if `Py_DEBUG` is set on Python 3.8 and up. [#1334](https://github.com/PyO3/pyo3/pull/1334)
- Remove `#[deny(warnings)]` attribute (and instead refuse warnings only in CI). [#1340](https://github.com/PyO3/pyo3/pull/1340)
- Fix deprecation warning for missing `__module__` with `#[pyclass]`. [#1343](https://github.com/PyO3/pyo3/pull/1343)
- Correct return type of `PyFrozenSet::empty` to `&PyFrozenSet` (was incorrectly `&PySet`). [#1351](https://github.com/PyO3/pyo3/pull/1351)
- Fix missing `Py_INCREF` on heap type objects on Python versions before 3.8. [#1365](https://github.com/PyO3/pyo3/pull/1365)
## [0.13.0] - 2020-12-22
### Packaging
- Drop support for Python 3.5 (as it is now end-of-life). [#1250](https://github.com/PyO3/pyo3/pull/1250)
- Bump minimum supported Rust version to 1.45. [#1272](https://github.com/PyO3/pyo3/pull/1272)
- Bump indoc dependency to 1.0. [#1272](https://github.com/PyO3/pyo3/pull/1272)
- Bump paste dependency to 1.0. [#1272](https://github.com/PyO3/pyo3/pull/1272)
- Rename internal crates `pyo3cls` and `pyo3-derive-backend` to `pyo3-macros` and `pyo3-macros-backend` respectively. [#1317](https://github.com/PyO3/pyo3/pull/1317)
### Added
- Add support for building for CPython limited API. Opting-in to the limited API enables a single extension wheel built with PyO3 to be installable on multiple Python versions. This required a few minor changes to runtime behavior of PyO3 `#[pyclass]` types. See the migration guide for full details. [#1152](https://github.com/PyO3/pyo3/pull/1152)
- Add feature flags `abi3-py36`, `abi3-py37`, `abi3-py38` etc. to set the minimum Python version when using the limited API. [#1263](https://github.com/PyO3/pyo3/pull/1263)
- Add argument names to `TypeError` messages generated by pymethod wrappers. [#1212](https://github.com/PyO3/pyo3/pull/1212)
- Add FFI definitions for PEP 587 "Python Initialization Configuration". [#1247](https://github.com/PyO3/pyo3/pull/1247)
- Add FFI definitions for `PyEval_SetProfile` and `PyEval_SetTrace`. [#1255](https://github.com/PyO3/pyo3/pull/1255)
- Add FFI definitions for context.h functions (`PyContext_New`, etc). [#1259](https://github.com/PyO3/pyo3/pull/1259)
- Add `PyAny::is_instance` method. [#1276](https://github.com/PyO3/pyo3/pull/1276)
- Add support for conversion between `char` and `PyString`. [#1282](https://github.com/PyO3/pyo3/pull/1282)
- Add FFI definitions for `PyBuffer_SizeFromFormat`, `PyObject_LengthHint`, `PyObject_CallNoArgs`, `PyObject_CallOneArg`, `PyObject_CallMethodNoArgs`, `PyObject_CallMethodOneArg`, `PyObject_VectorcallDict`, and `PyObject_VectorcallMethod`. [#1287](https://github.com/PyO3/pyo3/pull/1287)
- Add conversions between `u128`/`i128` and `PyLong` for PyPy. [#1310](https://github.com/PyO3/pyo3/pull/1310)
- Add `Python::version` and `Python::version_info` to get the running interpreter version. [#1322](https://github.com/PyO3/pyo3/pull/1322)
- Add conversions for tuples of length 10, 11, and 12. [#1454](https://github.com/PyO3/pyo3/pull/1454)
### Changed
- Change return type of `PyType::name` from `Cow<str>` to `PyResult<&str>`. [#1152](https://github.com/PyO3/pyo3/pull/1152)
- `#[pyclass(subclass)]` is now required for subclassing from Rust (was previously just required for subclassing from Python). [#1152](https://github.com/PyO3/pyo3/pull/1152)
- Change `PyIterator` to be consistent with other native types: it is now used as `&PyIterator` instead of `PyIterator<'a>`. [#1176](https://github.com/PyO3/pyo3/pull/1176)
- Change formatting of `PyDowncastError` messages to be closer to Python's builtin error messages. [#1212](https://github.com/PyO3/pyo3/pull/1212)
- Change `Debug` and `Display` impls for `PyException` to be consistent with `PyAny`. [#1275](https://github.com/PyO3/pyo3/pull/1275)
- Change `Debug` impl of `PyErr` to output more helpful information (acquiring the GIL if necessary). [#1275](https://github.com/PyO3/pyo3/pull/1275)
- Rename `PyTypeInfo::is_instance` and `PyTypeInfo::is_exact_instance` to `PyTypeInfo::is_type_of` and `PyTypeInfo::is_exact_type_of`. [#1278](https://github.com/PyO3/pyo3/pull/1278)
- Optimize `PyAny::call0`, `Py::call0` and `PyAny::call_method0` and `Py::call_method0` on Python 3.9 and up. [#1287](https://github.com/PyO3/pyo3/pull/1285)
- Require double-quotes for pyclass name argument e.g `#[pyclass(name = "MyClass")]`. [#1303](https://github.com/PyO3/pyo3/pull/1303)
### Deprecated
- Deprecate `Python::is_instance`, `Python::is_subclass`, `Python::release`, and `Python::xdecref`. [#1292](https://github.com/PyO3/pyo3/pull/1292)
### Removed
- Remove deprecated ffi definitions `PyUnicode_AsUnicodeCopy`, `PyUnicode_GetMax`, `_Py_CheckRecursionLimit`, `PyObject_AsCharBuffer`, `PyObject_AsReadBuffer`, `PyObject_CheckReadBuffer` and `PyObject_AsWriteBuffer`, which will be removed in Python 3.10. [#1217](https://github.com/PyO3/pyo3/pull/1217)
- Remove unused `python3` feature. [#1235](https://github.com/PyO3/pyo3/pull/1235)
### Fixed
- Fix missing field in `PyCodeObject` struct (`co_posonlyargcount`) - caused invalid access to other fields in Python >3.7. [#1260](https://github.com/PyO3/pyo3/pull/1260)
- Fix building for `x86_64-unknown-linux-musl` target from `x86_64-unknown-linux-gnu` host. [#1267](https://github.com/PyO3/pyo3/pull/1267)
- Fix `#[text_signature]` interacting badly with rust `r#raw_identifiers`. [#1286](https://github.com/PyO3/pyo3/pull/1286)
- Fix FFI definitions for `PyObject_Vectorcall` and `PyVectorcall_Call`. [#1287](https://github.com/PyO3/pyo3/pull/1285)
- Fix building with Anaconda python inside a virtualenv. [#1290](https://github.com/PyO3/pyo3/pull/1290)
- Fix definition of opaque FFI types. [#1312](https://github.com/PyO3/pyo3/pull/1312)
- Fix using custom error type in pyclass `#[new]` methods. [#1319](https://github.com/PyO3/pyo3/pull/1319)
## [0.12.4] - 2020-11-28
### Fixed
- Fix reference count bug in implementation of `From<Py<T>>` for `PyObject`, a regression introduced in PyO3 0.12. [#1297](https://github.com/PyO3/pyo3/pull/1297)
## [0.12.3] - 2020-10-12
### Fixed
- Fix support for Rust versions 1.39 to 1.44, broken by an incorrect internal update to paste 1.0 which was done in PyO3 0.12.2. [#1234](https://github.com/PyO3/pyo3/pull/1234)
## [0.12.2] - 2020-10-12
### Added
- Add support for keyword-only arguments without default values in `#[pyfunction]`. [#1209](https://github.com/PyO3/pyo3/pull/1209)
- Add `Python::check_signals` as a safe wrapper for `PyErr_CheckSignals`. [#1214](https://github.com/PyO3/pyo3/pull/1214)
### Fixed
- Fix invalid document for protocol methods. [#1169](https://github.com/PyO3/pyo3/pull/1169)
- Hide docs of PyO3 private implementation details in `pyo3::class::methods`. [#1169](https://github.com/PyO3/pyo3/pull/1169)
- Fix unnecessary rebuild on PATH changes when the python interpreter is provided by PYO3_PYTHON. [#1231](https://github.com/PyO3/pyo3/pull/1231)
## [0.12.1] - 2020-09-16
### Fixed
- Fix building for a 32-bit Python on 64-bit Windows with a 64-bit Rust toolchain. [#1179](https://github.com/PyO3/pyo3/pull/1179)
- Fix building on platforms where `c_char` is `u8`. [#1182](https://github.com/PyO3/pyo3/pull/1182)
## [0.12.0] - 2020-09-12
### Added
- Add FFI definitions `Py_FinalizeEx`, `PyOS_getsig`, and `PyOS_setsig`. [#1021](https://github.com/PyO3/pyo3/pull/1021)
- Add `PyString::to_str` for accessing `PyString` as `&str`. [#1023](https://github.com/PyO3/pyo3/pull/1023)
- Add `Python::with_gil` for executing a closure with the Python GIL. [#1037](https://github.com/PyO3/pyo3/pull/1037)
- Add type information to failures in `PyAny::downcast`. [#1050](https://github.com/PyO3/pyo3/pull/1050)
- Implement `Debug` for `PyIterator`. [#1051](https://github.com/PyO3/pyo3/pull/1051)
- Add `PyBytes::new_with` and `PyByteArray::new_with` for initialising `bytes` and `bytearray` objects using a closure. [#1074](https://github.com/PyO3/pyo3/pull/1074)
- Add `#[derive(FromPyObject)]` macro for enums and structs. [#1065](https://github.com/PyO3/pyo3/pull/1065)
- Add `Py::as_ref` and `Py::into_ref` for converting `Py<T>` to `&T`. [#1098](https://github.com/PyO3/pyo3/pull/1098)
- Add ability to return `Result` types other than `PyResult` from `#[pyfunction]`, `#[pymethod]` and `#[pyproto]` functions. [#1106](https://github.com/PyO3/pyo3/pull/1118).
- Implement `ToPyObject`, `IntoPy`, and `FromPyObject` for [hashbrown](https://crates.io/crates/hashbrown)'s `HashMap` and `HashSet` types (requires the `hashbrown` feature). [#1114](https://github.com/PyO3/pyo3/pull/1114)
- Add `#[pyfunction(pass_module)]` and `#[pyfn(pass_module)]` to pass the module object as the first function argument. [#1143](https://github.com/PyO3/pyo3/pull/1143)
- Add `PyModule::add_function` and `PyModule::add_submodule` as typed alternatives to `PyModule::add_wrapped`. [#1143](https://github.com/PyO3/pyo3/pull/1143)
- Add native `PyCFunction` and `PyFunction` types. [#1163](https://github.com/PyO3/pyo3/pull/1163)
### Changed
- Rework exception types: [#1024](https://github.com/PyO3/pyo3/pull/1024) [#1115](https://github.com/PyO3/pyo3/pull/1115)
- Rename exception types from e.g. `RuntimeError` to `PyRuntimeError`. The old names continue to exist but are deprecated.
- Exception objects are now accessible as `&T` or `Py<T>`, just like other Python-native types.
- Rename `PyException::py_err` to `PyException::new_err`.
- Rename `PyUnicodeDecodeErr::new_err` to `PyUnicodeDecodeErr::new`.
- Remove `PyStopIteration::stop_iteration`.
- Require `T: Send` for the return value `T` of `Python::allow_threads`. [#1036](https://github.com/PyO3/pyo3/pull/1036)
- Rename `PYTHON_SYS_EXECUTABLE` to `PYO3_PYTHON`. The old name will continue to work (undocumented) but will be removed in a future release. [#1039](https://github.com/PyO3/pyo3/pull/1039)
- Remove `unsafe` from signature of `PyType::as_type_ptr`. [#1047](https://github.com/PyO3/pyo3/pull/1047)
- Change return type of `PyIterator::from_object` to `PyResult<PyIterator>` (was `Result<PyIterator, PyDowncastError>`). [#1051](https://github.com/PyO3/pyo3/pull/1051)
- `IntoPy` is no longer implied by `FromPy`. [#1063](https://github.com/PyO3/pyo3/pull/1063)
- Change `PyObject` to be a type alias for `Py<PyAny>`. [#1063](https://github.com/PyO3/pyo3/pull/1063)
- Rework `PyErr` to be compatible with the `std::error::Error` trait: [#1067](https://github.com/PyO3/pyo3/pull/1067) [#1115](https://github.com/PyO3/pyo3/pull/1115)
- Implement `Display`, `Error`, `Send` and `Sync` for `PyErr` and `PyErrArguments`.
- Add `PyErr::instance` for accessing `PyErr` as `&PyBaseException`.
- `PyErr`'s fields are now an implementation detail. The equivalent values can be accessed with `PyErr::ptype`, `PyErr::pvalue` and `PyErr::ptraceback`.
- Change receiver of `PyErr::print` and `PyErr::print_and_set_sys_last_vars` to `&self` (was `self`).
- Remove `PyErrValue`, `PyErr::from_value`, `PyErr::into_normalized`, and `PyErr::normalize`.
- Remove `PyException::into`.
- Remove `Into<PyResult<T>>` for `PyErr` and `PyException`.
- Change methods generated by `#[pyproto]` to return `NotImplemented` if Python should try a reversed operation. [#1072](https://github.com/PyO3/pyo3/pull/1072)
- Change argument to `PyModule::add` to `impl IntoPy<PyObject>` (was `impl ToPyObject`). [#1124](https://github.com/PyO3/pyo3/pull/1124)
### Removed
- Remove many exception and `PyErr` APIs; see the "changed" section above. [#1024](https://github.com/PyO3/pyo3/pull/1024) [#1067](https://github.com/PyO3/pyo3/pull/1067) [#1115](https://github.com/PyO3/pyo3/pull/1115)
- Remove `PyString::to_string` (use new `PyString::to_str`). [#1023](https://github.com/PyO3/pyo3/pull/1023)
- Remove `PyString::as_bytes`. [#1023](https://github.com/PyO3/pyo3/pull/1023)
- Remove `Python::register_any`. [#1023](https://github.com/PyO3/pyo3/pull/1023)
- Remove `GILGuard::acquire` from the public API. Use `Python::acquire_gil` or `Python::with_gil`. [#1036](https://github.com/PyO3/pyo3/pull/1036)
- Remove the `FromPy` trait. [#1063](https://github.com/PyO3/pyo3/pull/1063)
- Remove the `AsPyRef` trait. [#1098](https://github.com/PyO3/pyo3/pull/1098)
### Fixed
- Correct FFI definitions `Py_SetProgramName` and `Py_SetPythonHome` to take `*const` arguments (was `*mut`). [#1021](https://github.com/PyO3/pyo3/pull/1021)
- Fix `FromPyObject` for `num_bigint::BigInt` for Python objects with an `__index__` method. [#1027](https://github.com/PyO3/pyo3/pull/1027)
- Correct FFI definition `_PyLong_AsByteArray` to take `*mut c_uchar` argument (was `*const c_uchar`). [#1029](https://github.com/PyO3/pyo3/pull/1029)
- Fix segfault with `#[pyclass(dict, unsendable)]`. [#1058](https://github.com/PyO3/pyo3/pull/1058) [#1059](https://github.com/PyO3/pyo3/pull/1059)
- Fix using `&Self` as an argument type for functions in a `#[pymethods]` block. [#1071](https://github.com/PyO3/pyo3/pull/1071)
- Fix best-effort build against PyPy 3.6. [#1092](https://github.com/PyO3/pyo3/pull/1092)
- Fix many cases of lifetime elision in `#[pyproto]` implementations. [#1093](https://github.com/PyO3/pyo3/pull/1093)
- Fix detection of Python build configuration when cross-compiling. [#1095](https://github.com/PyO3/pyo3/pull/1095)
- Always link against libpython on android with the `extension-module` feature. [#1095](https://github.com/PyO3/pyo3/pull/1095)
- Fix the `+` operator not trying `__radd__` when both `__add__` and `__radd__` are defined in `PyNumberProtocol` (and similar for all other reversible operators). [#1107](https://github.com/PyO3/pyo3/pull/1107)
- Fix building with Anaconda python. [#1175](https://github.com/PyO3/pyo3/pull/1175)
## [0.11.1] - 2020-06-30
### Added
- `#[pyclass(unsendable)]`. [#1009](https://github.com/PyO3/pyo3/pull/1009)
### Changed
- Update `parking_lot` dependency to `0.11`. [#1010](https://github.com/PyO3/pyo3/pull/1010)
## [0.11.0] - 2020-06-28
### Added
- Support stable versions of Rust (>=1.39). [#969](https://github.com/PyO3/pyo3/pull/969)
- Add FFI definition `PyObject_AsFileDescriptor`. [#938](https://github.com/PyO3/pyo3/pull/938)
- Add `PyByteArray::data`, `PyByteArray::as_bytes`, and `PyByteArray::as_bytes_mut`. [#967](https://github.com/PyO3/pyo3/pull/967)
- Add `GILOnceCell` to use in situations where `lazy_static` or `once_cell` can deadlock. [#975](https://github.com/PyO3/pyo3/pull/975)
- Add `Py::borrow`, `Py::borrow_mut`, `Py::try_borrow`, and `Py::try_borrow_mut` for accessing `#[pyclass]` values. [#976](https://github.com/PyO3/pyo3/pull/976)
- Add `IterNextOutput` and `IterANextOutput` for returning from `__next__` / `__anext__`. [#997](https://github.com/PyO3/pyo3/pull/997)
### Changed
- Simplify internals of `#[pyo3(get)]` attribute. (Remove the hidden API `GetPropertyValue`.) [#934](https://github.com/PyO3/pyo3/pull/934)
- Call `Py_Finalize` at exit to flush buffers, etc. [#943](https://github.com/PyO3/pyo3/pull/943)
- Add type parameter to PyBuffer. [#951](https://github.com/PyO3/pyo3/pull/951)
- Require `Send` bound for `#[pyclass]`. [#966](https://github.com/PyO3/pyo3/pull/966)
- Add `Python` argument to most methods on `PyObject` and `Py<T>` to ensure GIL safety. [#970](https://github.com/PyO3/pyo3/pull/970)
- Change signature of `PyTypeObject::type_object` - now takes `Python` argument and returns `&PyType`. [#970](https://github.com/PyO3/pyo3/pull/970)
- Change return type of `PyTuple::slice` and `PyTuple::split_from` from `Py<PyTuple>` to `&PyTuple`. [#970](https://github.com/PyO3/pyo3/pull/970)
- Change return type of `PyTuple::as_slice` to `&[&PyAny]`. [#971](https://github.com/PyO3/pyo3/pull/971)
- Rename `PyTypeInfo::type_object` to `type_object_raw`, and add `Python` argument. [#975](https://github.com/PyO3/pyo3/pull/975)
- Update `num-complex` optional dependency from `0.2` to `0.3`. [#977](https://github.com/PyO3/pyo3/pull/977)
- Update `num-bigint` optional dependency from `0.2` to `0.3`. [#978](https://github.com/PyO3/pyo3/pull/978)
- `#[pyproto]` is re-implemented without specialization. [#961](https://github.com/PyO3/pyo3/pull/961)
- `PyClassAlloc::alloc` is renamed to `PyClassAlloc::new`. [#990](https://github.com/PyO3/pyo3/pull/990)
- `#[pyproto]` methods can now have return value `T` or `PyResult<T>` (previously only `PyResult<T>` was supported). [#996](https://github.com/PyO3/pyo3/pull/996)
- `#[pyproto]` methods can now skip annotating the return type if it is `()`. [#998](https://github.com/PyO3/pyo3/pull/998)
### Removed
- Remove `ManagedPyRef` (unused, and needs specialization) [#930](https://github.com/PyO3/pyo3/pull/930)
### Fixed
- Fix passing explicit `None` to `Option<T>` argument `#[pyfunction]` with a default value. [#936](https://github.com/PyO3/pyo3/pull/936)
- Fix `PyClass.__new__`'s not respecting subclasses when inherited by a Python class. [#990](https://github.com/PyO3/pyo3/pull/990)
- Fix returning `Option<T>` from `#[pyproto]` methods. [#996](https://github.com/PyO3/pyo3/pull/996)
- Fix accepting `PyRef<Self>` and `PyRefMut<Self>` to `#[getter]` and `#[setter]` methods. [#999](https://github.com/PyO3/pyo3/pull/999)
## [0.10.1] - 2020-05-14
### Fixed
- Fix deadlock in `Python::acquire_gil` after dropping a `PyObject` or `Py<T>`. [#924](https://github.com/PyO3/pyo3/pull/924)
## [0.10.0] - 2020-05-13
### Added
- Add FFI definition `_PyDict_NewPresized`. [#849](https://github.com/PyO3/pyo3/pull/849)
- Implement `IntoPy<PyObject>` for `HashSet` and `BTreeSet`. [#864](https://github.com/PyO3/pyo3/pull/864)
- Add `PyAny::dir` method. [#886](https://github.com/PyO3/pyo3/pull/886)
- Gate macros behind a `macros` feature (enabled by default). [#897](https://github.com/PyO3/pyo3/pull/897)
- Add ability to define class attributes using `#[classattr]` on functions in `#[pymethods]`. [#905](https://github.com/PyO3/pyo3/pull/905)
- Implement `Clone` for `PyObject` and `Py<T>`. [#908](https://github.com/PyO3/pyo3/pull/908)
- Implement `Deref<Target = PyAny>` for all builtin types. (`PyList`, `PyTuple`, `PyDict` etc.) [#911](https://github.com/PyO3/pyo3/pull/911)
- Implement `Deref<Target = PyAny>` for `PyCell<T>`. [#911](https://github.com/PyO3/pyo3/pull/911)
- Add `#[classattr]` support for associated constants in `#[pymethods]`. [#914](https://github.com/PyO3/pyo3/pull/914)
### Changed
- Panics will now be raised as a Python `PanicException`. [#797](https://github.com/PyO3/pyo3/pull/797)
- Change `PyObject` and `Py<T>` reference counts to decrement immediately upon drop when the GIL is held. [#851](https://github.com/PyO3/pyo3/pull/851)
- Allow `PyIterProtocol` methods to use either `PyRef` or `PyRefMut` as the receiver type. [#856](https://github.com/PyO3/pyo3/pull/856)
- Change the implementation of `FromPyObject` for `Py<T>` to apply to a wider range of `T`, including all `T: PyClass`. [#880](https://github.com/PyO3/pyo3/pull/880)
- Move all methods from the `ObjectProtocol` trait to the `PyAny` struct. [#911](https://github.com/PyO3/pyo3/pull/911)
- Remove need for `#![feature(specialization)]` in crates depending on PyO3. [#917](https://github.com/PyO3/pyo3/pull/917)
### Removed
- Remove `PyMethodsProtocol` trait. [#889](https://github.com/PyO3/pyo3/pull/889)
- Remove `num-traits` dependency. [#895](https://github.com/PyO3/pyo3/pull/895)
- Remove `ObjectProtocol` trait. [#911](https://github.com/PyO3/pyo3/pull/911)
- Remove `PyAny::None`. Users should use `Python::None` instead. [#911](https://github.com/PyO3/pyo3/pull/911)
- Remove all `*ProtocolImpl` traits. [#917](https://github.com/PyO3/pyo3/pull/917)
### Fixed
- Fix support for `__radd__` and other `__r*__` methods as implementations for Python mathematical operators. [#839](https://github.com/PyO3/pyo3/pull/839)
- Fix panics during garbage collection when traversing objects that were already mutably borrowed. [#855](https://github.com/PyO3/pyo3/pull/855)
- Prevent `&'static` references to Python objects as arguments to `#[pyfunction]` and `#[pymethods]`. [#869](https://github.com/PyO3/pyo3/pull/869)
- Fix lifetime safety bug with `AsPyRef::as_ref`. [#876](https://github.com/PyO3/pyo3/pull/876)
- Fix `#[pyo3(get)]` attribute on `Py<T>` fields. [#880](https://github.com/PyO3/pyo3/pull/880)
- Fix segmentation faults caused by functions such as `PyList::get_item` returning borrowed objects when it was not safe to do so. [#890](https://github.com/PyO3/pyo3/pull/890)
- Fix segmentation faults caused by nested `Python::acquire_gil` calls creating dangling references. [#893](https://github.com/PyO3/pyo3/pull/893)
- Fix segmentation faults when a panic occurs during a call to `Python::allow_threads`. [#912](https://github.com/PyO3/pyo3/pull/912)
## [0.9.2] - 2020-04-09
### Added
- `FromPyObject` implementations for `HashSet` and `BTreeSet`. [#842](https://github.com/PyO3/pyo3/pull/842)
### Fixed
- Correctly detect 32bit architecture. [#830](https://github.com/PyO3/pyo3/pull/830)
## [0.9.1] - 2020-03-23
### Fixed
- Error messages for `#[pyclass]`. [#826](https://github.com/PyO3/pyo3/pull/826)
- `FromPyObject` implementation for `PySequence`. [#827](https://github.com/PyO3/pyo3/pull/827)
## [0.9.0] - 2020-03-19
### Added
- `PyCell`, which has RefCell-like features. [#770](https://github.com/PyO3/pyo3/pull/770)
- `PyClass`, `PyLayout`, `PyClassInitializer`. [#683](https://github.com/PyO3/pyo3/pull/683)
- Implemented `IntoIterator` for `PySet` and `PyFrozenSet`. [#716](https://github.com/PyO3/pyo3/pull/716)
- `FromPyObject` is now automatically implemented for `T: Clone` pyclasses. [#730](https://github.com/PyO3/pyo3/pull/730)
- `#[pyo3(get)]` and `#[pyo3(set)]` will now use the Rust doc-comment from the field for the Python property. [#755](https://github.com/PyO3/pyo3/pull/755)
- `#[setter]` functions may now take an argument of `pyo3::Python`. [#760](https://github.com/PyO3/pyo3/pull/760)
- `PyTypeInfo::BaseLayout` and `PyClass::BaseNativeType`. [#770](https://github.com/PyO3/pyo3/pull/770)
- `PyDowncastImpl`. [#770](https://github.com/PyO3/pyo3/pull/770)
- Implement `FromPyObject` and `IntoPy<PyObject>` traits for arrays (up to 32). [#778](https://github.com/PyO3/pyo3/pull/778)
- `migration.md` and `types.md` in the guide. [#795](https://github.com/PyO3/pyo3/pull/795), [#802](https://github.com/PyO3/pyo3/pull/802)
- `ffi::{_PyBytes_Resize, _PyDict_Next, _PyDict_Contains, _PyDict_GetDictPtr}`. [#820](https://github.com/PyO3/pyo3/pull/820)
### Changed
- `#[new]` does not take `PyRawObject` and can return `Self`. [#683](https://github.com/PyO3/pyo3/pull/683)
- The blanket implementations for `FromPyObject` for `&T` and `&mut T` are no longer specializable. Implement `PyTryFrom` for your type to control the behavior of `FromPyObject::extract` for your types. [#713](https://github.com/PyO3/pyo3/pull/713)
- The implementation for `IntoPy<U> for T` where `U: FromPy<T>` is no longer specializable. Control the behavior of this via the implementation of `FromPy`. [#713](https://github.com/PyO3/pyo3/pull/713)
- Use `parking_lot::Mutex` instead of `spin::Mutex`. [#734](https://github.com/PyO3/pyo3/pull/734)
- Bumped minimum Rust version to `1.42.0-nightly 2020-01-21`. [#761](https://github.com/PyO3/pyo3/pull/761)
- `PyRef` and `PyRefMut` are renewed for `PyCell`. [#770](https://github.com/PyO3/pyo3/pull/770)
- Some new FFI functions for Python 3.8. [#784](https://github.com/PyO3/pyo3/pull/784)
- `PyAny` is now on the top level module and prelude. [#816](https://github.com/PyO3/pyo3/pull/816)
### Removed
- `PyRawObject`. [#683](https://github.com/PyO3/pyo3/pull/683)
- `PyNoArgsFunction`. [#741](https://github.com/PyO3/pyo3/pull/741)
- `initialize_type`. To set the module name for a `#[pyclass]`, use the `module` argument to the macro. [#751](https://github.com/PyO3/pyo3/pull/751)
- `AsPyRef::as_mut/with/with_mut/into_py/into_mut_py`. [#770](https://github.com/PyO3/pyo3/pull/770)
- `PyTryFrom::try_from_mut/try_from_mut_exact/try_from_mut_unchecked`. [#770](https://github.com/PyO3/pyo3/pull/770)
- `Python::mut_from_owned_ptr/mut_from_borrowed_ptr`. [#770](https://github.com/PyO3/pyo3/pull/770)
- `ObjectProtocol::get_base/get_mut_base`. [#770](https://github.com/PyO3/pyo3/pull/770)
### Fixed
- Fixed unsoundness of subclassing. [#683](https://github.com/PyO3/pyo3/pull/683).
- Clear error indicator when the exception is handled on the Rust side. [#719](https://github.com/PyO3/pyo3/pull/719)
- Usage of raw identifiers with `#[pyo3(set)]`. [#745](https://github.com/PyO3/pyo3/pull/745)
- Usage of `PyObject` with `#[pyo3(get)]`. [#760](https://github.com/PyO3/pyo3/pull/760)
- `#[pymethods]` used in conjunction with `#[cfg]`. [#769](https://github.com/PyO3/pyo3/pull/769)
- `"*"` in a `#[pyfunction()]` argument list incorrectly accepting any number of positional arguments (use `args = "*"` when this behavior is desired). #[792](https://github.com/PyO3/pyo3/pull/792)
- `PyModule::dict`. [#809](https://github.com/PyO3/pyo3/pull/809)
- Fix the case where `DESCRIPTION` is not null-terminated. [#822](https://github.com/PyO3/pyo3/pull/822)
## [0.8.5] - 2020-01-05
### Added
- Implemented `FromPyObject` for `HashMap` and `BTreeMap`
- Support for `#[name = "foo"]` attribute for `#[pyfunction]` and in `#[pymethods]`. [#692](https://github.com/PyO3/pyo3/pull/692)
## [0.8.4] - 2019-12-14
### Added
- Support for `#[text_signature]` attribute. [#675](https://github.com/PyO3/pyo3/pull/675)
## [0.8.3] - 2019-11-23
### Removed
- `#[init]` is removed. [#658](https://github.com/PyO3/pyo3/pull/658)
### Fixed
- Now all `&Py~` types have `!Send` bound. [#655](https://github.com/PyO3/pyo3/pull/655)
- Fix a compile error raised by the stabilization of `!` type. [#672](https://github.com/PyO3/pyo3/issues/672).
## [0.8.2] - 2019-10-27
### Added
- FFI compatibility for PEP 590 Vectorcall. [#641](https://github.com/PyO3/pyo3/pull/641)
### Fixed
- Fix PySequenceProtocol::set_item. [#624](https://github.com/PyO3/pyo3/pull/624)
- Fix a corner case of BigInt::FromPyObject. [#630](https://github.com/PyO3/pyo3/pull/630)
- Fix index errors in parameter conversion. [#631](https://github.com/PyO3/pyo3/pull/631)
- Fix handling of invalid utf-8 sequences in `PyString::as_bytes`. [#639](https://github.com/PyO3/pyo3/pull/639)
and `PyString::to_string_lossy` [#642](https://github.com/PyO3/pyo3/pull/642).
- Remove `__contains__` and `__iter__` from PyMappingProtocol. [#644](https://github.com/PyO3/pyo3/pull/644)
- Fix proc-macro definition of PySetAttrProtocol. [#645](https://github.com/PyO3/pyo3/pull/645)
## [0.8.1] - 2019-10-08
### Added
- Conversion between [num-bigint](https://github.com/rust-num/num-bigint) and Python int. [#608](https://github.com/PyO3/pyo3/pull/608)
### Fixed
- Make sure the right Python interpreter is used in OSX builds. [#604](https://github.com/PyO3/pyo3/pull/604)
- Patch specialization being broken by Rust 1.40. [#614](https://github.com/PyO3/pyo3/issues/614)
- Fix a segfault around PyErr. [#597](https://github.com/PyO3/pyo3/pull/597)
## [0.8.0] - 2019-09-16
### Added
- `module` argument to `pyclass` macro. [#499](https://github.com/PyO3/pyo3/pull/499)
- `py_run!` macro [#512](https://github.com/PyO3/pyo3/pull/512)
- Use existing fields and methods before calling custom `__getattr__`. [#505](https://github.com/PyO3/pyo3/pull/505)
- `PyBytes` can now be indexed just like `Vec<u8>`
- Implement `IntoPy<PyObject>` for `PyRef` and `PyRefMut`.
### Changed
- Using the `gc` parameter for `pyclass` (e.g. `#[pyclass(gc)]`) without implementing the `class::PyGCProtocol` trait is now a compile-time error. Failing to implement this trait could lead to segfaults. [#532](https://github.com/PyO3/pyo3/pull/532)
- `PyByteArray::data` has been replaced with `PyDataArray::to_vec` because returning a `&[u8]` is unsound. (See [this comment](https://github.com/PyO3/pyo3/issues/373#issuecomment-512332696) for a great write-up for why that was unsound)
- Replace `mashup` with `paste`.
- `GILPool` gained a `Python` marker to prevent it from being misused to release Python objects without the GIL held.
### Removed
- `IntoPyObject` was replaced with `IntoPy<PyObject>`
- `#[pyclass(subclass)]` is hidden a `unsound-subclass` feature because it's causing segmentation faults.
### Fixed
- More readable error message for generics in pyclass [#503](https://github.com/PyO3/pyo3/pull/503)
## [0.7.0] - 2019-05-26
### Added
- PyPy support by omerbenamram in [#393](https://github.com/PyO3/pyo3/pull/393)
- Have `PyModule` generate an index of its members (`__all__` list).
- Allow `slf: PyRef<T>` for pyclass(#419)
- Allow to use lifetime specifiers in `pymethods`
- Add `marshal` module. [#460](https://github.com/PyO3/pyo3/pull/460)
### Changed
- `Python::run` returns `PyResult<()>` instead of `PyResult<&PyAny>`.
- Methods decorated with `#[getter]` and `#[setter]` can now omit wrapping the
result type in `PyResult` if they don't raise exceptions.
### Fixed
- `type_object::PyTypeObject` has been marked unsafe because breaking the contract `type_object::PyTypeObject::init_type` can lead to UB.
- Fixed automatic derive of `PySequenceProtocol` implementation in [#423](https://github.com/PyO3/pyo3/pull/423).
- Capitalization & better wording to README.md.
- Docstrings of properties is now properly set using the doc of the `#[getter]` method.
- Fixed issues with `pymethods` crashing on doc comments containing double quotes.
- `PySet::new` and `PyFrozenSet::new` now return `PyResult<&Py[Frozen]Set>`; exceptions are raised if
the items are not hashable.
- Fixed building using `venv` on Windows.
- `PyTuple::new` now returns `&PyTuple` instead of `Py<PyTuple>`.
- Fixed several issues with argument parsing; notably, the `*args` and `**kwargs`
  tuple/dict now doesn't contain arguments that are otherwise assigned to parameters.
## [0.6.0] - 2019-03-28
### Regressions
- Currently, [#341](https://github.com/PyO3/pyo3/issues/341) causes `cargo test` to fail with weird linking errors when the `extension-module` feature is activated. For now you can work around this by making the `extension-module` feature optional and running the tests with `cargo test --no-default-features`:
```toml
[dependencies.pyo3]
version = "0.6.0"
[features]
extension-module = ["pyo3/extension-module"]
default = ["extension-module"]
```
### Added
- Added a `wrap_pymodule!` macro similar to the existing `wrap_pyfunction!` macro. Only available on python 3
- Added support for cross compiling (e.g. to arm v7) by mtp401 in [#327](https://github.com/PyO3/pyo3/pull/327). See the "Cross Compiling" section in the "Building and Distribution" chapter of the guide for more details.
- The `PyRef` and `PyRefMut` types, which allow to differentiate between an instance of a rust struct on the rust heap and an instance that is embedded inside a python object. By kngwyu in [#335](https://github.com/PyO3/pyo3/pull/335)
- Added `FromPy<T>` and `IntoPy<T>` which are equivalent to `From<T>` and `Into<T>` except that they require a gil token.
- Added `ManagedPyRef`, which should eventually replace `ToBorrowedObject`.
### Changed
- Renamed `PyObjectRef` to `PyAny` in #388
- Renamed `add_function` to `add_wrapped` as it now also supports modules.
- Renamed `#[pymodinit]` to `#[pymodule]`
- `py.init(|| value)` becomes `Py::new(value)`
- `py.init_ref(|| value)` becomes `PyRef::new(value)`
- `py.init_mut(|| value)` becomes `PyRefMut::new(value)`.
- `PyRawObject::init` is now infallible, e.g. it returns `()` instead of `PyResult<()>`.
- Renamed `py_exception!` to `create_exception!` and refactored the error macros.
- Renamed `wrap_function!` to `wrap_pyfunction!`
- Renamed `#[prop(get, set)]` to `#[pyo3(get, set)]`
- `#[pyfunction]` now supports the same arguments as `#[pyfn()]`
- Some macros now emit proper spanned errors instead of panics.
- Migrated to the 2018 edition
- `crate::types::exceptions` moved to `crate::exceptions`
- Replace `IntoPyTuple` with `IntoPy<Py<PyTuple>>`.
- `IntoPyPointer` and `ToPyPointer` moved into the crate root.
- `class::CompareOp` moved into `class::basic::CompareOp`
- PyTypeObject is now a direct subtrait of PyTypeCreate, removing the old cyclical implementation in [#350](https://github.com/PyO3/pyo3/pull/350)
- Add `PyList::{sort, reverse}` by chr1sj0nes in [#357](https://github.com/PyO3/pyo3/pull/357) and [#358](https://github.com/PyO3/pyo3/pull/358)
- Renamed the `typeob` module to `type_object`
### Removed
- `PyToken` was removed due to unsoundness (See [#94](https://github.com/PyO3/pyo3/issues/94)).
- Removed the unnecessary type parameter from `PyObjectAlloc`
- `NoArgs`. Just use an empty tuple
- `PyObjectWithGIL`. `PyNativeType` is sufficient now that PyToken is removed.
### Fixed
- A soundness hole where every instance of a `#[pyclass]` struct was considered to be part of a python object, even though you can create instances that are not part of the python heap. This was fixed through `PyRef` and `PyRefMut`.
- Fix kwargs support in [#328](https://github.com/PyO3/pyo3/pull/328).
- Add full support for `__dict__` in [#403](https://github.com/PyO3/pyo3/pull/403).
## [0.5.3] - 2019-01-04
### Fixed
- Fix memory leak in ArrayList by kngwyu [#316](https://github.com/PyO3/pyo3/pull/316)
## [0.5.2] - 2018-11-25
### Fixed
- Fix non-deterministic segfaults when creating many objects by kngwyu in [#281](https://github.com/PyO3/pyo3/pull/281)
## [0.5.1] - 2018-11-24
Yanked
## [0.5.0] - 2018-11-11
### Added
- `#[pyclass]` objects can now be returned from rust functions
- `PyComplex` by kngwyu in [#226](https://github.com/PyO3/pyo3/pull/226)
- `PyDict::from_sequence`, equivalent to `dict([(key, val), ...])`
- Bindings for the `datetime` standard library types: `PyDate`, `PyTime`, `PyDateTime`, `PyTzInfo`, `PyDelta` with associated `ffi` types, by pganssle [#200](https://github.com/PyO3/pyo3/pull/200).
- `PyString`, `PyUnicode`, and `PyBytes` now have an `as_bytes` method that returns `&[u8]`.
- `PyObjectProtocol::get_type_ptr` by ijl in [#242](https://github.com/PyO3/pyo3/pull/242)
### Changed
- Removes the types from the root module and the prelude. They now live in `pyo3::types` instead.
- All exceptions are constructed with `py_err` instead of `new`, as they return `PyErr` and not `Self`.
- `as_mut` and friends take an `&mut self` instead of `&self`
- `ObjectProtocol::call` now takes an `Option<&PyDict>` for the kwargs instead of an `IntoPyDictPointer`.
- `IntoPyDictPointer` was replaced by `IntoPyDict` which doesn't convert `PyDict` itself anymore and returns a `PyDict` instead of `*mut PyObject`.
- `PyTuple::new` now takes an `IntoIterator` instead of a slice
- Updated to syn 0.15
- Split `PyTypeObject` into `PyTypeObject` without the create method and `PyTypeCreate` which requires `PyObjectAlloc<Self> + PyTypeInfo + Sized`.
- Ran `cargo edition --fix` which prefixed path with `crate::` for rust 2018
- Renamed `async` to `pyasync` as async will be a keyword in the 2018 edition.
- Starting to use `NonNull<*mut PyObject>` for Py and PyObject by ijl [#260](https://github.com/PyO3/pyo3/pull/260)
### Removed
- Removed most entries from the prelude. The new prelude is small and clear.
- Slowly removing specialization uses
- `PyString`, `PyUnicode`, and `PyBytes` no longer have a `data` method
(replaced by `as_bytes`) and `PyStringData` has been removed.
- The pyobject_extract macro
### Fixed
- Added an explanation that the GIL can temporarily be released even while holding a GILGuard.
- Lots of clippy errors
- Fix segfault on calling an unknown method on a PyObject
- Work around a [bug](https://github.com/rust-lang/rust/issues/55380) in the rust compiler by kngwyu [#252](https://github.com/PyO3/pyo3/pull/252)
- Fixed a segfault with subclassing pyo3 create classes and using `__class__` by kngwyu [#263](https://github.com/PyO3/pyo3/pull/263)
## [0.4.1] - 2018-08-20
### Changed
- PyTryFrom's error is always `PyDowncastError`
### Fixed
- Fixed compilation on nightly since `use_extern_macros` was stabilized
### Removed
- The pyobject_downcast macro
## [0.4.0] - 2018-07-30
### Changed
- Merged both examples into one
- Rustfmt all the things :heavy_check_mark:
- Switched to [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
### Removed
- Conversions from tuples to PyDict due to [rust-lang/rust#52050](https://github.com/rust-lang/rust/issues/52050)
## [0.3.2] - 2018-07-22
### Changed
- Replaced `concat_idents` with mashup
## [0.3.1] - 2018-07-18
### Fixed
- Fixed scoping bug in pyobject_native_type that would break rust-numpy
## [0.3.0] - 2018-07-18
### Added
- A few internal macros became part of the public api ([#155](https://github.com/PyO3/pyo3/pull/155), [#186](https://github.com/PyO3/pyo3/pull/186))
- Always clone in getters. This allows using the get-annotation on all Clone-Types
### Changed
- Upgraded to syn 0.14 which means much better error messages :tada:
- 128 bit integer support by [kngwyu](https://github.com/kngwyu) ([#173](https://github.com/PyO3/pyo3/pull/173))
- `proc_macro` has been stabilized on nightly ([rust-lang/rust#52081](https://github.com/rust-lang/rust/pull/52081)). This means that we can remove the `proc_macro` feature, but now we need the `use_extern_macros` from the 2018 edition instead.
- All proc macro are now prefixed with `py` and live in the prelude. This means you can use `#[pyclass]`, `#[pymethods]`, `#[pyproto]`, `#[pyfunction]` and `#[pymodinit]` directly, at least after a `use pyo3::prelude::*`. They were also moved into a module called `proc_macro`. You shouldn't use `#[pyo3::proc_macro::pyclass]` or other longer paths in attributes because `proc_macro_path_invoc` isn't going to be stabilized soon.
- Renamed the `base` option in the `pyclass` macro to `extends`.
- `#[pymodinit]` uses the function name as module name, unless the name is overridden with `#[pymodinit(name)]`
- The guide is now properly versioned.
## [0.2.7] - 2018-05-18
### Fixed
- Fix nightly breakage with proc_macro_path
## [0.2.6] - 2018-04-03
### Fixed
- Fix compatibility with TryFrom trait #137
## [0.2.5] - 2018-02-21
### Added
- CPython 3.7 support
### Fixed
- Embedded CPython 3.7b1 crashes on initialization #110
- Generated extension functions are weakly typed #108
- call_method\* crashes when the method does not exist #113
- Allow importing exceptions from nested modules #116
## [0.2.4] - 2018-01-19
### Added
- Allow to get mutable ref from PyObject #106
- Drop `RefFromPyObject` trait
- Add Python::register_any method
### Fixed
- Fix impl `FromPyObject` for `Py<T>`
- Mark method that work with raw pointers as unsafe #95
## [0.2.3] - 11-27-2017
### Changed
- Rustup to 1.23.0-nightly 2017-11-07
### Fixed
- Proper `c_char` usage #93
### Removed
- Remove use of now unneeded 'AsciiExt' trait
## [0.2.2] - 09-26-2017
### Changed
- Rustup to 1.22.0-nightly 2017-09-30
## [0.2.1] - 09-26-2017
### Fixed
- Fix rustc const_fn nightly breakage
## [0.2.0] - 08-12-2017
### Added
- Added inheritance support #15
- Added weakref support #56
- Added subclass support #64
- Added `self.__dict__` support #68
- Added `pyo3::prelude` module #70
- Better `Iterator` support for PyTuple, PyList, PyDict #75
- Introduce IntoPyDictPointer similar to IntoPyTuple #69
### Changed
- Allow to add gc support without implementing PyGCProtocol #57
- Refactor `PyErr` implementation. Drop `py` parameter from constructor.
## [0.1.0] - 07-23-2017
### Added
- Initial release
[Unreleased]: https://github.com/pyo3/pyo3/compare/v0.22.5...HEAD
[0.22.5]: https://github.com/pyo3/pyo3/compare/v0.22.4...v0.22.5
[0.22.4]: https://github.com/pyo3/pyo3/compare/v0.22.3...v0.22.4
[0.22.3]: https://github.com/pyo3/pyo3/compare/v0.22.2...v0.22.3
[0.22.2]: https://github.com/pyo3/pyo3/compare/v0.22.1...v0.22.2
[0.22.1]: https://github.com/pyo3/pyo3/compare/v0.22.0...v0.22.1
[0.22.0]: https://github.com/pyo3/pyo3/compare/v0.21.2...v0.22.0
[0.21.2]: https://github.com/pyo3/pyo3/compare/v0.21.1...v0.21.2
[0.21.1]: https://github.com/pyo3/pyo3/compare/v0.21.0...v0.21.1
[0.21.0]: https://github.com/pyo3/pyo3/compare/v0.20.3...v0.21.0
[0.21.0-beta.0]: https://github.com/pyo3/pyo3/compare/v0.20.3...v0.21.0-beta.0
[0.20.3]: https://github.com/pyo3/pyo3/compare/v0.20.2...v0.20.3
[0.20.2]: https://github.com/pyo3/pyo3/compare/v0.20.1...v0.20.2
[0.20.1]: https://github.com/pyo3/pyo3/compare/v0.20.0...v0.20.1
[0.20.0]: https://github.com/pyo3/pyo3/compare/v0.19.2...v0.20.0
[0.19.2]: https://github.com/pyo3/pyo3/compare/v0.19.1...v0.19.2
[0.19.1]: https://github.com/pyo3/pyo3/compare/v0.19.0...v0.19.1
[0.19.0]: https://github.com/pyo3/pyo3/compare/v0.18.3...v0.19.0
[0.18.3]: https://github.com/pyo3/pyo3/compare/v0.18.2...v0.18.3
[0.18.2]: https://github.com/pyo3/pyo3/compare/v0.18.1...v0.18.2
[0.18.1]: https://github.com/pyo3/pyo3/compare/v0.18.0...v0.18.1
[0.18.0]: https://github.com/pyo3/pyo3/compare/v0.17.3...v0.18.0
[0.17.3]: https://github.com/pyo3/pyo3/compare/v0.17.2...v0.17.3
[0.17.2]: https://github.com/pyo3/pyo3/compare/v0.17.1...v0.17.2
[0.17.1]: https://github.com/pyo3/pyo3/compare/v0.17.0...v0.17.1
[0.17.0]: https://github.com/pyo3/pyo3/compare/v0.16.6...v0.17.0
[0.16.6]: https://github.com/pyo3/pyo3/compare/v0.16.5...v0.16.6
[0.16.5]: https://github.com/pyo3/pyo3/compare/v0.16.4...v0.16.5
[0.16.4]: https://github.com/pyo3/pyo3/compare/v0.16.3...v0.16.4
[0.16.3]: https://github.com/pyo3/pyo3/compare/v0.16.2...v0.16.3
[0.16.2]: https://github.com/pyo3/pyo3/compare/v0.16.1...v0.16.2
[0.16.1]: https://github.com/pyo3/pyo3/compare/v0.16.0...v0.16.1
[0.16.0]: https://github.com/pyo3/pyo3/compare/v0.15.1...v0.16.0
[0.15.2]: https://github.com/pyo3/pyo3/compare/v0.15.1...v0.15.2
[0.15.1]: https://github.com/pyo3/pyo3/compare/v0.15.0...v0.15.1
[0.15.0]: https://github.com/pyo3/pyo3/compare/v0.14.5...v0.15.0
[0.14.5]: https://github.com/pyo3/pyo3/compare/v0.14.4...v0.14.5
[0.14.4]: https://github.com/pyo3/pyo3/compare/v0.14.3...v0.14.4
[0.14.3]: https://github.com/pyo3/pyo3/compare/v0.14.2...v0.14.3
[0.14.2]: https://github.com/pyo3/pyo3/compare/v0.14.1...v0.14.2
[0.14.1]: https://github.com/pyo3/pyo3/compare/v0.14.0...v0.14.1
[0.14.0]: https://github.com/pyo3/pyo3/compare/v0.13.2...v0.14.0
[0.13.2]: https://github.com/pyo3/pyo3/compare/v0.13.1...v0.13.2
[0.13.1]: https://github.com/pyo3/pyo3/compare/v0.13.0...v0.13.1
[0.13.0]: https://github.com/pyo3/pyo3/compare/v0.12.4...v0.13.0
[0.12.4]: https://github.com/pyo3/pyo3/compare/v0.12.3...v0.12.4
[0.12.3]: https://github.com/pyo3/pyo3/compare/v0.12.2...v0.12.3
[0.12.2]: https://github.com/pyo3/pyo3/compare/v0.12.1...v0.12.2
[0.12.1]: https://github.com/pyo3/pyo3/compare/v0.12.0...v0.12.1
[0.12.0]: https://github.com/pyo3/pyo3/compare/v0.11.1...v0.12.0
[0.11.1]: https://github.com/pyo3/pyo3/compare/v0.11.0...v0.11.1
[0.11.0]: https://github.com/pyo3/pyo3/compare/v0.10.1...v0.11.0
[0.10.1]: https://github.com/pyo3/pyo3/compare/v0.10.0...v0.10.1
[0.10.0]: https://github.com/pyo3/pyo3/compare/v0.9.2...v0.10.0
[0.9.2]: https://github.com/pyo3/pyo3/compare/v0.9.1...v0.9.2
[0.9.1]: https://github.com/pyo3/pyo3/compare/v0.9.0...v0.9.1
[0.9.0]: https://github.com/pyo3/pyo3/compare/v0.8.5...v0.9.0
[0.8.5]: https://github.com/pyo3/pyo3/compare/v0.8.4...v0.8.5
[0.8.4]: https://github.com/pyo3/pyo3/compare/v0.8.3...v0.8.4
[0.8.3]: https://github.com/pyo3/pyo3/compare/v0.8.2...v0.8.3
[0.8.2]: https://github.com/pyo3/pyo3/compare/v0.8.1...v0.8.2
[0.8.1]: https://github.com/pyo3/pyo3/compare/v0.8.0...v0.8.1
[0.8.0]: https://github.com/pyo3/pyo3/compare/v0.7.0...v0.8.0
[0.7.0]: https://github.com/pyo3/pyo3/compare/v0.6.0...v0.7.0
[0.6.0]: https://github.com/pyo3/pyo3/compare/v0.5.3...v0.6.0
[0.5.3]: https://github.com/pyo3/pyo3/compare/v0.5.2...v0.5.3
[0.5.2]: https://github.com/pyo3/pyo3/compare/v0.5.1...v0.5.2
[0.5.1]: https://github.com/pyo3/pyo3/compare/v0.5.0...v0.5.1
[0.5.0]: https://github.com/pyo3/pyo3/compare/v0.4.1...v0.5.0
[0.4.1]: https://github.com/pyo3/pyo3/compare/v0.4.0...v0.4.1
[0.4.0]: https://github.com/pyo3/pyo3/compare/v0.3.2...v0.4.0
[0.3.2]: https://github.com/pyo3/pyo3/compare/v0.3.1...v0.3.2
[0.3.1]: https://github.com/pyo3/pyo3/compare/v0.3.0...v0.3.1
[0.3.0]: https://github.com/pyo3/pyo3/compare/v0.2.7...v0.3.0
[0.2.7]: https://github.com/pyo3/pyo3/compare/v0.2.6...v0.2.7
[0.2.6]: https://github.com/pyo3/pyo3/compare/v0.2.5...v0.2.6
[0.2.5]: https://github.com/pyo3/pyo3/compare/v0.2.4...v0.2.5
[0.2.4]: https://github.com/pyo3/pyo3/compare/v0.2.3...v0.2.4
[0.2.3]: https://github.com/pyo3/pyo3/compare/v0.2.2...v0.2.3
[0.2.2]: https://github.com/pyo3/pyo3/compare/v0.2.1...v0.2.2
[0.2.1]: https://github.com/pyo3/pyo3/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/pyo3/pyo3/compare/v0.1.0...v0.2.0
[0.1.0]: https://github.com/PyO3/pyo3/tree/0.1.0
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/Contributing.md | # Contributing
Thank you for your interest in contributing to PyO3! All are welcome - please consider reading our [Code of Conduct](https://github.com/PyO3/pyo3/blob/main/Code-of-Conduct.md) to keep our community positive and inclusive.
If you are searching for ideas how to contribute, proceed to the ["Getting started contributing"](#getting-started-contributing) section. If you have found a specific issue to contribute to and need information about the development process, you may find the section ["Writing pull requests"](#writing-pull-requests) helpful.
If you want to become familiar with the codebase, see
[Architecture.md](https://github.com/PyO3/pyo3/blob/main/Architecture.md).
## Getting started contributing
Please join in with any part of PyO3 which interests you. We use GitHub issues to record all bugs and ideas. Feel free to request an issue to be assigned to you if you want to work on it.
You can browse the API of the non-public parts of PyO3 [here](https://pyo3.netlify.app/internal/doc/pyo3/index.html).
The following sections also contain specific ideas on where to start contributing to PyO3.
## Setting up a development environment
To work and develop PyO3, you need Python & Rust installed on your system.
* We encourage the use of [rustup](https://rustup.rs/) to be able to select and choose specific toolchains based on the project.
* [Pyenv](https://github.com/pyenv/pyenv) is also highly recommended for being able to choose a specific Python version.
* [virtualenv](https://virtualenv.pypa.io/en/latest/) can also be used with or without Pyenv to use specific installed Python versions.
* [`nox`][nox] is used to automate many of our CI tasks.
### Help users identify bugs
The [PyO3 Discord server](https://discord.gg/33kcChzH7f) is very active with users who are new to PyO3, and often completely new to Rust. Helping them debug is a great way to get experience with the PyO3 codebase.
Helping others often reveals bugs, documentation weaknesses, and missing APIs. It's a good idea to open GitHub issues for these immediately so the resolution can be designed and implemented!
### Implement issues ready for development
Issues where the solution is clear and work is not in progress use the [needs-implementer](https://github.com/PyO3/pyo3/issues?q=is%3Aissue+is%3Aopen+label%3Aneeds-implementer) label.
Don't be afraid if the solution is not clear to you! The core PyO3 contributors will be happy to mentor you through any questions you have to help you write the solution.
### Help write great docs
PyO3 has a user guide (using mdbook) as well as the usual Rust API docs. The aim is for both of these to be detailed, easy to understand, and up-to-date. Pull requests are always welcome to fix typos, change wording, add examples, etc.
There are some specific areas of focus where help is currently needed for the documentation:
- Issues requesting documentation improvements are tracked with the [documentation](https://github.com/PyO3/pyo3/issues?q=is%3Aissue+is%3Aopen+label%3Adocumentation) label.
- Not all APIs had docs or examples when they were made. The goal is to have documentation on all PyO3 APIs ([#306](https://github.com/PyO3/pyo3/issues/306)). If you see an API lacking a doc, please write one and open a PR!
To build the docs (including all features), install [`nox`][nox] and then run
```shell
nox -s docs -- open
```
#### Doctests
We use lots of code blocks in our docs. Run `cargo test --doc` when making changes to check that
the doctests still work, or `cargo test` to run all the Rust tests including doctests. See
https://doc.rust-lang.org/rustdoc/documentation-tests.html for a guide on doctests.
#### Building the guide
You can preview the user guide by building it locally with `mdbook`.
First, install [`mdbook`][mdbook] and [`nox`][nox]. Then, run
```shell
nox -s build-guide -- --open
```
To check all links in the guide are valid, also install [`lychee`][lychee] and use the `check-guide` session instead:
```shell
nox -s check-guide
```
### Help design the next PyO3
Issues which don't yet have a clear solution use the [needs-design](https://github.com/PyO3/pyo3/issues?q=is%3Aissue+is%3Aopen+label%3Aneeds-design) label.
If any of these issues interest you, please join in with the conversation on the issue! All opinions are valued, and if you're interested in going further with e.g. draft PRs to experiment with API designs, even better!
### Review pull requests
Everybody is welcome to submit comments on open PRs. Please help ensure new PyO3 APIs are safe, performant, tidy, and easy to use!
## Writing pull requests
Here are a few things to note when you are writing PRs.
### Testing and Continuous Integration
The PyO3 repo uses GitHub Actions. PRs are blocked from merging if CI is not successful. Formatting, linting and tests are checked for all Rust and Python code. In addition, all warnings in Rust code are disallowed (using `RUSTFLAGS="-D warnings"`).
Tests run with all supported Python versions with the latest stable Rust compiler, as well as for Python 3.9 with the minimum supported Rust version.
If you are adding a new feature, you should add it to the `full` feature in our *Cargo.toml* so that it is tested in CI.
You can run these checks yourself with `nox`. Use `nox -l` to list the full set of subcommands you can run.
#### Linting Python code
`nox -s ruff`
#### Linting Rust code
`nox -s rustfmt`
#### Semver checks
`cargo semver-checks check-release`
#### Clippy
`nox -s clippy-all`
#### Tests
`nox -s test` or `cargo test` for Rust tests only, `nox -f pytests/noxfile.py -s test` for Python tests only
#### Check all conditional compilation
`nox -s check-feature-powerset`
#### UI Tests
PyO3 uses [`trybuild`](https://github.com/dtolnay/trybuild) to develop UI tests to capture error messages from the Rust compiler for some of the macro functionality.
Because there are several feature combinations for these UI tests, when updating them all (e.g. for a new Rust compiler version) it may be helpful to use the `update-ui-tests` nox session:
```bash
nox -s update-ui-tests
```
### Documenting changes
We use [towncrier](https://towncrier.readthedocs.io/en/stable/index.html) to generate a CHANGELOG for each release.
To include your changes in the release notes, you should create one (or more) news items in the `newsfragments` directory. Valid news items should be saved as `<PR>.<CATEGORY>.md` where `<PR>` is the pull request number and `<CATEGORY>` is one of the following:
- `packaging` - for dependency changes and Python / Rust version compatibility changes
- `added` - for new features
- `changed` - for features which already existed but have been altered or deprecated
- `removed` - for features which have been removed
- `fixed` - for "changed" features which were classed as a bugfix
Docs-only PRs do not need news items; start your PR title with `docs:` to skip the check.
### Style guide
#### Generic code
PyO3 has a lot of generic APIs to increase usability. These can come at the cost of generic code bloat. Where reasonable, try to implement a concrete sub-portion of generic functions. There are two forms of this:
- If the concrete sub-portion doesn't benefit from re-use by other functions, name it `inner` and keep it as a local to the function.
- If the concrete sub-portion is re-used by other functions, preferably name it `_foo` and place it directly below `foo` in the source code (where `foo` is the original generic function).
#### FFI calls
PyO3 makes a lot of FFI calls to Python's C API using raw pointers. Where possible try to avoid using pointers-to-temporaries in expressions:
```rust
// dangerous
pyo3::ffi::Something(name.to_object(py).as_ptr());
// because the following refactoring is a use-after-free error:
let name = name.to_object(py).as_ptr();
pyo3::ffi::Something(name)
```
Instead, prefer to bind the safe owned `PyObject` wrapper before passing to ffi functions:
```rust
let name: PyObject = name.to_object(py);
pyo3::ffi::Something(name.as_ptr())
// name will automatically be freed when it falls out of scope
```
## Python and Rust version support policy
PyO3 aims to keep sufficient compatibility to make packaging Python extensions built with PyO3 feasible on most common package managers.
To keep package maintainers' lives simpler, PyO3 will commit, wherever possible, to only adjust minimum supported Rust and Python versions at the same time. This bump will only come in an `0.x` release, roughly once per year, after the oldest supported Python version reaches its end-of-life. (Check https://endoflife.date/python for a clear timetable on these.)
Below are guidelines on what compatibility all PRs are expected to deliver for each language.
### Python
PyO3 supports all officially supported Python versions, as well as the latest PyPy3 release. All of these versions are tested in CI.
### Rust
PyO3 aims to make use of up-to-date Rust language features to keep the implementation as efficient as possible.
The minimum Rust version supported will be decided when the release which bumps Python and Rust versions is made. At the time, the minimum Rust version will be set no higher than the lowest Rust version shipped in the current Debian, RHEL and Alpine Linux distributions.
CI tests both the most recent stable Rust version and the minimum supported Rust version. Because of Rust's stability guarantees this is sufficient to confirm support for all Rust versions in between.
## Benchmarking
PyO3 has two sets of benchmarks for evaluating some aspects of its performance. The benchmark suite is currently very small - please open PRs with new benchmarks if you're interested in helping to expand it!
First, there are Rust-based benchmarks located in the `pyo3-benches` subdirectory. You can run these benchmarks with:
nox -s bench
Second, there is a Python-based benchmark contained in the `pytests` subdirectory. You can read more about it [here](https://github.com/PyO3/pyo3/tree/main/pytests).
## Code coverage
You can view what code is and isn't covered by PyO3's tests. We aim to have 100% coverage - please check coverage and add tests if you notice a lack of coverage!
- First, ensure the llvm-cov cargo plugin is installed. You may need to run the plugin through cargo once before using it with `nox`.
```shell
cargo install cargo-llvm-cov
cargo llvm-cov
```
- Then, generate an `lcov.info` file with
```shell
nox -s coverage -- lcov
```
You can install an IDE plugin to view the coverage. For example, if you use VSCode:
- Add the [coverage-gutters](https://marketplace.visualstudio.com/items?itemName=ryanluker.vscode-coverage-gutters) plugin.
- Add these settings to VSCode's `settings.json`:
```json
{
"coverage-gutters.coverageFileNames": [
"lcov.info",
"cov.xml",
"coverage.xml"
],
"coverage-gutters.showLineCoverage": true
}
```
- You should now be able to see green highlights for code that is tested, and red highlights for code that is not tested.
## Sponsor this project
At the moment there is no official organisation that accepts sponsorship on PyO3's behalf. If you're seeking to provide significant funding to the PyO3 ecosystem, please reach out to us on [GitHub](https://github.com/PyO3/pyo3/issues/new) or [Discord](https://discord.gg/33kcChzH7f) and we can discuss.
In the meanwhile, some of our maintainers have personal GitHub sponsorship pages and would be grateful for your support:
- [davidhewitt](https://github.com/sponsors/davidhewitt)
- [messense](https://github.com/sponsors/messense)
[mdbook]: https://rust-lang.github.io/mdBook/cli/index.html
[lychee]: https://github.com/lycheeverse/lychee
[nox]: https://github.com/theacodes/nox
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/CITATION.cff | cff-version: 1.2.0
title: PyO3
message: >-
If you use this software as part of a publication and wish to cite
it, please use the metadata from this file.
type: software
authors:
- name: PyO3 Project and Contributors
website: https://github.com/PyO3
license:
- Apache-2.0
- MIT
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/build.rs | use std::env;
use pyo3_build_config::pyo3_build_script_impl::{cargo_env_var, errors::Result};
use pyo3_build_config::{bail, print_feature_cfgs, InterpreterConfig};
/// Fails the build with a helpful message when the `auto-initialize` feature
/// is enabled against a Python installation that only supports static
/// embedding (no shared library), which PyO3 cannot auto-initialize.
fn ensure_auto_initialize_ok(interpreter_config: &InterpreterConfig) -> Result<()> {
    let auto_initialize_requested = cargo_env_var("CARGO_FEATURE_AUTO_INITIALIZE").is_some();

    // Nothing to validate unless the feature is on and the interpreter is
    // statically linked.
    if !auto_initialize_requested || interpreter_config.shared {
        return Ok(());
    }

    bail!(
        "The `auto-initialize` feature is enabled, but your python installation only supports \
        embedding the Python interpreter statically. If you are attempting to run tests, or a \
        binary which is okay to link dynamically, install a Python distribution which ships \
        with the Python shared library.\n\
        \n\
        Embedding the Python interpreter statically does not yet have first-class support in \
        PyO3. If you are sure you intend to do this, disable the `auto-initialize` feature.\n\
        \n\
        For more information, see \
        https://pyo3.rs/v{pyo3_version}/\
        building-and-distribution.html#embedding-python-in-rust",
        pyo3_version = env::var("CARGO_PKG_VERSION").unwrap()
    );
}
/// Prepares the PyO3 crate for compilation.
///
/// This loads the config from pyo3-build-config and then makes some additional checks to improve UX
/// for users.
///
/// Emits the cargo configuration based on this config as well as a few checks of the Rust compiler
/// version to enable features which aren't supported on MSRV.
fn configure_pyo3() -> Result<()> {
    let interpreter_config = pyo3_build_config::get();

    // Abort early with a clear message if `auto-initialize` cannot work.
    ensure_auto_initialize_ok(interpreter_config)?;

    // Forward every cargo directive produced by the interpreter config to
    // cargo via stdout.
    for output_line in interpreter_config.build_script_outputs() {
        println!("{}", output_line);
    }

    // Emit cfgs like `invalid_from_utf8_lint`
    print_feature_cfgs();

    Ok(())
}
/// Build-script entry point: declare expected cfgs, then run the PyO3
/// configuration step, exiting non-zero with a report on failure.
fn main() {
    pyo3_build_config::print_expected_cfgs();
    match configure_pyo3() {
        Ok(()) => {}
        Err(error) => {
            eprintln!("error: {}", error.report());
            std::process::exit(1);
        }
    }
}
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/README.md | # PyO3
[](https://github.com/PyO3/pyo3/actions)
[](https://codspeed.io/PyO3/pyo3)
[](https://codecov.io/gh/PyO3/pyo3)
[](https://crates.io/crates/pyo3)
[](https://rust-lang.github.io/rfcs/2495-min-rust-version.html)
[](https://discord.gg/33kcChzH7f)
[](https://github.com/PyO3/pyo3/blob/main/Contributing.md)
[Rust](https://www.rust-lang.org/) bindings for [Python](https://www.python.org/), including tools for creating native Python extension modules. Running and interacting with Python code from a Rust binary is also supported.
- User Guide: [stable](https://pyo3.rs) | [main](https://pyo3.rs/main)
- API Documentation: [stable](https://docs.rs/pyo3/) | [main](https://pyo3.rs/main/doc)
## Usage
Requires Rust 1.63 or greater.
PyO3 supports the following Python distributions:
- CPython 3.7 or greater
- PyPy 7.3 (Python 3.9+)
- GraalPy 24.0 or greater (Python 3.10+)
You can use PyO3 to write a native Python module in Rust, or to embed Python in a Rust binary. The following sections explain each of these in turn.
### Using Rust from Python
PyO3 can be used to generate a native Python module. The easiest way to try this out for the first time is to use [`maturin`](https://github.com/PyO3/maturin). `maturin` is a tool for building and publishing Rust-based Python packages with minimal configuration. The following steps install `maturin`, use it to generate and build a new Python package, and then launch Python to import and execute a function from the package.
First, follow the commands below to create a new directory containing a new Python `virtualenv`, and install `maturin` into the virtualenv using Python's package manager, `pip`:
```bash
# (replace string_sum with the desired package name)
$ mkdir string_sum
$ cd string_sum
$ python -m venv .env
$ source .env/bin/activate
$ pip install maturin
```
Still inside this `string_sum` directory, now run `maturin init`. This will generate the new package source. When given the choice of bindings to use, select pyo3 bindings:
```bash
$ maturin init
✔ 🤷 What kind of bindings to use? · pyo3
✨ Done! New project created string_sum
```
The most important files generated by this command are `Cargo.toml` and `lib.rs`, which will look roughly like the following:
**`Cargo.toml`**
```toml
[package]
name = "string_sum"
version = "0.1.0"
edition = "2021"
[lib]
# The name of the native library. This is the name which will be used in Python to import the
# library (i.e. `import string_sum`). If you change this, you must also change the name of the
# `#[pymodule]` in `src/lib.rs`.
name = "string_sum"
# "cdylib" is necessary to produce a shared library for Python to import from.
#
# Downstream Rust code (including code in `bin/`, `examples/`, and `tests/`) will not be able
# to `use string_sum;` unless the "rlib" or "lib" crate type is also included, e.g.:
# crate-type = ["cdylib", "rlib"]
crate-type = ["cdylib"]
[dependencies]
pyo3 = { version = "0.22.5", features = ["extension-module"] }
```
**`src/lib.rs`**
```rust
use pyo3::prelude::*;
/// Formats the sum of two numbers as string.
#[pyfunction]
fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
Ok((a + b).to_string())
}
/// A Python module implemented in Rust. The name of this function must match
/// the `lib.name` setting in the `Cargo.toml`, else Python will not be able to
/// import the module.
#[pymodule]
fn string_sum(m: &Bound<'_, PyModule>) -> PyResult<()> {
m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
Ok(())
}
```
Finally, run `maturin develop`. This will build the package and install it into the Python virtualenv previously created and activated. The package is then ready to be used from `python`:
```bash
$ maturin develop
# lots of progress output as maturin runs the compilation...
$ python
>>> import string_sum
>>> string_sum.sum_as_string(5, 20)
'25'
```
To make changes to the package, just edit the Rust source code and then re-run `maturin develop` to recompile.
To run this all as a single copy-and-paste, use the bash script below (replace `string_sum` in the first command with the desired package name):
```bash
mkdir string_sum && cd "$_"
python -m venv .env
source .env/bin/activate
pip install maturin
maturin init --bindings pyo3
maturin develop
```
If you want to be able to run `cargo test` or use this project in a Cargo workspace and are running into linker issues, there are some workarounds in [the FAQ](https://pyo3.rs/latest/faq.html#i-cant-run-cargo-test-or-i-cant-build-in-a-cargo-workspace-im-having-linker-issues-like-symbol-not-found-or-undefined-reference-to-_pyexc_systemerror).
As well as with `maturin`, it is possible to build using [`setuptools-rust`](https://github.com/PyO3/setuptools-rust) or [manually](https://pyo3.rs/latest/building-and-distribution.html#manual-builds). Both offer more flexibility than `maturin` but require more configuration to get started.
### Using Python from Rust
To embed Python into a Rust binary, you need to ensure that your Python installation contains a shared library. The following steps demonstrate how to ensure this (for Ubuntu), and then give some example code which runs an embedded Python interpreter.
To install the Python shared library on Ubuntu:
```bash
sudo apt install python3-dev
```
To install the Python shared library on RPM based distributions (e.g. Fedora, Red Hat, SuSE), install the `python3-devel` package.
Start a new project with `cargo new` and add `pyo3` to the `Cargo.toml` like this:
```toml
[dependencies.pyo3]
version = "0.22.5"
features = ["auto-initialize"]
```
Example program displaying the value of `sys.version` and the current user name:
```rust
use pyo3::prelude::*;
use pyo3::types::IntoPyDict;
use pyo3::ffi::c_str;
fn main() -> PyResult<()> {
Python::with_gil(|py| {
let sys = py.import("sys")?;
let version: String = sys.getattr("version")?.extract()?;
let locals = [("os", py.import("os")?)].into_py_dict(py)?;
let code = c_str!("os.getenv('USER') or os.getenv('USERNAME') or 'Unknown'");
let user: String = py.eval(code, None, Some(&locals))?.extract()?;
println!("Hello {}, I'm Python {}", user, version);
Ok(())
})
}
```
The guide has [a section](https://pyo3.rs/latest/python-from-rust.html) with lots of examples
about this topic.
## Tools and libraries
- [maturin](https://github.com/PyO3/maturin) _Build and publish crates with pyo3, rust-cpython or cffi bindings as well as rust binaries as python packages_
- [setuptools-rust](https://github.com/PyO3/setuptools-rust) _Setuptools plugin for Rust support_.
- [pyo3-built](https://github.com/PyO3/pyo3-built) _Simple macro to expose metadata obtained with the [`built`](https://crates.io/crates/built) crate as a [`PyDict`](https://docs.rs/pyo3/*/pyo3/types/struct.PyDict.html)_
- [rust-numpy](https://github.com/PyO3/rust-numpy) _Rust binding of NumPy C-API_
- [dict-derive](https://github.com/gperinazzo/dict-derive) _Derive FromPyObject to automatically transform Python dicts into Rust structs_
- [pyo3-log](https://github.com/vorner/pyo3-log) _Bridge from Rust to Python logging_
- [pythonize](https://github.com/davidhewitt/pythonize) _Serde serializer for converting Rust objects to JSON-compatible Python objects_
- [pyo3-asyncio](https://github.com/awestlake87/pyo3-asyncio) _Utilities for working with Python's Asyncio library and async functions_
- [rustimport](https://github.com/mityax/rustimport) _Directly import Rust files or crates from Python, without manual compilation step. Provides pyo3 integration by default and generates pyo3 binding code automatically._
- [pyo3-arrow](https://crates.io/crates/pyo3-arrow) _Lightweight [Apache Arrow](https://arrow.apache.org/) integration for pyo3._
## Examples
- [autopy](https://github.com/autopilot-rs/autopy) _A simple, cross-platform GUI automation library for Python and Rust._
- Contains an example of building wheels on TravisCI and appveyor using [cibuildwheel](https://github.com/pypa/cibuildwheel)
- [ballista-python](https://github.com/apache/arrow-ballista-python) _A Python library that binds to Apache Arrow distributed query engine Ballista._
- [bed-reader](https://github.com/fastlmm/bed-reader) _Read and write the PLINK BED format, simply and efficiently._
- Shows Rayon/ndarray::parallel (including capturing errors, controlling thread num), Python types to Rust generics, Github Actions
- [cellular_raza](https://cellular-raza.com) _A cellular agent-based simulation framework for building complex models from a clean slate._
- [connector-x](https://github.com/sfu-db/connector-x) _Fastest library to load data from DB to DataFrames in Rust and Python._
- [cryptography](https://github.com/pyca/cryptography/tree/main/src/rust) _Python cryptography library with some functionality in Rust._
- [css-inline](https://github.com/Stranger6667/css-inline/tree/master/bindings/python) _CSS inlining for Python implemented in Rust._
- [datafusion-python](https://github.com/apache/arrow-datafusion-python) _A Python library that binds to Apache Arrow in-memory query engine DataFusion._
- [deltalake-python](https://github.com/delta-io/delta-rs/tree/main/python) _Native Delta Lake Python binding based on delta-rs with Pandas integration._
- [fastbloom](https://github.com/yankun1992/fastbloom) _A fast [bloom filter](https://github.com/yankun1992/fastbloom#BloomFilter) | [counting bloom filter](https://github.com/yankun1992/fastbloom#countingbloomfilter) implemented by Rust for Rust and Python!_
- [fastuuid](https://github.com/thedrow/fastuuid/) _Python bindings to Rust's UUID library._
- [feos](https://github.com/feos-org/feos) _Lightning fast thermodynamic modeling in Rust with fully developed Python interface._
- [forust](https://github.com/jinlow/forust) _A lightweight gradient boosted decision tree library written in Rust._
- [granian](https://github.com/emmett-framework/granian) _A Rust HTTP server for Python applications._
- [greptimedb](https://github.com/GreptimeTeam/greptimedb/tree/main/src/script) _Support [Python scripting](https://docs.greptime.com/user-guide/python-scripts/overview) in the database_
- [haem](https://github.com/BooleanCat/haem) _A Python library for working on Bioinformatics problems._
- [html2text-rs](https://github.com/deedy5/html2text_rs) _Python library for converting HTML to markup or plain text._
- [html-py-ever](https://github.com/PyO3/setuptools-rust/tree/main/examples/html-py-ever) _Using [html5ever](https://github.com/servo/html5ever) through [kuchiki](https://github.com/kuchiki-rs/kuchiki) to speed up html parsing and css-selecting._
- [hyperjson](https://github.com/mre/hyperjson) _A hyper-fast Python module for reading/writing JSON data using Rust's serde-json._
- [inline-python](https://github.com/fusion-engineering/inline-python) _Inline Python code directly in your Rust code._
- [johnnycanencrypt](https://github.com/kushaldas/johnnycanencrypt) OpenPGP library with Yubikey support.
- [jsonschema](https://github.com/Stranger6667/jsonschema/tree/master/crates/jsonschema-py) _A high-performance JSON Schema validator for Python._
- [mocpy](https://github.com/cds-astro/mocpy) _Astronomical Python library offering data structures for describing any arbitrary coverage regions on the unit sphere._
- [opendal](https://github.com/apache/opendal/tree/main/bindings/python) _A data access layer that allows users to easily and efficiently retrieve data from various storage services in a unified way._
- [orjson](https://github.com/ijl/orjson) _Fast Python JSON library._
- [ormsgpack](https://github.com/aviramha/ormsgpack) _Fast Python msgpack library._
- [point-process](https://github.com/ManifoldFR/point-process-rust/tree/master/pylib) _High level API for pointprocesses as a Python library._
- [polaroid](https://github.com/daggy1234/polaroid) _Hyper Fast and safe image manipulation library for Python written in Rust._
- [polars](https://github.com/pola-rs/polars) _Fast multi-threaded DataFrame library in Rust | Python | Node.js._
- [pydantic-core](https://github.com/pydantic/pydantic-core) _Core validation logic for pydantic written in Rust._
- [pyheck](https://github.com/kevinheavey/pyheck) _Fast case conversion library, built by wrapping [heck](https://github.com/withoutboats/heck)._
- Quite easy to follow as there's not much code.
- [pyre](https://github.com/Project-Dream-Weaver/pyre-http) _Fast Python HTTP server written in Rust._
- [primp](https://github.com/deedy5/primp) _The fastest python HTTP client that can impersonate web browsers by mimicking their headers and TLS/JA3/JA4/HTTP2 fingerprints._
- [rateslib](https://github.com/attack68/rateslib) _A fixed income library for Python using Rust extensions._
- [ril-py](https://github.com/Cryptex-github/ril-py) _A performant and high-level image processing library for Python written in Rust._
- [river](https://github.com/online-ml/river) _Online machine learning in python, the computationally heavy statistics algorithms are implemented in Rust._
- [robyn](https://github.com/sparckles/Robyn) A Super Fast Async Python Web Framework with a Rust runtime.
- [rust-python-coverage](https://github.com/cjermain/rust-python-coverage) _Example PyO3 project with automated test coverage for Rust and Python._
- [sail](https://github.com/lakehq/sail) _Unifying stream, batch, and AI workloads with Apache Spark compatibility._
- [tiktoken](https://github.com/openai/tiktoken) _A fast BPE tokeniser for use with OpenAI's models._
- [tokenizers](https://github.com/huggingface/tokenizers/tree/main/bindings/python) _Python bindings to the Hugging Face tokenizers (NLP) written in Rust._
- [tzfpy](http://github.com/ringsaturn/tzfpy) _A fast package to convert longitude/latitude to timezone name._
- [utiles](https://github.com/jessekrubin/utiles) _Fast Python web-map tile utilities_
- [wasmer-python](https://github.com/wasmerio/wasmer-python) _Python library to run WebAssembly binaries._
## Articles and other media
- [(Video) PyO3: From Python to Rust and Back Again](https://www.youtube.com/watch?v=UmL_CA-v3O8) - Jul 3, 2024
- [Parsing Python ASTs 20x Faster with Rust](https://www.gauge.sh/blog/parsing-python-asts-20x-faster-with-rust) - Jun 17, 2024
- [(Video) How Python Harnesses Rust through PyO3](https://www.youtube.com/watch?v=UkZ_m3Wj2hA) - May 18, 2024
- [(Video) Combining Rust and Python: The Best of Both Worlds?](https://www.youtube.com/watch?v=lyG6AKzu4ew) - Mar 1, 2024
- [(Video) Extending Python with Rust using PyO3](https://www.youtube.com/watch?v=T45ZEmSR1-s) - Dec 16, 2023
- [A Week of PyO3 + rust-numpy (How to Speed Up Your Data Pipeline X Times)](https://terencezl.github.io/blog/2023/06/06/a-week-of-pyo3-rust-numpy/) - Jun 6, 2023
- [(Podcast) PyO3 with David Hewitt](https://rustacean-station.org/episode/david-hewitt/) - May 19, 2023
- [Making Python 100x faster with less than 100 lines of Rust](https://ohadravid.github.io/posts/2023-03-rusty-python/) - Mar 28, 2023
- [How Pydantic V2 leverages Rust's Superpowers](https://fosdem.org/2023/schedule/event/rust_how_pydantic_v2_leverages_rusts_superpowers/) - Feb 4, 2023
- [How we extended the River stats module with Rust using PyO3](https://boring-guy.sh/posts/river-rust/) - Dec 23, 2022
- [Nine Rules for Writing Python Extensions in Rust](https://towardsdatascience.com/nine-rules-for-writing-python-extensions-in-rust-d35ea3a4ec29?sk=f8d808d5f414154fdb811e4137011437) - Dec 31, 2021
- [Calling Rust from Python using PyO3](https://saidvandeklundert.net/learn/2021-11-18-calling-rust-from-python-using-pyo3/) - Nov 18, 2021
- [davidhewitt's 2021 talk at Rust Manchester meetup](https://www.youtube.com/watch?v=-XyWG_klSAw&t=320s) - Aug 19, 2021
- [Incrementally porting a small Python project to Rust](https://blog.waleedkhan.name/port-python-to-rust/) - Apr 29, 2021
- [Vortexa - Integrating Rust into Python](https://www.vortexa.com/insight/integrating-rust-into-python) - Apr 12, 2021
- [Writing and publishing a Python module in Rust](https://blog.yossarian.net/2020/08/02/Writing-and-publishing-a-python-module-in-rust) - Aug 2, 2020
## Contributing
Everyone is welcomed to contribute to PyO3! There are many ways to support the project, such as:
- help PyO3 users with issues on GitHub and [Discord](https://discord.gg/33kcChzH7f)
- improve documentation
- write features and bugfixes
- publish blogs and examples of how to use PyO3
Our [contributing notes](https://github.com/PyO3/pyo3/blob/main/Contributing.md) and [architecture guide](https://github.com/PyO3/pyo3/blob/main/Architecture.md) have more resources if you wish to volunteer time for PyO3 and are searching where to start.
If you don't have time to contribute yourself but still wish to support the project's future success, some of our maintainers have GitHub sponsorship pages:
- [davidhewitt](https://github.com/sponsors/davidhewitt)
- [messense](https://github.com/sponsors/messense)
## License
PyO3 is licensed under the [Apache-2.0 license](LICENSE-APACHE) or the [MIT license](LICENSE-MIT), at your option.
Python is licensed under the [Python License](https://docs.python.org/3/license.html).
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in PyO3 by you, as defined in the Apache License, shall be dual-licensed as above, without any additional terms or conditions.
<a href="https://www.netlify.com"> <img src="https://www.netlify.com/v3/img/components/netlify-color-accent.svg" alt="Deploys by Netlify" /> </a>
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/netlify.toml | [build]
publish = "netlify_build/"
command = ".netlify/build.sh"
[build.environment]
PYTHON_VERSION = "3.8"
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/LICENSE-MIT | Copyright (c) 2023-present PyO3 Project and Contributors. https://github.com/PyO3
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/codecov.yml | comment: off
coverage:
status:
project:
default:
target: auto
# Allow a tiny drop of overall project coverage in PR to reduce spurious failures.
threshold: 0.25%
ignore:
- tests/
- pytests/
- src/test_hygiene/*.rs
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/Releasing.md | # Releasing
This is notes for the current process of releasing a new PyO3 version. Replace `<version>` in all instructions below with the new version.
## 1. Prepare the release commit
Follow the process below to update all required pieces to bump the version. All these changes are done in a single commit because it makes it clear to git readers what happened to bump the version. It also makes it easy to cherry-pick the version bump onto the `main` branch when tidying up branch history at the end of the release process.
1. Replace all instances of the PyO3 current version and the with the new version to be released. Places to check:
- `Cargo.toml` for all PyO3 crates in the repository.
- Examples in `README.md`
- PyO3 version embedded into documentation like the README.
- `pre-script.rhai` templates for the examples.
- `[towncrier]` section in `pyproject.toml`.
Some of the above locations may already have the new version with a `-dev` suffix, which needs to be removed.
**Make sure not to modify the CHANGELOG during this step!**
2. Run `towncrier build` to generate the CHANGELOG. The version used by `towncrier` should automatically be correct because of the update to `pyproject.toml` in step 1.
3. Manually edit the CHANGELOG for final notes. Steps to do:
- Adjust wording of any release lines to make them clearer for users / fix typos.
- Add a new link at the bottom for the new version, and update the `Unreleased` link.
4. Create the commit containing all the above changes, with a message of `release: <version>`. Push to `release-<BRANCH_VER>` branch on the main PyO3 repository, where `<BRANCH_VER>` depends on whether this is a major or minor release:
- for O.X.0 minor releases, just use `0.X`, e.g. `release-0.17`. This will become the maintenance branch after release.
- for 0.X.Y patch releases, use the full `0.X.Y`, e.g. `release-0.17.1`. This will be deleted after merge.
## 2. Create the release PR and draft release notes
Open a PR for the branch, and confirm that it passes CI. For `0.X.0` minor releases, the PR should be merging into `main`, for `0.X.Y` patch releases, the PR should be merging the `release-0.X` maintenance branch.
On https://github.com/PyO3/pyo3/releases, click "Draft a new release". The tag will be a new tag of `v<version>` (note preceding `v`) and target should be the `release-<BRANCH_VER>` branch you just pushed.
Write release notes which match the style of previous releases. You can get the list of contributors by running `nox -s contributors -- v<prev-version> release-<BRANCH_VER>` to get contributors from the previous version tag through to the branch tip you just pushed. (This uses the GitHub API, so you'll need to push the branch first.)
Save as a draft and wait for now.
## 3. Leave for a cooling off period
Wait a couple of days in case anyone wants to hold up the release to add bugfixes etc.
## 4. Put live
To put live:
- 1. run `nox -s publish` to put live on crates.io
- 2. publish the release on Github
- 3. merge the release PR
## 5. Tidy the main branch
If the release PR targeted a branch other than main, you will need to cherry-pick the version bumps, CHANGELOG modifications and removal of towncrier `newsfragments` and open another PR to land these on main.
## 6. Delete the release branch (patch releases only)
For 0.X.Y patch releases, the release branch is no longer needed, so it should be deleted.
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/pyproject.toml | [tool.ruff.lint.extend-per-file-ignores]
"__init__.py" = ["F403"]
[tool.towncrier]
filename = "CHANGELOG.md"
version = "0.23.0-dev"
start_string = "<!-- towncrier release notes start -->\n"
template = ".towncrier.template.md"
title_format = "## [{version}] - {project_date}"
issue_format = "[#{issue}](https://github.com/PyO3/pyo3/pull/{issue})" # Note PyO3 shows pulls, not issues, in the CHANGELOG
underlines = ["", "", ""]
[tool.towncrier.fragment.packaging]
name = "Packaging"
[tool.towncrier.fragment.added]
name = "Added"
[tool.towncrier.fragment.changed]
name = "Changed"
[tool.towncrier.fragment.removed]
name = "Removed"
[tool.towncrier.fragment.fixed]
name = "Fixed"
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/pyo3/Architecture.md | <!-- This file contains a rough overview of the PyO3 codebase. -->
<!-- Please do not make descriptions too specific, so that we can easily -->
<!-- keep this file in sync with the codebase. -->
# PyO3: Architecture
This document roughly describes the high-level architecture of PyO3.
If you want to become familiar with the codebase you are in the right place!
## Overview
PyO3 provides a bridge between Rust and Python, based on the [Python/C API].
Thus, PyO3 has low-level bindings of these API as its core.
On top of that, we have higher-level bindings to operate Python objects safely.
Also, to define Python classes and functions in Rust code, we have `trait PyClass` and a set of
protocol traits (e.g., `PyIterProtocol`) for supporting object protocols (i.e., `__dunder__` methods).
Since implementing `PyClass` requires lots of boilerplate, we have a proc-macro `#[pyclass]`.
To summarize, there are five main parts to the PyO3 codebase.
1. [Low-level bindings of Python/C API.](#1-low-level-bindings-of-python-capi)
- [`pyo3-ffi`] and [`src/ffi`]
2. [Bindings to Python objects.](#2-bindings-to-python-objects)
- [`src/instance.rs`] and [`src/types`]
3. [`PyClass` and related functionalities.](#3-pyclass-and-related-functionalities)
- [`src/pycell.rs`], [`src/pyclass.rs`], and more
4. [Procedural macros to simplify usage for users.](#4-procedural-macros-to-simplify-usage-for-users)
- [`src/impl_`], [`pyo3-macros`] and [`pyo3-macros-backend`]
5. [`build.rs` and `pyo3-build-config`](#5-buildrs-and-pyo3-build-config)
- [`build.rs`](https://github.com/PyO3/pyo3/tree/main/build.rs)
- [`pyo3-build-config`]
## 1. Low-level bindings of Python/C API
[`pyo3-ffi`] contains wrappers of the [Python/C API]. This is currently done by hand rather than
automated tooling because:
- it gives us the best control over how to adapt C conventions to Rust, and
- there are many Python interpreter versions we support in a single set of files.
We aim to provide straight-forward Rust wrappers resembling the file structure of [`cpython/Include`](https://github.com/python/cpython/tree/3.13/Include).
We are continuously updating the module to match the latest CPython version which PyO3 supports (i.e. as of time of writing Python 3.13). The tracking issue is [#1289](https://github.com/PyO3/pyo3/issues/1289), and contribution is welcome.
In the [`pyo3-ffi`] crate, there is lots of conditional compilation such as `#[cfg(Py_LIMITED_API)]`,
`#[cfg(Py_3_7)]`, and `#[cfg(PyPy)]`.
`Py_LIMITED_API` corresponds to `#define Py_LIMITED_API` macro in Python/C API.
With `Py_LIMITED_API`, we can build a Python-version-agnostic binary called an
[abi3 wheel](https://pyo3.rs/latest/building-and-distribution.html#py_limited_apiabi3).
`Py_3_7` means that the API is available from Python >= 3.7.
There are also `Py_3_8`, `Py_3_9`, and so on.
`PyPy` means that the API definition is for PyPy.
Those flags are set in [`build.rs`](#5-buildrs-and-pyo3-build-config).
## 2. Bindings to Python objects
[`src/types`] contains bindings to [built-in types](https://docs.python.org/3/library/stdtypes.html)
of Python, such as `dict` and `list`.
For historical reasons, Python's `object` is called `PyAny` in PyO3 and located in [`src/types/any.rs`].
Currently, `PyAny` is a straightforward wrapper of `ffi::PyObject`, defined as:
```rust
#[repr(transparent)]
pub struct PyAny(UnsafeCell<ffi::PyObject>);
```
Concrete Python objects are implemented by wrapping `PyAny`, e.g.,:
```rust
#[repr(transparent)]
pub struct PyDict(PyAny);
```
These types are not intended to be accessed directly, and instead are used through the `Py<T>` and `Bound<T>` smart pointers.
We have some macros in [`src/types/mod.rs`] which make it easier to implement APIs for concrete Python types.
## 3. `PyClass` and related functionalities
[`src/pycell.rs`], [`src/pyclass.rs`], and [`src/type_object.rs`] contain types and
traits to make `#[pyclass]` work.
Also, [`src/pyclass_init.rs`] and [`src/impl_/pyclass.rs`] have related functionalities.
To realize object-oriented programming in C, all Python objects have `ob_base: PyObject` as their
first field in their structure definition. Thanks to this guarantee, casting `*mut A` to `*mut PyObject`
is valid if `A` is a Python object.
To ensure this guarantee, we have a wrapper struct `PyCell<T>` in [`src/pycell.rs`] which is roughly:
```rust
#[repr(C)]
pub struct PyCell<T: PyClass> {
ob_base: crate::ffi::PyObject,
inner: T,
}
```
Thus, when copying a Rust struct to a Python object, we first allocate `PyCell` on the Python heap and then
move `T` into it.
Also, `PyCell` provides [RefCell](https://doc.rust-lang.org/std/cell/struct.RefCell.html)-like methods
to ensure Rust's borrow rules.
See [the documentation](https://docs.rs/pyo3/0.23.1/pyo3/pycell/#structs) for more.
`PyCell<T>` requires that `T` implements `PyClass`.
This trait is somewhat complex and derives many traits, but the most important one is `PyTypeInfo`
in [`src/type_object.rs`].
`PyTypeInfo` is also implemented for built-in types.
In Python, all objects have their types, and types are also objects of `type`.
For example, you can see `type({})` shows `dict` and `type(type({}))` shows `type` in Python REPL.
`T: PyTypeInfo` implies that `T` has a corresponding type object.
### Protocol methods
Python has some built-in special methods called dunder methods, such as `__iter__`.
They are called "slots" in the [abstract objects layer](https://docs.python.org/3/c-api/abstract.html) in
Python/C API.
We provide a way to implement those protocols similarly, by recognizing special
names in `#[pymethods]`, with a few new ones for slots that can not be
implemented in Python, such as GC support.
## 4. Procedural macros to simplify usage for users.
[`pyo3-macros`] provides five proc-macro APIs: `pymodule`, `pyfunction`, `pyclass`,
`pymethods`, and `#[derive(FromPyObject)]`.
[`pyo3-macros-backend`] has the actual implementations of these APIs.
[`src/impl_`] contains `#[doc(hidden)]` functionality used in code generated by these proc-macros,
such as parsing function arguments.
## 5. `build.rs` and `pyo3-build-config`
PyO3 supports a wide range of OSes, interpreters and use cases. The correct environment must be
detected at build time in order to set up relevant conditional compilation correctly. This logic
is captured in the [`pyo3-build-config`] crate, which is a `build-dependency` of `pyo3` and
`pyo3-macros`, and can also be used by downstream users in the same way.
In [`pyo3-build-config`]'s `build.rs` the build environment is detected and inlined into the crate
as a "config file". This works in all cases except for cross-compiling, where it is necessary to
capture this from the `pyo3` `build.rs` to get some extra environment variables that Cargo doesn't
set for build dependencies.
The `pyo3` `build.rs` also runs some safety checks such as ensuring the Python version detected is
actually supported.
Some of the functionality of `pyo3-build-config`:
- Find the interpreter for build and detect the Python version.
- We have to set some version flags like `#[cfg(Py_3_7)]`.
- If the interpreter is PyPy, we set `#[cfg(PyPy)]`.
- If the `PYO3_CONFIG_FILE` environment variable is set then that file's contents will be used
instead of any detected configuration.
- If the `PYO3_NO_PYTHON` environment variable is set then the interpreter detection is bypassed
entirely and only abi3 extensions can be built.
- Check if we are building a Python extension.
- If we are building an extension (e.g., Python library installable by `pip`),
we don't link `libpython`.
Currently we use the `extension-module` feature for this purpose. This may change in the future.
See [#1123](https://github.com/PyO3/pyo3/pull/1123).
- Cross-compiling configuration
- If `TARGET` architecture and `HOST` architecture differ, we can find cross compile information
from environment variables (`PYO3_CROSS_LIB_DIR`, `PYO3_CROSS_PYTHON_VERSION` and
`PYO3_CROSS_PYTHON_IMPLEMENTATION`) or system files.
When cross compiling extension modules it is often possible to make it work without any
additional user input.
- When an experimental feature `generate-import-lib` is enabled, the `pyo3-ffi` build script can
generate `python3.dll` import libraries for Windows targets automatically via an external
[`python3-dll-a`] crate. This enables the users to cross compile Python extensions for Windows without
having to install any Windows Python libraries.
<!-- External Links -->
[python/c api]: https://docs.python.org/3/c-api/
[`python3-dll-a`]: https://docs.rs/python3-dll-a/latest/python3_dll_a/
<!-- Crates -->
[`pyo3-macros`]: https://github.com/PyO3/pyo3/tree/main/pyo3-macros
[`pyo3-macros-backend`]: https://github.com/PyO3/pyo3/tree/main/pyo3-macros-backend
[`pyo3-build-config`]: https://github.com/PyO3/pyo3/tree/main/pyo3-build-config
[`pyo3-ffi`]: https://github.com/PyO3/pyo3/tree/main/pyo3-ffi
<!-- Directories -->
[`src/class`]: https://github.com/PyO3/pyo3/tree/main/src/class
[`src/ffi`]: https://github.com/PyO3/pyo3/tree/main/src/ffi
[`src/types`]: https://github.com/PyO3/pyo3/tree/main/src/types
<!-- Files -->
[`src/impl_`]: https://github.com/PyO3/pyo3/blob/main/src/impl_
[`src/instance.rs`]: https://github.com/PyO3/pyo3/tree/main/src/instance.rs
[`src/pycell.rs`]: https://github.com/PyO3/pyo3/tree/main/src/pycell.rs
[`src/pyclass.rs`]: https://github.com/PyO3/pyo3/tree/main/src/pyclass.rs
[`src/pyclass_init.rs`]: https://github.com/PyO3/pyo3/tree/main/src/pyclass_init.rs
[`src/pyclass_slot.rs`]: https://github.com/PyO3/pyo3/tree/main/src/pyclass_slot.rs
[`src/type_object.rs`]: https://github.com/PyO3/pyo3/tree/main/src/type_object.rs
[`src/class/methods.rs`]: https://github.com/PyO3/pyo3/tree/main/src/class/methods.rs
[`src/class/impl_.rs`]: https://github.com/PyO3/pyo3/tree/main/src/class/impl_.rs
[`src/types/any.rs`]: https://github.com/PyO3/pyo3/tree/main/src/types/any.rs
[`src/types/mod.rs`]: https://github.com/PyO3/pyo3/tree/main/src/types/mod.rs
|
0 | lc_public_repos/langsmith-sdk/vendor/pyo3 | lc_public_repos/langsmith-sdk/vendor/pyo3/pyo3-runtime/LICENSE-APACHE | Copyright (c) 2017-present PyO3 Project and Contributors. https://github.com/PyO3
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.