index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src/cpython/pythonrun.rs | use crate::object::*;
#[cfg(not(any(PyPy, GraalPy, Py_LIMITED_API, Py_3_10)))]
use crate::pyarena::PyArena;
use crate::PyCompilerFlags;
#[cfg(not(any(PyPy, GraalPy, Py_3_10)))]
use crate::{_mod, _node};
use libc::FILE;
use std::os::raw::{c_char, c_int};
extern "C" {
pub fn PyRun_SimpleStringFlags(arg1: *const c_char, arg2: *mut PyCompilerFlags) -> c_int;
pub fn _PyRun_SimpleFileObject(
fp: *mut FILE,
filename: *mut PyObject,
closeit: c_int,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn PyRun_AnyFileExFlags(
fp: *mut FILE,
filename: *const c_char,
closeit: c_int,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn _PyRun_AnyFileObject(
fp: *mut FILE,
filename: *mut PyObject,
closeit: c_int,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn PyRun_SimpleFileExFlags(
fp: *mut FILE,
filename: *const c_char,
closeit: c_int,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn PyRun_InteractiveOneFlags(
fp: *mut FILE,
filename: *const c_char,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn PyRun_InteractiveOneObject(
fp: *mut FILE,
filename: *mut PyObject,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn PyRun_InteractiveLoopFlags(
fp: *mut FILE,
filename: *const c_char,
flags: *mut PyCompilerFlags,
) -> c_int;
pub fn _PyRun_InteractiveLoopObject(
fp: *mut FILE,
filename: *mut PyObject,
flags: *mut PyCompilerFlags,
) -> c_int;
#[cfg(not(any(PyPy, GraalPy, Py_3_10)))]
pub fn PyParser_ASTFromString(
s: *const c_char,
filename: *const c_char,
start: c_int,
flags: *mut PyCompilerFlags,
arena: *mut PyArena,
) -> *mut _mod;
#[cfg(not(any(PyPy, GraalPy, Py_3_10)))]
pub fn PyParser_ASTFromStringObject(
s: *const c_char,
filename: *mut PyObject,
start: c_int,
flags: *mut PyCompilerFlags,
arena: *mut PyArena,
) -> *mut _mod;
#[cfg(not(any(PyPy, GraalPy, Py_3_10)))]
pub fn PyParser_ASTFromFile(
fp: *mut FILE,
filename: *const c_char,
enc: *const c_char,
start: c_int,
ps1: *const c_char,
ps2: *const c_char,
flags: *mut PyCompilerFlags,
errcode: *mut c_int,
arena: *mut PyArena,
) -> *mut _mod;
#[cfg(not(any(PyPy, GraalPy, Py_3_10)))]
pub fn PyParser_ASTFromFileObject(
fp: *mut FILE,
filename: *mut PyObject,
enc: *const c_char,
start: c_int,
ps1: *const c_char,
ps2: *const c_char,
flags: *mut PyCompilerFlags,
errcode: *mut c_int,
arena: *mut PyArena,
) -> *mut _mod;
}
extern "C" {
#[cfg_attr(PyPy, link_name = "PyPyRun_StringFlags")]
pub fn PyRun_StringFlags(
arg1: *const c_char,
arg2: c_int,
arg3: *mut PyObject,
arg4: *mut PyObject,
arg5: *mut PyCompilerFlags,
) -> *mut PyObject;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyRun_FileExFlags(
fp: *mut FILE,
filename: *const c_char,
start: c_int,
globals: *mut PyObject,
locals: *mut PyObject,
closeit: c_int,
flags: *mut PyCompilerFlags,
) -> *mut PyObject;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn Py_CompileStringExFlags(
str: *const c_char,
filename: *const c_char,
start: c_int,
flags: *mut PyCompilerFlags,
optimize: c_int,
) -> *mut PyObject;
#[cfg(not(Py_LIMITED_API))]
pub fn Py_CompileStringObject(
str: *const c_char,
filename: *mut PyObject,
start: c_int,
flags: *mut PyCompilerFlags,
optimize: c_int,
) -> *mut PyObject;
}
/// Compiles `string` as Python source with no compiler flags and the
/// interpreter's default optimization level (`-1`).
///
/// Thin wrapper over `Py_CompileStringExFlags`. `p` is the filename shown
/// in tracebacks/errors; `s` is the start token (e.g. `Py_file_input`).
#[inline]
#[cfg(not(any(PyPy, GraalPy)))]
pub unsafe fn Py_CompileString(string: *const c_char, p: *const c_char, s: c_int) -> *mut PyObject {
    Py_CompileStringExFlags(string, p, s, std::ptr::null_mut(), -1)
}
/// Compiles `string` as Python source with explicit compiler flags `f`,
/// using the interpreter's default optimization level (`-1`).
///
/// Thin wrapper over `Py_CompileStringExFlags`; see `Py_CompileString`
/// for the meaning of `p` (filename) and `s` (start token).
#[inline]
#[cfg(not(any(PyPy, GraalPy)))]
pub unsafe fn Py_CompileStringFlags(
    string: *const c_char,
    p: *const c_char,
    s: c_int,
    f: *mut PyCompilerFlags,
) -> *mut PyObject {
    Py_CompileStringExFlags(string, p, s, f, -1)
}
// skipped _Py_SourceAsString
extern "C" {
#[cfg_attr(PyPy, link_name = "PyPyRun_String")]
pub fn PyRun_String(
string: *const c_char,
s: c_int,
g: *mut PyObject,
l: *mut PyObject,
) -> *mut PyObject;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyRun_AnyFile(fp: *mut FILE, name: *const c_char) -> c_int;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyRun_AnyFileEx(fp: *mut FILE, name: *const c_char, closeit: c_int) -> c_int;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyRun_AnyFileFlags(
arg1: *mut FILE,
arg2: *const c_char,
arg3: *mut PyCompilerFlags,
) -> c_int;
#[cfg_attr(PyPy, link_name = "PyPyRun_SimpleString")]
pub fn PyRun_SimpleString(s: *const c_char) -> c_int;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyRun_SimpleFile(f: *mut FILE, p: *const c_char) -> c_int;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyRun_SimpleFileEx(f: *mut FILE, p: *const c_char, c: c_int) -> c_int;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyRun_InteractiveOne(f: *mut FILE, p: *const c_char) -> c_int;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyRun_InteractiveLoop(f: *mut FILE, p: *const c_char) -> c_int;
#[cfg_attr(PyPy, link_name = "PyPyRun_File")]
pub fn PyRun_File(
fp: *mut FILE,
p: *const c_char,
s: c_int,
g: *mut PyObject,
l: *mut PyObject,
) -> *mut PyObject;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyRun_FileEx(
fp: *mut FILE,
p: *const c_char,
s: c_int,
g: *mut PyObject,
l: *mut PyObject,
c: c_int,
) -> *mut PyObject;
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyRun_FileFlags(
fp: *mut FILE,
p: *const c_char,
s: c_int,
g: *mut PyObject,
l: *mut PyObject,
flags: *mut PyCompilerFlags,
) -> *mut PyObject;
}
// skipped macro PyRun_String
// skipped macro PyRun_AnyFile
// skipped macro PyRun_AnyFileEx
// skipped macro PyRun_AnyFileFlags
extern "C" {
#[cfg(not(any(PyPy, GraalPy, Py_3_10)))]
#[cfg_attr(Py_3_9, deprecated(note = "Python 3.9"))]
pub fn PyParser_SimpleParseStringFlags(
arg1: *const c_char,
arg2: c_int,
arg3: c_int,
) -> *mut _node;
#[cfg(not(any(PyPy, GraalPy, Py_3_10)))]
#[cfg_attr(Py_3_9, deprecated(note = "Python 3.9"))]
pub fn PyParser_SimpleParseStringFlagsFilename(
arg1: *const c_char,
arg2: *const c_char,
arg3: c_int,
arg4: c_int,
) -> *mut _node;
#[cfg(not(any(PyPy, GraalPy, Py_3_10)))]
#[cfg_attr(Py_3_9, deprecated(note = "Python 3.9"))]
pub fn PyParser_SimpleParseFileFlags(
arg1: *mut FILE,
arg2: *const c_char,
arg3: c_int,
arg4: c_int,
) -> *mut _node;
#[cfg(PyPy)]
#[cfg_attr(PyPy, link_name = "PyPy_CompileStringFlags")]
pub fn Py_CompileStringFlags(
string: *const c_char,
p: *const c_char,
s: c_int,
f: *mut PyCompilerFlags,
) -> *mut PyObject;
}
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src/cpython/tupleobject.rs | use crate::object::*;
#[cfg(not(PyPy))]
use crate::pyport::Py_ssize_t;
/// Binding for CPython's `PyTupleObject` layout: a variable-size object
/// header followed by an inline array of item pointers. `ob_item` is
/// declared with length 1 (the C flexible-array idiom); the actual number
/// of elements is carried by the `ob_base` size field.
#[repr(C)]
pub struct PyTupleObject {
    pub ob_base: PyVarObject,
    #[cfg(not(GraalPy))]
    pub ob_item: [*mut PyObject; 1],
}
// skipped _PyTuple_Resize
// skipped _PyTuple_MaybeUntrack
// skipped _PyTuple_CAST
/// Macro, trading safety for speed.
///
/// Returns the tuple's length via `Py_SIZE` with no type or null checking;
/// the caller must guarantee `op` points to a valid tuple object.
#[inline]
#[cfg(not(PyPy))]
pub unsafe fn PyTuple_GET_SIZE(op: *mut PyObject) -> Py_ssize_t {
    Py_SIZE(op)
}
/// Unchecked tuple item access: reads `ob_item[i]` directly with no type,
/// bounds, or null checking. The caller must guarantee `op` is a valid
/// tuple and `0 <= i < PyTuple_GET_SIZE(op)`.
#[inline]
#[cfg(not(any(PyPy, GraalPy)))]
pub unsafe fn PyTuple_GET_ITEM(op: *mut PyObject, i: Py_ssize_t) -> *mut PyObject {
    *(*(op as *mut PyTupleObject)).ob_item.as_ptr().offset(i)
}
/// Macro, *only* to be used to fill in brand new tuples.
///
/// Writes `v` into `ob_item[i]` without any checks and without releasing
/// any previous item, so using it on an already-populated slot would leak
/// that reference. Caller guarantees `op` is a valid tuple and `i` is in
/// bounds.
#[inline]
#[cfg(not(any(PyPy, GraalPy)))]
pub unsafe fn PyTuple_SET_ITEM(op: *mut PyObject, i: Py_ssize_t, v: *mut PyObject) {
    *(*(op as *mut PyTupleObject)).ob_item.as_mut_ptr().offset(i) = v;
}
// skipped _PyTuple_DebugMallocStats
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src/cpython/pyframe.rs | #[cfg(Py_3_11)]
opaque_struct!(_PyInterpreterFrame);
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src/cpython/weakrefobject.rs | #[cfg(not(any(PyPy, GraalPy)))]
/// Binding for CPython's `PyWeakReference` object layout.
///
/// `#[repr(C)]` is required here: this struct mirrors a C declaration and
/// its fields are accessed through pointers handed out by the interpreter,
/// so relying on Rust's default (unspecified) field ordering would be
/// unsound.
#[repr(C)]
pub struct _PyWeakReference {
    pub ob_base: crate::PyObject,
    /// The referent object.
    pub wr_object: *mut crate::PyObject,
    /// Optional callback invoked when the referent is collected.
    pub wr_callback: *mut crate::PyObject,
    /// Cached hash of the referent.
    pub hash: crate::Py_hash_t,
    // Links in the per-object list of weak references.
    pub wr_prev: *mut crate::PyWeakReference,
    pub wr_next: *mut crate::PyWeakReference,
    #[cfg(Py_3_11)]
    pub vectorcall: Option<crate::vectorcallfunc>,
    #[cfg(all(Py_3_13, Py_GIL_DISABLED))]
    pub weakrefs_lock: *mut crate::PyMutex,
}
// skipped _PyWeakref_GetWeakrefCount
// skipped _PyWeakref_ClearRef
// skipped PyWeakRef_GET_OBJECT
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src/cpython/pylifecycle.rs | use crate::{PyConfig, PyPreConfig, PyStatus, Py_ssize_t};
use libc::wchar_t;
use std::os::raw::{c_char, c_int};
// "private" functions in cpython/pylifecycle.h accepted in PEP 587
extern "C" {
// skipped _Py_SetStandardStreamEncoding;
pub fn Py_PreInitialize(src_config: *const PyPreConfig) -> PyStatus;
pub fn Py_PreInitializeFromBytesArgs(
src_config: *const PyPreConfig,
argc: Py_ssize_t,
argv: *mut *mut c_char,
) -> PyStatus;
pub fn Py_PreInitializeFromArgs(
src_config: *const PyPreConfig,
argc: Py_ssize_t,
argv: *mut *mut wchar_t,
) -> PyStatus;
pub fn _Py_IsCoreInitialized() -> c_int;
pub fn Py_InitializeFromConfig(config: *const PyConfig) -> PyStatus;
pub fn _Py_InitializeMain() -> PyStatus;
pub fn Py_RunMain() -> c_int;
pub fn Py_ExitStatusException(status: PyStatus) -> !;
// skipped _Py_RestoreSignals
// skipped Py_FdIsInteractive
// skipped _Py_FdIsInteractive
// skipped _Py_SetProgramFullPath
// skipped _Py_gitidentifier
// skipped _Py_getversion
// skipped _Py_IsFinalizing
// skipped _PyOS_URandom
// skipped _PyOS_URandomNonblock
// skipped _Py_CoerceLegacyLocale
// skipped _Py_LegacyLocaleDetected
// skipped _Py_SetLocaleFromEnv
}
#[cfg(Py_3_12)]
pub const PyInterpreterConfig_DEFAULT_GIL: c_int = 0;
#[cfg(Py_3_12)]
pub const PyInterpreterConfig_SHARED_GIL: c_int = 1;
#[cfg(Py_3_12)]
pub const PyInterpreterConfig_OWN_GIL: c_int = 2;
#[cfg(Py_3_12)]
/// Configuration for creating a sub-interpreter via
/// `Py_NewInterpreterFromConfig` (Python 3.12+). All fields are C-style
/// boolean flags except `gil`, which takes one of the
/// `PyInterpreterConfig_*_GIL` constants above.
#[repr(C)]
pub struct PyInterpreterConfig {
    pub use_main_obmalloc: c_int,
    pub allow_fork: c_int,
    pub allow_exec: c_int,
    pub allow_threads: c_int,
    pub allow_daemon_threads: c_int,
    pub check_multi_interp_extensions: c_int,
    pub gil: c_int,
}
#[cfg(Py_3_12)]
/// Default config for an isolated sub-interpreter: own GIL, no fork/exec,
/// no daemon threads, multi-interpreter extension check enforced.
pub const _PyInterpreterConfig_INIT: PyInterpreterConfig = PyInterpreterConfig {
    use_main_obmalloc: 0,
    allow_fork: 0,
    allow_exec: 0,
    allow_threads: 1,
    allow_daemon_threads: 0,
    check_multi_interp_extensions: 1,
    gil: PyInterpreterConfig_OWN_GIL,
};
#[cfg(Py_3_12)]
/// Config matching pre-3.12 sub-interpreter behavior: shared GIL, shared
/// obmalloc, fork/exec and daemon threads permitted, no extension check.
pub const _PyInterpreterConfig_LEGACY_INIT: PyInterpreterConfig = PyInterpreterConfig {
    use_main_obmalloc: 1,
    allow_fork: 1,
    allow_exec: 1,
    allow_threads: 1,
    allow_daemon_threads: 1,
    check_multi_interp_extensions: 0,
    gil: PyInterpreterConfig_SHARED_GIL,
};
extern "C" {
#[cfg(Py_3_12)]
pub fn Py_NewInterpreterFromConfig(
tstate_p: *mut *mut crate::PyThreadState,
config: *const PyInterpreterConfig,
) -> PyStatus;
}
// skipped atexit_datacallbackfunc
// skipped _Py_AtExit
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src/cpython/import.rs | use crate::{PyInterpreterState, PyObject};
#[cfg(not(PyPy))]
use std::os::raw::c_uchar;
use std::os::raw::{c_char, c_int};
// skipped PyInit__imp
extern "C" {
pub fn _PyImport_IsInitialized(state: *mut PyInterpreterState) -> c_int;
// skipped _PyImport_GetModuleId
pub fn _PyImport_SetModule(name: *mut PyObject, module: *mut PyObject) -> c_int;
pub fn _PyImport_SetModuleString(name: *const c_char, module: *mut PyObject) -> c_int;
pub fn _PyImport_AcquireLock();
pub fn _PyImport_ReleaseLock() -> c_int;
#[cfg(not(Py_3_9))]
pub fn _PyImport_FindBuiltin(name: *const c_char, modules: *mut PyObject) -> *mut PyObject;
#[cfg(not(Py_3_11))]
pub fn _PyImport_FindExtensionObject(a: *mut PyObject, b: *mut PyObject) -> *mut PyObject;
pub fn _PyImport_FixupBuiltin(
module: *mut PyObject,
name: *const c_char,
modules: *mut PyObject,
) -> c_int;
pub fn _PyImport_FixupExtensionObject(
a: *mut PyObject,
b: *mut PyObject,
c: *mut PyObject,
d: *mut PyObject,
) -> c_int;
}
#[cfg(not(PyPy))]
/// Entry in the built-in module table (`PyImport_Inittab`): a module name
/// paired with its init function. Used to register extra built-in modules
/// before interpreter startup via `PyImport_ExtendInittab`.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct _inittab {
    pub name: *const c_char,
    pub initfunc: Option<unsafe extern "C" fn() -> *mut PyObject>,
}
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
#[cfg(not(PyPy))]
pub static mut PyImport_Inittab: *mut _inittab;
}
extern "C" {
#[cfg(not(PyPy))]
pub fn PyImport_ExtendInittab(newtab: *mut _inittab) -> c_int;
}
#[cfg(not(PyPy))]
/// Entry in the frozen-module table (`PyImport_FrozenModules`): a module
/// name plus its marshalled code object bytes and size.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct _frozen {
    pub name: *const c_char,
    pub code: *const c_uchar,
    pub size: c_int,
    // Added in 3.11.
    #[cfg(Py_3_11)]
    pub is_package: c_int,
    // Present only in 3.11/3.12; removed again in 3.13.
    #[cfg(all(Py_3_11, not(Py_3_13)))]
    pub get_code: Option<unsafe extern "C" fn() -> *mut PyObject>,
}
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
#[cfg(not(PyPy))]
pub static mut PyImport_FrozenModules: *const _frozen;
#[cfg(all(not(PyPy), Py_3_11))]
pub static mut _PyImport_FrozenBootstrap: *const _frozen;
#[cfg(all(not(PyPy), Py_3_11))]
pub static mut _PyImport_FrozenStdlib: *const _frozen;
#[cfg(all(not(PyPy), Py_3_11))]
pub static mut _PyImport_FrozenTest: *const _frozen;
}
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src/cpython/pymem.rs | use libc::size_t;
use std::os::raw::c_void;
extern "C" {
#[cfg_attr(PyPy, link_name = "PyPyMem_RawMalloc")]
pub fn PyMem_RawMalloc(size: size_t) -> *mut c_void;
#[cfg_attr(PyPy, link_name = "PyPyMem_RawCalloc")]
pub fn PyMem_RawCalloc(nelem: size_t, elsize: size_t) -> *mut c_void;
#[cfg_attr(PyPy, link_name = "PyPyMem_RawRealloc")]
pub fn PyMem_RawRealloc(ptr: *mut c_void, new_size: size_t) -> *mut c_void;
#[cfg_attr(PyPy, link_name = "PyPyMem_RawFree")]
pub fn PyMem_RawFree(ptr: *mut c_void);
// skipped _PyMem_GetCurrentAllocatorName
// skipped _PyMem_RawStrdup
// skipped _PyMem_Strdup
// skipped _PyMem_RawWcsdup
}
/// Selects which of CPython's allocator domains an operation targets:
/// `RAW` (PyMem_Raw* — usable without the GIL), `MEM` (PyMem_*), or
/// `OBJ` (PyObject_*). See the CPython memory-management C-API docs.
#[repr(C)]
#[derive(Copy, Clone)]
pub enum PyMemAllocatorDomain {
    PYMEM_DOMAIN_RAW,
    PYMEM_DOMAIN_MEM,
    PYMEM_DOMAIN_OBJ,
}
// skipped PyMemAllocatorName
#[cfg(not(any(PyPy, GraalPy)))]
/// A pluggable allocator vtable for one allocator domain, installed or
/// queried with `PyMem_SetAllocator` / `PyMem_GetAllocator`. `ctx` is an
/// opaque user pointer passed back to every callback.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct PyMemAllocatorEx {
    pub ctx: *mut c_void,
    pub malloc: Option<extern "C" fn(ctx: *mut c_void, size: size_t) -> *mut c_void>,
    pub calloc:
        Option<extern "C" fn(ctx: *mut c_void, nelem: size_t, elsize: size_t) -> *mut c_void>,
    pub realloc:
        Option<extern "C" fn(ctx: *mut c_void, ptr: *mut c_void, new_size: size_t) -> *mut c_void>,
    pub free: Option<extern "C" fn(ctx: *mut c_void, ptr: *mut c_void)>,
}
extern "C" {
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyMem_GetAllocator(domain: PyMemAllocatorDomain, allocator: *mut PyMemAllocatorEx);
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyMem_SetAllocator(domain: PyMemAllocatorDomain, allocator: *mut PyMemAllocatorEx);
#[cfg(not(any(PyPy, GraalPy)))]
pub fn PyMem_SetupDebugHooks();
}
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src/cpython/mod.rs | pub(crate) mod abstract_;
// skipped bytearrayobject.h
pub(crate) mod bytesobject;
#[cfg(not(PyPy))]
pub(crate) mod ceval;
pub(crate) mod code;
pub(crate) mod compile;
pub(crate) mod complexobject;
#[cfg(Py_3_13)]
pub(crate) mod critical_section;
pub(crate) mod descrobject;
#[cfg(not(PyPy))]
pub(crate) mod dictobject;
// skipped fileobject.h
// skipped fileutils.h
pub(crate) mod frameobject;
pub(crate) mod funcobject;
pub(crate) mod genobject;
pub(crate) mod import;
#[cfg(all(Py_3_8, not(PyPy)))]
pub(crate) mod initconfig;
// skipped interpreteridobject.h
pub(crate) mod listobject;
#[cfg(Py_3_13)]
pub(crate) mod lock;
pub(crate) mod longobject;
#[cfg(all(Py_3_9, not(PyPy)))]
pub(crate) mod methodobject;
pub(crate) mod object;
pub(crate) mod objimpl;
pub(crate) mod pydebug;
pub(crate) mod pyerrors;
#[cfg(all(Py_3_8, not(PyPy)))]
pub(crate) mod pylifecycle;
pub(crate) mod pymem;
pub(crate) mod pystate;
pub(crate) mod pythonrun;
// skipped sysmodule.h
pub(crate) mod floatobject;
pub(crate) mod pyframe;
pub(crate) mod tupleobject;
pub(crate) mod unicodeobject;
pub(crate) mod weakrefobject;
pub use self::abstract_::*;
pub use self::bytesobject::*;
#[cfg(not(PyPy))]
pub use self::ceval::*;
pub use self::code::*;
pub use self::compile::*;
pub use self::complexobject::*;
#[cfg(Py_3_13)]
pub use self::critical_section::*;
pub use self::descrobject::*;
#[cfg(not(PyPy))]
pub use self::dictobject::*;
pub use self::floatobject::*;
pub use self::frameobject::*;
pub use self::funcobject::*;
pub use self::genobject::*;
pub use self::import::*;
#[cfg(all(Py_3_8, not(PyPy)))]
pub use self::initconfig::*;
pub use self::listobject::*;
#[cfg(Py_3_13)]
pub use self::lock::*;
pub use self::longobject::*;
#[cfg(all(Py_3_9, not(PyPy)))]
pub use self::methodobject::*;
pub use self::object::*;
pub use self::objimpl::*;
pub use self::pydebug::*;
pub use self::pyerrors::*;
#[cfg(Py_3_11)]
pub use self::pyframe::*;
#[cfg(all(Py_3_8, not(PyPy)))]
pub use self::pylifecycle::*;
pub use self::pymem::*;
pub use self::pystate::*;
pub use self::pythonrun::*;
pub use self::tupleobject::*;
pub use self::unicodeobject::*;
#[cfg(not(any(PyPy, GraalPy)))]
pub use self::weakrefobject::*;
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src/cpython/critical_section.rs | #[cfg(Py_GIL_DISABLED)]
use crate::PyMutex;
use crate::PyObject;
/// Mirrors CPython's `PyCriticalSection` layout on free-threaded
/// (`Py_GIL_DISABLED`) builds. Fields are interpreter-internal; Rust code
/// should only pass pointers to the `PyCriticalSection_*` functions.
#[repr(C)]
#[cfg(Py_GIL_DISABLED)]
pub struct PyCriticalSection {
    _cs_prev: usize,
    _cs_mutex: *mut PyMutex,
}

/// Two-mutex variant of [`PyCriticalSection`], for locking two objects at
/// once via `PyCriticalSection2_Begin`.
#[repr(C)]
#[cfg(Py_GIL_DISABLED)]
pub struct PyCriticalSection2 {
    _cs_base: PyCriticalSection,
    _cs_mutex2: *mut PyMutex,
}

// On GIL-enabled builds the structs are opaque: their layout is never
// inspected from Rust; only pointers are passed through the API.
#[cfg(not(Py_GIL_DISABLED))]
opaque_struct!(PyCriticalSection);
#[cfg(not(Py_GIL_DISABLED))]
opaque_struct!(PyCriticalSection2);
extern "C" {
pub fn PyCriticalSection_Begin(c: *mut PyCriticalSection, op: *mut PyObject);
pub fn PyCriticalSection_End(c: *mut PyCriticalSection);
pub fn PyCriticalSection2_Begin(c: *mut PyCriticalSection2, a: *mut PyObject, b: *mut PyObject);
pub fn PyCriticalSection2_End(c: *mut PyCriticalSection2);
}
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src | lc_public_repos/langsmith-sdk/vendor/orjson/include/pyo3/pyo3-ffi/src/impl_/mod.rs | #[cfg(Py_GIL_DISABLED)]
mod atomic_c_ulong {
    //! Compile-time selection of a `std::sync::atomic` integer whose width
    //! matches C's `unsigned long` on the target platform.

    // Const-generic marker: `GetAtomicCULong<W>` implements the lookup
    // trait only for the supported widths (32 and 64 bits), so an
    // unsupported platform fails to compile rather than miscompiling.
    pub struct GetAtomicCULong<const WIDTH: usize>();

    pub trait AtomicCULongType {
        type Type;
    }
    impl AtomicCULongType for GetAtomicCULong<32> {
        type Type = std::sync::atomic::AtomicU32;
    }
    impl AtomicCULongType for GetAtomicCULong<64> {
        type Type = std::sync::atomic::AtomicU64;
    }

    // Resolves to AtomicU32 or AtomicU64 based on size_of::<c_ulong>().
    pub type TYPE =
        <GetAtomicCULong<{ std::mem::size_of::<std::os::raw::c_ulong>() * 8 }> as AtomicCULongType>::Type;
}
/// Typedef for an atomic integer to match the platform-dependent c_ulong type.
#[cfg(Py_GIL_DISABLED)]
#[doc(hidden)]
pub type AtomicCULong = atomic_c_ulong::TYPE;
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/bench/run_mem | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import lzma
import gc

import psutil

# Usage: run_mem <fixture.xz> <library>
# Parses the fixture 100 times with the chosen library and prints
# "rss_before,rss_delta,correct" (correct = stdlib round-trip agreement).
filename = sys.argv[1]
with lzma.open(filename, "r") as fileh:
    fixture = fileh.read()

proc = psutil.Process()

# Select the dumps/loads pair by library name given on the command line.
lib_name = sys.argv[2]
if lib_name == "json":
    from json import dumps, loads
elif lib_name == "orjson":
    from orjson import dumps, loads
elif lib_name == "rapidjson":
    from rapidjson import dumps, loads
elif lib_name == "simplejson":
    from simplejson import dumps, loads
elif lib_name == "ujson":
    from ujson import dumps, loads
else:
    raise NotImplementedError

gc.collect()
mem_before = proc.memory_info().rss
for _ in range(100):
    # Re-parse each pass; `val` keeps only the most recent result alive.
    val = loads(fixture)
mem_after = proc.memory_info().rss
mem_diff = mem_after - mem_before

# Correctness check against the stdlib parser via a dump/load round-trip.
from json import loads as json_loads

correct = 1 if (json_loads(fixture) == json_loads(dumps(loads(fixture)))) else 0

print(f"{mem_before},{mem_diff},{correct}")
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/bench/benchmark_dumps.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
from json import loads as json_loads
import pytest
from .data import fixtures, libraries
from .util import read_fixture_obj
@pytest.mark.parametrize("library", libraries)
@pytest.mark.parametrize("fixture", fixtures)
def test_dumps(benchmark, fixture, library):
    """Benchmark serialization of one fixture with one JSON library."""
    dumper, loader = libraries[library]
    benchmark.group = f"{fixture} serialization"
    benchmark.extra_info["lib"] = library
    data = read_fixture_obj(f"{fixture}.xz")
    # Record whether the serialized output parses back to the input.
    round_trip_ok = json_loads(dumper(data)) == data  # type: ignore
    benchmark.extra_info["correct"] = round_trip_ok
    benchmark(dumper, data)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/bench/benchmark_empty.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
from json import loads as json_loads
import pytest
from .data import libraries
@pytest.mark.parametrize("data", ["[]", "{}", '""'])
@pytest.mark.parametrize("library", libraries)
def test_empty(benchmark, data, library):
    """Benchmark deserialization of minimal JSON documents."""
    dumper, loader = libraries[library]
    # Verify a dump/load round-trip agrees with the stdlib parser.
    round_tripped = json_loads(dumper(loader(data)))  # type: ignore
    benchmark.extra_info["correct"] = round_tripped == json_loads(data)
    benchmark(loader, data)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/bench/util.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import lzma
import os
from functools import lru_cache
from pathlib import Path
from typing import Any
import orjson
dirname = os.path.join(os.path.dirname(__file__), "../data")
if hasattr(os, "sched_setaffinity"):
os.sched_setaffinity(os.getpid(), {0, 1})
@lru_cache(maxsize=None)
def read_fixture(filename: str) -> bytes:
    """Return a fixture file's raw bytes, decompressing ``.xz`` files.

    Results are cached per filename for the lifetime of the process.
    """
    path = Path(dirname, filename)
    raw = path.read_bytes()
    return lzma.decompress(raw) if path.suffix == ".xz" else raw
@lru_cache(maxsize=None)
def read_fixture_obj(filename: str) -> Any:
    """Read a fixture file and parse it as JSON (cached per filename)."""
    return orjson.loads(read_fixture(filename))
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/bench/data.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
from json import dumps as _json_dumps
from json import loads as json_loads
from rapidjson import dumps as _rapidjson_dumps
from rapidjson import loads as rapidjson_loads
from simplejson import dumps as _simplejson_dumps
from simplejson import loads as simplejson_loads
from ujson import dumps as _ujson_dumps
from ujson import loads as ujson_loads
from orjson import dumps as orjson_dumps
from orjson import loads as orjson_loads
def ujson_dumps(obj):
    # ujson returns str; encode so every library is compared as bytes.
    return _ujson_dumps(obj).encode("utf-8")
def rapidjson_dumps(obj):
    # rapidjson returns str; encode so every library is compared as bytes.
    return _rapidjson_dumps(obj).encode("utf-8")
def json_dumps(obj):
    """Serialize ``obj`` with the stdlib ``json`` module as UTF-8 bytes."""
    as_text = _json_dumps(obj)
    return as_text.encode("utf-8")
def simplejson_dumps(obj):
    # simplejson returns str; encode so every library is compared as bytes.
    return _simplejson_dumps(obj).encode("utf-8")
libraries = {
"orjson": (orjson_dumps, orjson_loads),
"ujson": (ujson_dumps, ujson_loads),
"json": (json_dumps, json_loads),
"rapidjson": (rapidjson_dumps, rapidjson_loads),
"simplejson": (simplejson_dumps, simplejson_loads),
}
fixtures = [
"canada.json",
"citm_catalog.json",
"github.json",
"twitter.json",
]
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/bench/benchmark_loads.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
from json import loads as json_loads
import pytest
from .data import fixtures, libraries
from .util import read_fixture
@pytest.mark.parametrize("fixture", fixtures)
@pytest.mark.parametrize("library", libraries)
def test_loads(benchmark, fixture, library):
    """Benchmark deserialization of one fixture file with one library."""
    dumper, loader = libraries[library]
    benchmark.group = f"{fixture} deserialization"
    benchmark.extra_info["lib"] = library
    data = read_fixture(f"{fixture}.xz")
    # Verify the dump/load round-trip agrees with the stdlib parser.
    correct = json_loads(dumper(loader(data))) == json_loads(data)  # type: ignore
    benchmark.extra_info["correct"] = correct
    benchmark(loader, data)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/bench/requirements.txt | memory-profiler
pandas; python_version<"3.13"
pytest-benchmark
pytest-random-order
python-rapidjson
seaborn; python_version<"3.13"
simplejson
tabulate
ujson
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/bench/run_func | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import lzma
import os
import gc

# Usage: run_func <fixture.xz> dumps|loads [iterations]
# Repeatedly serializes or deserializes a fixture (default 1000 passes)
# so the call can be profiled externally.
gc.disable()  # keep collector pauses out of the measurement
os.sched_setaffinity(os.getpid(), {0, 1})  # pin to two CPUs for stable timings

from orjson import dumps, loads

filename = sys.argv[1]
n = int(sys.argv[3]) if len(sys.argv) >= 4 else 1000

with lzma.open(filename, "r") as fileh:
    file_bytes = fileh.read()

if sys.argv[2] == "dumps":
    # Parse once outside the loop; only serialization is measured.
    file_obj = loads(file_bytes)
    for _ in range(n):
        dumps(file_obj)
elif sys.argv[2] == "loads":
    for _ in range(n):
        loads(file_bytes)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/bench/run_default | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import os

os.sched_setaffinity(os.getpid(), {0, 1})  # pin to two CPUs for stable timings

from orjson import dumps, OPT_SERIALIZE_NUMPY

# Benchmark of the `default=` fallback path: every element is an instance
# orjson cannot serialize natively, so `default` is invoked per element.
class Custom:
    pass

def default(_):
    return None

n = int(sys.argv[1]) if len(sys.argv) >= 2 else 10000

# 10 rows sharing the same 1000-element list of Custom instances.
obj = [[Custom()] * 1000] * 10

for _ in range(n):
    dumps(obj, default, OPT_SERIALIZE_NUMPY)
|
0 | lc_public_repos/langsmith-sdk | lc_public_repos/langsmith-sdk/_scripts/_fetch_schema.py | """Fetch and prune the Langsmith spec."""
import argparse
from pathlib import Path
import requests
import yaml
from openapi_spec_validator import validate_spec
def get_dependencies(schema, obj_name, new_components):
    """Copy schema `obj_name` plus its transitive $ref closure into `new_components`.

    Walks properties, items, and anyOf/oneOf/allOf combinators; the early
    return on already-collected names also terminates $ref cycles.
    """
    if obj_name in new_components["schemas"]:
        return

    obj_schema = schema["components"]["schemas"][obj_name]
    new_components["schemas"][obj_name] = obj_schema

    def process_schema(sub_schema):
        # Follow a direct $ref, a $ref nested under `items`, and any
        # combinator branches. (NOTE(review): $ref under
        # `additionalProperties` is not followed — confirm intentional.)
        if "$ref" in sub_schema:
            get_dependencies(schema, sub_schema["$ref"].split("/")[-1], new_components)
        else:
            if "items" in sub_schema and "$ref" in sub_schema["items"]:
                get_dependencies(
                    schema, sub_schema["items"]["$ref"].split("/")[-1], new_components
                )
            for keyword in ["anyOf", "oneOf", "allOf"]:
                if keyword in sub_schema:
                    for item in sub_schema[keyword]:
                        process_schema(item)

    if "properties" in obj_schema:
        for prop_schema in obj_schema["properties"].values():
            process_schema(prop_schema)
    if "items" in obj_schema:
        process_schema(obj_schema["items"])
    for keyword in ["allOf", "anyOf", "oneOf"]:
        if keyword in obj_schema:
            for item in obj_schema[keyword]:
                process_schema(item)
def _extract_langsmith_routes_and_properties(schema, operation_ids):
    """Prune a full OpenAPI spec down to the operations in `operation_ids`.

    Returns a new spec dict containing only the matching path operations
    plus the transitive closure of component schemas they reference
    (JSON request bodies and JSON responses), and ValidationError.
    """
    new_paths = {}
    new_components = {"schemas": {}}

    for path, methods in schema["paths"].items():
        for method, operation in methods.items():
            if operation.get("operationId") not in operation_ids:
                continue
            # Merge rather than assign a fresh dict: previously
            # `new_paths[path] = {method: operation}` discarded an earlier
            # matched method on the same path.
            new_paths.setdefault(path, {})[method] = operation

            # Collect schemas referenced by the JSON request body.
            request_body = operation.get("requestBody", {})
            request_body_content = request_body.get("content", {}).get(
                "application/json", {}
            )
            request_body_ref = request_body_content.get("schema", {}).get("$ref")
            if request_body_ref:
                schema_name = request_body_ref.split("/")[-1]
                get_dependencies(schema, schema_name, new_components)

            # Collect schemas referenced by any JSON response.
            responses = operation.get("responses", {})
            for response in responses.values():
                response_ref = (
                    response.get("content", {})
                    .get("application/json", {})
                    .get("schema", {})
                    .get("$ref")
                )
                if response_ref:
                    schema_name = response_ref.split("/")[-1]
                    get_dependencies(schema, schema_name, new_components)

    # Explicitly keep ValidationError even when only nested refs reach it.
    get_dependencies(schema, "ValidationError", new_components)

    new_schema = {
        "openapi": schema["openapi"],
        "info": schema["info"],
        "paths": new_paths,
        "components": new_components,
    }
    return new_schema
def get_langsmith_runs_schema(
    url: str = "https://web.smith.langchain.com/openapi.json",
) -> dict:
    """Download the LangSmith OpenAPI spec and prune it to the run routes.

    Raises `requests.HTTPError` on a non-2xx response instead of attempting
    to JSON-decode an error page, and bounds the request with a timeout.
    """
    operation_ids = ["create_run_runs_post", "update_run_runs__run_id__patch"]
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    openapi_schema = response.json()
    return _extract_langsmith_routes_and_properties(openapi_schema, operation_ids)
def test_openapi_specification(spec: dict):
    """Raise if `spec` is not a valid OpenAPI document.

    NOTE(review): `validate_spec` raises on an invalid spec and returns
    None otherwise, so the assert below can never fire on its own — it is
    a belt-and-braces check only.
    """
    # Validate the specification
    errors = validate_spec(spec)
    # Assert that there are no errors
    assert errors is None, f"OpenAPI validation failed: {errors}"
def main(
    out_file: str = "openapi.yaml",
    url: str = "https://web.smith.langchain.com/openapi.json",
):
    """Fetch, prune, validate, and write the LangSmith run spec as YAML.

    Writes to `<repo root>/openapi/<out_file>`.
    """
    langsmith_schema = get_langsmith_runs_schema(url=url)
    parent_dir = Path(__file__).parent.parent
    test_openapi_specification(langsmith_schema)
    with (parent_dir / "openapi" / out_file).open("w") as f:
        # Sort the schema keys so the openapi version and info come at the top
        for key in ["openapi", "info", "paths", "components"]:
            langsmith_schema[key] = langsmith_schema.pop(key)
        f.write(yaml.dump(langsmith_schema, sort_keys=False))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--url", type=str, default="https://web.smith.langchain.com/openapi.json"
)
parser.add_argument("--output", type=str, default="openapi.yaml")
args = parser.parse_args()
main(args.output, url=args.url)
|
0 | lc_public_repos/langsmith-sdk | lc_public_repos/langsmith-sdk/openapi/openapi.yaml | openapi: 3.0.2
info:
title: LangSmith
version: 0.1.0
paths:
/runs/{run_id}:
patch:
tags:
- run
summary: Update Run
description: Update a run.
operationId: update_run_runs__run_id__patch
parameters:
- required: true
schema:
title: Run Id
type: string
format: uuid
name: run_id
in: path
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/RunUpdateSchemaExtended'
required: true
responses:
'200':
description: Successful Response
content:
application/json:
schema: {}
'422':
description: Validation Error
content:
application/json:
schema:
$ref: '#/components/schemas/HTTPValidationError'
/runs:
post:
tags:
- run
summary: Create Run
description: Create a new run.
operationId: create_run_runs_post
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/RunCreateSchemaExtended'
required: true
responses:
'200':
description: Successful Response
content:
application/json:
schema: {}
'422':
description: Validation Error
content:
application/json:
schema:
$ref: '#/components/schemas/HTTPValidationError'
components:
schemas:
RunUpdateSchemaExtended:
title: RunUpdateSchemaExtended
type: object
properties:
end_time:
title: End Time
type: string
format: date-time
error:
title: Error
type: string
inputs:
title: Inputs
anyOf:
- type: object
- $ref: '#/components/schemas/CreateChatCompletionRequest'
- $ref: '#/components/schemas/CreateCompletionRequest'
outputs:
title: Outputs
anyOf:
- type: object
- $ref: '#/components/schemas/CreateChatCompletionResponse'
- $ref: '#/components/schemas/CreateCompletionResponse'
events:
title: Events
type: array
items:
type: object
CreateChatCompletionRequest:
title: CreateChatCompletionRequest
type: object
properties:
model:
title: Model
type: string
default: ''
messages:
title: Messages
type: array
items:
$ref: '#/components/schemas/ChatCompletionRequestMessage'
default: []
functions:
title: Functions
type: array
items:
$ref: '#/components/schemas/ChatCompletionFunctions'
default: []
temperature:
title: Temperature
type: number
top_p:
title: Top P
type: number
n:
title: N
type: integer
stream:
title: Stream
type: boolean
stop:
title: Stop
anyOf:
- type: string
- type: array
items:
type: string
max_tokens:
title: Max Tokens
type: integer
presence_penalty:
title: Presence Penalty
type: number
frequency_penalty:
title: Frequency Penalty
type: number
logit_bias:
title: Logit Bias
type: object
additionalProperties:
type: integer
ChatCompletionRequestMessage:
title: ChatCompletionRequestMessage
type: object
properties:
role:
title: Role
type: string
default: ''
content:
title: Content
type: string
name:
title: Name
type: string
function_call:
$ref: '#/components/schemas/ChatCompletionFunctionCall'
ChatCompletionFunctionCall:
title: ChatCompletionFunctionCall
type: object
properties:
name:
title: Name
type: string
default: ''
arguments:
title: Arguments
type: string
default: ''
ChatCompletionFunctions:
title: ChatCompletionFunctions
type: object
properties:
name:
title: Name
type: string
default: ''
description:
title: Description
type: string
default: ''
parameters:
$ref: '#/components/schemas/ChatCompletionFunctionParameters'
ChatCompletionFunctionParameters:
title: ChatCompletionFunctionParameters
type: object
properties:
type:
title: Type
type: string
default: ''
properties:
title: Properties
type: object
default: {}
CreateCompletionRequest:
title: CreateCompletionRequest
required:
- model
- prompt
type: object
properties:
model:
title: Model
anyOf:
- type: string
- type: object
additionalProperties:
anyOf:
- type: string
- type: array
items:
type: string
prompt:
title: Prompt
anyOf:
- type: string
- type: array
items:
type: string
- type: array
items:
type: integer
- type: array
items:
type: array
items:
type: integer
suffix:
title: Suffix
type: string
max_tokens:
title: Max Tokens
type: integer
temperature:
title: Temperature
type: number
top_p:
title: Top P
type: number
n:
title: N
type: integer
stream:
title: Stream
type: boolean
logprobs:
title: Logprobs
type: integer
echo:
title: Echo
type: boolean
stop:
title: Stop
anyOf:
- type: string
- type: array
items:
type: string
presence_penalty:
title: Presence Penalty
type: number
frequency_penalty:
title: Frequency Penalty
type: number
best_of:
title: Best Of
type: integer
logit_bias:
title: Logit Bias
type: object
additionalProperties:
type: integer
user:
title: User
type: string
CreateChatCompletionResponse:
title: CreateChatCompletionResponse
type: object
properties:
id:
title: Id
type: string
default: ''
object:
title: Object
type: string
default: ''
created:
title: Created
type: integer
default: 0
model:
title: Model
type: string
default: ''
choices:
title: Choices
type: array
items:
$ref: '#/components/schemas/ChatCompletionChoice'
default: []
usage:
$ref: '#/components/schemas/CompletionUsage'
ChatCompletionChoice:
title: ChatCompletionChoice
type: object
properties:
index:
title: Index
type: integer
default: 0
message:
$ref: '#/components/schemas/ChatCompletionResponseMessage'
finish_reason:
title: Finish Reason
type: string
default: ''
ChatCompletionResponseMessage:
title: ChatCompletionResponseMessage
type: object
properties:
role:
title: Role
type: string
default: ''
content:
title: Content
type: string
function_call:
$ref: '#/components/schemas/ChatCompletionFunctionCall'
CompletionUsage:
title: CompletionUsage
type: object
properties:
prompt_tokens:
title: Prompt Tokens
type: integer
default: 0
completion_tokens:
title: Completion Tokens
type: integer
default: 0
total_tokens:
title: Total Tokens
type: integer
default: 0
CreateCompletionResponse:
title: CreateCompletionResponse
type: object
properties:
id:
title: Id
type: string
object:
title: Object
type: string
created:
title: Created
type: string
model:
title: Model
type: string
choices:
title: Choices
type: array
items:
$ref: '#/components/schemas/Choice'
default: []
usage:
$ref: '#/components/schemas/CompletionUsage'
Choice:
title: Choice
type: object
properties:
text:
title: Text
type: string
default: ''
index:
title: Index
type: integer
default: 0
logprobs:
$ref: '#/components/schemas/Logprobs'
finish_reason:
title: Finish Reason
type: string
default: ''
Logprobs:
title: Logprobs
type: object
properties:
tokens:
title: Tokens
type: array
items:
type: string
default: []
token_logprobs:
title: Token Logprobs
type: array
items:
type: number
default: []
top_logprobs:
title: Top Logprobs
type: array
items:
type: object
additionalProperties:
type: integer
default: []
text_offset:
title: Text Offset
type: array
items:
type: integer
default: []
HTTPValidationError:
title: HTTPValidationError
type: object
properties:
detail:
title: Detail
type: array
items:
$ref: '#/components/schemas/ValidationError'
ValidationError:
title: ValidationError
required:
- loc
- msg
- type
type: object
properties:
loc:
title: Location
type: array
items:
anyOf:
- type: string
- type: integer
msg:
title: Message
type: string
type:
title: Error Type
type: string
RunCreateSchemaExtended:
title: RunCreateSchemaExtended
required:
- name
- run_type
type: object
properties:
name:
title: Name
type: string
inputs:
title: Inputs
anyOf:
- type: object
- $ref: '#/components/schemas/CreateChatCompletionRequest'
- $ref: '#/components/schemas/CreateCompletionRequest'
run_type:
$ref: '#/components/schemas/RunTypeEnum'
start_time:
title: Start Time
type: string
format: date-time
end_time:
title: End Time
type: string
format: date-time
extra:
title: Extra
type: object
error:
title: Error
type: string
execution_order:
title: Execution Order
minimum: 1.0
type: integer
default: 1
serialized:
title: Serialized
type: object
outputs:
title: Outputs
anyOf:
- type: object
- $ref: '#/components/schemas/CreateChatCompletionResponse'
- $ref: '#/components/schemas/CreateCompletionResponse'
parent_run_id:
title: Parent Run Id
type: string
format: uuid
manifest_id:
title: Manifest Id
type: string
format: uuid
events:
title: Events
type: array
items:
type: object
tags:
title: Tags
type: array
items:
type: string
id:
title: Id
type: string
format: uuid
session_id:
title: Session Id
type: string
format: uuid
session_name:
title: Session Name
type: string
child_runs:
title: Child Runs
type: array
items:
$ref: '#/components/schemas/RunCreateSchema'
reference_example_id:
title: Reference Example Id
type: string
format: uuid
description: Create class for a run object, with additional typehints.
RunTypeEnum:
title: RunTypeEnum
enum:
- tool
- chain
- llm
- retriever
- embedding
- prompt
- parser
type: string
description: Enum for run types.
RunCreateSchema:
title: RunCreateSchema
required:
- name
- run_type
type: object
properties:
name:
title: Name
type: string
inputs:
title: Inputs
type: object
run_type:
$ref: '#/components/schemas/RunTypeEnum'
start_time:
title: Start Time
type: string
format: date-time
end_time:
title: End Time
type: string
format: date-time
extra:
title: Extra
type: object
error:
title: Error
type: string
execution_order:
title: Execution Order
minimum: 1.0
type: integer
default: 1
serialized:
title: Serialized
type: object
outputs:
title: Outputs
type: object
parent_run_id:
title: Parent Run Id
type: string
format: uuid
manifest_id:
title: Manifest Id
type: string
format: uuid
events:
title: Events
type: array
items:
type: object
tags:
title: Tags
type: array
items:
type: string
id:
title: Id
type: string
format: uuid
session_id:
title: Session Id
type: string
format: uuid
session_name:
title: Session Name
type: string
child_runs:
title: Child Runs
type: array
items:
$ref: '#/components/schemas/RunCreateSchema'
reference_example_id:
title: Reference Example Id
type: string
format: uuid
description: Create class for a Run object.
|
0 | lc_public_repos/langsmith-sdk | lc_public_repos/langsmith-sdk/.vscode/settings.json | {
"cSpell.words": ["atee"],
"python.testing.pytestArgs": ["python"],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true,
"python.formatting.provider": "black",
"eslint.workingDirectories": [
"./js"
]
}
|
0 | lc_public_repos/langsmith-sdk | lc_public_repos/langsmith-sdk/python/Makefile | .PHONY: tests lint format build publish doctest integration_tests integration_tests_fast evals benchmark benchmark-fast
OUTPUT ?= out/benchmark.json
benchmark:
mkdir -p out
rm -f $(OUTPUT)
poetry run python -m bench -o $(OUTPUT) --rigorous
benchmark-fast:
mkdir -p out
rm -f $(OUTPUT)
poetry run python -m bench -o $(OUTPUT) --fast
PROFILE_NAME ?= output
profile-background-thread:
mkdir -p profiles
poetry run python -m cProfile -o profiles/$(PROFILE_NAME).prof bench/create_run.py
view-profile:
poetry run snakeviz profiles/${PROFILE_NAME}.prof
tests:
env \
-u LANGCHAIN_PROJECT \
-u LANGCHAIN_API_KEY \
-u LANGCHAIN_TRACING_V2 \
-u LANGSMITH_TRACING \
PYTHONDEVMODE=1 \
PYTHONASYNCIODEBUG=1 \
poetry run python -m pytest --disable-socket --allow-unix-socket -n auto --durations=10 tests/unit_tests
tests_watch:
poetry run ptw --now . -- -vv -x tests/unit_tests
integration_tests:
poetry run python -m pytest -v --durations=10 --cov=langsmith --cov-report=term-missing --cov-report=html --cov-config=.coveragerc tests/integration_tests
integration_tests_fast:
poetry run python -m pytest -n auto --durations=10 -v --cov=langsmith --cov-report=term-missing --cov-report=html --cov-config=.coveragerc tests/integration_tests
doctest:
poetry run python -m pytest -n auto --durations=10 --doctest-modules langsmith
evals:
poetry run python -m pytest tests/evaluation
lint:
poetry run ruff check .
poetry run mypy langsmith
poetry run black . --check
format:
poetry run ruff format .
poetry run ruff check . --fix
poetry run black .
build:
poetry build
publish:
poetry publish --dry-run
api_docs_build:
poetry run python docs/create_api_rst.py
cd docs && poetry run make html
poetry run python docs/scripts/custom_formatter.py docs/_build/html/
cp docs/_build/html/{reference,index}.html
open docs/_build/html/index.html
api_docs_clean:
git clean -fd ./docs/
|
0 | lc_public_repos/langsmith-sdk | lc_public_repos/langsmith-sdk/python/poetry.lock | # This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
[[package]]
name = "annotated-types"
version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
[[package]]
name = "anyio"
version = "4.5.2"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
{file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"},
{file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
trio = ["trio (>=0.26.1)"]
[[package]]
name = "attrs"
version = "24.2.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.7"
files = [
{file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"},
{file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"},
]
[package.extras]
benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "black"
version = "24.8.0"
description = "The uncompromising code formatter."
optional = false
python-versions = ">=3.8"
files = [
{file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"},
{file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"},
{file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"},
{file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"},
{file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"},
{file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"},
{file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"},
{file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"},
{file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"},
{file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"},
{file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"},
{file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"},
{file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"},
{file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"},
{file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"},
{file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"},
{file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"},
{file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"},
{file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"},
{file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"},
{file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"},
{file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"},
]
[package.dependencies]
click = ">=8.0.0"
mypy-extensions = ">=0.4.3"
packaging = ">=22.0"
pathspec = ">=0.9.0"
platformdirs = ">=2"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}
[package.extras]
colorama = ["colorama (>=0.4.3)"]
d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"]
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
uvloop = ["uvloop (>=0.15.2)"]
[[package]]
name = "certifi"
version = "2024.8.30"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
{file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"},
{file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"},
]
[[package]]
name = "charset-normalizer"
version = "3.4.0"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7.0"
files = [
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"},
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"},
{file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"},
{file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"},
{file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"},
{file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"},
{file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"},
{file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"},
{file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"},
{file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"},
{file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"},
{file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"},
{file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"},
{file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"},
{file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"},
{file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"},
{file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"},
{file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"},
{file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"},
{file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"},
{file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"},
{file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"},
{file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"},
{file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"},
{file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"},
{file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"},
{file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"},
{file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"},
{file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"},
{file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"},
{file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"},
{file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"},
{file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"},
{file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"},
{file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"},
]
[[package]]
name = "click"
version = "8.1.7"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
files = [
{file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
{file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
]
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "coverage"
version = "7.6.1"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"},
{file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"},
{file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"},
{file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"},
{file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"},
{file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"},
{file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"},
{file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"},
{file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"},
{file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"},
{file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"},
{file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"},
{file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"},
{file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"},
{file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"},
{file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"},
{file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"},
{file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"},
{file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"},
{file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"},
{file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"},
{file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"},
{file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"},
{file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"},
{file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"},
{file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"},
{file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"},
{file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"},
{file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"},
{file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"},
{file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"},
{file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"},
{file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"},
{file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"},
{file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"},
{file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"},
{file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"},
{file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"},
{file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"},
{file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"},
{file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"},
{file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"},
{file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"},
{file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"},
{file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"},
{file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"},
{file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"},
{file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"},
{file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"},
{file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"},
{file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"},
{file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"},
{file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"},
{file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"},
{file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"},
{file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"},
{file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"},
{file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"},
{file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"},
{file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"},
{file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"},
{file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"},
{file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"},
{file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"},
{file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"},
{file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"},
{file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"},
{file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"},
{file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"},
{file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"},
{file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"},
{file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"},
]
[package.dependencies]
tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}
[package.extras]
toml = ["tomli"]
[[package]]
name = "dataclasses-json"
version = "0.6.7"
description = "Easily serialize dataclasses to and from JSON."
optional = false
python-versions = "<4.0,>=3.7"
files = [
{file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"},
{file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"},
]
[package.dependencies]
marshmallow = ">=3.18.0,<4.0.0"
typing-inspect = ">=0.4.0,<1"
[[package]]
name = "distro"
version = "1.9.0"
description = "Distro - an OS platform information API"
optional = false
python-versions = ">=3.6"
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
]
[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "execnet"
version = "2.1.1"
description = "execnet: rapid multi-Python deployment"
optional = false
python-versions = ">=3.8"
files = [
{file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"},
{file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"},
]
[package.extras]
testing = ["hatch", "pre-commit", "pytest", "tox"]
[[package]]
name = "fastapi"
version = "0.115.4"
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
optional = false
python-versions = ">=3.8"
files = [
{file = "fastapi-0.115.4-py3-none-any.whl", hash = "sha256:0b504a063ffb3cf96a5e27dc1bc32c80ca743a2528574f9cdc77daa2d31b4742"},
{file = "fastapi-0.115.4.tar.gz", hash = "sha256:db653475586b091cb8b2fec2ac54a680ac6a158e07406e1abae31679e8826349"},
]
[package.dependencies]
pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0"
starlette = ">=0.40.0,<0.42.0"
typing-extensions = ">=4.8.0"
[package.extras]
all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=2.11.2)", "python-multipart (>=0.0.7)", "uvicorn[standard] (>=0.12.0)"]
[[package]]
name = "freezegun"
version = "1.5.1"
description = "Let your Python tests travel through time"
optional = false
python-versions = ">=3.7"
files = [
{file = "freezegun-1.5.1-py3-none-any.whl", hash = "sha256:bf111d7138a8abe55ab48a71755673dbaa4ab87f4cff5634a4442dfec34c15f1"},
{file = "freezegun-1.5.1.tar.gz", hash = "sha256:b29dedfcda6d5e8e083ce71b2b542753ad48cfec44037b3fc79702e2980a89e9"},
]
[package.dependencies]
python-dateutil = ">=2.7"
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "httpcore"
version = "1.0.6"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"},
{file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
version = "0.27.2"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"},
{file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
sniffio = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "idna"
version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "iniconfig"
version = "2.0.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.7"
files = [
{file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
[[package]]
name = "jiter"
version = "0.7.0"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.8"
files = [
{file = "jiter-0.7.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e14027f61101b3f5e173095d9ecf95c1cac03ffe45a849279bde1d97e559e314"},
{file = "jiter-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:979ec4711c2e37ac949561858bd42028884c9799516a923e1ff0b501ef341a4a"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:662d5d3cca58ad6af7a3c6226b641c8655de5beebcb686bfde0df0f21421aafa"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d89008fb47043a469f97ad90840b97ba54e7c3d62dc7cbb6cbf938bd0caf71d"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8b16c35c846a323ce9067170d5ab8c31ea3dbcab59c4f7608bbbf20c2c3b43f"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9e82daaa1b0a68704f9029b81e664a5a9de3e466c2cbaabcda5875f961702e7"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43a87a9f586636e1f0dd3651a91f79b491ea0d9fd7cbbf4f5c463eebdc48bda7"},
{file = "jiter-0.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2ec05b1615f96cc3e4901678bc863958611584072967d9962f9e571d60711d52"},
{file = "jiter-0.7.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a5cb97e35370bde7aa0d232a7f910f5a0fbbc96bc0a7dbaa044fd5cd6bcd7ec3"},
{file = "jiter-0.7.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb316dacaf48c8c187cea75d0d7f835f299137e6fdd13f691dff8f92914015c7"},
{file = "jiter-0.7.0-cp310-none-win32.whl", hash = "sha256:243f38eb4072763c54de95b14ad283610e0cd3bf26393870db04e520f60eebb3"},
{file = "jiter-0.7.0-cp310-none-win_amd64.whl", hash = "sha256:2221d5603c139f6764c54e37e7c6960c469cbcd76928fb10d15023ba5903f94b"},
{file = "jiter-0.7.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:91cec0ad755bd786c9f769ce8d843af955df6a8e56b17658771b2d5cb34a3ff8"},
{file = "jiter-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:feba70a28a27d962e353e978dbb6afd798e711c04cb0b4c5e77e9d3779033a1a"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9d866ec066c3616cacb8535dbda38bb1d470b17b25f0317c4540182bc886ce2"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e7a7a00b6f9f18289dd563596f97ecaba6c777501a8ba04bf98e03087bcbc60"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9aaf564094c7db8687f2660605e099f3d3e6ea5e7135498486674fcb78e29165"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a4d27e09825c1b3c7a667adb500ce8b840e8fc9f630da8454b44cdd4fb0081bb"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ca7c287da9c1d56dda88da1d08855a787dbb09a7e2bd13c66a2e288700bd7c7"},
{file = "jiter-0.7.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:db19a6d160f093cbc8cd5ea2abad420b686f6c0e5fb4f7b41941ebc6a4f83cda"},
{file = "jiter-0.7.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e46a63c7f877cf7441ffc821c28287cfb9f533ae6ed707bde15e7d4dfafa7ae"},
{file = "jiter-0.7.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7ba426fa7ff21cb119fa544b75dd3fbee6a70e55a5829709c0338d07ccd30e6d"},
{file = "jiter-0.7.0-cp311-none-win32.whl", hash = "sha256:c07f55a64912b0c7982377831210836d2ea92b7bd343fca67a32212dd72e38e0"},
{file = "jiter-0.7.0-cp311-none-win_amd64.whl", hash = "sha256:ed27b2c43e1b5f6c7fedc5c11d4d8bfa627de42d1143d87e39e2e83ddefd861a"},
{file = "jiter-0.7.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac7930bcaaeb1e229e35c91c04ed2e9f39025b86ee9fc3141706bbf6fff4aeeb"},
{file = "jiter-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:571feae3e7c901a8eedde9fd2865b0dfc1432fb15cab8c675a8444f7d11b7c5d"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8af4df8a262fa2778b68c2a03b6e9d1cb4d43d02bea6976d46be77a3a331af1"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd028d4165097a611eb0c7494d8c1f2aebd46f73ca3200f02a175a9c9a6f22f5"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6b487247c7836810091e9455efe56a52ec51bfa3a222237e1587d04d3e04527"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6d28a92f28814e1a9f2824dc11f4e17e1df1f44dc4fdeb94c5450d34bcb2602"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90443994bbafe134f0b34201dad3ebe1c769f0599004084e046fb249ad912425"},
{file = "jiter-0.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f9abf464f9faac652542ce8360cea8e68fba2b78350e8a170248f9bcc228702a"},
{file = "jiter-0.7.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db7a8d99fc5f842f7d2852f06ccaed066532292c41723e5dff670c339b649f88"},
{file = "jiter-0.7.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:15cf691ebd8693b70c94627d6b748f01e6d697d9a6e9f2bc310934fcfb7cf25e"},
{file = "jiter-0.7.0-cp312-none-win32.whl", hash = "sha256:9dcd54fa422fb66ca398bec296fed5f58e756aa0589496011cfea2abb5be38a5"},
{file = "jiter-0.7.0-cp312-none-win_amd64.whl", hash = "sha256:cc989951f73f9375b8eacd571baaa057f3d7d11b7ce6f67b9d54642e7475bfad"},
{file = "jiter-0.7.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:24cecd18df540963cd27c08ca5ce1d0179f229ff78066d9eecbe5add29361340"},
{file = "jiter-0.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d41b46236b90b043cca73785674c23d2a67d16f226394079d0953f94e765ed76"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b160db0987171365c153e406a45dcab0ee613ae3508a77bfff42515cb4ce4d6e"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1c8d91e0f0bd78602eaa081332e8ee4f512c000716f5bc54e9a037306d693a7"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:997706c683195eeff192d2e5285ce64d2a610414f37da3a3f2625dcf8517cf90"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ea52a8a0ff0229ab2920284079becd2bae0688d432fca94857ece83bb49c541"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d77449d2738cf74752bb35d75ee431af457e741124d1db5e112890023572c7c"},
{file = "jiter-0.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8203519907a1d81d6cb00902c98e27c2d0bf25ce0323c50ca594d30f5f1fbcf"},
{file = "jiter-0.7.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41d15ccc53931c822dd7f1aebf09faa3cda2d7b48a76ef304c7dbc19d1302e51"},
{file = "jiter-0.7.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:febf3179b2fabf71fbd2fd52acb8594163bb173348b388649567a548f356dbf6"},
{file = "jiter-0.7.0-cp313-none-win32.whl", hash = "sha256:4a8e2d866e7eda19f012444e01b55079d8e1c4c30346aaac4b97e80c54e2d6d3"},
{file = "jiter-0.7.0-cp313-none-win_amd64.whl", hash = "sha256:7417c2b928062c496f381fb0cb50412eee5ad1d8b53dbc0e011ce45bb2de522c"},
{file = "jiter-0.7.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9c62c737b5368e51e74960a08fe1adc807bd270227291daede78db24d5fbf556"},
{file = "jiter-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e4640722b1bef0f6e342fe4606aafaae0eb4f4be5c84355bb6867f34400f6688"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f367488c3b9453eab285424c61098faa1cab37bb49425e69c8dca34f2dfe7d69"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0cf5d42beb3514236459454e3287db53d9c4d56c4ebaa3e9d0efe81b19495129"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cc5190ea1113ee6f7252fa8a5fe5a6515422e378356c950a03bbde5cafbdbaab"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63ee47a149d698796a87abe445fc8dee21ed880f09469700c76c8d84e0d11efd"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48592c26ea72d3e71aa4bea0a93454df907d80638c3046bb0705507b6704c0d7"},
{file = "jiter-0.7.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:79fef541199bd91cfe8a74529ecccb8eaf1aca38ad899ea582ebbd4854af1e51"},
{file = "jiter-0.7.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d1ef6bb66041f2514739240568136c81b9dcc64fd14a43691c17ea793b6535c0"},
{file = "jiter-0.7.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca4d950863b1c238e315bf159466e064c98743eef3bd0ff9617e48ff63a4715"},
{file = "jiter-0.7.0-cp38-none-win32.whl", hash = "sha256:897745f230350dcedb8d1ebe53e33568d48ea122c25e6784402b6e4e88169be7"},
{file = "jiter-0.7.0-cp38-none-win_amd64.whl", hash = "sha256:b928c76a422ef3d0c85c5e98c498ce3421b313c5246199541e125b52953e1bc0"},
{file = "jiter-0.7.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c9b669ff6f8ba08270dee9ccf858d3b0203b42314a428a1676762f2d390fbb64"},
{file = "jiter-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b5be919bacd73ca93801c3042bce6e95cb9c555a45ca83617b9b6c89df03b9c2"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a282e1e8a396dabcea82d64f9d05acf7efcf81ecdd925b967020dcb0e671c103"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:17ecb1a578a56e97a043c72b463776b5ea30343125308f667fb8fce4b3796735"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b6045fa0527129218cdcd8a8b839f678219686055f31ebab35f87d354d9c36e"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:189cc4262a92e33c19d4fd24018f5890e4e6da5b2581f0059938877943f8298c"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c138414839effbf30d185e30475c6dc8a16411a1e3681e5fd4605ab1233ac67a"},
{file = "jiter-0.7.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2791604acef33da6b72d5ecf885a32384bcaf9aa1e4be32737f3b8b9588eef6a"},
{file = "jiter-0.7.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ae60ec89037a78d60bbf3d8b127f1567769c8fa24886e0abed3f622791dea478"},
{file = "jiter-0.7.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:836f03dea312967635233d826f783309b98cfd9ccc76ac776e224cfcef577862"},
{file = "jiter-0.7.0-cp39-none-win32.whl", hash = "sha256:ebc30ae2ce4bc4986e1764c404b4ea1924f926abf02ce92516485098f8545374"},
{file = "jiter-0.7.0-cp39-none-win_amd64.whl", hash = "sha256:abf596f951370c648f37aa9899deab296c42a3829736e598b0dd10b08f77a44d"},
{file = "jiter-0.7.0.tar.gz", hash = "sha256:c061d9738535497b5509f8970584f20de1e900806b239a39a9994fc191dad630"},
]
[[package]]
name = "langsmith-pyo3"
version = "0.1.0rc2"
description = ""
optional = true
python-versions = ">=3.8"
files = [
{file = "langsmith_pyo3-0.1.0rc2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6a77834cf7225b863615456b4110fcc7df3ebd41a2d6ea0b8359f7ff8a785f21"},
{file = "langsmith_pyo3-0.1.0rc2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:57a3f4a777f601305bdd8a40d103cc4e24f06fdfddb26d9e2713991e636ed26d"},
{file = "langsmith_pyo3-0.1.0rc2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:bbc771e40e78dfd02f55b81c9ad6dda94922f7feebb9193963bbb83bd8af3eae"},
{file = "langsmith_pyo3-0.1.0rc2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:108739044d80909ac60069b2dc4f0b6c4ba46ce4bf6a2cfbded2b25b67524f7c"},
{file = "langsmith_pyo3-0.1.0rc2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9df02ad699243b2e54dea4309918dbe6923bc366a89e9b5f7ad857f9ae910f0d"},
{file = "langsmith_pyo3-0.1.0rc2-cp310-none-win_amd64.whl", hash = "sha256:f4f79a3b6e8d58c2123c022a3e314064e5b170b94bde966fd352253631fa4857"},
{file = "langsmith_pyo3-0.1.0rc2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e93a45b4d08fa3f188c6d76a98ab9b3fba7d0d604b0aa5e6370ce65334c0af6a"},
{file = "langsmith_pyo3-0.1.0rc2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c98ea4804d6a5d9213c7833b6d36fa967f8201bfbc57ac9e743f9b15f455d389"},
{file = "langsmith_pyo3-0.1.0rc2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:15f32257d5182324541aa2f370acf81b797afcb14238187b50244255676570e3"},
{file = "langsmith_pyo3-0.1.0rc2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7676baa04abce61298118b8790d0743246f8617e47b97036bd734a4623160c9a"},
{file = "langsmith_pyo3-0.1.0rc2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:725c687b036b333334c394dee110e40c73db2d86551c11821f1b089e61487407"},
{file = "langsmith_pyo3-0.1.0rc2-cp311-none-win_amd64.whl", hash = "sha256:3eb3ad8804d215b9670ef6c135714ced1e6db6d5f53c335fa3c1da9cbc24fef8"},
{file = "langsmith_pyo3-0.1.0rc2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b4dd2f456686b400bb47a400ceea571fb6c6cc6757cf6a9a4d5174ffa7c188a4"},
{file = "langsmith_pyo3-0.1.0rc2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:248b1aaab324f8f535b888d6ea1fff0f5e639b21686fe772010ae2cf360b2327"},
{file = "langsmith_pyo3-0.1.0rc2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c3dcd1f8bb6951f0ef4181d74f713fcf864b86f49132228acdf8f8c877605daa"},
{file = "langsmith_pyo3-0.1.0rc2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:62ee1c0ac5079809d8fb746d4522f573e37457197aebb71965eb2672a75bff38"},
{file = "langsmith_pyo3-0.1.0rc2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:17ffbbe8a62f8d87b4d9096bddfa2c4421cb29d45043b0b09d78bed8a9b7741f"},
{file = "langsmith_pyo3-0.1.0rc2-cp312-none-win_amd64.whl", hash = "sha256:3a06377c9ac390ed76a65f62e29b88480be32739b96ed9f51b6e6f6210551202"},
{file = "langsmith_pyo3-0.1.0rc2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d66e40f56495a84d11b9d47d69421750214e2de3ba683cdbc9eb04ded11a3f66"},
{file = "langsmith_pyo3-0.1.0rc2-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:12b27b6441666a3123a6016fcf78288d9194f54e48f021b5172fe8fc58994eba"},
{file = "langsmith_pyo3-0.1.0rc2-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:6e124f0aa1142087b7a6b0d2b9f6dd82415fa64899f12e9650174957918300f4"},
{file = "langsmith_pyo3-0.1.0rc2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:46bd1c411ebbda514954020c46eb65b3a8a9378cfc153fc35a09e375fc5feead"},
{file = "langsmith_pyo3-0.1.0rc2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4d08430ba4e93ec9ac704c1b0116130d9af7cee86b7d4b3a74829b239d5d557a"},
{file = "langsmith_pyo3-0.1.0rc2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7ac4c3a138cea449d5ed5621425daf148b0ed000df4a490cfb304099e0770004"},
{file = "langsmith_pyo3-0.1.0rc2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0d9d2bef3a0098e2ff28d7135320660abdf342b857c00f5ca17c0b03870193c8"},
{file = "langsmith_pyo3-0.1.0rc2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:4b82c9b6ba9bb6fd464aaca50b2f8094aba92f2948df0e6301b8b0fc2bb46baf"},
{file = "langsmith_pyo3-0.1.0rc2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:19e70971e432661314b6196357eb92633049e2dd0bc6fba61b86aa113a09aedf"},
{file = "langsmith_pyo3-0.1.0rc2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:372d80979cd1b7d59e290ab80f9cad3d7059f5aa66c9757d522898f0d399bbed"},
{file = "langsmith_pyo3-0.1.0rc2-cp38-none-win_amd64.whl", hash = "sha256:7fbb73b1c448ac4964358c9ca1be3107bb2c0c38715343c5da7f92ed0e3ee490"},
{file = "langsmith_pyo3-0.1.0rc2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ddeca8b3ae912d090ec9fd5589e0b80cd5475c80dbc439976d3f92bcbe93da81"},
{file = "langsmith_pyo3-0.1.0rc2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6daa12978c18f4560858eac2c84d60090cd5ba7e55e657e052ba7b558f23c1d8"},
{file = "langsmith_pyo3-0.1.0rc2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:60ec1e51f674141ab96f8c2d814d42410f8163f9323f1e98bde8d26cf4676513"},
{file = "langsmith_pyo3-0.1.0rc2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2c4813d11d515386b34a827c958edabecd9ef32306800a5e7d2f12ea2d1d0943"},
{file = "langsmith_pyo3-0.1.0rc2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:11c8d34d8583d5bb009a081fbfebab8b73c1730069626940ee05644c40f77625"},
{file = "langsmith_pyo3-0.1.0rc2-cp39-none-win_amd64.whl", hash = "sha256:f341dff48be2c289c23733489e60adf7e1f005eea95ebb6275b20314fd7fb5a6"},
{file = "langsmith_pyo3-0.1.0rc2.tar.gz", hash = "sha256:30eb26aa33deca44eb9210b77d478ec2157a0cb51f96da30f87072dd5912e3ed"},
]
[[package]]
name = "marshmallow"
version = "3.22.0"
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
optional = false
python-versions = ">=3.8"
files = [
{file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"},
{file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"},
]
[package.dependencies]
packaging = ">=17.0"
[package.extras]
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"]
docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"]
tests = ["pytest", "pytz", "simplejson"]
[[package]]
name = "multidict"
version = "6.1.0"
description = "multidict implementation"
optional = false
python-versions = ">=3.8"
files = [
{file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"},
{file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"},
{file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"},
{file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"},
{file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"},
{file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"},
{file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"},
{file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"},
{file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"},
{file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"},
{file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"},
{file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"},
{file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"},
{file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"},
{file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"},
{file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"},
{file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"},
{file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"},
{file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"},
{file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"},
{file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"},
{file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"},
{file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"},
{file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"},
{file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"},
{file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"},
{file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"},
{file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"},
{file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"},
{file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"},
{file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"},
{file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"},
{file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"},
{file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"},
{file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"},
{file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"},
{file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"},
{file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"},
{file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"},
{file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"},
{file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"},
{file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"},
{file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"},
{file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"},
{file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"},
{file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"},
{file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"},
{file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"},
{file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"},
{file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"},
{file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"},
{file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"},
{file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"},
{file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"},
{file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"},
{file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"},
{file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"},
{file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"},
{file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"},
{file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"},
{file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"},
{file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"},
{file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"},
{file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"},
{file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"},
{file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"},
{file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"},
{file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"},
{file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"},
{file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"},
{file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"},
{file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"},
{file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"},
{file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"},
{file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"},
{file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"},
{file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"},
{file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"},
{file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"},
{file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"},
{file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"},
{file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"},
{file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"},
{file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"},
{file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"},
{file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"},
{file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"},
{file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"},
{file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"},
{file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"},
{file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"},
{file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"},
]
[package.dependencies]
typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""}
[[package]]
name = "multipart"
version = "1.1.0"
description = "Parser for multipart/form-data"
optional = false
python-versions = ">=3.5"
files = [
{file = "multipart-1.1.0-py3-none-any.whl", hash = "sha256:5a784677de8b49e6409e730dfe018f73c5d7aef360e44750e00f67d669b51e91"},
{file = "multipart-1.1.0.tar.gz", hash = "sha256:ee32683f5c454740cd9139e1d6057053823da0729c426f156464f81111529ba1"},
]
[package.extras]
dev = ["build", "pytest", "pytest-cov", "twine"]
[[package]]
name = "mypy"
version = "1.13.0"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
{file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
{file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"},
{file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"},
{file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"},
{file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"},
{file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"},
{file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"},
{file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"},
{file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"},
{file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"},
{file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"},
{file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"},
{file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"},
{file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"},
{file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"},
{file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"},
{file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"},
{file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"},
{file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"},
{file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"},
{file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"},
{file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"},
{file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"},
{file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"},
{file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"},
{file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"},
{file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"},
{file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"},
{file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"},
{file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"},
{file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"},
]
[package.dependencies]
mypy-extensions = ">=1.0.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = ">=4.6.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
faster-cache = ["orjson"]
install-types = ["pip"]
mypyc = ["setuptools (>=50)"]
reports = ["lxml"]
[[package]]
name = "mypy-extensions"
version = "1.0.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.5"
files = [
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
[[package]]
name = "numpy"
version = "2.0.2"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.9"
files = [
{file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"},
{file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"},
{file = "numpy-2.0.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8c5713284ce4e282544c68d1c3b2c7161d38c256d2eefc93c1d683cf47683e66"},
{file = "numpy-2.0.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:becfae3ddd30736fe1889a37f1f580e245ba79a5855bff5f2a29cb3ccc22dd7b"},
{file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2da5960c3cf0df7eafefd806d4e612c5e19358de82cb3c343631188991566ccd"},
{file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:496f71341824ed9f3d2fd36cf3ac57ae2e0165c143b55c3a035ee219413f3318"},
{file = "numpy-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a61ec659f68ae254e4d237816e33171497e978140353c0c2038d46e63282d0c8"},
{file = "numpy-2.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d731a1c6116ba289c1e9ee714b08a8ff882944d4ad631fd411106a30f083c326"},
{file = "numpy-2.0.2-cp310-cp310-win32.whl", hash = "sha256:984d96121c9f9616cd33fbd0618b7f08e0cfc9600a7ee1d6fd9b239186d19d97"},
{file = "numpy-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:c7b0be4ef08607dd04da4092faee0b86607f111d5ae68036f16cc787e250a131"},
{file = "numpy-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:49ca4decb342d66018b01932139c0961a8f9ddc7589611158cb3c27cbcf76448"},
{file = "numpy-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:11a76c372d1d37437857280aa142086476136a8c0f373b2e648ab2c8f18fb195"},
{file = "numpy-2.0.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:807ec44583fd708a21d4a11d94aedf2f4f3c3719035c76a2bbe1fe8e217bdc57"},
{file = "numpy-2.0.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8cafab480740e22f8d833acefed5cc87ce276f4ece12fdaa2e8903db2f82897a"},
{file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15f476a45e6e5a3a79d8a14e62161d27ad897381fecfa4a09ed5322f2085669"},
{file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13e689d772146140a252c3a28501da66dfecd77490b498b168b501835041f951"},
{file = "numpy-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9ea91dfb7c3d1c56a0e55657c0afb38cf1eeae4544c208dc465c3c9f3a7c09f9"},
{file = "numpy-2.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c1c9307701fec8f3f7a1e6711f9089c06e6284b3afbbcd259f7791282d660a15"},
{file = "numpy-2.0.2-cp311-cp311-win32.whl", hash = "sha256:a392a68bd329eafac5817e5aefeb39038c48b671afd242710b451e76090e81f4"},
{file = "numpy-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:286cd40ce2b7d652a6f22efdfc6d1edf879440e53e76a75955bc0c826c7e64dc"},
{file = "numpy-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:df55d490dea7934f330006d0f81e8551ba6010a5bf035a249ef61a94f21c500b"},
{file = "numpy-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8df823f570d9adf0978347d1f926b2a867d5608f434a7cff7f7908c6570dcf5e"},
{file = "numpy-2.0.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9a92ae5c14811e390f3767053ff54eaee3bf84576d99a2456391401323f4ec2c"},
{file = "numpy-2.0.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a842d573724391493a97a62ebbb8e731f8a5dcc5d285dfc99141ca15a3302d0c"},
{file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05e238064fc0610c840d1cf6a13bf63d7e391717d247f1bf0318172e759e692"},
{file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0123ffdaa88fa4ab64835dcbde75dcdf89c453c922f18dced6e27c90d1d0ec5a"},
{file = "numpy-2.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:96a55f64139912d61de9137f11bf39a55ec8faec288c75a54f93dfd39f7eb40c"},
{file = "numpy-2.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec9852fb39354b5a45a80bdab5ac02dd02b15f44b3804e9f00c556bf24b4bded"},
{file = "numpy-2.0.2-cp312-cp312-win32.whl", hash = "sha256:671bec6496f83202ed2d3c8fdc486a8fc86942f2e69ff0e986140339a63bcbe5"},
{file = "numpy-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cfd41e13fdc257aa5778496b8caa5e856dc4896d4ccf01841daee1d96465467a"},
{file = "numpy-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9059e10581ce4093f735ed23f3b9d283b9d517ff46009ddd485f1747eb22653c"},
{file = "numpy-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:423e89b23490805d2a5a96fe40ec507407b8ee786d66f7328be214f9679df6dd"},
{file = "numpy-2.0.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:2b2955fa6f11907cf7a70dab0d0755159bca87755e831e47932367fc8f2f2d0b"},
{file = "numpy-2.0.2-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:97032a27bd9d8988b9a97a8c4d2c9f2c15a81f61e2f21404d7e8ef00cb5be729"},
{file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e795a8be3ddbac43274f18588329c72939870a16cae810c2b73461c40718ab1"},
{file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b258c385842546006213344c50655ff1555a9338e2e5e02a0756dc3e803dd"},
{file = "numpy-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fec9451a7789926bcf7c2b8d187292c9f93ea30284802a0ab3f5be8ab36865d"},
{file = "numpy-2.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9189427407d88ff25ecf8f12469d4d39d35bee1db5d39fc5c168c6f088a6956d"},
{file = "numpy-2.0.2-cp39-cp39-win32.whl", hash = "sha256:905d16e0c60200656500c95b6b8dca5d109e23cb24abc701d41c02d74c6b3afa"},
{file = "numpy-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:a3f4ab0caa7f053f6797fcd4e1e25caee367db3112ef2b6ef82d749530768c73"},
{file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f0a0c6f12e07fa94133c8a67404322845220c06a9e80e85999afe727f7438b8"},
{file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:312950fdd060354350ed123c0e25a71327d3711584beaef30cdaa93320c392d4"},
{file = "numpy-2.0.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26df23238872200f63518dd2aa984cfca675d82469535dc7162dc2ee52d9dd5c"},
{file = "numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385"},
{file = "numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78"},
]
[[package]]
name = "openai"
version = "1.53.1"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.7.1"
files = [
{file = "openai-1.53.1-py3-none-any.whl", hash = "sha256:b26bc2d91eda8a9317ebecddfbd388b3698f89fa56d78672dd115a1ccc175722"},
{file = "openai-1.53.1.tar.gz", hash = "sha256:04b8df362e7e2af75c8a3bcd105a5abb3837ce883e2fa3cb8d922cb8ee3515ac"},
]
[package.dependencies]
anyio = ">=3.5.0,<5"
distro = ">=1.7.0,<2"
httpx = ">=0.23.0,<1"
jiter = ">=0.4.0,<1"
pydantic = ">=1.9.0,<3"
sniffio = "*"
tqdm = ">4"
typing-extensions = ">=4.11,<5"
[package.extras]
datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
[[package]]
name = "orjson"
version = "3.10.11"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
optional = false
python-versions = ">=3.8"
files = [
{file = "orjson-3.10.11-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6dade64687f2bd7c090281652fe18f1151292d567a9302b34c2dbb92a3872f1f"},
{file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82f07c550a6ccd2b9290849b22316a609023ed851a87ea888c0456485a7d196a"},
{file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd9a187742d3ead9df2e49240234d728c67c356516cf4db018833a86f20ec18c"},
{file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77b0fed6f209d76c1c39f032a70df2d7acf24b1812ca3e6078fd04e8972685a3"},
{file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63fc9d5fe1d4e8868f6aae547a7b8ba0a2e592929245fff61d633f4caccdcdd6"},
{file = "orjson-3.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65cd3e3bb4fbb4eddc3c1e8dce10dc0b73e808fcb875f9fab40c81903dd9323e"},
{file = "orjson-3.10.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f67c570602300c4befbda12d153113b8974a3340fdcf3d6de095ede86c06d92"},
{file = "orjson-3.10.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1f39728c7f7d766f1f5a769ce4d54b5aaa4c3f92d5b84817053cc9995b977acc"},
{file = "orjson-3.10.11-cp310-none-win32.whl", hash = "sha256:1789d9db7968d805f3d94aae2c25d04014aae3a2fa65b1443117cd462c6da647"},
{file = "orjson-3.10.11-cp310-none-win_amd64.whl", hash = "sha256:5576b1e5a53a5ba8f8df81872bb0878a112b3ebb1d392155f00f54dd86c83ff6"},
{file = "orjson-3.10.11-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1444f9cb7c14055d595de1036f74ecd6ce15f04a715e73f33bb6326c9cef01b6"},
{file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdec57fe3b4bdebcc08a946db3365630332dbe575125ff3d80a3272ebd0ddafe"},
{file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eed32f33a0ea6ef36ccc1d37f8d17f28a1d6e8eefae5928f76aff8f1df85e67"},
{file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80df27dd8697242b904f4ea54820e2d98d3f51f91e97e358fc13359721233e4b"},
{file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:705f03cee0cb797256d54de6695ef219e5bc8c8120b6654dd460848d57a9af3d"},
{file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03246774131701de8e7059b2e382597da43144a9a7400f178b2a32feafc54bd5"},
{file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8b5759063a6c940a69c728ea70d7c33583991c6982915a839c8da5f957e0103a"},
{file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:677f23e32491520eebb19c99bb34675daf5410c449c13416f7f0d93e2cf5f981"},
{file = "orjson-3.10.11-cp311-none-win32.whl", hash = "sha256:a11225d7b30468dcb099498296ffac36b4673a8398ca30fdaec1e6c20df6aa55"},
{file = "orjson-3.10.11-cp311-none-win_amd64.whl", hash = "sha256:df8c677df2f9f385fcc85ab859704045fa88d4668bc9991a527c86e710392bec"},
{file = "orjson-3.10.11-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:360a4e2c0943da7c21505e47cf6bd725588962ff1d739b99b14e2f7f3545ba51"},
{file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:496e2cb45de21c369079ef2d662670a4892c81573bcc143c4205cae98282ba97"},
{file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7dfa8db55c9792d53c5952900c6a919cfa377b4f4534c7a786484a6a4a350c19"},
{file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51f3382415747e0dbda9dade6f1e1a01a9d37f630d8c9049a8ed0e385b7a90c0"},
{file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f35a1b9f50a219f470e0e497ca30b285c9f34948d3c8160d5ad3a755d9299433"},
{file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2f3b7c5803138e67028dde33450e054c87e0703afbe730c105f1fcd873496d5"},
{file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f91d9eb554310472bd09f5347950b24442600594c2edc1421403d7610a0998fd"},
{file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dfbb2d460a855c9744bbc8e36f9c3a997c4b27d842f3d5559ed54326e6911f9b"},
{file = "orjson-3.10.11-cp312-none-win32.whl", hash = "sha256:d4a62c49c506d4d73f59514986cadebb7e8d186ad510c518f439176cf8d5359d"},
{file = "orjson-3.10.11-cp312-none-win_amd64.whl", hash = "sha256:f1eec3421a558ff7a9b010a6c7effcfa0ade65327a71bb9b02a1c3b77a247284"},
{file = "orjson-3.10.11-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c46294faa4e4d0eb73ab68f1a794d2cbf7bab33b1dda2ac2959ffb7c61591899"},
{file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52e5834d7d6e58a36846e059d00559cb9ed20410664f3ad156cd2cc239a11230"},
{file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2fc947e5350fdce548bfc94f434e8760d5cafa97fb9c495d2fef6757aa02ec0"},
{file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0efabbf839388a1dab5b72b5d3baedbd6039ac83f3b55736eb9934ea5494d258"},
{file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a3f29634260708c200c4fe148e42b4aae97d7b9fee417fbdd74f8cfc265f15b0"},
{file = "orjson-3.10.11-cp313-none-win32.whl", hash = "sha256:1a1222ffcee8a09476bbdd5d4f6f33d06d0d6642df2a3d78b7a195ca880d669b"},
{file = "orjson-3.10.11-cp313-none-win_amd64.whl", hash = "sha256:bc274ac261cc69260913b2d1610760e55d3c0801bb3457ba7b9004420b6b4270"},
{file = "orjson-3.10.11-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:19b3763e8bbf8ad797df6b6b5e0fc7c843ec2e2fc0621398534e0c6400098f87"},
{file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1be83a13312e5e58d633580c5eb8d0495ae61f180da2722f20562974188af205"},
{file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:afacfd1ab81f46dedd7f6001b6d4e8de23396e4884cd3c3436bd05defb1a6446"},
{file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb4d0bea56bba596723d73f074c420aec3b2e5d7d30698bc56e6048066bd560c"},
{file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96ed1de70fcb15d5fed529a656df29f768187628727ee2788344e8a51e1c1350"},
{file = "orjson-3.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4bfb30c891b530f3f80e801e3ad82ef150b964e5c38e1fb8482441c69c35c61c"},
{file = "orjson-3.10.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d496c74fc2b61341e3cefda7eec21b7854c5f672ee350bc55d9a4997a8a95204"},
{file = "orjson-3.10.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:655a493bac606655db9a47fe94d3d84fc7f3ad766d894197c94ccf0c5408e7d3"},
{file = "orjson-3.10.11-cp38-none-win32.whl", hash = "sha256:b9546b278c9fb5d45380f4809e11b4dd9844ca7aaf1134024503e134ed226161"},
{file = "orjson-3.10.11-cp38-none-win_amd64.whl", hash = "sha256:b592597fe551d518f42c5a2eb07422eb475aa8cfdc8c51e6da7054b836b26782"},
{file = "orjson-3.10.11-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c95f2ecafe709b4e5c733b5e2768ac569bed308623c85806c395d9cca00e08af"},
{file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80c00d4acded0c51c98754fe8218cb49cb854f0f7eb39ea4641b7f71732d2cb7"},
{file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:461311b693d3d0a060439aa669c74f3603264d4e7a08faa68c47ae5a863f352d"},
{file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52ca832f17d86a78cbab86cdc25f8c13756ebe182b6fc1a97d534051c18a08de"},
{file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c57ea78a753812f528178aa2f1c57da633754c91d2124cb28991dab4c79a54"},
{file = "orjson-3.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7fcfc6f7ca046383fb954ba528587e0f9336828b568282b27579c49f8e16aad"},
{file = "orjson-3.10.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:86b9dd983857970c29e4c71bb3e95ff085c07d3e83e7c46ebe959bac07ebd80b"},
{file = "orjson-3.10.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4d83f87582d223e54efb2242a79547611ba4ebae3af8bae1e80fa9a0af83bb7f"},
{file = "orjson-3.10.11-cp39-none-win32.whl", hash = "sha256:9fd0ad1c129bc9beb1154c2655f177620b5beaf9a11e0d10bac63ef3fce96950"},
{file = "orjson-3.10.11-cp39-none-win_amd64.whl", hash = "sha256:10f416b2a017c8bd17f325fb9dee1fb5cdd7a54e814284896b7c3f2763faa017"},
{file = "orjson-3.10.11.tar.gz", hash = "sha256:e35b6d730de6384d5b2dab5fd23f0d76fae8bbc8c353c2f78210aa5fa4beb3ef"},
]
[[package]]
name = "packaging"
version = "24.1"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
]
[[package]]
name = "pandas-stubs"
version = "2.0.3.230814"
description = "Type annotations for pandas"
optional = false
python-versions = ">=3.8"
files = [
{file = "pandas_stubs-2.0.3.230814-py3-none-any.whl", hash = "sha256:4b3dfc027d49779176b7daa031a3405f7b839bcb6e312f4b9f29fea5feec5b4f"},
{file = "pandas_stubs-2.0.3.230814.tar.gz", hash = "sha256:1d5cc09e36e3d9f9a1ed9dceae4e03eeb26d1b898dd769996925f784365c8769"},
]
[package.dependencies]
numpy = {version = ">=1.25.0", markers = "python_version >= \"3.9\""}
types-pytz = ">=2022.1.1"
[[package]]
name = "pathspec"
version = "0.12.1"
description = "Utility library for gitignore style pattern matching of file paths."
optional = false
python-versions = ">=3.8"
files = [
{file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
{file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
]
[[package]]
name = "platformdirs"
version = "4.3.6"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
optional = false
python-versions = ">=3.8"
files = [
{file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"},
{file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"},
]
[package.extras]
docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"]
test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"]
type = ["mypy (>=1.11.2)"]
[[package]]
name = "pluggy"
version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
]
[package.extras]
dev = ["pre-commit", "tox"]
testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "propcache"
version = "0.2.0"
description = "Accelerated property cache"
optional = false
python-versions = ">=3.8"
files = [
{file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"},
{file = "propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"},
{file = "propcache-0.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:33ac8f098df0585c0b53009f039dfd913b38c1d2edafed0cedcc0c32a05aa110"},
{file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e48e8875e6c13909c800fa344cd54cc4b2b0db1d5f911f840458a500fde2c2"},
{file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388f3217649d6d59292b722d940d4d2e1e6a7003259eb835724092a1cca0203a"},
{file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f571aea50ba5623c308aa146eb650eebf7dbe0fd8c5d946e28343cb3b5aad577"},
{file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dfafb44f7bb35c0c06eda6b2ab4bfd58f02729e7c4045e179f9a861b07c9850"},
{file = "propcache-0.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3ebe9a75be7ab0b7da2464a77bb27febcb4fab46a34f9288f39d74833db7f61"},
{file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d2f0d0f976985f85dfb5f3d685697ef769faa6b71993b46b295cdbbd6be8cc37"},
{file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a3dc1a4b165283bd865e8f8cb5f0c64c05001e0718ed06250d8cac9bec115b48"},
{file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e0f07b42d2a50c7dd2d8675d50f7343d998c64008f1da5fef888396b7f84630"},
{file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e63e3e1e0271f374ed489ff5ee73d4b6e7c60710e1f76af5f0e1a6117cd26394"},
{file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:56bb5c98f058a41bb58eead194b4db8c05b088c93d94d5161728515bd52b052b"},
{file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7665f04d0c7f26ff8bb534e1c65068409bf4687aa2534faf7104d7182debb336"},
{file = "propcache-0.2.0-cp310-cp310-win32.whl", hash = "sha256:7cf18abf9764746b9c8704774d8b06714bcb0a63641518a3a89c7f85cc02c2ad"},
{file = "propcache-0.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:cfac69017ef97db2438efb854edf24f5a29fd09a536ff3a992b75990720cdc99"},
{file = "propcache-0.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:63f13bf09cc3336eb04a837490b8f332e0db41da66995c9fd1ba04552e516354"},
{file = "propcache-0.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608cce1da6f2672a56b24a015b42db4ac612ee709f3d29f27a00c943d9e851de"},
{file = "propcache-0.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:466c219deee4536fbc83c08d09115249db301550625c7fef1c5563a584c9bc87"},
{file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc2db02409338bf36590aa985a461b2c96fce91f8e7e0f14c50c5fcc4f229016"},
{file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6ed8db0a556343d566a5c124ee483ae113acc9a557a807d439bcecc44e7dfbb"},
{file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91997d9cb4a325b60d4e3f20967f8eb08dfcb32b22554d5ef78e6fd1dda743a2"},
{file = "propcache-0.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c7dde9e533c0a49d802b4f3f218fa9ad0a1ce21f2c2eb80d5216565202acab4"},
{file = "propcache-0.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffcad6c564fe6b9b8916c1aefbb37a362deebf9394bd2974e9d84232e3e08504"},
{file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:97a58a28bcf63284e8b4d7b460cbee1edaab24634e82059c7b8c09e65284f178"},
{file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:945db8ee295d3af9dbdbb698cce9bbc5c59b5c3fe328bbc4387f59a8a35f998d"},
{file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:39e104da444a34830751715f45ef9fc537475ba21b7f1f5b0f4d71a3b60d7fe2"},
{file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c5ecca8f9bab618340c8e848d340baf68bcd8ad90a8ecd7a4524a81c1764b3db"},
{file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c436130cc779806bdf5d5fae0d848713105472b8566b75ff70048c47d3961c5b"},
{file = "propcache-0.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:191db28dc6dcd29d1a3e063c3be0b40688ed76434622c53a284e5427565bbd9b"},
{file = "propcache-0.2.0-cp311-cp311-win32.whl", hash = "sha256:5f2564ec89058ee7c7989a7b719115bdfe2a2fb8e7a4543b8d1c0cc4cf6478c1"},
{file = "propcache-0.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6e2e54267980349b723cff366d1e29b138b9a60fa376664a157a342689553f71"},
{file = "propcache-0.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ee7606193fb267be4b2e3b32714f2d58cad27217638db98a60f9efb5efeccc2"},
{file = "propcache-0.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:91ee8fc02ca52e24bcb77b234f22afc03288e1dafbb1f88fe24db308910c4ac7"},
{file = "propcache-0.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e900bad2a8456d00a113cad8c13343f3b1f327534e3589acc2219729237a2e8"},
{file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f52a68c21363c45297aca15561812d542f8fc683c85201df0bebe209e349f793"},
{file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e41d67757ff4fbc8ef2af99b338bfb955010444b92929e9e55a6d4dcc3c4f09"},
{file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a64e32f8bd94c105cc27f42d3b658902b5bcc947ece3c8fe7bc1b05982f60e89"},
{file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55346705687dbd7ef0d77883ab4f6fabc48232f587925bdaf95219bae072491e"},
{file = "propcache-0.2.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00181262b17e517df2cd85656fcd6b4e70946fe62cd625b9d74ac9977b64d8d9"},
{file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6994984550eaf25dd7fc7bd1b700ff45c894149341725bb4edc67f0ffa94efa4"},
{file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:56295eb1e5f3aecd516d91b00cfd8bf3a13991de5a479df9e27dd569ea23959c"},
{file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:439e76255daa0f8151d3cb325f6dd4a3e93043e6403e6491813bcaaaa8733887"},
{file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f6475a1b2ecb310c98c28d271a30df74f9dd436ee46d09236a6b750a7599ce57"},
{file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3444cdba6628accf384e349014084b1cacd866fbb88433cd9d279d90a54e0b23"},
{file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4a9d9b4d0a9b38d1c391bb4ad24aa65f306c6f01b512e10a8a34a2dc5675d348"},
{file = "propcache-0.2.0-cp312-cp312-win32.whl", hash = "sha256:69d3a98eebae99a420d4b28756c8ce6ea5a29291baf2dc9ff9414b42676f61d5"},
{file = "propcache-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ad9c9b99b05f163109466638bd30ada1722abb01bbb85c739c50b6dc11f92dc3"},
{file = "propcache-0.2.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ecddc221a077a8132cf7c747d5352a15ed763b674c0448d811f408bf803d9ad7"},
{file = "propcache-0.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0e53cb83fdd61cbd67202735e6a6687a7b491c8742dfc39c9e01e80354956763"},
{file = "propcache-0.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92fe151145a990c22cbccf9ae15cae8ae9eddabfc949a219c9f667877e40853d"},
{file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6a21ef516d36909931a2967621eecb256018aeb11fc48656e3257e73e2e247a"},
{file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f88a4095e913f98988f5b338c1d4d5d07dbb0b6bad19892fd447484e483ba6b"},
{file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a5b3bb545ead161be780ee85a2b54fdf7092815995661947812dde94a40f6fb"},
{file = "propcache-0.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67aeb72e0f482709991aa91345a831d0b707d16b0257e8ef88a2ad246a7280bf"},
{file = "propcache-0.2.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c997f8c44ec9b9b0bcbf2d422cc00a1d9b9c681f56efa6ca149a941e5560da2"},
{file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a66df3d4992bc1d725b9aa803e8c5a66c010c65c741ad901e260ece77f58d2f"},
{file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:3ebbcf2a07621f29638799828b8d8668c421bfb94c6cb04269130d8de4fb7136"},
{file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1235c01ddaa80da8235741e80815ce381c5267f96cc49b1477fdcf8c047ef325"},
{file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3947483a381259c06921612550867b37d22e1df6d6d7e8361264b6d037595f44"},
{file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d5bed7f9805cc29c780f3aee05de3262ee7ce1f47083cfe9f77471e9d6777e83"},
{file = "propcache-0.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4a91d44379f45f5e540971d41e4626dacd7f01004826a18cb048e7da7e96544"},
{file = "propcache-0.2.0-cp313-cp313-win32.whl", hash = "sha256:f902804113e032e2cdf8c71015651c97af6418363bea8d78dc0911d56c335032"},
{file = "propcache-0.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:8f188cfcc64fb1266f4684206c9de0e80f54622c3f22a910cbd200478aeae61e"},
{file = "propcache-0.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:53d1bd3f979ed529f0805dd35ddaca330f80a9a6d90bc0121d2ff398f8ed8861"},
{file = "propcache-0.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:83928404adf8fb3d26793665633ea79b7361efa0287dfbd372a7e74311d51ee6"},
{file = "propcache-0.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77a86c261679ea5f3896ec060be9dc8e365788248cc1e049632a1be682442063"},
{file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:218db2a3c297a3768c11a34812e63b3ac1c3234c3a086def9c0fee50d35add1f"},
{file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7735e82e3498c27bcb2d17cb65d62c14f1100b71723b68362872bca7d0913d90"},
{file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:20a617c776f520c3875cf4511e0d1db847a076d720714ae35ffe0df3e440be68"},
{file = "propcache-0.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b69535c870670c9f9b14a75d28baa32221d06f6b6fa6f77a0a13c5a7b0a5b9"},
{file = "propcache-0.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4569158070180c3855e9c0791c56be3ceeb192defa2cdf6a3f39e54319e56b89"},
{file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:db47514ffdbd91ccdc7e6f8407aac4ee94cc871b15b577c1c324236b013ddd04"},
{file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:2a60ad3e2553a74168d275a0ef35e8c0a965448ffbc3b300ab3a5bb9956c2162"},
{file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:662dd62358bdeaca0aee5761de8727cfd6861432e3bb828dc2a693aa0471a563"},
{file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:25a1f88b471b3bc911d18b935ecb7115dff3a192b6fef46f0bfaf71ff4f12418"},
{file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:f60f0ac7005b9f5a6091009b09a419ace1610e163fa5deaba5ce3484341840e7"},
{file = "propcache-0.2.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:74acd6e291f885678631b7ebc85d2d4aec458dd849b8c841b57ef04047833bed"},
{file = "propcache-0.2.0-cp38-cp38-win32.whl", hash = "sha256:d9b6ddac6408194e934002a69bcaadbc88c10b5f38fb9307779d1c629181815d"},
{file = "propcache-0.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:676135dcf3262c9c5081cc8f19ad55c8a64e3f7282a21266d05544450bffc3a5"},
{file = "propcache-0.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:25c8d773a62ce0451b020c7b29a35cfbc05de8b291163a7a0f3b7904f27253e6"},
{file = "propcache-0.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:375a12d7556d462dc64d70475a9ee5982465fbb3d2b364f16b86ba9135793638"},
{file = "propcache-0.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1ec43d76b9677637a89d6ab86e1fef70d739217fefa208c65352ecf0282be957"},
{file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f45eec587dafd4b2d41ac189c2156461ebd0c1082d2fe7013571598abb8505d1"},
{file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc092ba439d91df90aea38168e11f75c655880c12782facf5cf9c00f3d42b562"},
{file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa1076244f54bb76e65e22cb6910365779d5c3d71d1f18b275f1dfc7b0d71b4d"},
{file = "propcache-0.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:682a7c79a2fbf40f5dbb1eb6bfe2cd865376deeac65acf9beb607505dced9e12"},
{file = "propcache-0.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e40876731f99b6f3c897b66b803c9e1c07a989b366c6b5b475fafd1f7ba3fb8"},
{file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:363ea8cd3c5cb6679f1c2f5f1f9669587361c062e4899fce56758efa928728f8"},
{file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:140fbf08ab3588b3468932974a9331aff43c0ab8a2ec2c608b6d7d1756dbb6cb"},
{file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e70fac33e8b4ac63dfc4c956fd7d85a0b1139adcfc0d964ce288b7c527537fea"},
{file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b33d7a286c0dc1a15f5fc864cc48ae92a846df287ceac2dd499926c3801054a6"},
{file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f6d5749fdd33d90e34c2efb174c7e236829147a2713334d708746e94c4bde40d"},
{file = "propcache-0.2.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22aa8f2272d81d9317ff5756bb108021a056805ce63dd3630e27d042c8092798"},
{file = "propcache-0.2.0-cp39-cp39-win32.whl", hash = "sha256:73e4b40ea0eda421b115248d7e79b59214411109a5bc47d0d48e4c73e3b8fcf9"},
{file = "propcache-0.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:9517d5e9e0731957468c29dbfd0f976736a0e55afaea843726e887f36fe017df"},
{file = "propcache-0.2.0-py3-none-any.whl", hash = "sha256:2ccc28197af5313706511fab3a8b66dcd6da067a1331372c82ea1cb74285e036"},
{file = "propcache-0.2.0.tar.gz", hash = "sha256:df81779732feb9d01e5d513fad0122efb3d53bbc75f61b2a4f29a020bc985e70"},
]
[[package]]
name = "psutil"
version = "5.9.8"
description = "Cross-platform lib for process and system monitoring in Python."
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
files = [
{file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"},
{file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"},
{file = "psutil-5.9.8-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7"},
{file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36"},
{file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d"},
{file = "psutil-5.9.8-cp27-none-win32.whl", hash = "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e"},
{file = "psutil-5.9.8-cp27-none-win_amd64.whl", hash = "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631"},
{file = "psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81"},
{file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421"},
{file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4"},
{file = "psutil-5.9.8-cp36-cp36m-win32.whl", hash = "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee"},
{file = "psutil-5.9.8-cp36-cp36m-win_amd64.whl", hash = "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2"},
{file = "psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0"},
{file = "psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf"},
{file = "psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"},
{file = "psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c"},
]
[package.extras]
test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
[[package]]
name = "py-spy"
version = "0.3.14"
description = "Sampling profiler for Python programs"
optional = false
python-versions = "*"
files = [
{file = "py_spy-0.3.14-py2.py3-none-macosx_10_7_x86_64.whl", hash = "sha256:5b342cc5feb8d160d57a7ff308de153f6be68dcf506ad02b4d67065f2bae7f45"},
{file = "py_spy-0.3.14-py2.py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:fe7efe6c91f723442259d428bf1f9ddb9c1679828866b353d539345ca40d9dd2"},
{file = "py_spy-0.3.14-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590905447241d789d9de36cff9f52067b6f18d8b5e9fb399242041568d414461"},
{file = "py_spy-0.3.14-py2.py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fd6211fe7f587b3532ba9d300784326d9a6f2b890af7bf6fff21a029ebbc812b"},
{file = "py_spy-0.3.14-py2.py3-none-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3e8e48032e71c94c3dd51694c39e762e4bbfec250df5bf514adcdd64e79371e0"},
{file = "py_spy-0.3.14-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f59b0b52e56ba9566305236375e6fc68888261d0d36b5addbe3cf85affbefc0e"},
{file = "py_spy-0.3.14-py2.py3-none-win_amd64.whl", hash = "sha256:8f5b311d09f3a8e33dbd0d44fc6e37b715e8e0c7efefafcda8bfd63b31ab5a31"},
]
[[package]]
name = "pydantic"
version = "2.9.2"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"},
{file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.23.4"
typing-extensions = [
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
]
[package.extras]
email = ["email-validator (>=2.0.0)"]
timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
version = "2.23.4"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"},
{file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"},
{file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"},
{file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"},
{file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"},
{file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"},
{file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"},
{file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"},
{file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"},
{file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"},
{file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"},
{file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"},
{file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"},
{file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"},
{file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"},
{file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"},
{file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"},
{file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"},
{file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"},
{file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"},
{file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"},
{file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"},
{file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"},
{file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"},
{file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"},
{file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"},
{file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"},
{file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"},
{file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"},
{file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"},
{file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"},
{file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"},
{file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"},
{file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"},
{file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"},
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"},
{file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"},
{file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"},
{file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"},
{file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"},
{file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"},
{file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"},
{file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"},
{file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"},
{file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"},
]
[package.dependencies]
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pyperf"
version = "2.8.0"
description = "Python module to run and analyze benchmarks"
optional = false
python-versions = ">=3.7"
files = [
{file = "pyperf-2.8.0-py3-none-any.whl", hash = "sha256:1a775b5a09882f18bf876430ef78e07646f773f50774546f5f6a8b34d60e3968"},
{file = "pyperf-2.8.0.tar.gz", hash = "sha256:b30a20465819daf102b6543b512f6799a5a879ff2a123981e6cd732d0e6a7a79"},
]
[package.dependencies]
psutil = ">=5.9.0"
[package.extras]
dev = ["importlib-metadata", "tox"]
[[package]]
name = "pytest"
version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
]
[package.dependencies]
colorama = {version = "*", markers = "sys_platform == \"win32\""}
exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
iniconfig = "*"
packaging = "*"
pluggy = ">=0.12,<2.0"
tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
[package.extras]
testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-asyncio"
version = "0.21.2"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest_asyncio-0.21.2-py3-none-any.whl", hash = "sha256:ab664c88bb7998f711d8039cacd4884da6430886ae8bbd4eded552ed2004f16b"},
{file = "pytest_asyncio-0.21.2.tar.gz", hash = "sha256:d67738fc232b94b326b9d060750beb16e0074210b98dd8b58a5239fa2a154f45"},
]
[package.dependencies]
pytest = ">=7.0.0"
[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
[[package]]
name = "pytest-cov"
version = "4.1.0"
description = "Pytest plugin for measuring coverage."
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"},
{file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"},
]
[package.dependencies]
coverage = {version = ">=5.2.1", extras = ["toml"]}
pytest = ">=4.6"
[package.extras]
testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"]
[[package]]
name = "pytest-rerunfailures"
version = "14.0"
description = "pytest plugin to re-run tests to eliminate flaky failures"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest-rerunfailures-14.0.tar.gz", hash = "sha256:4a400bcbcd3c7a4ad151ab8afac123d90eca3abe27f98725dc4d9702887d2e92"},
{file = "pytest_rerunfailures-14.0-py3-none-any.whl", hash = "sha256:4197bdd2eaeffdbf50b5ea6e7236f47ff0e44d1def8dae08e409f536d84e7b32"},
]
[package.dependencies]
packaging = ">=17.1"
pytest = ">=7.2"
[[package]]
name = "pytest-socket"
version = "0.7.0"
description = "Pytest Plugin to disable socket calls during tests"
optional = false
python-versions = ">=3.8,<4.0"
files = [
{file = "pytest_socket-0.7.0-py3-none-any.whl", hash = "sha256:7e0f4642177d55d317bbd58fc68c6bd9048d6eadb2d46a89307fa9221336ce45"},
{file = "pytest_socket-0.7.0.tar.gz", hash = "sha256:71ab048cbbcb085c15a4423b73b619a8b35d6a307f46f78ea46be51b1b7e11b3"},
]
[package.dependencies]
pytest = ">=6.2.5"
[[package]]
name = "pytest-subtests"
version = "0.11.0"
description = "unittest subTest() support and subtests fixture"
optional = false
python-versions = ">=3.7"
files = [
{file = "pytest-subtests-0.11.0.tar.gz", hash = "sha256:51865c88457545f51fb72011942f0a3c6901ee9e24cbfb6d1b9dc1348bafbe37"},
{file = "pytest_subtests-0.11.0-py3-none-any.whl", hash = "sha256:453389984952eec85ab0ce0c4f026337153df79587048271c7fd0f49119c07e4"},
]
[package.dependencies]
attrs = ">=19.2.0"
pytest = ">=7.0"
[[package]]
name = "pytest-watcher"
version = "0.3.5"
description = "Automatically rerun your tests on file modifications"
optional = false
python-versions = ">=3.7.0,<4.0.0"
files = [
{file = "pytest_watcher-0.3.5-py3-none-any.whl", hash = "sha256:af00ca52c7be22dc34c0fd3d7ffef99057207a73b05dc5161fe3b2fe91f58130"},
{file = "pytest_watcher-0.3.5.tar.gz", hash = "sha256:8896152460ba2b1a8200c12117c6611008ec96c8b2d811f0a05ab8a82b043ff8"},
]
[package.dependencies]
tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""}
watchdog = ">=2.0.0"
[[package]]
name = "pytest-xdist"
version = "3.6.1"
description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
optional = false
python-versions = ">=3.8"
files = [
{file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
{file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
]
[package.dependencies]
execnet = ">=2.1"
pytest = ">=7.0.0"
[package.extras]
psutil = ["psutil (>=3.0)"]
setproctitle = ["setproctitle"]
testing = ["filelock"]
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
]
[package.dependencies]
six = ">=1.5"
[[package]]
name = "pyyaml"
version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
{file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
{file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
{file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
{file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
{file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
{file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
{file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
{file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
{file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
{file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
{file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
{file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
{file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
{file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
{file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
{file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
{file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
{file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
{file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
{file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
{file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
{file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
{file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
{file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
{file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
{file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
{file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
{file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
{file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
{file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
{file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
{file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
{file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
{file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
]
[[package]]
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
]
[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"
[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "requests-toolbelt"
version = "1.0.0"
description = "A utility belt for advanced users of python-requests"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
{file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"},
{file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"},
]
[package.dependencies]
requests = ">=2.0.1,<3.0.0"
[[package]]
name = "ruff"
version = "0.6.9"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"},
{file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"},
{file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"},
{file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"},
{file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"},
{file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"},
{file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"},
]
[[package]]
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
{file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
{file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
]
[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "starlette"
version = "0.41.2"
description = "The little ASGI library that shines."
optional = false
python-versions = ">=3.8"
files = [
{file = "starlette-0.41.2-py3-none-any.whl", hash = "sha256:fbc189474b4731cf30fcef52f18a8d070e3f3b46c6a04c97579e85e6ffca942d"},
{file = "starlette-0.41.2.tar.gz", hash = "sha256:9834fd799d1a87fd346deb76158668cfa0b0d56f85caefe8268e2d97c3468b62"},
]
[package.dependencies]
anyio = ">=3.4.0,<5"
typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
[package.extras]
full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"]
[[package]]
name = "tomli"
version = "2.0.2"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
{file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
{file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
]
[[package]]
name = "tqdm"
version = "4.66.6"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
files = [
{file = "tqdm-4.66.6-py3-none-any.whl", hash = "sha256:223e8b5359c2efc4b30555531f09e9f2f3589bcd7fdd389271191031b49b7a63"},
{file = "tqdm-4.66.6.tar.gz", hash = "sha256:4bdd694238bef1485ce839d67967ab50af8f9272aab687c0d7702a01da0be090"},
]
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[package.extras]
dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
notebook = ["ipywidgets (>=6)"]
slack = ["slack-sdk"]
telegram = ["requests"]
[[package]]
name = "types-psutil"
version = "5.9.5.20240516"
description = "Typing stubs for psutil"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-psutil-5.9.5.20240516.tar.gz", hash = "sha256:bb296f59fc56458891d0feb1994717e548a1bcf89936a2877df8792b822b4696"},
{file = "types_psutil-5.9.5.20240516-py3-none-any.whl", hash = "sha256:83146ded949a10167d9895e567b3b71e53ebc5e23fd8363eab62b3c76cce7b89"},
]
[[package]]
name = "types-pytz"
version = "2024.2.0.20241003"
description = "Typing stubs for pytz"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-pytz-2024.2.0.20241003.tar.gz", hash = "sha256:575dc38f385a922a212bac00a7d6d2e16e141132a3c955078f4a4fd13ed6cb44"},
{file = "types_pytz-2024.2.0.20241003-py3-none-any.whl", hash = "sha256:3e22df1336c0c6ad1d29163c8fda82736909eb977281cb823c57f8bae07118b7"},
]
[[package]]
name = "types-pyyaml"
version = "6.0.12.20240917"
description = "Typing stubs for PyYAML"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"},
{file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"},
]
[[package]]
name = "types-requests"
version = "2.31.0.6"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.7"
files = [
{file = "types-requests-2.31.0.6.tar.gz", hash = "sha256:cd74ce3b53c461f1228a9b783929ac73a666658f223e28ed29753771477b3bd0"},
{file = "types_requests-2.31.0.6-py3-none-any.whl", hash = "sha256:a2db9cb228a81da8348b49ad6db3f5519452dd20a9c1e1a868c83c5fe88fd1a9"},
]
[package.dependencies]
types-urllib3 = "*"
[[package]]
name = "types-requests"
version = "2.32.0.20241016"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"},
{file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"},
]
[package.dependencies]
urllib3 = ">=2"
[[package]]
name = "types-tqdm"
version = "4.66.0.20240417"
description = "Typing stubs for tqdm"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-tqdm-4.66.0.20240417.tar.gz", hash = "sha256:16dce9ef522ea8d40e4f5b8d84dd8a1166eefc13ceee7a7e158bf0f1a1421a31"},
{file = "types_tqdm-4.66.0.20240417-py3-none-any.whl", hash = "sha256:248aef1f9986b7b8c2c12b3cb4399fc17dba0a29e7e3f3f9cd704babb879383d"},
]
[[package]]
name = "types-urllib3"
version = "1.26.25.14"
description = "Typing stubs for urllib3"
optional = false
python-versions = "*"
files = [
{file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"},
{file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"},
]
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
name = "typing-inspect"
version = "0.9.0"
description = "Runtime inspection utilities for typing module."
optional = false
python-versions = "*"
files = [
{file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"},
{file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"},
]
[package.dependencies]
mypy-extensions = ">=0.3.0"
typing-extensions = ">=3.7.4"
[[package]]
name = "urllib3"
version = "1.26.20"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
{file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"},
{file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"},
]
[package.extras]
brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
name = "urllib3"
version = "2.2.3"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.8"
files = [
{file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
{file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
]
[package.extras]
brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "uvicorn"
version = "0.29.0"
description = "The lightning-fast ASGI server."
optional = false
python-versions = ">=3.8"
files = [
{file = "uvicorn-0.29.0-py3-none-any.whl", hash = "sha256:2c2aac7ff4f4365c206fd773a39bf4ebd1047c238f8b8268ad996829323473de"},
{file = "uvicorn-0.29.0.tar.gz", hash = "sha256:6a69214c0b6a087462412670b3ef21224fa48cae0e452b5883e8e8bdfdd11dd0"},
]
[package.dependencies]
click = ">=7.0"
h11 = ">=0.8"
typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
[package.extras]
standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"]
[[package]]
name = "vcrpy"
version = "6.0.2"
description = "Automatically mock your HTTP interactions to simplify and speed up testing"
optional = false
python-versions = ">=3.8"
files = [
{file = "vcrpy-6.0.2-py2.py3-none-any.whl", hash = "sha256:40370223861181bc76a5e5d4b743a95058bb1ad516c3c08570316ab592f56cad"},
{file = "vcrpy-6.0.2.tar.gz", hash = "sha256:88e13d9111846745898411dbc74a75ce85870af96dd320d75f1ee33158addc09"},
]
[package.dependencies]
PyYAML = "*"
urllib3 = [
{version = "*", markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\""},
{version = "<2", markers = "platform_python_implementation == \"PyPy\" or python_version < \"3.10\""},
]
wrapt = "*"
yarl = "*"
[package.extras]
tests = ["Werkzeug (==2.0.3)", "aiohttp", "boto3", "httplib2", "httpx", "pytest", "pytest-aiohttp", "pytest-asyncio", "pytest-cov", "pytest-httpbin", "requests (>=2.22.0)", "tornado", "urllib3"]
[[package]]
name = "watchdog"
version = "4.0.2"
description = "Filesystem events monitoring"
optional = false
python-versions = ">=3.8"
files = [
{file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22"},
{file = "watchdog-4.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1"},
{file = "watchdog-4.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503"},
{file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930"},
{file = "watchdog-4.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b"},
{file = "watchdog-4.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef"},
{file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a"},
{file = "watchdog-4.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29"},
{file = "watchdog-4.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a"},
{file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b"},
{file = "watchdog-4.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d"},
{file = "watchdog-4.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7"},
{file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040"},
{file = "watchdog-4.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7"},
{file = "watchdog-4.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4"},
{file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9"},
{file = "watchdog-4.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578"},
{file = "watchdog-4.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b"},
{file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa"},
{file = "watchdog-4.0.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3"},
{file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508"},
{file = "watchdog-4.0.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee"},
{file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1"},
{file = "watchdog-4.0.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e"},
{file = "watchdog-4.0.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83"},
{file = "watchdog-4.0.2-py3-none-manylinux2014_armv7l.whl", hash = "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c"},
{file = "watchdog-4.0.2-py3-none-manylinux2014_i686.whl", hash = "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a"},
{file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64.whl", hash = "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73"},
{file = "watchdog-4.0.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc"},
{file = "watchdog-4.0.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757"},
{file = "watchdog-4.0.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8"},
{file = "watchdog-4.0.2-py3-none-win32.whl", hash = "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19"},
{file = "watchdog-4.0.2-py3-none-win_amd64.whl", hash = "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b"},
{file = "watchdog-4.0.2-py3-none-win_ia64.whl", hash = "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c"},
{file = "watchdog-4.0.2.tar.gz", hash = "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270"},
]
[package.extras]
watchmedo = ["PyYAML (>=3.10)"]
[[package]]
name = "wrapt"
version = "1.16.0"
description = "Module for decorators, wrappers and monkey patching."
optional = false
python-versions = ">=3.6"
files = [
{file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"},
{file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"},
{file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"},
{file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"},
{file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"},
{file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"},
{file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"},
{file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"},
{file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"},
{file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"},
{file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"},
{file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"},
{file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"},
{file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"},
{file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"},
{file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"},
{file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"},
{file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"},
{file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"},
{file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"},
{file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"},
{file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"},
{file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"},
{file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"},
{file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"},
{file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"},
{file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"},
{file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"},
{file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"},
{file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"},
{file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"},
{file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"},
{file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"},
{file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"},
{file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"},
{file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"},
{file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"},
{file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"},
{file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"},
{file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"},
{file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"},
{file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"},
{file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"},
{file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"},
{file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"},
{file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"},
{file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"},
{file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"},
{file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"},
{file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"},
{file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"},
{file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"},
{file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"},
{file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"},
{file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"},
{file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"},
{file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"},
{file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"},
{file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"},
{file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"},
{file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"},
{file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"},
{file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"},
{file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"},
{file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"},
{file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"},
{file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"},
{file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"},
{file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"},
{file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"},
]
[[package]]
name = "yarl"
version = "1.15.2"
description = "Yet another URL library"
optional = false
python-versions = ">=3.8"
files = [
{file = "yarl-1.15.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e4ee8b8639070ff246ad3649294336b06db37a94bdea0d09ea491603e0be73b8"},
{file = "yarl-1.15.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a7cf963a357c5f00cb55b1955df8bbe68d2f2f65de065160a1c26b85a1e44172"},
{file = "yarl-1.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:43ebdcc120e2ca679dba01a779333a8ea76b50547b55e812b8b92818d604662c"},
{file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3433da95b51a75692dcf6cc8117a31410447c75a9a8187888f02ad45c0a86c50"},
{file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38d0124fa992dbacd0c48b1b755d3ee0a9f924f427f95b0ef376556a24debf01"},
{file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ded1b1803151dd0f20a8945508786d57c2f97a50289b16f2629f85433e546d47"},
{file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace4cad790f3bf872c082366c9edd7f8f8f77afe3992b134cfc810332206884f"},
{file = "yarl-1.15.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c77494a2f2282d9bbbbcab7c227a4d1b4bb829875c96251f66fb5f3bae4fb053"},
{file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b7f227ca6db5a9fda0a2b935a2ea34a7267589ffc63c8045f0e4edb8d8dcf956"},
{file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:31561a5b4d8dbef1559b3600b045607cf804bae040f64b5f5bca77da38084a8a"},
{file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3e52474256a7db9dcf3c5f4ca0b300fdea6c21cca0148c8891d03a025649d935"},
{file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0e1af74a9529a1137c67c887ed9cde62cff53aa4d84a3adbec329f9ec47a3936"},
{file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:15c87339490100c63472a76d87fe7097a0835c705eb5ae79fd96e343473629ed"},
{file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:74abb8709ea54cc483c4fb57fb17bb66f8e0f04438cff6ded322074dbd17c7ec"},
{file = "yarl-1.15.2-cp310-cp310-win32.whl", hash = "sha256:ffd591e22b22f9cb48e472529db6a47203c41c2c5911ff0a52e85723196c0d75"},
{file = "yarl-1.15.2-cp310-cp310-win_amd64.whl", hash = "sha256:1695497bb2a02a6de60064c9f077a4ae9c25c73624e0d43e3aa9d16d983073c2"},
{file = "yarl-1.15.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9fcda20b2de7042cc35cf911702fa3d8311bd40055a14446c1e62403684afdc5"},
{file = "yarl-1.15.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0545de8c688fbbf3088f9e8b801157923be4bf8e7b03e97c2ecd4dfa39e48e0e"},
{file = "yarl-1.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fbda058a9a68bec347962595f50546a8a4a34fd7b0654a7b9697917dc2bf810d"},
{file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1ac2bc069f4a458634c26b101c2341b18da85cb96afe0015990507efec2e417"},
{file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd126498171f752dd85737ab1544329a4520c53eed3997f9b08aefbafb1cc53b"},
{file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3db817b4e95eb05c362e3b45dafe7144b18603e1211f4a5b36eb9522ecc62bcf"},
{file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:076b1ed2ac819933895b1a000904f62d615fe4533a5cf3e052ff9a1da560575c"},
{file = "yarl-1.15.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f8cfd847e6b9ecf9f2f2531c8427035f291ec286c0a4944b0a9fce58c6446046"},
{file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:32b66be100ac5739065496c74c4b7f3015cef792c3174982809274d7e51b3e04"},
{file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:34a2d76a1984cac04ff8b1bfc939ec9dc0914821264d4a9c8fd0ed6aa8d4cfd2"},
{file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0afad2cd484908f472c8fe2e8ef499facee54a0a6978be0e0cff67b1254fd747"},
{file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c68e820879ff39992c7f148113b46efcd6ec765a4865581f2902b3c43a5f4bbb"},
{file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:98f68df80ec6ca3015186b2677c208c096d646ef37bbf8b49764ab4a38183931"},
{file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c56ec1eacd0a5d35b8a29f468659c47f4fe61b2cab948ca756c39b7617f0aa5"},
{file = "yarl-1.15.2-cp311-cp311-win32.whl", hash = "sha256:eedc3f247ee7b3808ea07205f3e7d7879bc19ad3e6222195cd5fbf9988853e4d"},
{file = "yarl-1.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:0ccaa1bc98751fbfcf53dc8dfdb90d96e98838010fc254180dd6707a6e8bb179"},
{file = "yarl-1.15.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:82d5161e8cb8f36ec778fd7ac4d740415d84030f5b9ef8fe4da54784a1f46c94"},
{file = "yarl-1.15.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fa2bea05ff0a8fb4d8124498e00e02398f06d23cdadd0fe027d84a3f7afde31e"},
{file = "yarl-1.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99e12d2bf587b44deb74e0d6170fec37adb489964dbca656ec41a7cd8f2ff178"},
{file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:243fbbbf003754fe41b5bdf10ce1e7f80bcc70732b5b54222c124d6b4c2ab31c"},
{file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:856b7f1a7b98a8c31823285786bd566cf06226ac4f38b3ef462f593c608a9bd6"},
{file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:553dad9af802a9ad1a6525e7528152a015b85fb8dbf764ebfc755c695f488367"},
{file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30c3ff305f6e06650a761c4393666f77384f1cc6c5c0251965d6bfa5fbc88f7f"},
{file = "yarl-1.15.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:353665775be69bbfc6d54c8d134bfc533e332149faeddd631b0bc79df0897f46"},
{file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f4fe99ce44128c71233d0d72152db31ca119711dfc5f2c82385ad611d8d7f897"},
{file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:9c1e3ff4b89cdd2e1a24c214f141e848b9e0451f08d7d4963cb4108d4d798f1f"},
{file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:711bdfae4e699a6d4f371137cbe9e740dc958530cb920eb6f43ff9551e17cfbc"},
{file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4388c72174868884f76affcdd3656544c426407e0043c89b684d22fb265e04a5"},
{file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f0e1844ad47c7bd5d6fa784f1d4accc5f4168b48999303a868fe0f8597bde715"},
{file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a5cafb02cf097a82d74403f7e0b6b9df3ffbfe8edf9415ea816314711764a27b"},
{file = "yarl-1.15.2-cp312-cp312-win32.whl", hash = "sha256:156ececdf636143f508770bf8a3a0498de64da5abd890c7dbb42ca9e3b6c05b8"},
{file = "yarl-1.15.2-cp312-cp312-win_amd64.whl", hash = "sha256:435aca062444a7f0c884861d2e3ea79883bd1cd19d0a381928b69ae1b85bc51d"},
{file = "yarl-1.15.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:416f2e3beaeae81e2f7a45dc711258be5bdc79c940a9a270b266c0bec038fb84"},
{file = "yarl-1.15.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:173563f3696124372831007e3d4b9821746964a95968628f7075d9231ac6bb33"},
{file = "yarl-1.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9ce2e0f6123a60bd1a7f5ae3b2c49b240c12c132847f17aa990b841a417598a2"},
{file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaea112aed589131f73d50d570a6864728bd7c0c66ef6c9154ed7b59f24da611"},
{file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4ca3b9f370f218cc2a0309542cab8d0acdfd66667e7c37d04d617012485f904"},
{file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23ec1d3c31882b2a8a69c801ef58ebf7bae2553211ebbddf04235be275a38548"},
{file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75119badf45f7183e10e348edff5a76a94dc19ba9287d94001ff05e81475967b"},
{file = "yarl-1.15.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78e6fdc976ec966b99e4daa3812fac0274cc28cd2b24b0d92462e2e5ef90d368"},
{file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8657d3f37f781d987037f9cc20bbc8b40425fa14380c87da0cb8dfce7c92d0fb"},
{file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:93bed8a8084544c6efe8856c362af08a23e959340c87a95687fdbe9c9f280c8b"},
{file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:69d5856d526802cbda768d3e6246cd0d77450fa2a4bc2ea0ea14f0d972c2894b"},
{file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ccad2800dfdff34392448c4bf834be124f10a5bc102f254521d931c1c53c455a"},
{file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:a880372e2e5dbb9258a4e8ff43f13888039abb9dd6d515f28611c54361bc5644"},
{file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c998d0558805860503bc3a595994895ca0f7835e00668dadc673bbf7f5fbfcbe"},
{file = "yarl-1.15.2-cp313-cp313-win32.whl", hash = "sha256:533a28754e7f7439f217550a497bb026c54072dbe16402b183fdbca2431935a9"},
{file = "yarl-1.15.2-cp313-cp313-win_amd64.whl", hash = "sha256:5838f2b79dc8f96fdc44077c9e4e2e33d7089b10788464609df788eb97d03aad"},
{file = "yarl-1.15.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fbbb63bed5fcd70cd3dd23a087cd78e4675fb5a2963b8af53f945cbbca79ae16"},
{file = "yarl-1.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2e93b88ecc8f74074012e18d679fb2e9c746f2a56f79cd5e2b1afcf2a8a786b"},
{file = "yarl-1.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af8ff8d7dc07ce873f643de6dfbcd45dc3db2c87462e5c387267197f59e6d776"},
{file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66f629632220a4e7858b58e4857927dd01a850a4cef2fb4044c8662787165cf7"},
{file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:833547179c31f9bec39b49601d282d6f0ea1633620701288934c5f66d88c3e50"},
{file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2aa738e0282be54eede1e3f36b81f1e46aee7ec7602aa563e81e0e8d7b67963f"},
{file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a13a07532e8e1c4a5a3afff0ca4553da23409fad65def1b71186fb867eeae8d"},
{file = "yarl-1.15.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c45817e3e6972109d1a2c65091504a537e257bc3c885b4e78a95baa96df6a3f8"},
{file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:670eb11325ed3a6209339974b276811867defe52f4188fe18dc49855774fa9cf"},
{file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:d417a4f6943112fae3924bae2af7112562285848d9bcee737fc4ff7cbd450e6c"},
{file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bc8936d06cd53fddd4892677d65e98af514c8d78c79864f418bbf78a4a2edde4"},
{file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:954dde77c404084c2544e572f342aef384240b3e434e06cecc71597e95fd1ce7"},
{file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:5bc0df728e4def5e15a754521e8882ba5a5121bd6b5a3a0ff7efda5d6558ab3d"},
{file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b71862a652f50babab4a43a487f157d26b464b1dedbcc0afda02fd64f3809d04"},
{file = "yarl-1.15.2-cp38-cp38-win32.whl", hash = "sha256:63eab904f8630aed5a68f2d0aeab565dcfc595dc1bf0b91b71d9ddd43dea3aea"},
{file = "yarl-1.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:2cf441c4b6e538ba0d2591574f95d3fdd33f1efafa864faa077d9636ecc0c4e9"},
{file = "yarl-1.15.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a32d58f4b521bb98b2c0aa9da407f8bd57ca81f34362bcb090e4a79e9924fefc"},
{file = "yarl-1.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:766dcc00b943c089349d4060b935c76281f6be225e39994c2ccec3a2a36ad627"},
{file = "yarl-1.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bed1b5dbf90bad3bfc19439258c97873eab453c71d8b6869c136346acfe497e7"},
{file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed20a4bdc635f36cb19e630bfc644181dd075839b6fc84cac51c0f381ac472e2"},
{file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d538df442c0d9665664ab6dd5fccd0110fa3b364914f9c85b3ef9b7b2e157980"},
{file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c6cf1d92edf936ceedc7afa61b07e9d78a27b15244aa46bbcd534c7458ee1b"},
{file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce44217ad99ffad8027d2fde0269ae368c86db66ea0571c62a000798d69401fb"},
{file = "yarl-1.15.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47a6000a7e833ebfe5886b56a31cb2ff12120b1efd4578a6fcc38df16cc77bd"},
{file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e52f77a0cd246086afde8815039f3e16f8d2be51786c0a39b57104c563c5cbb0"},
{file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:f9ca0e6ce7774dc7830dc0cc4bb6b3eec769db667f230e7c770a628c1aa5681b"},
{file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:136f9db0f53c0206db38b8cd0c985c78ded5fd596c9a86ce5c0b92afb91c3a19"},
{file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:173866d9f7409c0fb514cf6e78952e65816600cb888c68b37b41147349fe0057"},
{file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:6e840553c9c494a35e449a987ca2c4f8372668ee954a03a9a9685075228e5036"},
{file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:458c0c65802d816a6b955cf3603186de79e8fdb46d4f19abaec4ef0a906f50a7"},
{file = "yarl-1.15.2-cp39-cp39-win32.whl", hash = "sha256:5b48388ded01f6f2429a8c55012bdbd1c2a0c3735b3e73e221649e524c34a58d"},
{file = "yarl-1.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:81dadafb3aa124f86dc267a2168f71bbd2bfb163663661ab0038f6e4b8edb810"},
{file = "yarl-1.15.2-py3-none-any.whl", hash = "sha256:0d3105efab7c5c091609abacad33afff33bdff0035bece164c98bcf5a85ef90a"},
{file = "yarl-1.15.2.tar.gz", hash = "sha256:a39c36f4218a5bb668b4f06874d676d35a035ee668e6e7e3538835c703634b84"},
]
[package.dependencies]
idna = ">=2.0"
multidict = ">=4.0"
propcache = ">=0.2.0"
[extras]
langsmith-pyo3 = ["langsmith-pyo3"]
vcr = []
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4.0"
content-hash = "c7acc8c8f123bf7968b265a0f0cdd0b679d88559bfbff33488bff25bb4f54f0f"
|
0 | lc_public_repos/langsmith-sdk | lc_public_repos/langsmith-sdk/python/README.md | # LangSmith Client SDK
[](https://github.com/langchain-ai/langsmith-sdk/releases)
[](https://pepy.tech/project/langsmith)
This package contains the Python client for interacting with the [LangSmith platform](https://smith.langchain.com/).
To install:
```bash
pip install -U langsmith
export LANGSMITH_TRACING=true
export LANGSMITH_API_KEY=ls_...
```
Then trace:
```python
import openai
from langsmith.wrappers import wrap_openai
from langsmith import traceable
# Auto-trace LLM calls in-context
client = wrap_openai(openai.Client())
@traceable # Auto-trace this function
def pipeline(user_input: str):
result = client.chat.completions.create(
messages=[{"role": "user", "content": user_input}],
model="gpt-3.5-turbo"
)
return result.choices[0].message.content
pipeline("Hello, world!")
```
See the resulting nested trace [🌐 here](https://smith.langchain.com/public/b37ca9b1-60cd-4a2a-817e-3c4e4443fdc0/r).
LangSmith helps you and your team develop and evaluate language models and intelligent agents. It is compatible with any LLM application.
> **Cookbook:** For tutorials on how to get more value out of LangSmith, check out the [Langsmith Cookbook](https://github.com/langchain-ai/langsmith-cookbook/tree/main) repo.
A typical workflow looks like:
1. Set up an account with LangSmith.
2. Log traces while debugging and prototyping.
3. Run benchmark evaluations and continuously improve with the collected data.
We'll walk through these steps in more detail below.
## 1. Connect to LangSmith
Sign up for [LangSmith](https://smith.langchain.com/) using your GitHub, Discord accounts, or an email address and password. If you sign up with an email, make sure to verify your email address before logging in.
Then, create a unique API key on the [Settings Page](https://smith.langchain.com/settings), which is found in the menu at the top right corner of the page.
Note: Save the API Key in a secure location. It will not be shown again.
## 2. Log Traces
You can log traces natively using the LangSmith SDK or within your LangChain application.
### Logging Traces with LangChain
LangSmith seamlessly integrates with the Python LangChain library to record traces from your LLM applications.
1. **Copy the environment variables from the Settings Page and add them to your application.**
Tracing can be activated by setting the following environment variables or by manually specifying the LangChainTracer.
```python
import os
os.environ["LANGSMITH_TRACING_V2"] = "true"
os.environ["LANGSMITH_ENDPOINT"] = "https://api.smith.langchain.com"
# os.environ["LANGSMITH_ENDPOINT"] = "https://eu.api.smith.langchain.com" # If signed up in the EU region
os.environ["LANGSMITH_API_KEY"] = "<YOUR-LANGSMITH-API-KEY>"
# os.environ["LANGSMITH_PROJECT"] = "My Project Name" # Optional: "default" is used if not set
```
> **Tip:** Projects are groups of traces. All runs are logged to a project. If not specified, the project is set to `default`.
2. **Run an Agent, Chain, or Language Model in LangChain**
If the environment variables are correctly set, your application will automatically connect to the LangSmith platform.
```python
from langchain_core.runnables import chain
@chain
def add_val(x: dict) -> dict:
return {"val": x["val"] + 1}
add_val({"val": 1})
```
### Logging Traces Outside LangChain
You can still use the LangSmith development platform without depending on any
LangChain code.
1. **Copy the environment variables from the Settings Page and add them to your application.**
```python
import os
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "<YOUR-LANGSMITH-API-KEY>"
# os.environ["LANGCHAIN_PROJECT"] = "My Project Name" # Optional: "default" is used if not set
```
2. **Log traces**
The easiest way to log traces using the SDK is via the `@traceable` decorator. Below is an example.
```python
from datetime import datetime
from typing import List, Optional, Tuple
import openai
from langsmith import traceable
from langsmith.wrappers import wrap_openai
client = wrap_openai(openai.Client())
@traceable
def argument_generator(query: str, additional_description: str = "") -> str:
return client.chat.completions.create(
[
{"role": "system", "content": "You are a debater making an argument on a topic."
f"{additional_description}"
f" The current time is {datetime.now()}"},
{"role": "user", "content": f"The discussion topic is {query}"}
]
).choices[0].message.content
@traceable
def argument_chain(query: str, additional_description: str = "") -> str:
argument = argument_generator(query, additional_description)
# ... Do other processing or call other functions...
return argument
argument_chain("Why is blue better than orange?")
```
Alternatively, you can manually log events using the `Client` directly or using a `RunTree`, which is what the traceable decorator is meant to manage for you!
A RunTree tracks your application. Each RunTree object is required to have a `name` and `run_type`. These and other important attributes are as follows:
- `name`: `str` - used to identify the component's purpose
- `run_type`: `str` - Currently one of "llm", "chain" or "tool"; more options will be added in the future
- `inputs`: `dict` - the inputs to the component
- `outputs`: `Optional[dict]` - the (optional) returned values from the component
- `error`: `Optional[str]` - Any error messages that may have arisen during the call
```python
from langsmith.run_trees import RunTree
parent_run = RunTree(
name="My Chat Bot",
run_type="chain",
inputs={"text": "Summarize this morning's meetings."},
# project_name= "Defaults to the LANGCHAIN_PROJECT env var"
)
parent_run.post()
# .. My Chat Bot calls an LLM
child_llm_run = parent_run.create_child(
name="My Proprietary LLM",
run_type="llm",
inputs={
"prompts": [
"You are an AI Assistant. The time is XYZ."
" Summarize this morning's meetings."
]
},
)
child_llm_run.post()
child_llm_run.end(
outputs={
"generations": [
"I should use the transcript_loader tool"
" to fetch meeting_transcripts from XYZ"
]
}
)
child_llm_run.patch()
# .. My Chat Bot takes the LLM output and calls
# a tool / function for fetching transcripts ..
child_tool_run = parent_run.create_child(
name="transcript_loader",
run_type="tool",
inputs={"date": "XYZ", "content_type": "meeting_transcripts"},
)
child_tool_run.post()
# The tool returns meeting notes to the chat bot
child_tool_run.end(outputs={"meetings": ["Meeting1 notes.."]})
child_tool_run.patch()
child_chain_run = parent_run.create_child(
name="Unreliable Component",
run_type="tool",
inputs={"input": "Summarize these notes..."},
)
child_chain_run.post()
try:
# .... the component does work
raise ValueError("Something went wrong")
    child_chain_run.end(outputs={"output": "foo"})
child_chain_run.patch()
except Exception as e:
child_chain_run.end(error=f"I errored again {e}")
child_chain_run.patch()
pass
# .. The chat agent recovers
parent_run.end(outputs={"output": ["The meeting notes are as follows:..."]})
res = parent_run.patch()
res.result()
```
## Create a Dataset from Existing Runs
Once your runs are stored in LangSmith, you can convert them into a dataset.
For this example, we will do so using the Client, but you can also do this using
the web interface, as explained in the [LangSmith docs](https://docs.smith.langchain.com/docs/).
```python
from langsmith import Client
client = Client()
dataset_name = "Example Dataset"
# We will only use examples from the top level AgentExecutor run here,
# and exclude runs that errored.
runs = client.list_runs(
project_name="my_project",
execution_order=1,
error=False,
)
dataset = client.create_dataset(dataset_name, description="An example dataset")
for run in runs:
client.create_example(
inputs=run.inputs,
outputs=run.outputs,
dataset_id=dataset.id,
)
```
## Evaluating Runs
Check out the [LangSmith Testing & Evaluation docs](https://docs.smith.langchain.com/docs/evaluation/) for up-to-date workflows.
For generating automated feedback on individual runs, you can run evaluations directly using the LangSmith client.
```python
from typing import Optional
from langsmith.evaluation import StringEvaluator
def jaccard_chars(output: str, answer: str) -> float:
"""Naive Jaccard similarity between two strings."""
prediction_chars = set(output.strip().lower())
answer_chars = set(answer.strip().lower())
intersection = prediction_chars.intersection(answer_chars)
union = prediction_chars.union(answer_chars)
return len(intersection) / len(union)
def grader(run_input: str, run_output: str, answer: Optional[str]) -> dict:
"""Compute the score and/or label for this run."""
if answer is None:
value = "AMBIGUOUS"
score = 0.5
else:
score = jaccard_chars(run_output, answer)
value = "CORRECT" if score > 0.9 else "INCORRECT"
return dict(score=score, value=value)
evaluator = StringEvaluator(evaluation_name="Jaccard", grading_function=grader)
runs = client.list_runs(
project_name="my_project",
execution_order=1,
error=False,
)
for run in runs:
client.evaluate_run(run, evaluator)
```
## Integrations
LangSmith easily integrates with your favorite LLM framework.
## OpenAI SDK
<!-- markdown-link-check-disable -->
We provide a convenient wrapper for the [OpenAI SDK](https://platform.openai.com/docs/api-reference).
In order to use, you first need to set your LangSmith API key.
```shell
export LANGCHAIN_API_KEY=<your-api-key>
```
Next, you will need to install the LangSmith SDK:
```shell
pip install -U langsmith
```
After that, you can wrap the OpenAI client:
```python
from openai import OpenAI
from langsmith import wrappers
client = wrappers.wrap_openai(OpenAI())
```
Now, you can use the OpenAI client as you normally would, but now everything is logged to LangSmith!
```python
client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Say this is a test"}],
)
```
Oftentimes, you use the OpenAI client inside of other functions.
You can get nested traces by using this wrapped client and decorating those functions with `@traceable`.
See [this documentation](https://docs.smith.langchain.com/tracing/faq/logging_and_viewing) for more information on how to use this decorator.
```python
from langsmith import traceable
@traceable(name="Call OpenAI")
def my_function(text: str):
return client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": f"Say {text}"}],
)
my_function("hello world")
```
# Instructor
We provide a convenient integration with [Instructor](https://jxnl.github.io/instructor/), largely by virtue of it essentially just using the OpenAI SDK.
In order to use, you first need to set your LangSmith API key.
```shell
export LANGCHAIN_API_KEY=<your-api-key>
```
Next, you will need to install the LangSmith SDK:
```shell
pip install -U langsmith
```
After that, you can wrap the OpenAI client:
```python
from openai import OpenAI
from langsmith import wrappers
client = wrappers.wrap_openai(OpenAI())
```
After this, you can patch the OpenAI client using `instructor`:
```python
import instructor
client = instructor.patch(OpenAI())
```
Now, you can use `instructor` as you normally would, but now everything is logged to LangSmith!
```python
from pydantic import BaseModel
class UserDetail(BaseModel):
name: str
age: int
user = client.chat.completions.create(
model="gpt-3.5-turbo",
response_model=UserDetail,
messages=[
{"role": "user", "content": "Extract Jason is 25 years old"},
]
)
```
Oftentimes, you use `instructor` inside of other functions.
You can get nested traces by using this wrapped client and decorating those functions with `@traceable`.
See [this documentation](https://docs.smith.langchain.com/tracing/faq/logging_and_viewing) for more information on how to use this decorator.
```python
@traceable()
def my_function(text: str) -> UserDetail:
return client.chat.completions.create(
model="gpt-3.5-turbo",
response_model=UserDetail,
messages=[
{"role": "user", "content": f"Extract {text}"},
]
)
my_function("Jason is 25 years old")
```
## Additional Documentation
To learn more about the LangSmith platform, check out the [docs](https://docs.smith.langchain.com/docs/).
|
0 | lc_public_repos/langsmith-sdk | lc_public_repos/langsmith-sdk/python/pyproject.toml | [tool.poetry]
name = "langsmith"
version = "0.2.0"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
authors = ["LangChain <support@langchain.dev>"]
license = "MIT"
readme = "README.md"
repository = "https://github.com/langchain-ai/langsmith-sdk"
homepage = "https://smith.langchain.com/"
documentation = "https://docs.smith.langchain.com/"
keywords = [
"langsmith",
"langchain",
"llm",
"nlp",
"language",
"translation",
"evaluation",
"tracing",
"platform",
]
packages = [{ include = "langsmith" }]
[tool.poetry.scripts]
langsmith = "langsmith.cli.main:main"
[tool.poetry.dependencies]
python = ">=3.9,<4.0"
pydantic = [
{ version = ">=1,<3", python = "<3.12.4" },
{ version = "^2.7.4", python = ">=3.12.4" },
]
requests = "^2"
orjson = { version = "^3.9.14", markers = "platform_python_implementation != 'PyPy'" }
httpx = ">=0.23.0,<1"
requests-toolbelt = "^1.0.0"
# Enabled via `langsmith_pyo3` extra: `pip install langsmith[langsmith_pyo3]`.
langsmith-pyo3 = { version = "^0.1.0rc2", optional = true }
[tool.poetry.group.dev.dependencies]
pytest = "^7.3.1"
black = ">=23.3,<25.0"
mypy = "^1.9.0"
ruff = "^0.6.9"
types-requests = "^2.31.0.1"
pandas-stubs = "^2.0.1.230501"
types-pyyaml = "^6.0.12.10"
pytest-asyncio = "^0.21.0"
types-psutil = "^5.9.5.16"
psutil = "^5.9.5"
freezegun = "^1.2.2"
pytest-subtests = "^0.11.0"
pytest-watcher = "^0.3.4"
pytest-xdist = "^3.5.0"
pytest-cov = "^4.1.0"
dataclasses-json = "^0.6.4"
types-tqdm = "^4.66.0.20240106"
vcrpy = "^6.0.1"
fastapi = "^0.115.4"
uvicorn = "^0.29.0"
pytest-rerunfailures = "^14.0"
pytest-socket = "^0.7.0"
pyperf = "^2.7.0"
py-spy = "^0.3.14"
multipart = "^1.0.0"
[tool.poetry.group.lint.dependencies]
openai = "^1.10"
[tool.poetry.group.test.dependencies]
pytest-socket = "^0.7.0"
[tool.poetry.extras]
vcr = ["vcrpy"]
langsmith_pyo3 = ["langsmith-pyo3"]
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.ruff]
lint.select = [
"E", # pycodestyle
"F", # pyflakes
"I", # isort
"D", # pydocstyle
"D401", # First line should be in imperative mood
"T201",
"UP",
]
lint.ignore = [
"UP006",
"UP007",
# Relax the convention by _not_ requiring documentation for every function parameter.
"D417",
]
[tool.ruff.lint.pydocstyle]
convention = "google"
[tool.ruff.lint.per-file-ignores]
"langsmith/run_helpers.py" = ["E501"]
"docs/conf.py" = ["E501"]
"langsmith/cli/*" = ["T201", "D", "UP"]
"docs/create_api_rst.py" = ["D101", "D103", "E501"]
"docs/scripts/custom_formatter.py" = ["D100"]
"langsmith/anonymizer.py" = ["E501"]
"langsmith/async_client.py" = ["E501"]
"langsmith/client.py" = ["E501"]
"langsmith/schemas.py" = ["E501"]
"tests/evaluation/__init__.py" = ["E501"]
"tests/unit_tests/test_client.py" = ["E501"]
"tests/*" = ["D", "UP"]
"bench/*" = ["D", "UP", "T"]
"docs/*" = ["T", "D"]
[tool.ruff.format]
docstring-code-format = true
docstring-code-line-length = 80
[tool.mypy]
plugins = ["pydantic.v1.mypy", "pydantic.mypy"]
ignore_missing_imports = "True"
disallow_untyped_defs = "True"
[tool.pytest.ini_options]
asyncio_mode = "auto"
markers = ["slow: long-running tests"]
|
0 | lc_public_repos/langsmith-sdk | lc_public_repos/langsmith-sdk/python/mypy.ini | [mypy]
plugins = pydantic.mypy
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/schemas.py | """Schemas for the LangSmith API."""
from __future__ import annotations
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from enum import Enum
from typing import (
Any,
Dict,
List,
NamedTuple,
Optional,
Protocol,
Tuple,
Union,
runtime_checkable,
)
from uuid import UUID
from typing_extensions import NotRequired, TypedDict
try:
from pydantic.v1 import (
BaseModel,
Field, # type: ignore[import]
PrivateAttr,
StrictBool,
StrictFloat,
StrictInt,
)
except ImportError:
from pydantic import ( # type: ignore[assignment]
BaseModel,
Field,
PrivateAttr,
StrictBool,
StrictFloat,
StrictInt,
)
from typing_extensions import Literal
# Allowed types for a score value: strict bool/int/float (no pydantic
# coercion from other types), or None when no score is present.
SCORE_TYPE = Union[StrictBool, StrictInt, StrictFloat, None]
# Allowed types for a value payload: structured dict, free-form string, or None.
VALUE_TYPE = Union[Dict, str, None]
class Attachment(NamedTuple):
    """Annotated type that will be stored as an attachment if used.

    Examples:
    --------
    .. code-block:: python

        @traceable
        def my_function(bar: int, my_val: Attachment):
            # my_val will be stored as an attachment
            # bar will be stored as inputs
            return bar
    """

    # MIME type of the attachment payload (e.g. "image/png").
    mime_type: str
    # Raw attachment content.
    data: bytes


Attachments = Dict[str, Union[Tuple[str, bytes], Attachment]]
"""Attachments associated with the run. Each entry is a tuple of (mime_type, bytes)."""
class ExampleBase(BaseModel):
    """Example base model.

    Fields shared by all Example variants: the owning dataset and the
    example's inputs, plus optional outputs and metadata.
    """

    dataset_id: UUID
    inputs: Dict[str, Any] = Field(default_factory=dict)
    outputs: Optional[Dict[str, Any]] = Field(default=None)
    metadata: Optional[Dict[str, Any]] = Field(default=None)

    class Config:
        """Configuration class for the schema."""

        # Instances are immutable after construction.
        frozen = True
class ExampleCreate(ExampleBase):
    """Example create model."""

    # Optional identifier for the example being created.
    id: Optional[UUID]
    # Creation timestamp; defaults to "now" in UTC at instantiation time.
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    # Dataset split(s) this example belongs to (e.g. a single split name or
    # several) — presumably interpreted server-side; TODO confirm semantics.
    split: Optional[Union[str, List[str]]] = None
class Example(ExampleBase):
    """Example model."""

    id: UUID
    # Epoch (1970-01-01 UTC) sentinel default when no timestamp is provided.
    created_at: datetime = Field(
        default_factory=lambda: datetime.fromtimestamp(0, tz=timezone.utc)
    )
    # Nil-UUID sentinel default overriding the required field on ExampleBase.
    dataset_id: UUID = Field(default=UUID("00000000-0000-0000-0000-000000000000"))
    modified_at: Optional[datetime] = Field(default=None)
    runs: List[Run] = Field(default_factory=list)
    source_run_id: Optional[UUID] = None
    # Private attrs used only for building the `url` property; not serialized.
    _host_url: Optional[str] = PrivateAttr(default=None)
    _tenant_id: Optional[UUID] = PrivateAttr(default=None)

    def __init__(
        self,
        _host_url: Optional[str] = None,
        _tenant_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> None:
        """Initialize an Example object.

        Args:
            _host_url: Base URL of the LangSmith app, used to build `url`.
            _tenant_id: Tenant (organization) ID used to scope the URL.
            **kwargs: Regular pydantic field values.
        """
        super().__init__(**kwargs)
        self._host_url = _host_url
        self._tenant_id = _tenant_id

    @property
    def url(self) -> Optional[str]:
        """URL of this example within the app, or None if no host URL is set."""
        if self._host_url:
            path = f"/datasets/{self.dataset_id}/e/{self.id}"
            # Scope the link to the tenant/org when one is known.
            if self._tenant_id:
                return f"{self._host_url}/o/{str(self._tenant_id)}{path}"
            return f"{self._host_url}{path}"
        return None

    def __repr__(self):
        """Return a string representation of the Example object."""
        return f"{self.__class__}(id={self.id}, dataset_id={self.dataset_id}, link='{self.url}')"
class ExampleSearch(ExampleBase):
    """Example returned via search."""

    id: UUID
class ExampleUpdate(BaseModel):
    """Update class for Example.

    All fields are optional; only the fields that are set are updated.
    """

    dataset_id: Optional[UUID] = None
    inputs: Optional[Dict[str, Any]] = None
    outputs: Optional[Dict[str, Any]] = None
    metadata: Optional[Dict[str, Any]] = None
    split: Optional[Union[str, List[str]]] = None

    class Config:
        """Configuration class for the schema."""

        # Instances are immutable after construction.
        frozen = True
class DataType(str, Enum):
    """Enum for dataset data types.

    Subclasses ``str`` so values serialize as plain strings.
    """

    kv = "kv"
    llm = "llm"
    chat = "chat"
class DatasetBase(BaseModel):
    """Dataset base model."""

    name: str
    description: Optional[str] = None
    data_type: Optional[DataType] = None

    class Config:
        """Configuration class for the schema."""

        # Instances are immutable after construction.
        frozen = True
# Known dataset-transformation identifiers. DatasetTransformation also accepts
# arbitrary strings (see the Union below) for forward compatibility.
DatasetTransformationType = Literal[
    "remove_system_messages",
    "convert_to_openai_message",
    "convert_to_openai_tool",
    "remove_extra_fields",
    "extract_tools_from_run",
]


class DatasetTransformation(TypedDict, total=False):
    """Schema for dataset transformations."""

    # Path within the example payload the transformation applies to.
    path: List[str]
    # One of the known identifiers above, or any other string.
    transformation_type: Union[DatasetTransformationType, str]
class Dataset(DatasetBase):
    """Dataset ORM model."""

    id: UUID
    created_at: datetime
    modified_at: Optional[datetime] = Field(default=None)
    example_count: Optional[int] = None
    session_count: Optional[int] = None
    last_session_start_time: Optional[datetime] = None
    inputs_schema: Optional[Dict[str, Any]] = None
    outputs_schema: Optional[Dict[str, Any]] = None
    transformations: Optional[List[DatasetTransformation]] = None
    # Private attrs used only for building the `url` property; not serialized.
    _host_url: Optional[str] = PrivateAttr(default=None)
    _tenant_id: Optional[UUID] = PrivateAttr(default=None)
    _public_path: Optional[str] = PrivateAttr(default=None)

    def __init__(
        self,
        _host_url: Optional[str] = None,
        _tenant_id: Optional[UUID] = None,
        _public_path: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Initialize a Dataset object.

        Args:
            _host_url: Base URL of the LangSmith app, used to build `url`.
            _tenant_id: Tenant (organization) ID used to scope the URL.
            _public_path: Pre-built public share path; takes precedence in `url`.
            **kwargs: Regular pydantic field values.
        """
        # Accept the alternate "*_schema_definition" key names by remapping
        # them to this model's field names before validation.
        if "inputs_schema_definition" in kwargs:
            kwargs["inputs_schema"] = kwargs.pop("inputs_schema_definition")
        if "outputs_schema_definition" in kwargs:
            kwargs["outputs_schema"] = kwargs.pop("outputs_schema_definition")
        super().__init__(**kwargs)
        self._host_url = _host_url
        self._tenant_id = _tenant_id
        self._public_path = _public_path

    @property
    def url(self) -> Optional[str]:
        """URL of this dataset within the app, or None if no host URL is set."""
        if self._host_url:
            # Public share path wins over tenant-scoped and plain links.
            if self._public_path:
                return f"{self._host_url}{self._public_path}"
            if self._tenant_id:
                return f"{self._host_url}/o/{str(self._tenant_id)}/datasets/{self.id}"
            return f"{self._host_url}/datasets/{self.id}"
        return None
class DatasetVersion(BaseModel):
    """Class representing a dataset version."""

    # Tags attached to this version, if any.
    tags: Optional[List[str]] = None
    # Timestamp the version snapshot refers to.
    as_of: datetime
def _default_extra():
return {"metadata": {}}
class RunBase(BaseModel):
    """Base Run schema.

    A Run is a span representing a single unit of work or operation within your LLM app.
    This could be a single call to an LLM or chain, to a prompt formatting call,
    to a runnable lambda invocation. If you are familiar with OpenTelemetry,
    you can think of a run as a span.
    """

    id: UUID
    """Unique identifier for the run."""

    name: str
    """Human-readable name for the run."""

    start_time: datetime
    """Start time of the run."""

    run_type: str
    """The type of run, such as tool, chain, llm, retriever,
    embedding, prompt, parser."""

    end_time: Optional[datetime] = None
    """End time of the run, if applicable."""

    extra: Optional[dict] = Field(default_factory=_default_extra)
    """Additional metadata or settings related to the run."""

    error: Optional[str] = None
    """Error message, if the run encountered any issues."""

    serialized: Optional[dict] = None
    """Serialized object that executed the run for potential reuse."""

    events: Optional[List[Dict]] = None
    """List of events associated with the run, like
    start and end events."""

    inputs: dict = Field(default_factory=dict)
    """Inputs used for the run."""

    outputs: Optional[dict] = None
    """Outputs generated by the run, if any."""

    reference_example_id: Optional[UUID] = None
    """Reference to an example that this run may be based on."""

    parent_run_id: Optional[UUID] = None
    """Identifier for a parent run, if this run is a sub-run."""

    tags: Optional[List[str]] = None
    """Tags for categorizing or annotating the run."""

    attachments: Attachments = Field(default_factory=dict)
    """Attachments associated with the run.
    Each entry is a tuple of (mime_type, bytes)."""

    @property
    def metadata(self) -> dict[str, Any]:
        """Retrieve the metadata (if any).

        Lazily initializes ``extra`` and its ``"metadata"`` entry as a side
        effect, so callers always receive a mutable dict stored on the run.
        """
        if self.extra is None:
            self.extra = {}
        return self.extra.setdefault("metadata", {})

    @property
    def revision_id(self) -> Optional[UUID]:
        """Retrieve the revision ID (if any)."""
        return self.metadata.get("revision_id")

    def __repr__(self):
        """Return a string representation of the RunBase object."""
        return f"{self.__class__}(id={self.id}, name='{self.name}', run_type='{self.run_type}')"
class Run(RunBase):
"""Run schema when loading from the DB."""
session_id: Optional[UUID] = None
"""The project ID this run belongs to."""
child_run_ids: Optional[List[UUID]] = None
"""The child run IDs of this run."""
child_runs: Optional[List[Run]] = None
"""The child runs of this run, if instructed to load using the client
These are not populated by default, as it is a heavier query to make."""
feedback_stats: Optional[Dict[str, Any]] = None
"""Feedback stats for this run."""
app_path: Optional[str] = None
"""Relative URL path of this run within the app."""
manifest_id: Optional[UUID] = None
"""Unique ID of the serialized object for this run."""
status: Optional[str] = None
"""Status of the run (e.g., 'success')."""
prompt_tokens: Optional[int] = None
"""Number of tokens used for the prompt."""
completion_tokens: Optional[int] = None
"""Number of tokens generated as output."""
total_tokens: Optional[int] = None
"""Total tokens for prompt and completion."""
first_token_time: Optional[datetime] = None
"""Time the first token was processed."""
total_cost: Optional[Decimal] = None
"""The total estimated LLM cost associated with the completion tokens."""
prompt_cost: Optional[Decimal] = None
"""The estimated cost associated with the prompt (input) tokens."""
completion_cost: Optional[Decimal] = None
"""The estimated cost associated with the completion tokens."""
parent_run_ids: Optional[List[UUID]] = None
"""List of parent run IDs."""
trace_id: UUID
"""Unique ID assigned to every run within this nested trace."""
dotted_order: str = Field(default="")
"""Dotted order for the run.
This is a string composed of {time}{run-uuid}.* so that a trace can be
sorted in the order it was executed.
Example:
- Parent: 20230914T223155647Z1b64098b-4ab7-43f6-afee-992304f198d8
- Children:
- 20230914T223155647Z1b64098b-4ab7-43f6-afee-992304f198d8.20230914T223155649Z809ed3a2-0172-4f4d-8a02-a64e9b7a0f8a
- 20230915T223155647Z1b64098b-4ab7-43f6-afee-992304f198d8.20230914T223155650Zc8d9f4c5-6c5a-4b2d-9b1c-3d9d7a7c5c7c
""" # noqa: E501
in_dataset: Optional[bool] = None
"""Whether this run is in a dataset."""
_host_url: Optional[str] = PrivateAttr(default=None)
def __init__(self, _host_url: Optional[str] = None, **kwargs: Any) -> None:
"""Initialize a Run object."""
if not kwargs.get("trace_id"):
kwargs = {"trace_id": kwargs.get("id"), **kwargs}
inputs = kwargs.pop("inputs", None) or {}
super().__init__(**kwargs, inputs=inputs)
self._host_url = _host_url
if not self.dotted_order.strip() and not self.parent_run_id:
self.dotted_order = f"{self.start_time.isoformat()}{self.id}"
@property
def url(self) -> Optional[str]:
"""URL of this run within the app."""
if self._host_url and self.app_path:
return f"{self._host_url}{self.app_path}"
return None
class RunTypeEnum(str, Enum):
"""(Deprecated) Enum for run types. Use string directly."""
tool = "tool"
chain = "chain"
llm = "llm"
retriever = "retriever"
embedding = "embedding"
prompt = "prompt"
parser = "parser"
class RunLikeDict(TypedDict, total=False):
"""Run-like dictionary, for type-hinting."""
name: str
run_type: RunTypeEnum
start_time: datetime
inputs: Optional[dict]
outputs: Optional[dict]
end_time: Optional[datetime]
extra: Optional[dict]
error: Optional[str]
serialized: Optional[dict]
parent_run_id: Optional[UUID]
manifest_id: Optional[UUID]
events: Optional[List[dict]]
tags: Optional[List[str]]
inputs_s3_urls: Optional[dict]
outputs_s3_urls: Optional[dict]
id: Optional[UUID]
session_id: Optional[UUID]
session_name: Optional[str]
reference_example_id: Optional[UUID]
input_attachments: Optional[dict]
output_attachments: Optional[dict]
trace_id: UUID
dotted_order: str
attachments: Attachments
class RunWithAnnotationQueueInfo(RunBase):
"""Run schema with annotation queue info."""
last_reviewed_time: Optional[datetime] = None
"""The last time this run was reviewed."""
added_at: Optional[datetime] = None
"""The time this run was added to the queue."""
class FeedbackSourceBase(BaseModel):
"""Base class for feedback sources.
This represents whether feedback is submitted from the API, model, human labeler,
etc.
"""
type: str
"""The type of the feedback source."""
metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
"""Additional metadata for the feedback source."""
class APIFeedbackSource(FeedbackSourceBase):
"""API feedback source."""
type: Literal["api"] = "api"
class ModelFeedbackSource(FeedbackSourceBase):
"""Model feedback source."""
type: Literal["model"] = "model"
class FeedbackSourceType(Enum):
"""Feedback source type."""
API = "api"
"""General feedback submitted from the API."""
MODEL = "model"
"""Model-assisted feedback."""
class FeedbackBase(BaseModel):
"""Feedback schema."""
id: UUID
"""The unique ID of the feedback."""
created_at: Optional[datetime] = None
"""The time the feedback was created."""
modified_at: Optional[datetime] = None
"""The time the feedback was last modified."""
run_id: Optional[UUID]
"""The associated run ID this feedback is logged for."""
trace_id: Optional[UUID]
"""The associated trace ID this feedback is logged for."""
key: str
"""The metric name, tag, or aspect to provide feedback on."""
score: SCORE_TYPE = None
"""Value or score to assign the run."""
value: VALUE_TYPE = None
"""The display value, tag or other value for the feedback if not a metric."""
comment: Optional[str] = None
"""Comment or explanation for the feedback."""
correction: Union[str, dict, None] = None
"""Correction for the run."""
feedback_source: Optional[FeedbackSourceBase] = None
"""The source of the feedback."""
session_id: Optional[UUID] = None
"""The associated project ID (Session = Project) this feedback is logged for."""
comparative_experiment_id: Optional[UUID] = None
"""If logged within a 'comparative experiment', this is the ID of the experiment."""
feedback_group_id: Optional[UUID] = None
"""For preference scoring, this group ID is shared across feedbacks for each
run in the group that was being compared."""
extra: Optional[Dict] = None
"""The metadata of the feedback."""
class Config:
"""Configuration class for the schema."""
frozen = True
class FeedbackCategory(TypedDict, total=False):
"""Specific value and label pair for feedback."""
value: float
"""The numeric value associated with this feedback category."""
label: Optional[str]
"""An optional label to interpret the value for this feedback category."""
class FeedbackConfig(TypedDict, total=False):
"""Represents _how_ a feedback value ought to be interpreted."""
type: Literal["continuous", "categorical", "freeform"]
"""The type of feedback."""
min: Optional[float]
"""The minimum value for continuous feedback."""
max: Optional[float]
"""The maximum value for continuous feedback."""
categories: Optional[List[FeedbackCategory]]
"""If feedback is categorical, this defines the valid categories the server will accept.
Not applicable to continuous or freeform feedback types.""" # noqa
class FeedbackCreate(FeedbackBase):
"""Schema used for creating feedback."""
feedback_source: FeedbackSourceBase
"""The source of the feedback."""
feedback_config: Optional[FeedbackConfig] = None
class Feedback(FeedbackBase):
"""Schema for getting feedback."""
id: UUID
created_at: datetime
"""The time the feedback was created."""
modified_at: datetime
"""The time the feedback was last modified."""
feedback_source: Optional[FeedbackSourceBase] = None
"""The source of the feedback. In this case"""
class TracerSession(BaseModel):
"""TracerSession schema for the API.
Sessions are also referred to as "Projects" in the UI.
"""
id: UUID
"""The ID of the project."""
start_time: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
"""The time the project was created."""
end_time: Optional[datetime] = None
"""The time the project was ended."""
description: Optional[str] = None
"""The description of the project."""
name: Optional[str] = None
"""The name of the session."""
extra: Optional[Dict[str, Any]] = None
"""Extra metadata for the project."""
tenant_id: UUID
"""The tenant ID this project belongs to."""
reference_dataset_id: Optional[UUID]
"""The reference dataset IDs this project's runs were generated on."""
_host_url: Optional[str] = PrivateAttr(default=None)
def __init__(self, _host_url: Optional[str] = None, **kwargs: Any) -> None:
"""Initialize a Run object."""
super().__init__(**kwargs)
self._host_url = _host_url
if self.start_time.tzinfo is None:
self.start_time = self.start_time.replace(tzinfo=timezone.utc)
@property
def url(self) -> Optional[str]:
"""URL of this run within the app."""
if self._host_url:
return f"{self._host_url}/o/{self.tenant_id}/projects/p/{self.id}"
return None
@property
def metadata(self) -> dict[str, Any]:
"""Retrieve the metadata (if any)."""
if self.extra is None or "metadata" not in self.extra:
return {}
return self.extra["metadata"]
@property
def tags(self) -> List[str]:
"""Retrieve the tags (if any)."""
if self.extra is None or "tags" not in self.extra:
return []
return self.extra["tags"]
class TracerSessionResult(TracerSession):
"""A project, hydrated with additional information.
Sessions are also referred to as "Projects" in the UI.
"""
run_count: Optional[int]
"""The number of runs in the project."""
latency_p50: Optional[timedelta]
"""The median (50th percentile) latency for the project."""
latency_p99: Optional[timedelta]
"""The 99th percentile latency for the project."""
total_tokens: Optional[int]
"""The total number of tokens consumed in the project."""
prompt_tokens: Optional[int]
"""The total number of prompt tokens consumed in the project."""
completion_tokens: Optional[int]
"""The total number of completion tokens consumed in the project."""
last_run_start_time: Optional[datetime]
"""The start time of the last run in the project."""
feedback_stats: Optional[Dict[str, Any]]
"""Feedback stats for the project."""
run_facets: Optional[List[Dict[str, Any]]]
"""Facets for the runs in the project."""
total_cost: Optional[Decimal]
"""The total estimated LLM cost associated with the completion tokens."""
prompt_cost: Optional[Decimal]
"""The estimated cost associated with the prompt (input) tokens."""
completion_cost: Optional[Decimal]
"""The estimated cost associated with the completion tokens."""
first_token_p50: Optional[timedelta]
"""The median (50th percentile) time to process the first token."""
first_token_p99: Optional[timedelta]
"""The 99th percentile time to process the first token."""
error_rate: Optional[float]
"""The error rate for the project."""
@runtime_checkable
class BaseMessageLike(Protocol):
"""A protocol representing objects similar to BaseMessage."""
content: str
"""The content of the message."""
additional_kwargs: Dict[Any, Any]
"""Additional keyword arguments associated with the message."""
@property
def type(self) -> str:
"""Type of the Message, used for serialization."""
class DatasetShareSchema(TypedDict, total=False):
"""Represents the schema for a dataset share."""
dataset_id: UUID
"""The ID of the dataset."""
share_token: UUID
"""The token for sharing the dataset."""
url: str
"""The URL of the shared dataset."""
class AnnotationQueue(BaseModel):
"""Represents an annotation queue."""
id: UUID
"""The unique identifier of the annotation queue."""
name: str
"""The name of the annotation queue."""
description: Optional[str] = None
"""An optional description of the annotation queue."""
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
"""The timestamp when the annotation queue was created."""
updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
"""The timestamp when the annotation queue was last updated."""
tenant_id: UUID
"""The ID of the tenant associated with the annotation queue."""
class BatchIngestConfig(TypedDict, total=False):
"""Configuration for batch ingestion."""
use_multipart_endpoint: bool
"""Whether to use the multipart endpoint for batch ingestion."""
scale_up_qsize_trigger: int
"""The queue size threshold that triggers scaling up."""
scale_up_nthreads_limit: int
"""The maximum number of threads to scale up to."""
scale_down_nempty_trigger: int
"""The number of empty threads that triggers scaling down."""
size_limit: int
"""The maximum size limit for the batch."""
size_limit_bytes: Optional[int]
"""The maximum size limit in bytes for the batch."""
class LangSmithInfo(BaseModel):
"""Information about the LangSmith server."""
version: str = ""
"""The version of the LangSmith server."""
license_expiration_time: Optional[datetime] = None
"""The time the license will expire."""
batch_ingest_config: Optional[BatchIngestConfig] = None
Example.update_forward_refs()
class LangSmithSettings(BaseModel):
"""Settings for the LangSmith tenant."""
id: str
"""The ID of the tenant."""
display_name: str
"""The display name of the tenant."""
created_at: datetime
"""The creation time of the tenant."""
tenant_handle: Optional[str] = None
class FeedbackIngestToken(BaseModel):
"""Represents the schema for a feedback ingest token."""
id: UUID
"""The ID of the feedback ingest token."""
url: str
"""The URL to GET when logging the feedback."""
expires_at: datetime
"""The expiration time of the token."""
class RunEvent(TypedDict, total=False):
"""Run event schema."""
name: str
"""Type of event."""
time: Union[datetime, str]
"""Time of the event."""
kwargs: Optional[Dict[str, Any]]
"""Additional metadata for the event."""
class TimeDeltaInput(TypedDict, total=False):
"""Timedelta input schema."""
days: int
"""Number of days."""
hours: int
"""Number of hours."""
minutes: int
"""Number of minutes."""
class DatasetDiffInfo(BaseModel):
"""Represents the difference information between two datasets."""
examples_modified: List[UUID]
"""A list of UUIDs representing the modified examples."""
examples_added: List[UUID]
"""A list of UUIDs representing the added examples."""
examples_removed: List[UUID]
"""A list of UUIDs representing the removed examples."""
class ComparativeExperiment(BaseModel):
"""Represents a comparative experiment.
This information summarizes evaluation results comparing
two or more models on a given dataset.
"""
id: UUID
"""The unique identifier for the comparative experiment."""
name: Optional[str] = None
"""The optional name of the comparative experiment."""
description: Optional[str] = None
"""An optional description of the comparative experiment."""
tenant_id: UUID
"""The identifier of the tenant associated with this experiment."""
created_at: datetime
"""The timestamp when the comparative experiment was created."""
modified_at: datetime
"""The timestamp when the comparative experiment was last modified."""
reference_dataset_id: UUID
"""The identifier of the reference dataset used in this experiment."""
extra: Optional[Dict[str, Any]] = None
"""Optional additional information about the experiment."""
experiments_info: Optional[List[dict]] = None
"""Optional list of dictionaries containing information about individual experiments."""
feedback_stats: Optional[Dict[str, Any]] = None
"""Optional dictionary containing feedback statistics for the experiment."""
@property
def metadata(self) -> dict[str, Any]:
"""Retrieve the metadata (if any)."""
if self.extra is None or "metadata" not in self.extra:
return {}
return self.extra["metadata"]
class PromptCommit(BaseModel):
"""Represents a Prompt with a manifest."""
owner: str
"""The handle of the owner of the prompt."""
repo: str
"""The name of the prompt."""
commit_hash: str
"""The commit hash of the prompt."""
manifest: Dict[str, Any]
"""The manifest of the prompt."""
examples: List[dict]
"""The list of examples."""
class ListedPromptCommit(BaseModel):
"""Represents a listed prompt commit with associated metadata."""
id: UUID
"""The unique identifier for the prompt commit."""
owner: str
"""The owner of the prompt commit."""
repo: str
"""The repository name of the prompt commit."""
manifest_id: Optional[UUID] = None
"""The optional identifier for the manifest associated with this commit."""
repo_id: Optional[UUID] = None
"""The optional identifier for the repository."""
parent_id: Optional[UUID] = None
"""The optional identifier for the parent commit."""
commit_hash: Optional[str] = None
"""The optional hash of the commit."""
created_at: Optional[datetime] = None
"""The optional timestamp when the commit was created."""
updated_at: Optional[datetime] = None
"""The optional timestamp when the commit was last updated."""
example_run_ids: Optional[List[UUID]] = Field(default_factory=list)
"""A list of example run identifiers associated with this commit."""
num_downloads: Optional[int] = 0
"""The number of times this commit has been downloaded."""
num_views: Optional[int] = 0
"""The number of times this commit has been viewed."""
parent_commit_hash: Optional[str] = None
"""The optional hash of the parent commit."""
class Prompt(BaseModel):
"""Represents a Prompt with metadata."""
repo_handle: str
"""The name of the prompt."""
description: Optional[str] = None
"""The description of the prompt."""
readme: Optional[str] = None
"""The README of the prompt."""
id: str
"""The ID of the prompt."""
tenant_id: str
"""The tenant ID of the prompt owner."""
created_at: datetime
"""The creation time of the prompt."""
updated_at: datetime
"""The last update time of the prompt."""
is_public: bool
"""Whether the prompt is public."""
is_archived: bool
"""Whether the prompt is archived."""
tags: List[str]
"""The tags associated with the prompt."""
original_repo_id: Optional[str] = None
"""The ID of the original prompt, if forked."""
upstream_repo_id: Optional[str] = None
"""The ID of the upstream prompt, if forked."""
owner: Optional[str]
"""The handle of the owner of the prompt."""
full_name: str
"""The full name of the prompt. (owner + repo_handle)"""
num_likes: int
"""The number of likes."""
num_downloads: int
"""The number of downloads."""
num_views: int
"""The number of views."""
liked_by_auth_user: Optional[bool] = None
"""Whether the prompt is liked by the authenticated user."""
last_commit_hash: Optional[str] = None
"""The hash of the last commit."""
num_commits: int
"""The number of commits."""
original_repo_full_name: Optional[str] = None
"""The full name of the original prompt, if forked."""
upstream_repo_full_name: Optional[str] = None
"""The full name of the upstream prompt, if forked."""
class ListPromptsResponse(BaseModel):
"""A list of prompts with metadata."""
repos: List[Prompt]
"""The list of prompts."""
total: int
"""The total number of prompts."""
class PromptSortField(str, Enum):
"""Enum for sorting fields for prompts."""
num_downloads = "num_downloads"
"""Number of downloads."""
num_views = "num_views"
"""Number of views."""
updated_at = "updated_at"
"""Last updated time."""
num_likes = "num_likes"
"""Number of likes."""
class InputTokenDetails(TypedDict, total=False):
"""Breakdown of input token counts.
Does *not* need to sum to full input token count. Does *not* need to have all keys.
"""
audio: int
"""Audio input tokens."""
cache_creation: int
"""Input tokens that were cached and there was a cache miss.
Since there was a cache miss, the cache was created from these tokens.
"""
cache_read: int
"""Input tokens that were cached and there was a cache hit.
Since there was a cache hit, the tokens were read from the cache. More precisely,
the model state given these tokens was read from the cache.
"""
class OutputTokenDetails(TypedDict, total=False):
"""Breakdown of output token counts.
Does *not* need to sum to full output token count. Does *not* need to have all keys.
"""
audio: int
"""Audio output tokens."""
reasoning: int
"""Reasoning output tokens.
Tokens generated by the model in a chain of thought process (i.e. by OpenAI's o1
models) that are not returned as part of model output.
"""
class UsageMetadata(TypedDict):
"""Usage metadata for a message, such as token counts.
This is a standard representation of token usage that is consistent across models.
"""
input_tokens: int
"""Count of input (or prompt) tokens. Sum of all input token types."""
output_tokens: int
"""Count of output (or completion) tokens. Sum of all output token types."""
total_tokens: int
"""Total token count. Sum of input_tokens + output_tokens."""
input_token_details: NotRequired[InputTokenDetails]
"""Breakdown of input token counts.
Does *not* need to sum to full input token count. Does *not* need to have all keys.
"""
output_token_details: NotRequired[OutputTokenDetails]
"""Breakdown of output token counts.
Does *not* need to sum to full output token count. Does *not* need to have all keys.
"""
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/_expect.py | """Make approximate assertions as "expectations" on test results.
This module is designed to be used within test cases decorated with the `@test` decorator
It allows you to log scores about a test case and optionally make assertions that log as
"expectation" feedback to LangSmith.
Example usage:
from langsmith import expect, test
@test
def test_output_semantically_close():
response = oai_client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Say hello!"},
],
)
response_txt = response.choices[0].message.content
# Intended usage
expect.embedding_distance(
prediction=response_txt,
reference="Hello!",
).to_be_less_than(0.9)
# Score the test case
matcher = expect.edit_distance(
prediction=response_txt,
reference="Hello!",
)
# Apply an assertion and log 'expectation' feedback to LangSmith
matcher.to_be_less_than(1)
# You can also directly make assertions on values directly
expect.value(response_txt).to_contain("Hello!")
# Or using a custom check
expect.value(response_txt).against(lambda x: "Hello" in x)
# You can even use this for basic metric logging within tests
expect.score(0.8)
expect.score(0.7, key="similarity").to_be_greater_than(0.7)
""" # noqa: E501
from __future__ import annotations
import atexit
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Literal,
Optional,
Union,
overload,
)
from langsmith import client as ls_client
from langsmith import run_helpers as rh
from langsmith import run_trees as rt
from langsmith import utils as ls_utils
if TYPE_CHECKING:
from langsmith._internal._edit_distance import EditDistanceConfig
from langsmith._internal._embedding_distance import EmbeddingConfig
# Sentinel class used until PEP 0661 is accepted
class _NULL_SENTRY:
"""A sentinel singleton class used to distinguish omitted keyword arguments
from those passed in with the value None (which may have different behavior).
""" # noqa: D205
def __bool__(self) -> Literal[False]:
return False
def __repr__(self) -> str:
return "NOT_GIVEN"
NOT_GIVEN = _NULL_SENTRY()
class _Matcher:
"""A class for making assertions on expectation values."""
def __init__(
self,
client: Optional[ls_client.Client],
key: str,
value: Any,
_executor: Optional[ls_utils.ContextThreadPoolExecutor] = None,
run_id: Optional[str] = None,
):
self._client = client
self.key = key
self.value = value
self._executor = _executor or ls_utils.ContextThreadPoolExecutor(max_workers=3)
rt = rh.get_current_run_tree()
self._run_id = rt.trace_id if rt else run_id
def _submit_feedback(self, score: int, message: Optional[str] = None) -> None:
if not ls_utils.test_tracking_is_disabled():
if not self._client:
self._client = rt.get_cached_client()
self._executor.submit(
self._client.create_feedback,
run_id=self._run_id,
key="expectation",
score=score,
comment=message,
)
def _assert(self, condition: bool, message: str, method_name: str) -> None:
try:
assert condition, message
self._submit_feedback(1, message=f"Success: {self.key}.{method_name}")
except AssertionError as e:
self._submit_feedback(0, repr(e))
raise e from None
def to_be_less_than(self, value: float) -> None:
"""Assert that the expectation value is less than the given value.
Args:
value: The value to compare against.
Raises:
AssertionError: If the expectation value is not less than the given value.
"""
self._assert(
self.value < value,
f"Expected {self.key} to be less than {value}, but got {self.value}",
"to_be_less_than",
)
def to_be_greater_than(self, value: float) -> None:
"""Assert that the expectation value is greater than the given value.
Args:
value: The value to compare against.
Raises:
AssertionError: If the expectation value is not
greater than the given value.
"""
self._assert(
self.value > value,
f"Expected {self.key} to be greater than {value}, but got {self.value}",
"to_be_greater_than",
)
def to_be_between(self, min_value: float, max_value: float) -> None:
"""Assert that the expectation value is between the given min and max values.
Args:
min_value: The minimum value (exclusive).
max_value: The maximum value (exclusive).
Raises:
AssertionError: If the expectation value
is not between the given min and max.
"""
self._assert(
min_value < self.value < max_value,
f"Expected {self.key} to be between {min_value} and {max_value},"
f" but got {self.value}",
"to_be_between",
)
def to_be_approximately(self, value: float, precision: int = 2) -> None:
"""Assert that the expectation value is approximately equal to the given value.
Args:
value: The value to compare against.
precision: The number of decimal places to round to for comparison.
Raises:
AssertionError: If the rounded expectation value
does not equal the rounded given value.
"""
self._assert(
round(self.value, precision) == round(value, precision),
f"Expected {self.key} to be approximately {value}, but got {self.value}",
"to_be_approximately",
)
def to_equal(self, value: float) -> None:
"""Assert that the expectation value equals the given value.
Args:
value: The value to compare against.
Raises:
AssertionError: If the expectation value does
not exactly equal the given value.
"""
self._assert(
self.value == value,
f"Expected {self.key} to be equal to {value}, but got {self.value}",
"to_equal",
)
def to_be_none(self) -> None:
"""Assert that the expectation value is None.
Raises:
AssertionError: If the expectation value is not None.
"""
self._assert(
self.value is None,
f"Expected {self.key} to be None, but got {self.value}",
"to_be_none",
)
def to_contain(self, value: Any) -> None:
"""Assert that the expectation value contains the given value.
Args:
value: The value to check for containment.
Raises:
AssertionError: If the expectation value does not contain the given value.
"""
self._assert(
value in self.value,
f"Expected {self.key} to contain {value}, but it does not",
"to_contain",
)
# Custom assertions
def against(self, func: Callable, /) -> None:
"""Assert the expectation value against a custom function.
Args:
func: A custom function that takes the expectation value as input.
Raises:
AssertionError: If the custom function returns False.
"""
func_signature = inspect.signature(func)
self._assert(
func(self.value),
f"Assertion {func_signature} failed for {self.key}",
"against",
)
class _Expect:
"""A class for setting expectations on test results."""
def __init__(self, *, client: Optional[ls_client.Client] = None):
self._client = client
self.executor = ls_utils.ContextThreadPoolExecutor(max_workers=3)
atexit.register(self.executor.shutdown, wait=True)
def embedding_distance(
self,
prediction: str,
reference: str,
*,
config: Optional[EmbeddingConfig] = None,
) -> _Matcher:
"""Compute the embedding distance between the prediction and reference.
This logs the embedding distance to LangSmith and returns a `_Matcher` instance
for making assertions on the distance value.
By default, this uses the OpenAI API for computing embeddings.
Args:
prediction: The predicted string to compare.
reference: The reference string to compare against.
config: Optional configuration for the embedding distance evaluator.
Supported options:
- `encoder`: A custom encoder function to encode the list of input
strings to embeddings. Defaults to the OpenAI API.
- `metric`: The distance metric to use for comparison.
Supported values: "cosine", "euclidean", "manhattan",
"chebyshev", "hamming".
Returns:
A `_Matcher` instance for the embedding distance value.
Examples:
>>> expect.embedding_distance(
... prediction="hello",
... reference="hi",
... ).to_be_less_than(1.0)
""" # noqa: E501
from langsmith._internal._embedding_distance import EmbeddingDistance
config = config or {}
encoder_func = "custom" if config.get("encoder") else "openai"
evaluator = EmbeddingDistance(config=config)
score = evaluator.evaluate(prediction=prediction, reference=reference)
src_info = {"encoder": encoder_func, "metric": evaluator.distance}
self._submit_feedback(
"embedding_distance",
{
"score": score,
"source_info": src_info,
"comment": f"Using {encoder_func}, Metric: {evaluator.distance}",
},
)
return _Matcher(
self._client, "embedding_distance", score, _executor=self.executor
)
def edit_distance(
self,
prediction: str,
reference: str,
*,
config: Optional[EditDistanceConfig] = None,
) -> _Matcher:
"""Compute the string distance between the prediction and reference.
This logs the string distance (Damerau-Levenshtein) to LangSmith and returns
a `_Matcher` instance for making assertions on the distance value.
This depends on the `rapidfuzz` package for string distance computation.
Args:
prediction: The predicted string to compare.
reference: The reference string to compare against.
config: Optional configuration for the string distance evaluator.
Supported options:
- `metric`: The distance metric to use for comparison.
Supported values: "damerau_levenshtein", "levenshtein",
"jaro", "jaro_winkler", "hamming", "indel".
- `normalize_score`: Whether to normalize the score between 0 and 1.
Returns:
A `_Matcher` instance for the string distance value.
Examples:
>>> expect.edit_distance("hello", "helo").to_be_less_than(1)
"""
from langsmith._internal._edit_distance import EditDistance
config = config or {}
metric = config.get("metric") or "damerau_levenshtein"
normalize = config.get("normalize_score", True)
evaluator = EditDistance(config=config)
score = evaluator.evaluate(prediction=prediction, reference=reference)
src_info = {"metric": metric, "normalize": normalize}
self._submit_feedback(
"edit_distance",
{
"score": score,
"source_info": src_info,
"comment": f"Using {metric}, Normalize: {normalize}",
},
)
return _Matcher(
self._client,
"edit_distance",
score,
_executor=self.executor,
)
def value(self, value: Any) -> _Matcher:
"""Create a `_Matcher` instance for making assertions on the given value.
Args:
value: The value to make assertions on.
Returns:
A `_Matcher` instance for the given value.
Examples:
>>> expect.value(10).to_be_less_than(20)
"""
return _Matcher(self._client, "value", value, _executor=self.executor)
def score(
self,
score: Union[float, int],
*,
key: str = "score",
source_run_id: Optional[ls_client.ID_TYPE] = None,
comment: Optional[str] = None,
) -> _Matcher:
"""Log a numeric score to LangSmith.
Args:
score: The score value to log.
key: The key to use for logging the score. Defaults to "score".
Examples:
>>> expect.score(0.8) # doctest: +ELLIPSIS
<langsmith._expect._Matcher object at ...>
>>> expect.score(0.8, key="similarity").to_be_greater_than(0.7)
"""
self._submit_feedback(
key,
{
"score": score,
"source_info": {"method": "expect.score"},
"source_run_id": source_run_id,
"comment": comment,
},
)
return _Matcher(self._client, key, score, _executor=self.executor)
## Private Methods
@overload
def __call__(self, value: Any, /) -> _Matcher: ...
@overload
def __call__(self, /, *, client: ls_client.Client) -> _Expect: ...
def __call__(
self,
value: Optional[Any] = NOT_GIVEN,
/,
client: Optional[ls_client.Client] = None,
) -> Union[_Expect, _Matcher]:
expected = _Expect(client=client)
if value is not NOT_GIVEN:
return expected.value(value)
return expected
def _submit_feedback(self, key: str, results: dict):
current_run = rh.get_current_run_tree()
run_id = current_run.trace_id if current_run else None
if not ls_utils.test_tracking_is_disabled():
if not self._client:
self._client = rt.get_cached_client()
self.executor.submit(
self._client.create_feedback, run_id=run_id, key=key, **results
)
expect = _Expect()
__all__ = ["expect"]
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/async_client.py | """The Async LangSmith Client."""
from __future__ import annotations
import asyncio
import datetime
import uuid
from typing import (
Any,
AsyncIterator,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
)
import httpx
from langsmith import client as ls_client
from langsmith import schemas as ls_schemas
from langsmith import utils as ls_utils
from langsmith._internal import _beta_decorator as ls_beta
class AsyncClient:
"""Async Client for interacting with the LangSmith API."""
__slots__ = ("_retry_config", "_client", "_web_url")
def __init__(
    self,
    api_url: Optional[str] = None,
    api_key: Optional[str] = None,
    timeout_ms: Optional[
        Union[
            int, Tuple[Optional[int], Optional[int], Optional[int], Optional[int]]
        ]
    ] = None,
    retry_config: Optional[Mapping[str, Any]] = None,
    web_url: Optional[str] = None,
):
    """Initialize the async client.

    Args:
        api_url: Base URL of the LangSmith API; resolved via ls_utils when None.
        api_key: API key; resolved via ls_utils when None and sent as x-api-key.
        timeout_ms: Either a single connect timeout in milliseconds, or a
            4-tuple of (connect, read, write, pool) timeouts in milliseconds.
        retry_config: Mapping of retry settings; only ``max_retries`` is read
            by ``_arequest_with_retries``.
        web_url: Optional override for the web UI host used in share links.
    """
    ls_beta._warn_once("Class AsyncClient is in beta.")
    self._retry_config = retry_config or {"max_retries": 3}
    _headers = {
        "Content-Type": "application/json",
    }
    api_key = ls_utils.get_api_key(api_key)
    api_url = ls_utils.get_api_url(api_url)
    if api_key:
        _headers[ls_client.X_API_KEY] = api_key
    # Warns (does not raise) when targeting hosted LangSmith without a key.
    ls_client._validate_api_key_if_hosted(api_url, api_key)
    if isinstance(timeout_ms, int):
        # A bare int sets only the connect timeout; the rest are unlimited.
        timeout_: Union[Tuple, float] = (timeout_ms / 1000, None, None, None)
    elif isinstance(timeout_ms, tuple):
        # Convert each millisecond entry to seconds, preserving None (no limit).
        timeout_ = tuple([t / 1000 if t is not None else None for t in timeout_ms])
    else:
        timeout_ = 10  # seconds (default overall timeout)
    self._client = httpx.AsyncClient(
        base_url=api_url, headers=_headers, timeout=timeout_
    )
    self._web_url = web_url
async def __aenter__(self) -> AsyncClient:
    """Enter the async context manager, returning the client itself."""
    return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
    """Exit the async context manager, closing the underlying HTTP client."""
    await self.aclose()
async def aclose(self):
    """Close the wrapped ``httpx.AsyncClient`` and release its connections."""
    await self._client.aclose()
@property
def _api_url(self):
    # Base URL the httpx client was constructed with, as a string.
    return str(self._client.base_url)
@property
def _host_url(self) -> str:
    """The web host url (UI), derived from the web_url override or API URL."""
    return ls_utils.get_host_url(self._web_url, self._api_url)
async def _arequest_with_retries(
    self,
    method: str,
    endpoint: str,
    **kwargs: Any,
) -> httpx.Response:
    """Make an async HTTP request with retries.

    Retries up to ``max_retries`` times with exponential backoff
    (1s, 2s, 4s, ...) on both HTTP status errors and transport errors.

    Raises:
        ls_utils.LangSmithAPIError: On the final HTTP status failure.
        ls_utils.LangSmithConnectionError: On the final transport failure.
    """
    max_retries = cast(int, self._retry_config.get("max_retries", 3))
    for attempt in range(max_retries):
        try:
            response = await self._client.request(method, endpoint, **kwargs)
            ls_utils.raise_for_status_with_text(response)
            return response
        except httpx.HTTPStatusError as e:
            # Last attempt: surface the error instead of sleeping again.
            if attempt == max_retries - 1:
                raise ls_utils.LangSmithAPIError(f"HTTP error: {repr(e)}")
            await asyncio.sleep(2**attempt)
        except httpx.RequestError as e:
            if attempt == max_retries - 1:
                raise ls_utils.LangSmithConnectionError(f"Request error: {repr(e)}")
            await asyncio.sleep(2**attempt)
    # Reached only if max_retries <= 0 (the loop body never ran).
    raise ls_utils.LangSmithAPIError(
        "Unexpected error connecting to the LangSmith API"
    )
async def _aget_paginated_list(
    self,
    path: str,
    params: Optional[Dict[str, Any]] = None,
) -> AsyncIterator[Dict[str, Any]]:
    """Yield items from an offset/limit paginated GET endpoint.

    Args:
        path: Endpoint path relative to the API base URL.
        params: Query params; ``offset`` defaults to 0 and ``limit`` to 100.
    """
    params = params or {}
    offset = params.get("offset", 0)
    params["limit"] = params.get("limit", 100)
    while True:
        params["offset"] = offset
        response = await self._arequest_with_retries("GET", path, params=params)
        items = response.json()
        if not items:
            break
        for item in items:
            yield item
        # A short page means the server has no more results.
        if len(items) < params["limit"]:
            break
        offset += len(items)
async def _aget_cursor_paginated_list(
    self,
    path: str,
    *,
    body: Optional[dict] = None,
    request_method: str = "POST",
    data_key: str = "runs",
) -> AsyncIterator[dict]:
    """Yield items from a cursor-paginated endpoint.

    Args:
        path: Endpoint path relative to the API base URL.
        body: Initial request body; a ``cursor`` field is added per page.
        request_method: HTTP method to use. Defaults to POST.
        data_key: Response-body key holding the list of items.
    """
    # Copy so the caller's dict is not mutated when we inject the cursor.
    params_ = body.copy() if body else {}
    while True:
        response = await self._arequest_with_retries(
            request_method,
            path,
            content=ls_client._dumps_json(params_),
        )
        response_body = response.json()
        if not response_body:
            break
        if not response_body.get(data_key):
            break
        for run in response_body[data_key]:
            yield run
        cursors = response_body.get("cursors")
        if not cursors:
            break
        if not cursors.get("next"):
            break
        # Advance to the next page.
        params_["cursor"] = cursors["next"]
async def create_run(
    self,
    name: str,
    inputs: Dict[str, Any],
    run_type: str,
    *,
    project_name: Optional[str] = None,
    revision_id: Optional[ls_client.ID_TYPE] = None,
    **kwargs: Any,
) -> None:
    """Create a run.

    Posts a new run to the API. A run id is generated when not supplied via
    ``kwargs``; the project defaults to the ambient tracer project.
    """
    payload = dict(
        name=name,
        id=kwargs.get("id") or uuid.uuid4(),
        inputs=inputs,
        run_type=run_type,
        session_name=project_name or ls_utils.get_tracer_project(),
        revision_id=revision_id,
    )
    # Caller-provided extras take precedence over the defaults above.
    payload.update(kwargs)
    await self._arequest_with_retries(
        "POST", "/runs", content=ls_client._dumps_json(payload)
    )
async def update_run(
    self,
    run_id: ls_client.ID_TYPE,
    **kwargs: Any,
) -> None:
    """Update a run.

    Patches the run identified by ``run_id`` with the given fields.
    """
    run_uuid = ls_client._as_uuid(run_id)
    payload = {**kwargs, "id": run_uuid}
    await self._arequest_with_retries(
        "PATCH",
        f"/runs/{run_uuid}",
        content=ls_client._dumps_json(payload),
    )
async def read_run(self, run_id: ls_client.ID_TYPE) -> ls_schemas.Run:
    """Fetch a single run by id and deserialize it into a ``Run``."""
    resp = await self._arequest_with_retries(
        "GET",
        f"/runs/{ls_client._as_uuid(run_id)}",
    )
    return ls_schemas.Run(**resp.json())
async def list_runs(
    self,
    *,
    project_id: Optional[
        Union[ls_client.ID_TYPE, Sequence[ls_client.ID_TYPE]]
    ] = None,
    project_name: Optional[Union[str, Sequence[str]]] = None,
    run_type: Optional[str] = None,
    trace_id: Optional[ls_client.ID_TYPE] = None,
    reference_example_id: Optional[ls_client.ID_TYPE] = None,
    query: Optional[str] = None,
    filter: Optional[str] = None,
    trace_filter: Optional[str] = None,
    tree_filter: Optional[str] = None,
    is_root: Optional[bool] = None,
    parent_run_id: Optional[ls_client.ID_TYPE] = None,
    start_time: Optional[datetime.datetime] = None,
    error: Optional[bool] = None,
    run_ids: Optional[Sequence[ls_client.ID_TYPE]] = None,
    select: Optional[Sequence[str]] = None,
    limit: Optional[int] = None,
    **kwargs: Any,
) -> AsyncIterator[ls_schemas.Run]:
    """List runs from the LangSmith API.

    Parameters
    ----------
    project_id : UUID or None, default=None
        The ID(s) of the project to filter by.
    project_name : str or None, default=None
        The name(s) of the project to filter by.
    run_type : str or None, default=None
        The type of the runs to filter by.
    trace_id : UUID or None, default=None
        The ID of the trace to filter by.
    reference_example_id : UUID or None, default=None
        The ID of the reference example to filter by.
    query : str or None, default=None
        The query string to filter by.
    filter : str or None, default=None
        The filter string to filter by.
    trace_filter : str or None, default=None
        Filter to apply to the ROOT run in the trace tree. This is meant to
        be used in conjunction with the regular `filter` parameter to let you
        filter runs by attributes of the root run within a trace.
    tree_filter : str or None, default=None
        Filter to apply to OTHER runs in the trace tree, including
        sibling and child runs. This is meant to be used in conjunction with
        the regular `filter` parameter to let you filter runs by attributes
        of any run within a trace.
    is_root : bool or None, default=None
        Whether to filter by root runs.
    parent_run_id : UUID or None, default=None
        The ID of the parent run to filter by.
    start_time : datetime or None, default=None
        The start time to filter by.
    error : bool or None, default=None
        Whether to filter by error status.
    run_ids : List[str or UUID] or None, default=None
        The IDs of the runs to filter by.
    select : Sequence[str] or None, default=None
        The fields of each run to return (e.g. ["inputs", "outputs"]).
    limit : int or None, default=None
        The maximum number of runs to return.
    **kwargs : Any
        Additional keyword arguments.

    Yields:
    ------
    Run
        The runs.

    Examples:
    --------
    List all runs in a project:

    .. code-block:: python

        project_runs = client.list_runs(project_name="<your_project>")

    List LLM and Chat runs in the last 24 hours:

    .. code-block:: python

        todays_llm_runs = client.list_runs(
            project_name="<your_project>",
            start_time=datetime.now() - timedelta(days=1),
            run_type="llm",
        )

    List root traces in a project:

    .. code-block:: python

        root_runs = client.list_runs(project_name="<your_project>", is_root=1)

    List runs without errors:

    .. code-block:: python

        correct_runs = client.list_runs(project_name="<your_project>", error=False)

    List runs and only return their inputs/outputs (to speed up the query):

    .. code-block:: python

        input_output_runs = client.list_runs(
            project_name="<your_project>", select=["inputs", "outputs"]
        )

    List runs by run ID:

    .. code-block:: python

        run_ids = [
            "a36092d2-4ad5-4fb4-9c0d-0dba9a2ed836",
            "9398e6be-964f-4aa4-8ae9-ad78cd4b7074",
        ]
        selected_runs = client.list_runs(id=run_ids)

    List all "chain" type runs that took more than 10 seconds and had
    `total_tokens` greater than 5000:

    .. code-block:: python

        chain_runs = client.list_runs(
            project_name="<your_project>",
            filter='and(eq(run_type, "chain"), gt(latency, 10), gt(total_tokens, 5000))',
        )

    List all runs called "extractor" whose root of the trace was assigned feedback "user_score" score of 1:

    .. code-block:: python

        good_extractor_runs = client.list_runs(
            project_name="<your_project>",
            filter='eq(name, "extractor")',
            trace_filter='and(eq(feedback_key, "user_score"), eq(feedback_score, 1))',
        )

    List all runs that started after a specific timestamp and either have "error" not equal to null or a "Correctness" feedback score equal to 0:

    .. code-block:: python

        complex_runs = client.list_runs(
            project_name="<your_project>",
            filter='and(gt(start_time, "2023-07-15T12:34:56Z"), or(neq(error, null), and(eq(feedback_key, "Correctness"), eq(feedback_score, 0.0))))',
        )

    List all runs where `tags` include "experimental" or "beta" and `latency` is greater than 2 seconds:

    .. code-block:: python

        tagged_runs = client.list_runs(
            project_name="<your_project>",
            filter='and(or(has(tags, "experimental"), has(tags, "beta")), gt(latency, 2))',
        )
    """
    # Collect project ids from both `project_id` and resolved `project_name`s.
    project_ids = []
    if isinstance(project_id, (uuid.UUID, str)):
        project_ids.append(project_id)
    elif isinstance(project_id, list):
        project_ids.extend(project_id)
    if project_name is not None:
        if isinstance(project_name, str):
            project_name = [project_name]
        # Resolve names to projects concurrently.
        projects = await asyncio.gather(
            *[self.read_project(project_name=name) for name in project_name]
        )
        project_ids.extend([project.id for project in projects])
    body_query: Dict[str, Any] = {
        "session": project_ids if project_ids else None,
        "run_type": run_type,
        "reference_example": (
            [reference_example_id] if reference_example_id else None
        ),
        "query": query,
        "filter": filter,
        "trace_filter": trace_filter,
        "tree_filter": tree_filter,
        "is_root": is_root,
        "parent_run": parent_run_id,
        "start_time": start_time.isoformat() if start_time else None,
        "error": error,
        "id": run_ids,
        "trace": trace_id,
        "select": select,
        **kwargs,
    }
    if project_ids:
        # Normalize (and validate) all session ids to UUID strings.
        body_query["session"] = [
            str(ls_client._as_uuid(id_)) for id_ in project_ids
        ]
    # Drop unset filters so the server applies its own defaults.
    body = {k: v for k, v in body_query.items() if v is not None}
    ix = 0
    # `limit` is enforced client-side while paging through the cursor.
    async for run in self._aget_cursor_paginated_list("/runs/query", body=body):
        yield ls_schemas.Run(**run)
        ix += 1
        if limit is not None and ix >= limit:
            break
async def share_run(
    self, run_id: ls_client.ID_TYPE, *, share_id: Optional[ls_client.ID_TYPE] = None
) -> str:
    """Get a share link for a run asynchronously.

    Args:
        run_id (ID_TYPE): The ID of the run to share.
        share_id (Optional[ID_TYPE], optional): Custom share ID.
            If not provided, a random UUID will be generated.

    Returns:
        str: The URL of the shared run.

    Raises:
        httpx.HTTPStatusError: If the API request fails.
    """
    run_id_ = ls_client._as_uuid(run_id, "run_id")
    data = {
        "run_id": str(run_id_),
        "share_token": str(share_id or uuid.uuid4()),
    }
    # PUT is idempotent here: re-sharing returns the existing token.
    response = await self._arequest_with_retries(
        "PUT",
        f"/runs/{run_id_}/share",
        content=ls_client._dumps_json(data),
    )
    ls_utils.raise_for_status_with_text(response)
    share_token = response.json()["share_token"]
    return f"{self._host_url}/public/{share_token}/r"
async def run_is_shared(self, run_id: ls_client.ID_TYPE) -> bool:
    """Return True if the given run currently has a public share link."""
    shared_link = await self.read_run_shared_link(
        ls_client._as_uuid(run_id, "run_id")
    )
    return shared_link is not None
async def read_run_shared_link(self, run_id: ls_client.ID_TYPE) -> Optional[str]:
    """Retrieve the shared link for a specific run asynchronously.

    Args:
        run_id (ID_TYPE): The ID of the run.

    Returns:
        Optional[str]: The shared link for the run, or None if the link is not
        available.

    Raises:
        httpx.HTTPStatusError: If the API request fails.
    """
    response = await self._arequest_with_retries(
        "GET",
        f"/runs/{ls_client._as_uuid(run_id, 'run_id')}/share",
    )
    ls_utils.raise_for_status_with_text(response)
    result = response.json()
    # The endpoint returns null (or an empty object) when not shared.
    if result is None or "share_token" not in result:
        return None
    return f"{self._host_url}/public/{result['share_token']}/r"
async def create_project(
    self,
    project_name: str,
    **kwargs: Any,
) -> ls_schemas.TracerSession:
    """Create a tracer project (session) with the given name."""
    payload = {"name": project_name}
    payload.update(kwargs)
    resp = await self._arequest_with_retries(
        "POST", "/sessions", content=ls_client._dumps_json(payload)
    )
    return ls_schemas.TracerSession(**resp.json())
async def read_project(
    self,
    project_name: Optional[str] = None,
    project_id: Optional[ls_client.ID_TYPE] = None,
) -> ls_schemas.TracerSession:
    """Read a project by id or by name.

    Args:
        project_name: Name to look up (used only when project_id is unset).
        project_id: Project id; takes precedence over project_name.

    Raises:
        ValueError: If neither identifier is provided.
        ls_utils.LangSmithNotFoundError: If a name lookup returns no project.
    """
    if project_id:
        response = await self._arequest_with_retries(
            "GET", f"/sessions/{ls_client._as_uuid(project_id)}"
        )
    elif project_name:
        response = await self._arequest_with_retries(
            "GET", "/sessions", params={"name": project_name}
        )
    else:
        raise ValueError("Either project_name or project_id must be provided")
    data = response.json()
    # Name lookups return a list; take the first match.
    if isinstance(data, list):
        if not data:
            raise ls_utils.LangSmithNotFoundError(
                f"Project {project_name} not found"
            )
        return ls_schemas.TracerSession(**data[0])
    return ls_schemas.TracerSession(**data)
async def delete_project(
    self, *, project_name: Optional[str] = None, project_id: Optional[str] = None
) -> None:
    """Delete a project from LangSmith.

    Parameters
    ----------
    project_name : str or None, default=None
        The name of the project to delete.
    project_id : str or None, default=None
        The ID of the project to delete.

    Raises:
    ------
    ValueError
        If neither identifier is provided, or the project cannot be resolved.
    """
    if project_id is None and project_name is None:
        raise ValueError("Either project_name or project_id must be provided")
    if project_id is None:
        # Resolve the name to an id before deleting.
        project = await self.read_project(project_name=project_name)
        project_id = str(project.id)
    if not project_id:
        raise ValueError("Project not found")
    await self._arequest_with_retries(
        "DELETE",
        f"/sessions/{ls_client._as_uuid(project_id)}",
    )
async def create_dataset(
    self,
    dataset_name: str,
    **kwargs: Any,
) -> ls_schemas.Dataset:
    """Create a dataset with the given name."""
    payload = {"name": dataset_name}
    payload.update(kwargs)
    resp = await self._arequest_with_retries(
        "POST", "/datasets", content=ls_client._dumps_json(payload)
    )
    return ls_schemas.Dataset(**resp.json())
async def read_dataset(
    self,
    dataset_name: Optional[str] = None,
    dataset_id: Optional[ls_client.ID_TYPE] = None,
) -> ls_schemas.Dataset:
    """Read a dataset by id or by name.

    Args:
        dataset_name: Name to look up (used only when dataset_id is unset).
        dataset_id: Dataset id; takes precedence over dataset_name.

    Raises:
        ValueError: If neither identifier is provided.
        ls_utils.LangSmithNotFoundError: If a name lookup returns no dataset.
    """
    if dataset_id:
        response = await self._arequest_with_retries(
            "GET", f"/datasets/{ls_client._as_uuid(dataset_id)}"
        )
    elif dataset_name:
        response = await self._arequest_with_retries(
            "GET", "/datasets", params={"name": dataset_name}
        )
    else:
        raise ValueError("Either dataset_name or dataset_id must be provided")
    data = response.json()
    # Name lookups return a list; take the first match.
    if isinstance(data, list):
        if not data:
            raise ls_utils.LangSmithNotFoundError(
                f"Dataset {dataset_name} not found"
            )
        return ls_schemas.Dataset(**data[0])
    return ls_schemas.Dataset(**data)
async def delete_dataset(self, dataset_id: ls_client.ID_TYPE) -> None:
    """Delete a dataset by id."""
    await self._arequest_with_retries(
        "DELETE",
        f"/datasets/{ls_client._as_uuid(dataset_id)}",
    )
async def list_datasets(
    self,
    **kwargs: Any,
) -> AsyncIterator[ls_schemas.Dataset]:
    """List datasets, yielding each page's items as ``Dataset`` objects.

    Args:
        **kwargs: Query parameters forwarded to the /datasets endpoint.
    """
    async for dataset in self._aget_paginated_list("/datasets", params=kwargs):
        yield ls_schemas.Dataset(**dataset)
async def create_example(
    self,
    inputs: Dict[str, Any],
    outputs: Optional[Dict[str, Any]] = None,
    dataset_id: Optional[ls_client.ID_TYPE] = None,
    dataset_name: Optional[str] = None,
    **kwargs: Any,
) -> ls_schemas.Example:
    """Create an example in a dataset.

    The target dataset may be given by id or by name; a name is resolved
    to an id with an extra API call.

    Raises:
        ValueError: If neither dataset_id nor dataset_name is provided.
    """
    if dataset_id is None and dataset_name is None:
        raise ValueError("Either dataset_id or dataset_name must be provided")
    if dataset_id is None:
        resolved = await self.read_dataset(dataset_name=dataset_name)
        dataset_id = resolved.id
    payload = {
        "inputs": inputs,
        "outputs": outputs,
        "dataset_id": str(dataset_id),
    }
    payload.update(kwargs)
    resp = await self._arequest_with_retries(
        "POST", "/examples", content=ls_client._dumps_json(payload)
    )
    return ls_schemas.Example(**resp.json())
async def read_example(self, example_id: ls_client.ID_TYPE) -> ls_schemas.Example:
    """Read an example by id."""
    response = await self._arequest_with_retries(
        "GET", f"/examples/{ls_client._as_uuid(example_id)}"
    )
    return ls_schemas.Example(**response.json())
async def list_examples(
    self,
    *,
    dataset_id: Optional[ls_client.ID_TYPE] = None,
    dataset_name: Optional[str] = None,
    **kwargs: Any,
) -> AsyncIterator[ls_schemas.Example]:
    """List examples, optionally scoped to one dataset.

    Args:
        dataset_id: Dataset id filter; takes precedence over dataset_name.
        dataset_name: Dataset name; resolved to an id with an extra call.
        **kwargs: Extra query parameters for the /examples endpoint.
    """
    params = kwargs.copy()
    if dataset_id:
        params["dataset"] = ls_client._as_uuid(dataset_id)
    elif dataset_name:
        dataset = await self.read_dataset(dataset_name=dataset_name)
        params["dataset"] = dataset.id
    async for example in self._aget_paginated_list("/examples", params=params):
        yield ls_schemas.Example(**example)
async def create_feedback(
    self,
    run_id: Optional[ls_client.ID_TYPE],
    key: str,
    score: Optional[float] = None,
    value: Optional[Any] = None,
    comment: Optional[str] = None,
    **kwargs: Any,
) -> ls_schemas.Feedback:
    """Create feedback for a run.

    Args:
        run_id (Optional[ls_client.ID_TYPE]): The ID of the run to provide feedback for.
            Can be None for project-level feedback.
        key (str): The name of the metric or aspect this feedback is about.
        score (Optional[float]): The score to rate this run on the metric or aspect.
        value (Optional[Any]): The display value or non-numeric value for this feedback.
        comment (Optional[str]): A comment about this feedback.
        **kwargs: Additional keyword arguments to include in the feedback data.

    Returns:
        ls_schemas.Feedback: The created feedback object.

    Raises:
        httpx.HTTPStatusError: If the API request fails.
    """  # noqa: E501
    data = {
        # accept_null allows project-level feedback with no run attached.
        "run_id": ls_client._ensure_uuid(run_id, accept_null=True),
        "key": key,
        "score": score,
        "value": value,
        "comment": comment,
        **kwargs,
    }
    response = await self._arequest_with_retries(
        "POST", "/feedback", content=ls_client._dumps_json(data)
    )
    return ls_schemas.Feedback(**response.json())
async def create_feedback_from_token(
    self,
    token_or_url: Union[str, uuid.UUID],
    score: Union[float, int, bool, None] = None,
    *,
    value: Union[float, int, bool, str, dict, None] = None,
    correction: Union[dict, None] = None,
    comment: Union[str, None] = None,
    metadata: Optional[dict] = None,
) -> None:
    """Create feedback from a presigned token or URL.

    Args:
        token_or_url (Union[str, uuid.UUID]): The token or URL from which to create
            feedback.
        score (Union[float, int, bool, None], optional): The score of the feedback.
            Defaults to None.
        value (Union[float, int, bool, str, dict, None], optional): The value of the
            feedback. Defaults to None.
        correction (Union[dict, None], optional): The correction of the feedback.
            Defaults to None.
        comment (Union[str, None], optional): The comment of the feedback. Defaults
            to None.
        metadata (Optional[dict], optional): Additional metadata for the feedback.
            Defaults to None.

    Raises:
        ValueError: If the source API URL is invalid.

    Returns:
        None: This method does not return anything.
    """
    source_api_url, token_uuid = ls_client._parse_token_or_url(
        token_or_url, self._api_url, num_parts=1
    )
    # Presigned tokens are only valid against the API that issued them.
    if source_api_url != self._api_url:
        raise ValueError(f"Invalid source API URL. {source_api_url}")
    response = await self._arequest_with_retries(
        "POST",
        f"/feedback/tokens/{ls_client._as_uuid(token_uuid)}",
        content=ls_client._dumps_json(
            {
                "score": score,
                "value": value,
                "correction": correction,
                "comment": comment,
                "metadata": metadata,
                # TODO: Add ID once the API supports it.
            }
        ),
    )
    ls_utils.raise_for_status_with_text(response)
async def create_presigned_feedback_token(
    self,
    run_id: ls_client.ID_TYPE,
    feedback_key: str,
    *,
    expiration: Optional[datetime.datetime | datetime.timedelta] = None,
    feedback_config: Optional[ls_schemas.FeedbackConfig] = None,
    feedback_id: Optional[ls_client.ID_TYPE] = None,
) -> ls_schemas.FeedbackIngestToken:
    """Create a pre-signed URL to send feedback data to.

    This is useful for giving browser-based clients a way to upload
    feedback data directly to LangSmith without accessing the
    API key.

    Args:
        run_id: The run the token will attach feedback to.
        feedback_key: The feedback key (metric name) for the token.
        expiration: The expiration time of the pre-signed URL.
            Either a datetime or a timedelta offset from now.
            Default to 3 hours.
        feedback_config: FeedbackConfig or None.
            If creating a feedback_key for the first time,
            this defines how the metric should be interpreted,
            such as a continuous score (w/ optional bounds),
            or distribution over categorical values.
        feedback_id: The ID of the feedback to create. If not provided, a new
            feedback will be created.

    Returns:
        The pre-signed URL for uploading feedback data.

    Raises:
        ValueError: If ``expiration`` is neither a datetime nor a timedelta.
    """
    body: Dict[str, Any] = {
        "run_id": run_id,
        "feedback_key": feedback_key,
        "feedback_config": feedback_config,
        "id": feedback_id or str(uuid.uuid4()),
    }
    if expiration is None:
        # Default lifetime: 3 hours from now.
        body["expires_in"] = ls_schemas.TimeDeltaInput(
            days=0,
            hours=3,
            minutes=0,
        )
    elif isinstance(expiration, datetime.datetime):
        body["expires_at"] = expiration.isoformat()
    elif isinstance(expiration, datetime.timedelta):
        # Decompose the timedelta into whole days/hours/minutes.
        body["expires_in"] = ls_schemas.TimeDeltaInput(
            days=expiration.days,
            hours=expiration.seconds // 3600,
            minutes=(expiration.seconds % 3600) // 60,
        )
    else:
        raise ValueError(
            f"Invalid expiration type: {type(expiration)}. "
            "Expected datetime.datetime or datetime.timedelta."
        )
    response = await self._arequest_with_retries(
        "POST",
        "/feedback/tokens",
        content=ls_client._dumps_json(body),
    )
    return ls_schemas.FeedbackIngestToken(**response.json())
async def read_feedback(
    self, feedback_id: ls_client.ID_TYPE
) -> ls_schemas.Feedback:
    """Read a single feedback record by id."""
    response = await self._arequest_with_retries(
        "GET", f"/feedback/{ls_client._as_uuid(feedback_id)}"
    )
    return ls_schemas.Feedback(**response.json())
async def list_feedback(
    self,
    *,
    run_ids: Optional[Sequence[ls_client.ID_TYPE]] = None,
    feedback_key: Optional[Sequence[str]] = None,
    feedback_source_type: Optional[Sequence[ls_schemas.FeedbackSourceType]] = None,
    limit: Optional[int] = None,
    **kwargs: Any,
) -> AsyncIterator[ls_schemas.Feedback]:
    """List feedback, optionally filtered by run, key, and source type.

    Args:
        run_ids: Restrict results to feedback on these runs.
        feedback_key: Restrict results to these feedback keys.
        feedback_source_type: Restrict results to these source types.
        limit: Maximum number of feedback items to yield.
        **kwargs: Extra query parameters for the /feedback endpoint.
    """
    params = {
        "run": (
            [str(ls_client._as_uuid(id_)) for id_ in run_ids] if run_ids else None
        ),
        # Page size is capped at 100; `limit` is enforced client-side below.
        "limit": min(limit, 100) if limit is not None else 100,
        **kwargs,
    }
    if feedback_key is not None:
        params["key"] = feedback_key
    if feedback_source_type is not None:
        params["source"] = feedback_source_type
    ix = 0
    async for feedback in self._aget_paginated_list("/feedback", params=params):
        yield ls_schemas.Feedback(**feedback)
        ix += 1
        if limit is not None and ix >= limit:
            break
@ls_beta.warn_beta
async def index_dataset(
    self,
    *,
    dataset_id: ls_client.ID_TYPE,
    tag: str = "latest",
    **kwargs: Any,
) -> None:
    """Enable dataset indexing. Examples are indexed by their inputs.

    This enables searching for similar examples by inputs with
    ``client.similar_examples()``.

    Args:
        dataset_id (UUID): The ID of the dataset to index.
        tag (str, optional): The version of the dataset to index. If 'latest'
            then any updates to the dataset (additions, updates, deletions of
            examples) will be reflected in the index.

    Returns:
        None

    Raises:
        requests.HTTPError
    """  # noqa: E501
    dataset_id = ls_client._as_uuid(dataset_id, "dataset_id")
    resp = await self._arequest_with_retries(
        "POST",
        f"/datasets/{dataset_id}/index",
        content=ls_client._dumps_json({"tag": tag, **kwargs}),
    )
    ls_utils.raise_for_status_with_text(resp)
@ls_beta.warn_beta
async def similar_examples(
    self,
    inputs: dict,
    /,
    *,
    limit: int,
    dataset_id: ls_client.ID_TYPE,
    filter: Optional[str] = None,
    **kwargs: Any,
) -> List[ls_schemas.ExampleSearch]:
    r"""Retrieve the dataset examples whose inputs best match the current inputs.

    **Note**: Must have few-shot indexing enabled for the dataset. See
    ``client.index_dataset()``.

    Args:
        inputs (dict): The inputs to use as a search query. Must match the dataset
            input schema. Must be JSON serializable.
        limit (int): The maximum number of examples to return.
        dataset_id (str or UUID): The ID of the dataset to search over.
        filter (str, optional): A filter string to apply to the search results. Uses
            the same syntax as the `filter` parameter in `list_runs()`. Only a subset
            of operations are supported. Defaults to None.
        kwargs (Any): Additional keyword args to pass as part of request body.

    Returns:
        List of ExampleSearch objects.

    Example:
        .. code-block:: python

            from langsmith import Client

            client = Client()
            await client.similar_examples(
                {"question": "When would i use the runnable generator"},
                limit=3,
                dataset_id="...",
            )

        .. code-block:: pycon

            [
                ExampleSearch(
                    inputs={'question': 'How do I cache a Chat model? What caches can I use?'},
                    outputs={'answer': 'You can use LangChain\'s caching layer for Chat Models. This can save you money by reducing the number of API calls you make to the LLM provider, if you\'re often requesting the same completion multiple times, and speed up your application.\n\n```python\n\nfrom langchain.cache import InMemoryCache\nlangchain.llm_cache = InMemoryCache()\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\')\n\n```\n\nYou can also use SQLite Cache which uses a SQLite database:\n\n```python\n rm .langchain.db\n\nfrom langchain.cache import SQLiteCache\nlangchain.llm_cache = SQLiteCache(database_path=".langchain.db")\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\') \n```\n'},
                    metadata=None,
                    id=UUID('b2ddd1c4-dff6-49ae-8544-f48e39053398'),
                    dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40')
                ),
                ExampleSearch(
                    inputs={'question': "What's a runnable lambda?"},
                    outputs={'answer': "A runnable lambda is an object that implements LangChain's `Runnable` interface and runs a callbale (i.e., a function). Note the function must accept a single argument."},
                    metadata=None,
                    id=UUID('f94104a7-2434-4ba7-8293-6a283f4860b4'),
                    dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40')
                ),
                ExampleSearch(
                    inputs={'question': 'Show me how to use RecursiveURLLoader'},
                    outputs={'answer': 'The RecursiveURLLoader comes from the langchain.document_loaders.recursive_url_loader module. Here\'s an example of how to use it:\n\n```python\nfrom langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n\n# Create an instance of RecursiveUrlLoader with the URL you want to load\nloader = RecursiveUrlLoader(url="https://example.com")\n\n# Load all child links from the URL page\nchild_links = loader.load()\n\n# Print the child links\nfor link in child_links:\n    print(link)\n```\n\nMake sure to replace "https://example.com" with the actual URL you want to load. The load() method returns a list of child links found on the URL page. You can iterate over this list to access each child link.'},
                    metadata=None,
                    id=UUID('0308ea70-a803-4181-a37d-39e95f138f8c'),
                    dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40')
                ),
            ]
    """  # noqa: E501
    dataset_id = ls_client._as_uuid(dataset_id, "dataset_id")
    req = {
        "inputs": inputs,
        "limit": limit,
        **kwargs,
    }
    if filter:
        req["filter"] = filter
    resp = await self._arequest_with_retries(
        "POST",
        f"/datasets/{dataset_id}/search",
        content=ls_client._dumps_json(req),
    )
    ls_utils.raise_for_status_with_text(resp)
    examples = []
    # Each hit is tagged with the dataset it came from for traceability.
    for ex in resp.json()["examples"]:
        examples.append(ls_schemas.ExampleSearch(**ex, dataset_id=dataset_id))
    return examples
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/client.py | """Client for interacting with the LangSmith API.
Use the client to customize API keys / workspace connections, SSL certs,
etc. for tracing.
Also used to create, read, update, and delete LangSmith resources
such as runs (~trace spans), datasets, examples (~records),
feedback (~metrics), projects (tracer sessions/groups), etc.
For detailed API documentation, visit: https://docs.smith.langchain.com/.
"""
from __future__ import annotations
import atexit
import collections
import concurrent.futures as cf
import contextlib
import datetime
import functools
import importlib
import importlib.metadata
import io
import itertools
import json
import logging
import os
import random
import threading
import time
import traceback
import typing
import uuid
import warnings
import weakref
from inspect import signature
from queue import PriorityQueue
from typing import (
TYPE_CHECKING,
Any,
AsyncIterable,
Callable,
DefaultDict,
Dict,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from urllib import parse as urllib_parse
import requests
from requests import adapters as requests_adapters
from requests_toolbelt import ( # type: ignore[import-untyped]
multipart as rqtb_multipart,
)
from typing_extensions import TypeGuard, overload
from urllib3.poolmanager import PoolKey # type: ignore[attr-defined, import-untyped]
from urllib3.util import Retry # type: ignore[import-untyped]
import langsmith
from langsmith import env as ls_env
from langsmith import schemas as ls_schemas
from langsmith import utils as ls_utils
from langsmith._internal import _orjson
from langsmith._internal._background_thread import (
TracingQueueItem,
)
from langsmith._internal._background_thread import (
tracing_control_thread_func as _tracing_control_thread_func,
)
from langsmith._internal._beta_decorator import warn_beta
from langsmith._internal._constants import (
_AUTO_SCALE_UP_NTHREADS_LIMIT,
_BLOCKSIZE_BYTES,
_SIZE_LIMIT_BYTES,
)
from langsmith._internal._multipart import (
MultipartPartsAndContext,
join_multipart_parts_and_context,
)
from langsmith._internal._operations import (
SerializedFeedbackOperation,
SerializedRunOperation,
combine_serialized_queue_operations,
serialize_feedback_dict,
serialize_run_dict,
serialized_feedback_operation_to_multipart_parts_and_context,
serialized_run_operation_to_multipart_parts_and_context,
)
from langsmith._internal._serde import dumps_json as _dumps_json
# zoneinfo is only available on Python >= 3.9; provide a stub otherwise so
# isinstance checks and annotations referencing ZoneInfo still work.
try:
    from zoneinfo import ZoneInfo  # type: ignore[import-not-found]
except ImportError:

    class ZoneInfo:  # type: ignore[no-redef]
        """Introduced in python 3.9."""
if TYPE_CHECKING:
import pandas as pd # type: ignore
from langchain_core.runnables import Runnable
from langsmith import schemas
from langsmith.evaluation import evaluator as ls_evaluator
from langsmith.evaluation._arunner import (
AEVALUATOR_T,
ATARGET_T,
AsyncExperimentResults,
)
from langsmith.evaluation._runner import (
COMPARATIVE_EVALUATOR_T,
DATA_T,
EVALUATOR_T,
EXPERIMENT_T,
SUMMARY_EVALUATOR_T,
TARGET_T,
ComparativeExperimentResults,
ExperimentResults,
)
logger = logging.getLogger(__name__)
_urllib3_logger = logging.getLogger("urllib3.connectionpool")

# Header used to authenticate against the LangSmith API.
X_API_KEY = "x-api-key"
# Tracks whether the attachments warning has already been emitted.
WARNED_ATTACHMENTS = False
EMPTY_SEQ: tuple[Dict, ...] = ()
# Multipart boundary shared by requests from this process.
BOUNDARY = uuid.uuid4().hex
# Newer urllib3 exposes a blocksize pool key; detect support at import time.
URLLIB3_SUPPORTS_BLOCKSIZE = "key_blocksize" in signature(PoolKey).parameters
def _parse_token_or_url(
    url_or_token: Union[str, uuid.UUID],
    api_url: str,
    num_parts: int = 2,
    kind: str = "dataset",
) -> Tuple[str, str]:
    """Parse a public dataset URL or share token.

    Args:
        url_or_token: A bare share-token UUID or a public share URL.
        api_url: API URL returned when the URL host isn't a known public host.
        num_parts: Which path segment (counting from the end) holds the token.
        kind: Resource kind, used only in the error message.

    Returns:
        Tuple of (api_url, token_uuid_string).

    Raises:
        ls_utils.LangSmithUserError: If a URL is given whose path has too few
            segments to contain a token at the expected position.
    """
    try:
        # Bare token: a UUID instance or a UUID-formatted string.
        if isinstance(url_or_token, uuid.UUID) or uuid.UUID(url_or_token):
            return api_url, str(url_or_token)
    except ValueError:
        pass
    # Then it's a URL
    parsed_url = urllib_parse.urlparse(str(url_or_token))
    # Extract the UUID from the path
    path_parts = parsed_url.path.split("/")
    if len(path_parts) >= num_parts:
        token_uuid = path_parts[-num_parts]
        # Validates the segment; raises if it is not a UUID.
        _as_uuid(token_uuid, var="token parts")
    else:
        raise ls_utils.LangSmithUserError(f"Invalid public {kind} URL: {url_or_token}")
    # Map known public hosts to their corresponding API hosts.
    if parsed_url.netloc == "smith.langchain.com":
        api_url = "https://api.smith.langchain.com"
    elif parsed_url.netloc == "beta.smith.langchain.com":
        api_url = "https://beta.api.smith.langchain.com"
    return api_url, token_uuid
def _is_langchain_hosted(url: str) -> bool:
"""Check if the URL is langchain hosted.
Parameters
----------
url : str
The URL to check.
Returns:
-------
bool
True if the URL is langchain hosted, False otherwise.
"""
try:
netloc = urllib_parse.urlsplit(url).netloc.split(":")[0]
return netloc.endswith("langchain.com")
except Exception:
return False
# Identifier types accepted throughout the client API.
ID_TYPE = Union[uuid.UUID, str]
# Run types recognized by the LangSmith backend.
RUN_TYPE_T = Literal[
    "tool", "chain", "llm", "retriever", "embedding", "prompt", "parser"
]
def _default_retry_config() -> Retry:
    """Get the default retry configuration.

    If urllib3 version is 1.26 or greater, retry on all methods.

    Returns:
    -------
    Retry
        The default retry configuration.
    """
    retry_params = dict(
        total=3,
        status_forcelist=[502, 503, 504, 408, 425],
        backoff_factor=0.5,
        # Sadly urllib3 1.x doesn't support backoff_jitter
        raise_on_redirect=False,
        raise_on_status=False,
        respect_retry_after_header=True,
    )
    # The `allowed_methods` keyword is only available in urllib3 >= 1.26.
    # Parse only the leading digits of each release component so pre-release
    # or dev versions such as "2.0.0rc1" don't crash int() with ValueError.
    urllib3_version = importlib.metadata.version("urllib3")
    version_parts: list = []
    for component in urllib3_version.split("."):
        digits = ""
        for ch in component:
            if not ch.isdigit():
                break
            digits += ch
        if not digits:
            break
        version_parts.append(int(digits))
    if tuple(version_parts) >= (1, 26):
        # Retry on all methods
        retry_params["allowed_methods"] = None
    return ls_utils.LangSmithRetry(**retry_params)  # type: ignore
def close_session(session: requests.Session) -> None:
    """Release the given HTTP session's resources.

    Registered both as a weakref finalizer and an atexit hook by
    Client.__init__, so a session is closed even if the client is
    never explicitly torn down.

    Parameters
    ----------
    session : Session
        The session to close.
    """
    logger.debug("Closing Client.session")
    session.close()
def _validate_api_key_if_hosted(api_url: str, api_key: Optional[str]) -> None:
    """Warn if no API key is provided for the hosted LangSmith service.

    Parameters
    ----------
    api_url : str
        The API URL.
    api_key : str or None
        The API key.

    Warns
    -----
    LangSmithMissingAPIKeyWarning
        If the API key is not provided when using the hosted service.
        (This warns rather than raises — the previous docstring incorrectly
        documented a LangSmithUserError — so tracing degrades gracefully
        when the key is absent.)
    """
    # If the domain is langchain.com, warn if no api_key is set.
    if not api_key and _is_langchain_hosted(api_url):
        warnings.warn(
            "API key must be provided when using hosted LangSmith API",
            ls_utils.LangSmithMissingAPIKeyWarning,
        )
def _get_tracing_sampling_rate() -> float | None:
    """Read the tracing sampling rate from the environment.

    Returns:
    -------
    float or None
        The sampling rate in [0, 1], or None when the
        TRACING_SAMPLING_RATE environment variable is unset.

    Raises:
    ------
    LangSmithUserError
        If the configured rate falls outside [0, 1].
    """
    raw_rate = ls_utils.get_env_var("TRACING_SAMPLING_RATE")
    if raw_rate is None:
        # Unset means "no sampling": callers treat None as pass-through.
        return None
    rate = float(raw_rate)
    if rate < 0 or rate > 1:
        raise ls_utils.LangSmithUserError(
            "LANGSMITH_TRACING_SAMPLING_RATE must be between 0 and 1 if set."
            f" Got: {rate}"
        )
    return rate
def _get_write_api_urls(_write_api_urls: Optional[Dict[str, str]]) -> Dict[str, str]:
    """Normalize the mapping of write API URLs to API keys.

    Falls back to the LANGSMITH_RUNS_ENDPOINTS environment variable (a JSON
    object mapping URL -> API key) when no mapping is passed in.

    Parameters
    ----------
    _write_api_urls : Dict[str, str] or None
        Mapping of endpoint URL to API key, or None to read the environment.

    Returns:
    -------
    Dict[str, str]
        Mapping of cleaned URLs (surrounding quotes and trailing slashes
        removed) to cleaned API keys.

    Raises:
    ------
    LangSmithUserError
        If any URL in the mapping is empty after stripping whitespace.
    """
    _write_api_urls = _write_api_urls or json.loads(
        os.getenv("LANGSMITH_RUNS_ENDPOINTS", "{}")
    )
    processed_write_api_urls = {}
    for url, api_key in _write_api_urls.items():
        processed_url = url.strip()
        if not processed_url:
            raise ls_utils.LangSmithUserError(
                "LangSmith runs API URL within LANGSMITH_RUNS_ENDPOINTS cannot be empty"
            )
        # Already whitespace-stripped above (the original stripped twice);
        # just remove wrapping quotes and any trailing slash.
        processed_url = processed_url.strip('"').strip("'").rstrip("/")
        processed_api_key = api_key.strip().strip('"').strip("'")
        _validate_api_key_if_hosted(processed_url, processed_api_key)
        processed_write_api_urls[processed_url] = processed_api_key
    return processed_write_api_urls
def _as_uuid(value: ID_TYPE, var: Optional[str] = None) -> uuid.UUID:
    """Coerce ``value`` to a ``uuid.UUID``, raising a user error when invalid."""
    if isinstance(value, uuid.UUID):
        return value
    try:
        return uuid.UUID(value)
    except ValueError as e:
        # Surface a friendly error naming the offending variable.
        raise ls_utils.LangSmithUserError(
            f"{var or 'value'} must be a valid UUID or UUID string. Got {value}"
        ) from e
@typing.overload
def _ensure_uuid(value: Optional[Union[str, uuid.UUID]]) -> uuid.UUID: ...
@typing.overload
def _ensure_uuid(
value: Optional[Union[str, uuid.UUID]], *, accept_null: bool = True
) -> Optional[uuid.UUID]: ...
def _ensure_uuid(value: Optional[Union[str, uuid.UUID]], *, accept_null: bool = False):
if value is None:
if accept_null:
return None
return uuid.uuid4()
return _as_uuid(value)
@functools.lru_cache(maxsize=1)
def _parse_url(url):
parsed_url = urllib_parse.urlparse(url)
host = parsed_url.netloc.split(":")[0]
return host
class _LangSmithHttpAdapter(requests_adapters.HTTPAdapter):
    """HTTPAdapter that can configure urllib3's per-connection blocksize.

    A larger blocksize means fewer, bigger socket writes for large payloads.
    """

    # Attributes persisted when the adapter is pickled (requests convention).
    __attrs__ = [
        "max_retries",
        "config",
        "_pool_connections",
        "_pool_maxsize",
        "_pool_block",
        "_blocksize",
    ]
    def __init__(
        self,
        pool_connections: int = requests_adapters.DEFAULT_POOLSIZE,
        pool_maxsize: int = requests_adapters.DEFAULT_POOLSIZE,
        max_retries: Union[Retry, int, None] = requests_adapters.DEFAULT_RETRIES,
        pool_block: bool = requests_adapters.DEFAULT_POOLBLOCK,
        blocksize: int = 16384,  # default from urllib3.BaseHTTPSConnection
    ) -> None:
        # Stash blocksize BEFORE calling the base constructor: the base
        # __init__ invokes init_poolmanager, which reads self._blocksize.
        self._blocksize = blocksize
        super().__init__(pool_connections, pool_maxsize, max_retries, pool_block)
    def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
        if URLLIB3_SUPPORTS_BLOCKSIZE:
            # Only pass blocksize when this urllib3 build accepts it —
            # urllib3 before 2.0 doesn't support the option at all.
            pool_kwargs["blocksize"] = self._blocksize
        return super().init_poolmanager(connections, maxsize, block, **pool_kwargs)
class Client:
    """Client for interacting with the LangSmith API."""
    # __slots__ keeps per-instance memory small and catches typo'd attribute
    # assignments; "__weakref__" is included so weakref.finalize can track
    # instances for session cleanup.
    __slots__ = [
        "__weakref__",
        "api_url",
        "api_key",
        "retry_config",
        "timeout_ms",
        "session",
        "_get_data_type_cached",
        "_web_url",
        "_tenant_id",
        "tracing_sample_rate",
        "_filtered_post_uuids",
        "tracing_queue",
        "_anonymizer",
        "_hide_inputs",
        "_hide_outputs",
        "_info",
        "_write_api_urls",
        "_settings",
        "_manual_cleanup",
        "_pyo3_client",
    ]
    def __init__(
        self,
        api_url: Optional[str] = None,
        *,
        api_key: Optional[str] = None,
        retry_config: Optional[Retry] = None,
        timeout_ms: Optional[Union[int, Tuple[int, int]]] = None,
        web_url: Optional[str] = None,
        session: Optional[requests.Session] = None,
        auto_batch_tracing: bool = True,
        anonymizer: Optional[Callable[[dict], dict]] = None,
        hide_inputs: Optional[Union[Callable[[dict], dict], bool]] = None,
        hide_outputs: Optional[Union[Callable[[dict], dict], bool]] = None,
        info: Optional[Union[dict, ls_schemas.LangSmithInfo]] = None,
        api_urls: Optional[Dict[str, str]] = None,
    ) -> None:
        """Initialize a Client instance.

        Parameters
        ----------
        api_url : str or None, default=None
            URL for the LangSmith API. Defaults to the LANGCHAIN_ENDPOINT
            environment variable or https://api.smith.langchain.com if not set.
        api_key : str or None, default=None
            API key for the LangSmith API. Defaults to the LANGCHAIN_API_KEY
            environment variable.
        retry_config : Retry or None, default=None
            Retry configuration for the HTTPAdapter.
        timeout_ms : int, tuple[int, int], or None, default=None
            Timeout for the HTTPAdapter. Can also be a 2-tuple of
            (connect timeout, read timeout) to set them separately.
        web_url : str or None, default=None
            URL for the LangSmith web app. Default is auto-inferred from
            the ENDPOINT.
        session: requests.Session or None, default=None
            The session to use for requests. If None, a new session will be
            created.
        auto_batch_tracing : bool, default=True
            Whether to enqueue run payloads on a background tracing queue
            rather than sending them synchronously.
        anonymizer : Optional[Callable[[dict], dict]]
            A function applied for masking serialized run inputs and outputs,
            before sending to the API.
        hide_inputs: Whether to hide run inputs when tracing with this client.
            If True, hides the entire inputs. If a function, applied to
            all run inputs when creating runs.
        hide_outputs: Whether to hide run outputs when tracing with this client.
            If True, hides the entire outputs. If a function, applied to
            all run outputs when creating runs.
        info: Optional[ls_schemas.LangSmithInfo]
            The information about the LangSmith API. If not provided, it will
            be fetched from the API.
        api_urls: Optional[Dict[str, str]]
            A dictionary of write API URLs and their corresponding API keys.
            Useful for multi-tenant setups. Data is only read from the first
            URL in the dictionary. However, ONLY Runs are written (POST and PATCH)
            to all URLs in the dictionary. Feedback, sessions, datasets, examples,
            annotation queues and evaluation results are only written to the first.

        Raises:
        ------
        LangSmithUserError
            If the API key is not provided when using the hosted service.
            If both api_url and api_urls are provided.
        """
        # Single-URL and multi-URL configuration are mutually exclusive,
        # both as arguments and as environment variables.
        if api_url and api_urls:
            raise ls_utils.LangSmithUserError(
                "You cannot provide both api_url and api_urls."
            )
        if (
            os.getenv("LANGSMITH_ENDPOINT") or os.getenv("LANGCHAIN_ENDPOINT")
        ) and os.getenv("LANGSMITH_RUNS_ENDPOINTS"):
            raise ls_utils.LangSmithUserError(
                "You cannot provide both LANGSMITH_ENDPOINT / LANGCHAIN_ENDPOINT "
                "and LANGSMITH_RUNS_ENDPOINTS."
            )
        self.tracing_sample_rate = _get_tracing_sampling_rate()
        # Trace ids dropped at POST time, remembered so their PATCHes can be
        # dropped too (see _filter_for_sampling).
        self._filtered_post_uuids: set[uuid.UUID] = set()
        self._write_api_urls: Mapping[str, Optional[str]] = _get_write_api_urls(
            api_urls
        )
        if self._write_api_urls:
            # Multi-endpoint mode: reads go to the first configured URL.
            self.api_url = next(iter(self._write_api_urls))
            self.api_key: Optional[str] = self._write_api_urls[self.api_url]
        else:
            self.api_url = ls_utils.get_api_url(api_url)
            self.api_key = ls_utils.get_api_key(api_key)
            _validate_api_key_if_hosted(self.api_url, self.api_key)
            self._write_api_urls = {self.api_url: self.api_key}
        self.retry_config = retry_config or _default_retry_config()
        # Normalize to a (connect, read) tuple, in milliseconds.
        self.timeout_ms = (
            (timeout_ms, timeout_ms)
            if isinstance(timeout_ms, int)
            else (timeout_ms or (10_000, 90_001))
        )
        self._web_url = web_url
        self._tenant_id: Optional[uuid.UUID] = None
        # Create a session and register a finalizer to close it
        session_ = session if session else requests.Session()
        self.session = session_
        self._info = (
            info
            if info is None or isinstance(info, ls_schemas.LangSmithInfo)
            else ls_schemas.LangSmithInfo(**info)
        )
        weakref.finalize(self, close_session, self.session)
        atexit.register(close_session, session_)
        # Initialize auto batching
        if auto_batch_tracing:
            self.tracing_queue: Optional[PriorityQueue] = PriorityQueue()
            threading.Thread(
                target=_tracing_control_thread_func,
                # arg must be a weakref to self to avoid the Thread object
                # preventing garbage collection of the Client object
                args=(weakref.ref(self),),
            ).start()
        else:
            self.tracing_queue = None
        # Mount the HTTPAdapter with the retry configuration.
        adapter = _LangSmithHttpAdapter(
            max_retries=self.retry_config,
            blocksize=_BLOCKSIZE_BYTES,
            # We need to set the pool_maxsize to a value greater than the
            # number of threads used for batch tracing, plus 1 for other
            # requests.
            pool_maxsize=_AUTO_SCALE_UP_NTHREADS_LIMIT + 1,
        )
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)
        self._get_data_type_cached = functools.lru_cache(maxsize=10)(
            self._get_data_type
        )
        self._anonymizer = anonymizer
        # hide_inputs/hide_outputs may be a bool or a masking callable; when
        # not given, fall back to the HIDE_INPUTS/HIDE_OUTPUTS env vars.
        self._hide_inputs = (
            hide_inputs
            if hide_inputs is not None
            else ls_utils.get_env_var("HIDE_INPUTS") == "true"
        )
        self._hide_outputs = (
            hide_outputs
            if hide_outputs is not None
            else ls_utils.get_env_var("HIDE_OUTPUTS") == "true"
        )
        # To trigger this code, set the `LANGSMITH_USE_PYO3_CLIENT` env var to any value.
        self._pyo3_client = None
        if ls_utils.get_env_var("USE_PYO3_CLIENT") is not None:
            langsmith_pyo3 = None
            try:
                import langsmith_pyo3  # type: ignore[import-not-found, no-redef]
            except ImportError as e:
                logger.warning(
                    "Failed to import `langsmith_pyo3` when PyO3 client was requested, "
                    "falling back to Python impl: %s",
                    repr(e),
                )
            if langsmith_pyo3:
                # TODO: tweak these constants as needed
                queue_capacity = 1_000_000
                batch_size = 100
                batch_timeout_millis = 1000
                worker_threads = 1
                try:
                    self._pyo3_client = langsmith_pyo3.BlockingTracingClient(
                        self.api_url,
                        self.api_key,
                        queue_capacity,
                        batch_size,
                        batch_timeout_millis,
                        worker_threads,
                    )
                except Exception as e:
                    logger.warning(
                        "Failed to instantiate `langsmith_pyo3.BlockingTracingClient` "
                        "when PyO3 client was requested, falling back to Python impl: %s",
                        repr(e),
                    )
        self._settings: Union[ls_schemas.LangSmithSettings, None] = None
        self._manual_cleanup = False
def _repr_html_(self) -> str:
"""Return an HTML representation of the instance with a link to the URL.
Returns:
-------
str
The HTML representation of the instance.
"""
link = self._host_url
return f'<a href="{link}", target="_blank" rel="noopener">LangSmith Client</a>'
def __repr__(self) -> str:
"""Return a string representation of the instance with a link to the URL.
Returns:
-------
str
The string representation of the instance.
"""
return f"Client (API URL: {self.api_url})"
    @property
    def _host(self) -> str:
        """Hostname (without port) of the configured API URL."""
        # _parse_url is lru_cached at module level, so repeat access is cheap.
        return _parse_url(self.api_url)
    @property
    def _host_url(self) -> str:
        """The web host url."""
        # Prefers an explicitly configured web URL, otherwise derives one
        # from the API URL.
        return ls_utils.get_host_url(self._web_url, self.api_url)
@property
def _headers(self) -> Dict[str, str]:
"""Get the headers for the API request.
Returns:
-------
Dict[str, str]
The headers for the API request.
"""
headers = {
"User-Agent": f"langsmith-py/{langsmith.__version__}",
"Accept": "application/json",
}
if self.api_key:
headers[X_API_KEY] = self.api_key
return headers
@property
def info(self) -> ls_schemas.LangSmithInfo:
"""Get the information about the LangSmith API.
Returns:
-------
Optional[ls_schemas.LangSmithInfo]
The information about the LangSmith API, or None if the API is
not available.
"""
if self._info is None:
try:
response = self.request_with_retries(
"GET",
"/info",
headers={"Accept": "application/json"},
timeout=(self.timeout_ms[0] / 1000, self.timeout_ms[1] / 1000),
)
ls_utils.raise_for_status_with_text(response)
self._info = ls_schemas.LangSmithInfo(**response.json())
except BaseException as e:
logger.warning(
f"Failed to get info from {self.api_url}: {repr(e)}",
)
self._info = ls_schemas.LangSmithInfo()
return self._info
def _get_settings(self) -> ls_schemas.LangSmithSettings:
"""Get the settings for the current tenant.
Returns:
dict: The settings for the current tenant.
"""
if self._settings is None:
response = self.request_with_retries("GET", "/settings")
ls_utils.raise_for_status_with_text(response)
self._settings = ls_schemas.LangSmithSettings(**response.json())
return self._settings
def _content_above_size(self, content_length: Optional[int]) -> Optional[str]:
if content_length is None or self._info is None:
return None
info = cast(ls_schemas.LangSmithInfo, self._info)
bic = info.batch_ingest_config
if not bic:
return None
size_limit = bic.get("size_limit_bytes")
if size_limit is None:
return None
if content_length > size_limit:
return (
f"The content length of {content_length} bytes exceeds the "
f"maximum size limit of {size_limit} bytes."
)
return None
    def request_with_retries(
        self,
        /,
        method: Literal["GET", "POST", "PUT", "PATCH", "DELETE"],
        pathname: str,
        *,
        request_kwargs: Optional[Mapping] = None,
        stop_after_attempt: int = 1,
        retry_on: Optional[Sequence[Type[BaseException]]] = None,
        to_ignore: Optional[Sequence[Type[BaseException]]] = None,
        handle_response: Optional[Callable[[requests.Response, int], Any]] = None,
        _context: str = "",
        **kwargs: Any,
    ) -> requests.Response:
        """Send a request with retries.

        Parameters
        ----------
        method : str
            The HTTP request method.
        pathname : str
            The pathname of the request URL. Will be appended to the API URL.
        request_kwargs : Mapping
            Additional request parameters.
        stop_after_attempt : int, default=1
            The number of attempts to make.
        retry_on : Sequence[Type[BaseException]] or None, default=None
            The exceptions to retry on. In addition to:
            [LangSmithConnectionError, LangSmithAPIError].
        to_ignore : Sequence[Type[BaseException]] or None, default=None
            The exceptions to ignore / pass on.
        handle_response : Callable[[requests.Response, int], Any] or None, default=None
            A function to handle the response and return whether to continue
            retrying.
        _context : str, default=""
            Extra context appended to error messages for diagnostics.
        **kwargs : Any
            Additional keyword arguments to pass to the request.

        Returns:
        -------
        Response
            The response object.

        Raises:
        ------
        LangSmithAPIError
            If a server error occurs.
        LangSmithUserError
            If the request fails.
        LangSmithConnectionError
            If a connection error occurs.
        LangSmithError
            If the request fails.
        """
        request_kwargs = request_kwargs or {}
        # Merge precedence: explicit kwargs override request_kwargs override
        # the default timeout; headers from all three sources are combined.
        request_kwargs = {
            "timeout": (self.timeout_ms[0] / 1000, self.timeout_ms[1] / 1000),
            **request_kwargs,
            **kwargs,
            "headers": {
                **self._headers,
                **request_kwargs.get("headers", {}),
                **kwargs.get("headers", {}),
            },
        }
        if (
            method != "GET"
            and "data" in request_kwargs
            and "files" not in request_kwargs
            and not request_kwargs["headers"].get("Content-Type")
        ):
            # Raw (non-multipart) bodies default to JSON.
            request_kwargs["headers"]["Content-Type"] = "application/json"
        # Suppress known-noisy urllib3 retry / pool-full log records for the
        # duration of the request.
        logging_filters = [
            ls_utils.FilterLangSmithRetry(),
            ls_utils.FilterPoolFullWarning(host=str(self._host)),
        ]
        retry_on_: Tuple[Type[BaseException], ...] = (
            *(retry_on or ()),
            *(
                ls_utils.LangSmithConnectionError,
                ls_utils.LangSmithRequestTimeout,  # 408
                ls_utils.LangSmithAPIError,  # 500
            ),
        )
        to_ignore_: Tuple[Type[BaseException], ...] = (*(to_ignore or ()),)
        response = None
        for idx in range(stop_after_attempt):
            try:
                try:
                    with ls_utils.filter_logs(_urllib3_logger, logging_filters):
                        response = self.session.request(
                            method,
                            (
                                # Absolute URLs are passed through untouched.
                                self.api_url + pathname
                                if not pathname.startswith("http")
                                else pathname
                            ),
                            stream=False,
                            **request_kwargs,
                        )
                    ls_utils.raise_for_status_with_text(response)
                    return response
                except requests.exceptions.ReadTimeout as e:
                    # Back off exponentially (with jitter) and retry.
                    logger.debug("Passing on exception %s", e)
                    if idx + 1 == stop_after_attempt:
                        raise
                    sleep_time = 2**idx + (random.random() * 0.5)
                    time.sleep(sleep_time)
                    continue
                except requests.HTTPError as e:
                    # Translate HTTP status codes into typed LangSmith errors;
                    # a custom handle_response hook may elect to retry first.
                    if response is not None:
                        if handle_response is not None:
                            if idx + 1 < stop_after_attempt:
                                should_continue = handle_response(response, idx + 1)
                                if should_continue:
                                    continue
                        if response.status_code == 500:
                            raise ls_utils.LangSmithAPIError(
                                f"Server error caused failure to {method}"
                                f" {pathname} in"
                                f" LangSmith API. {repr(e)}"
                                f"{_context}"
                            )
                        elif response.status_code == 408:
                            raise ls_utils.LangSmithRequestTimeout(
                                f"Client took too long to send request to {method}"
                                f"{pathname} {_context}"
                            )
                        elif response.status_code == 429:
                            raise ls_utils.LangSmithRateLimitError(
                                f"Rate limit exceeded for {pathname}. {repr(e)}"
                                f"{_context}"
                            )
                        elif response.status_code == 401:
                            raise ls_utils.LangSmithAuthError(
                                f"Authentication failed for {pathname}. {repr(e)}"
                                f"{_context}"
                            )
                        elif response.status_code == 404:
                            raise ls_utils.LangSmithNotFoundError(
                                f"Resource not found for {pathname}. {repr(e)}"
                                f"{_context}"
                            )
                        elif response.status_code == 409:
                            raise ls_utils.LangSmithConflictError(
                                f"Conflict for {pathname}. {repr(e)}" f"{_context}"
                            )
                        else:
                            raise ls_utils.LangSmithError(
                                f"Failed to {method} {pathname} in LangSmith"
                                f" API. {repr(e)}"
                            )
                    else:
                        raise ls_utils.LangSmithUserError(
                            f"Failed to {method} {pathname} in LangSmith API."
                            f" {repr(e)}"
                        )
                except requests.ConnectionError as e:
                    # Build an actionable message: endpoint vs. connectivity
                    # hint, and an oversize-payload hint when detectable.
                    recommendation = (
                        "Please confirm your LANGCHAIN_ENDPOINT."
                        if self.api_url != "https://api.smith.langchain.com"
                        else "Please confirm your internet connection."
                    )
                    try:
                        content_length = int(
                            str(e.request.headers.get("Content-Length"))
                            if e.request
                            else ""
                        )
                        size_rec = self._content_above_size(content_length)
                        if size_rec:
                            recommendation = size_rec
                    except ValueError:
                        content_length = None
                    # Mask the API key, keeping only a short prefix/suffix.
                    api_key = (
                        e.request.headers.get("x-api-key") or "" if e.request else ""
                    )
                    prefix, suffix = api_key[:5], api_key[-2:]
                    filler = "*" * (max(0, len(api_key) - 7))
                    masked_api_key = f"{prefix}{filler}{suffix}"
                    raise ls_utils.LangSmithConnectionError(
                        f"Connection error caused failure to {method} {pathname}"
                        f" in LangSmith API. {recommendation}"
                        f" {repr(e)}"
                        f"\nContent-Length: {content_length}"
                        f"\nAPI Key: {masked_api_key}"
                        f"{_context}"
                    ) from e
                except Exception as e:
                    # Catch-all: fold the exception args into one message,
                    # relabeling "session" as "session (project)" for clarity.
                    args = list(e.args)
                    msg = args[1] if len(args) > 1 else ""
                    msg = msg.replace("session", "session (project)")
                    if args:
                        emsg = "\n".join(
                            [str(args[0])]
                            + [msg]
                            + [str(arg) for arg in (args[2:] if len(args) > 2 else [])]
                        )
                    else:
                        emsg = msg
                    raise ls_utils.LangSmithError(
                        f"Failed to {method} {pathname} in LangSmith API. {emsg}"
                        f"{_context}"
                    ) from e
            except to_ignore_ as e:
                # Caller asked to ignore this error class; return the raw
                # response when we have one (falls through to retry otherwise).
                if response is not None:
                    logger.debug("Passing on exception %s", e)
                    return response
            except ls_utils.LangSmithRateLimitError:
                # Honor the server's retry-after hint (default 30s), layered
                # with exponential backoff and jitter.
                if idx + 1 == stop_after_attempt:
                    raise
                if response is not None:
                    try:
                        retry_after = float(response.headers.get("retry-after", "30"))
                    except Exception as e:
                        logger.warning(
                            "Invalid retry-after header: %s",
                            repr(e),
                        )
                        retry_after = 30
                    # Add exponential backoff
                    retry_after = retry_after * 2**idx + random.random()
                    time.sleep(retry_after)
            except retry_on_:
                # Handle other exceptions more immediately
                if idx + 1 == stop_after_attempt:
                    raise
                sleep_time = 2**idx + (random.random() * 0.5)
                time.sleep(sleep_time)
                continue
        # Else we still raise an error
        raise ls_utils.LangSmithError(
            f"Failed to {method} {pathname} in LangSmith API."
        )
def _get_paginated_list(
self, path: str, *, params: Optional[dict] = None
) -> Iterator[dict]:
"""Get a paginated list of items.
Parameters
----------
path : str
The path of the request URL.
params : dict or None, default=None
The query parameters.
Yields:
------
dict
The items in the paginated list.
"""
params_ = params.copy() if params else {}
offset = params_.get("offset", 0)
params_["limit"] = params_.get("limit", 100)
while True:
params_["offset"] = offset
response = self.request_with_retries(
"GET",
path,
params=params_,
)
items = response.json()
if not items:
break
yield from items
if len(items) < params_["limit"]:
# offset and limit isn't respected if we're
# querying for specific values
break
offset += len(items)
def _get_cursor_paginated_list(
self,
path: str,
*,
body: Optional[dict] = None,
request_method: Literal["GET", "POST"] = "POST",
data_key: str = "runs",
) -> Iterator[dict]:
"""Get a cursor paginated list of items.
Parameters
----------
path : str
The path of the request URL.
body : dict or None, default=None
The query body.
request_method : str, default="post"
The HTTP request method.
data_key : str, default="runs"
Yields:
------
dict
The items in the paginated list.
"""
params_ = body.copy() if body else {}
while True:
response = self.request_with_retries(
request_method,
path,
request_kwargs={
"data": _dumps_json(params_),
},
)
response_body = response.json()
if not response_body:
break
if not response_body.get(data_key):
break
yield from response_body[data_key]
cursors = response_body.get("cursors")
if not cursors:
break
if not cursors.get("next"):
break
params_["cursor"] = cursors["next"]
def upload_dataframe(
self,
df: pd.DataFrame,
name: str,
input_keys: Sequence[str],
output_keys: Sequence[str],
*,
description: Optional[str] = None,
data_type: Optional[ls_schemas.DataType] = ls_schemas.DataType.kv,
) -> ls_schemas.Dataset:
"""Upload a dataframe as individual examples to the LangSmith API.
Parameters
----------
df : pd.DataFrame
The dataframe to upload.
name : str
The name of the dataset.
input_keys : Sequence[str]
The input keys.
output_keys : Sequence[str]
The output keys.
description : str or None, default=None
The description of the dataset.
data_type : DataType or None, default=DataType.kv
The data type of the dataset.
Returns:
-------
Dataset
The uploaded dataset.
Raises:
------
ValueError
If the csv_file is not a string or tuple.
"""
csv_file = io.BytesIO()
df.to_csv(csv_file, index=False)
csv_file.seek(0)
return self.upload_csv(
("data.csv", csv_file),
input_keys=input_keys,
output_keys=output_keys,
description=description,
name=name,
data_type=data_type,
)
def upload_csv(
self,
csv_file: Union[str, Tuple[str, io.BytesIO]],
input_keys: Sequence[str],
output_keys: Sequence[str],
*,
name: Optional[str] = None,
description: Optional[str] = None,
data_type: Optional[ls_schemas.DataType] = ls_schemas.DataType.kv,
) -> ls_schemas.Dataset:
"""Upload a CSV file to the LangSmith API.
Parameters
----------
csv_file : str or Tuple[str, BytesIO]
The CSV file to upload. If a string, it should be the path
If a tuple, it should be a tuple containing the filename
and a BytesIO object.
input_keys : Sequence[str]
The input keys.
output_keys : Sequence[str]
The output keys.
name : str or None, default=None
The name of the dataset.
description : str or None, default=None
The description of the dataset.
data_type : DataType or None, default=DataType.kv
The data type of the dataset.
Returns:
-------
Dataset
The uploaded dataset.
Raises:
------
ValueError
If the csv_file is not a string or tuple.
"""
data = {
"input_keys": input_keys,
"output_keys": output_keys,
}
if name:
data["name"] = name
if description:
data["description"] = description
if data_type:
data["data_type"] = ls_utils.get_enum_value(data_type)
data["id"] = str(uuid.uuid4())
if isinstance(csv_file, str):
with open(csv_file, "rb") as f:
file_ = {"file": f}
response = self.request_with_retries(
"POST",
"/datasets/upload",
data=data,
files=file_,
)
elif isinstance(csv_file, tuple):
response = self.request_with_retries(
"POST",
"/datasets/upload",
data=data,
files={"file": csv_file},
)
else:
raise ValueError("csv_file must be a string or tuple")
ls_utils.raise_for_status_with_text(response)
result = response.json()
# TODO: Make this more robust server-side
if "detail" in result and "already exists" in result["detail"]:
file_name = csv_file if isinstance(csv_file, str) else csv_file[0]
file_name = file_name.split("/")[-1]
raise ValueError(f"Dataset {file_name} already exists")
return ls_schemas.Dataset(
**result,
_host_url=self._host_url,
_tenant_id=self._get_optional_tenant_id(),
)
def _run_transform(
self,
run: Union[ls_schemas.Run, dict, ls_schemas.RunLikeDict],
update: bool = False,
copy: bool = False,
) -> dict:
"""Transform the given run object into a dictionary representation.
Args:
run (Union[ls_schemas.Run, dict]): The run object to transform.
update (bool, optional): Whether the payload is for an "update" event.
copy (bool, optional): Whether to deepcopy run inputs/outputs.
Returns:
dict: The transformed run object as a dictionary.
"""
global WARNED_ATTACHMENTS
if hasattr(run, "dict") and callable(getattr(run, "dict")):
run_create: dict = run.dict() # type: ignore
else:
run_create = cast(dict, run)
if "id" not in run_create:
run_create["id"] = uuid.uuid4()
elif isinstance(run_create["id"], str):
run_create["id"] = uuid.UUID(run_create["id"])
if "inputs" in run_create and run_create["inputs"] is not None:
if copy:
run_create["inputs"] = ls_utils.deepish_copy(run_create["inputs"])
run_create["inputs"] = self._hide_run_inputs(run_create["inputs"])
if "outputs" in run_create and run_create["outputs"] is not None:
if copy:
run_create["outputs"] = ls_utils.deepish_copy(run_create["outputs"])
run_create["outputs"] = self._hide_run_outputs(run_create["outputs"])
if not update and not run_create.get("start_time"):
run_create["start_time"] = datetime.datetime.now(datetime.timezone.utc)
# Only retain LLM & Prompt manifests
if "serialized" in run_create:
if run_create.get("run_type") not in (
"llm",
"prompt",
):
# Drop completely
run_create.pop("serialized", None)
else:
# Drop graph
run_create["serialized"].pop("graph", None)
return run_create
@staticmethod
def _insert_runtime_env(runs: Sequence[dict]) -> None:
runtime_env = ls_env.get_runtime_environment()
for run_create in runs:
run_extra = cast(dict, run_create.setdefault("extra", {}))
# update runtime
runtime: dict = run_extra.setdefault("runtime", {})
run_extra["runtime"] = {**runtime_env, **runtime}
# update metadata
metadata: dict = run_extra.setdefault("metadata", {})
langchain_metadata = ls_env.get_langchain_env_var_metadata()
metadata.update(
{k: v for k, v in langchain_metadata.items() if k not in metadata}
)
def _filter_for_sampling(
self, runs: Iterable[dict], *, patch: bool = False
) -> list[dict]:
if self.tracing_sample_rate is None:
return list(runs)
if patch:
sampled = []
for run in runs:
run_id = _as_uuid(run["id"])
if run_id not in self._filtered_post_uuids:
sampled.append(run)
else:
self._filtered_post_uuids.remove(run_id)
return sampled
else:
sampled = []
for run in runs:
if (
# Child run
run["id"] != run.get("trace_id")
# Whose trace is included
and run.get("trace_id") not in self._filtered_post_uuids
# Or a root that's randomly sampled
) or random.random() < self.tracing_sample_rate:
sampled.append(run)
else:
self._filtered_post_uuids.add(_as_uuid(run["id"]))
return sampled
def create_run(
self,
name: str,
inputs: Dict[str, Any],
run_type: RUN_TYPE_T,
*,
project_name: Optional[str] = None,
revision_id: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Persist a run to the LangSmith API.
Parameters
----------
name : str
The name of the run.
inputs : Dict[str, Any]
The input values for the run.
run_type : str
The type of the run, such as tool, chain, llm, retriever,
embedding, prompt, or parser.
revision_id : ID_TYPE or None, default=None
The revision ID of the run.
**kwargs : Any
Additional keyword arguments.
Raises:
------
LangSmithUserError
If the API key is not provided when using the hosted service.
"""
project_name = project_name or kwargs.pop(
"session_name",
# if the project is not provided, use the environment's project
ls_utils.get_tracer_project(),
)
run_create = {
**kwargs,
"session_name": project_name,
"name": name,
"inputs": inputs,
"run_type": run_type,
}
if not self._filter_for_sampling([run_create]):
return
if revision_id is not None:
run_create["extra"]["metadata"]["revision_id"] = revision_id
run_create = self._run_transform(
run_create,
copy=False,
)
self._insert_runtime_env([run_create])
if (
# batch ingest requires trace_id and dotted_order to be set
run_create.get("trace_id") is not None
and run_create.get("dotted_order") is not None
):
if self._pyo3_client is not None:
self._pyo3_client.create_run(run_create)
elif self.tracing_queue is not None:
serialized_op = serialize_run_dict("post", run_create)
self.tracing_queue.put(
TracingQueueItem(run_create["dotted_order"], serialized_op)
)
else:
# Neither Rust nor Python batch ingestion is configured,
# fall back to the non-batch approach.
self._create_run(run_create)
else:
self._create_run(run_create)
def _create_run(self, run_create: dict):
for api_url, api_key in self._write_api_urls.items():
headers = {**self._headers, X_API_KEY: api_key}
self.request_with_retries(
"POST",
f"{api_url}/runs",
request_kwargs={
"data": _dumps_json(run_create),
"headers": headers,
},
to_ignore=(ls_utils.LangSmithConflictError,),
)
def _hide_run_inputs(self, inputs: dict):
if self._hide_inputs is True:
return {}
if self._anonymizer:
json_inputs = _orjson.loads(_dumps_json(inputs))
return self._anonymizer(json_inputs)
if self._hide_inputs is False:
return inputs
return self._hide_inputs(inputs)
def _hide_run_outputs(self, outputs: dict):
if self._hide_outputs is True:
return {}
if self._anonymizer:
json_outputs = _orjson.loads(_dumps_json(outputs))
return self._anonymizer(json_outputs)
if self._hide_outputs is False:
return outputs
return self._hide_outputs(outputs)
    def _batch_ingest_run_ops(
        self,
        ops: List[SerializedRunOperation],
    ) -> None:
        """Send serialized run create/update ops to the non-multipart batch endpoint.

        Ops are rendered into JSON fragments, grouped into "post"/"patch"
        buckets, and flushed in chunks that stay under the server's payload
        size limit. Attachments and feedback ops are not supported on this
        endpoint; they are logged and skipped.
        """
        ids_and_partial_body: dict[
            Literal["post", "patch"], list[tuple[str, bytes]]
        ] = {
            "post": [],
            "patch": [],
        }
        # form the partial body and ids
        for op in ops:
            if isinstance(op, SerializedRunOperation):
                # op._none holds the already-serialized base payload; splice
                # the separately-serialized fields back in as raw fragments.
                curr_dict = _orjson.loads(op._none)
                if op.inputs:
                    curr_dict["inputs"] = _orjson.Fragment(op.inputs)
                if op.outputs:
                    curr_dict["outputs"] = _orjson.Fragment(op.outputs)
                if op.events:
                    curr_dict["events"] = _orjson.Fragment(op.events)
                if op.attachments:
                    logger.warning(
                        "Attachments are not supported when use_multipart_endpoint "
                        "is False"
                    )
                ids_and_partial_body[op.operation].append(
                    (f"trace={op.trace_id},id={op.id}", _orjson.dumps(curr_dict))
                )
            elif isinstance(op, SerializedFeedbackOperation):
                logger.warning(
                    "Feedback operations are not supported in non-multipart mode"
                )
            else:
                logger.error("Unknown item type in tracing queue: %s", type(op))
        # send the requests in batches
        info = self.info
        size_limit_bytes = (info.batch_ingest_config or {}).get(
            "size_limit_bytes"
        ) or _SIZE_LIMIT_BYTES
        body_chunks: DefaultDict[str, list] = collections.defaultdict(list)
        context_ids: DefaultDict[str, list] = collections.defaultdict(list)
        body_size = 0
        for key in cast(List[Literal["post", "patch"]], ["post", "patch"]):
            body_deque = collections.deque(ids_and_partial_body[key])
            while body_deque:
                # Flush the accumulated chunk before adding an item that
                # would push it over the size limit.
                if (
                    body_size > 0
                    and body_size + len(body_deque[0][1]) > size_limit_bytes
                ):
                    self._post_batch_ingest_runs(
                        _orjson.dumps(body_chunks),
                        _context=f"\n{key}: {'; '.join(context_ids[key])}",
                    )
                    body_size = 0
                    body_chunks.clear()
                    context_ids.clear()
                curr_id, curr_body = body_deque.popleft()
                body_size += len(curr_body)
                body_chunks[key].append(_orjson.Fragment(curr_body))
                context_ids[key].append(curr_id)
        if body_size:
            # Flush whatever remains after both buckets are drained.
            context = "; ".join(f"{k}: {'; '.join(v)}" for k, v in context_ids.items())
            self._post_batch_ingest_runs(
                _orjson.dumps(body_chunks), _context="\n" + context
            )
def batch_ingest_runs(
self,
create: Optional[
Sequence[Union[ls_schemas.Run, ls_schemas.RunLikeDict, Dict]]
] = None,
update: Optional[
Sequence[Union[ls_schemas.Run, ls_schemas.RunLikeDict, Dict]]
] = None,
*,
pre_sampled: bool = False,
) -> None:
"""Batch ingest/upsert multiple runs in the Langsmith system.
Args:
create (Optional[Sequence[Union[ls_schemas.Run, RunLikeDict]]]):
A sequence of `Run` objects or equivalent dictionaries representing
runs to be created / posted.
update (Optional[Sequence[Union[ls_schemas.Run, RunLikeDict]]]):
A sequence of `Run` objects or equivalent dictionaries representing
runs that have already been created and should be updated / patched.
pre_sampled (bool, optional): Whether the runs have already been subject
to sampling, and therefore should not be sampled again.
Defaults to False.
Returns:
None
Raises:
LangsmithAPIError: If there is an error in the API request.
Note:
- The run objects MUST contain the dotted_order and trace_id fields
to be accepted by the API.
"""
if not create and not update:
return
# transform and convert to dicts
create_dicts = [
self._run_transform(run, copy=False) for run in create or EMPTY_SEQ
]
update_dicts = [
self._run_transform(run, update=True, copy=False)
for run in update or EMPTY_SEQ
]
for run in create_dicts:
if not run.get("trace_id") or not run.get("dotted_order"):
raise ls_utils.LangSmithUserError(
"Batch ingest requires trace_id and dotted_order to be set."
)
for run in update_dicts:
if not run.get("trace_id") or not run.get("dotted_order"):
raise ls_utils.LangSmithUserError(
"Batch ingest requires trace_id and dotted_order to be set."
)
# filter out runs that are not sampled
if not pre_sampled:
create_dicts = self._filter_for_sampling(create_dicts)
update_dicts = self._filter_for_sampling(update_dicts, patch=True)
if not create_dicts and not update_dicts:
return
self._insert_runtime_env(create_dicts + update_dicts)
# convert to serialized ops
serialized_ops = cast(
List[SerializedRunOperation],
combine_serialized_queue_operations(
list(
itertools.chain(
(serialize_run_dict("post", run) for run in create_dicts),
(serialize_run_dict("patch", run) for run in update_dicts),
)
)
),
)
self._batch_ingest_run_ops(serialized_ops)
def _post_batch_ingest_runs(self, body: bytes, *, _context: str):
for api_url, api_key in self._write_api_urls.items():
try:
self.request_with_retries(
"POST",
f"{api_url}/runs/batch",
request_kwargs={
"data": body,
"headers": {
**self._headers,
X_API_KEY: api_key,
},
},
to_ignore=(ls_utils.LangSmithConflictError,),
stop_after_attempt=3,
_context=_context,
)
except Exception as e:
try:
exc_desc_lines = traceback.format_exception_only(type(e), e)
exc_desc = "".join(exc_desc_lines).rstrip()
logger.warning(f"Failed to batch ingest runs: {exc_desc}")
except Exception:
logger.warning(f"Failed to batch ingest runs: {repr(e)}")
def _multipart_ingest_ops(
self, ops: list[Union[SerializedRunOperation, SerializedFeedbackOperation]]
) -> None:
parts: list[MultipartPartsAndContext] = []
for op in ops:
if isinstance(op, SerializedRunOperation):
parts.append(
serialized_run_operation_to_multipart_parts_and_context(op)
)
elif isinstance(op, SerializedFeedbackOperation):
parts.append(
serialized_feedback_operation_to_multipart_parts_and_context(op)
)
else:
logger.error("Unknown operation type in tracing queue: %s", type(op))
acc_multipart = join_multipart_parts_and_context(parts)
if acc_multipart:
self._send_multipart_req(acc_multipart)
    def multipart_ingest(
        self,
        create: Optional[
            Sequence[Union[ls_schemas.Run, ls_schemas.RunLikeDict, Dict]]
        ] = None,
        update: Optional[
            Sequence[Union[ls_schemas.Run, ls_schemas.RunLikeDict, Dict]]
        ] = None,
        *,
        pre_sampled: bool = False,
    ) -> None:
        """Batch ingest/upsert multiple runs in the Langsmith system.

        Args:
            create (Optional[Sequence[Union[ls_schemas.Run, RunLikeDict]]]):
                A sequence of `Run` objects or equivalent dictionaries representing
                runs to be created / posted.
            update (Optional[Sequence[Union[ls_schemas.Run, RunLikeDict]]]):
                A sequence of `Run` objects or equivalent dictionaries representing
                runs that have already been created and should be updated / patched.
            pre_sampled (bool, optional): Whether the runs have already been subject
                to sampling, and therefore should not be sampled again.
                Defaults to False.

        Returns:
            None

        Raises:
            LangsmithAPIError: If there is an error in the API request.

        Note:
            - The run objects MUST contain the dotted_order and trace_id fields
              to be accepted by the API.
        """
        if not (create or update):
            return
        # transform and convert to dicts
        create_dicts = [self._run_transform(run) for run in create or EMPTY_SEQ]
        update_dicts = [
            self._run_transform(run, update=True) for run in update or EMPTY_SEQ
        ]
        # require trace_id and dotted_order
        if create_dicts:
            for run in create_dicts:
                if not run.get("trace_id") or not run.get("dotted_order"):
                    raise ls_utils.LangSmithUserError(
                        "Multipart ingest requires trace_id and dotted_order"
                        " to be set in create dicts."
                    )
            else:
                # for/else runs after the loop completes; this just unbinds
                # the loop variable once validation passes.
                del run
        if update_dicts:
            for run in update_dicts:
                if not run.get("trace_id") or not run.get("dotted_order"):
                    raise ls_utils.LangSmithUserError(
                        "Multipart ingest requires trace_id and dotted_order"
                        " to be set in update dicts."
                    )
            else:
                del run  # unbind the loop variable after validation
        # combine post and patch dicts where possible
        if update_dicts and create_dicts:
            create_by_id = {run["id"]: run for run in create_dicts}
            standalone_updates: list[dict] = []
            for run in update_dicts:
                if run["id"] in create_by_id:
                    # Fold the patch into the pending create, skipping None
                    # values so they don't clobber fields set at creation.
                    for k, v in run.items():
                        if v is not None:
                            create_by_id[run["id"]][k] = v
                else:
                    standalone_updates.append(run)
            else:
                del run  # unbind the loop variable after merging
            update_dicts = standalone_updates
        # filter out runs that are not sampled
        if not pre_sampled:
            create_dicts = self._filter_for_sampling(create_dicts)
            update_dicts = self._filter_for_sampling(update_dicts, patch=True)
        if not create_dicts and not update_dicts:
            return
        # insert runtime environment
        self._insert_runtime_env(create_dicts)
        self._insert_runtime_env(update_dicts)
        # format as serialized operations
        serialized_ops = combine_serialized_queue_operations(
            list(
                itertools.chain(
                    (serialize_run_dict("post", run) for run in create_dicts),
                    (serialize_run_dict("patch", run) for run in update_dicts),
                )
            )
        )
        # send the runs in multipart requests
        self._multipart_ingest_ops(serialized_ops)
    def _send_multipart_req(self, acc: MultipartPartsAndContext, *, attempts: int = 3):
        """POST accumulated multipart parts to every write endpoint.

        Transient failures (connection errors, timeouts, API errors) are
        retried up to ``attempts`` times per endpoint. All failures are
        logged rather than raised so tracing cannot crash the caller.
        """
        parts = acc.parts
        _context = acc.context
        for api_url, api_key in self._write_api_urls.items():
            for idx in range(1, attempts + 1):
                try:
                    encoder = rqtb_multipart.MultipartEncoder(parts, boundary=BOUNDARY)
                    # Small payloads are sent as a single in-memory string;
                    # larger ones are streamed from the encoder.
                    if encoder.len <= 20_000_000:  # ~20 MB
                        data = encoder.to_string()
                    else:
                        data = encoder
                    self.request_with_retries(
                        "POST",
                        f"{api_url}/runs/multipart",
                        request_kwargs={
                            "data": data,
                            "headers": {
                                **self._headers,
                                X_API_KEY: api_key,
                                "Content-Type": encoder.content_type,
                            },
                        },
                        # Retrying is handled by the surrounding loop instead.
                        stop_after_attempt=1,
                        _context=_context,
                    )
                    break
                except ls_utils.LangSmithConflictError:
                    # Duplicate submission; treat as success for this endpoint.
                    break
                except (
                    ls_utils.LangSmithConnectionError,
                    ls_utils.LangSmithRequestTimeout,
                    ls_utils.LangSmithAPIError,
                ) as exc:
                    # Transient: retry until attempts are exhausted, then log.
                    if idx == attempts:
                        logger.warning(f"Failed to multipart ingest runs: {exc}")
                    else:
                        continue
                except Exception as e:
                    try:
                        exc_desc_lines = traceback.format_exception_only(type(e), e)
                        exc_desc = "".join(exc_desc_lines).rstrip()
                        logger.warning(f"Failed to multipart ingest runs: {exc_desc}")
                    except Exception:
                        logger.warning(f"Failed to multipart ingest runs: {repr(e)}")
                    # do not retry by default
                    # NOTE(review): this return also skips any remaining write
                    # endpoints — confirm that is intended for unexpected errors.
                    return
    def update_run(
        self,
        run_id: ID_TYPE,
        *,
        name: Optional[str] = None,
        end_time: Optional[datetime.datetime] = None,
        error: Optional[str] = None,
        inputs: Optional[Dict] = None,
        outputs: Optional[Dict] = None,
        events: Optional[Sequence[dict]] = None,
        extra: Optional[Dict] = None,
        tags: Optional[List[str]] = None,
        attachments: Optional[
            Dict[str, tuple[str, bytes] | ls_schemas.Attachment]
        ] = None,
        **kwargs: Any,
    ) -> None:
        """Update a run in the LangSmith API.

        Parameters
        ----------
        run_id : str or UUID
            The ID of the run to update.
        name : str or None, default=None
            The name of the run.
        end_time : datetime or None
            The end time of the run. Defaults to "now" (UTC) when omitted.
        error : str or None, default=None
            The error message of the run.
        inputs : Dict or None, default=None
            The input values for the run.
        outputs : Dict or None, default=None
            The output values for the run.
        events : Sequence[dict] or None, default=None
            The events for the run.
        extra : Dict or None, default=None
            The extra information for the run.
        tags : List[str] or None, default=None
            The tags for the run.
        attachments: dict[str, ls_schemas.Attachment] or None, default=None
            A dictionary of attachments to add to the run. The keys are the attachment names,
            and the values are Attachment objects containing the data and mime type.
        **kwargs : Any
            Kwargs are ignored, except for trace_id, parent_run_id,
            dotted_order, session_id, and session_name, which are popped
            into the update payload.
        """
        data: Dict[str, Any] = {
            "id": _as_uuid(run_id, "run_id"),
            "name": name,
            "trace_id": kwargs.pop("trace_id", None),
            "parent_run_id": kwargs.pop("parent_run_id", None),
            "dotted_order": kwargs.pop("dotted_order", None),
            "tags": tags,
            "extra": extra,
            "session_id": kwargs.pop("session_id", None),
            "session_name": kwargs.pop("session_name", None),
        }
        if attachments:
            data["attachments"] = attachments
        # Route through the background tracing queue only when the run carries
        # the fields the batch endpoint requires.
        use_multipart = (
            self.tracing_queue is not None
            # batch ingest requires trace_id and dotted_order to be set
            and data["trace_id"] is not None
            and data["dotted_order"] is not None
        )
        # Drop the update entirely if client-side sampling filters it out.
        if not self._filter_for_sampling([data], patch=True):
            return
        if end_time is not None:
            data["end_time"] = end_time.isoformat()
        else:
            data["end_time"] = datetime.datetime.now(datetime.timezone.utc).isoformat()
        if error is not None:
            data["error"] = error
        if inputs is not None:
            data["inputs"] = self._hide_run_inputs(inputs)
        if outputs is not None:
            # NOTE(review): outputs are copied only on the synchronous path —
            # presumably to avoid mutating the caller's dict; confirm the
            # multipart path serializes before the caller can mutate.
            if not use_multipart:
                outputs = ls_utils.deepish_copy(outputs)
            data["outputs"] = self._hide_run_outputs(outputs)
        if events is not None:
            data["events"] = events
        if data["extra"]:
            self._insert_runtime_env([data])
        if use_multipart and self.tracing_queue is not None:
            # not collecting attachments currently, use empty dict
            serialized_op = serialize_run_dict(operation="patch", payload=data)
            self.tracing_queue.put(
                TracingQueueItem(data["dotted_order"], serialized_op)
            )
        else:
            self._update_run(data)
def _update_run(self, run_update: dict) -> None:
for api_url, api_key in self._write_api_urls.items():
headers = {
**self._headers,
X_API_KEY: api_key,
}
self.request_with_retries(
"PATCH",
f"{api_url}/runs/{run_update['id']}",
request_kwargs={
"data": _dumps_json(run_update),
"headers": headers,
},
)
def _load_child_runs(self, run: ls_schemas.Run) -> ls_schemas.Run:
"""Load child runs for a given run.
Parameters
----------
run : Run
The run to load child runs for.
Returns:
-------
Run
The run with loaded child runs.
Raises:
------
LangSmithError
If a child run has no parent.
"""
child_runs = self.list_runs(id=run.child_run_ids)
treemap: DefaultDict[uuid.UUID, List[ls_schemas.Run]] = collections.defaultdict(
list
)
runs: Dict[uuid.UUID, ls_schemas.Run] = {}
for child_run in sorted(
child_runs,
key=lambda r: r.dotted_order,
):
if child_run.parent_run_id is None:
raise ls_utils.LangSmithError(f"Child run {child_run.id} has no parent")
treemap[child_run.parent_run_id].append(child_run)
runs[child_run.id] = child_run
run.child_runs = treemap.pop(run.id, [])
for run_id, children in treemap.items():
runs[run_id].child_runs = children
return run
def read_run(
self, run_id: ID_TYPE, load_child_runs: bool = False
) -> ls_schemas.Run:
"""Read a run from the LangSmith API.
Parameters
----------
run_id : str or UUID
The ID of the run to read.
load_child_runs : bool, default=False
Whether to load nested child runs.
Returns:
-------
Run
The run.
"""
response = self.request_with_retries(
"GET", f"/runs/{_as_uuid(run_id, 'run_id')}"
)
run = ls_schemas.Run(**response.json(), _host_url=self._host_url)
if load_child_runs and run.child_run_ids:
run = self._load_child_runs(run)
return run
    def list_runs(
        self,
        *,
        project_id: Optional[Union[ID_TYPE, Sequence[ID_TYPE]]] = None,
        project_name: Optional[Union[str, Sequence[str]]] = None,
        run_type: Optional[str] = None,
        trace_id: Optional[ID_TYPE] = None,
        reference_example_id: Optional[ID_TYPE] = None,
        query: Optional[str] = None,
        filter: Optional[str] = None,
        trace_filter: Optional[str] = None,
        tree_filter: Optional[str] = None,
        is_root: Optional[bool] = None,
        parent_run_id: Optional[ID_TYPE] = None,
        start_time: Optional[datetime.datetime] = None,
        error: Optional[bool] = None,
        run_ids: Optional[Sequence[ID_TYPE]] = None,
        select: Optional[Sequence[str]] = None,
        limit: Optional[int] = None,
        **kwargs: Any,
    ) -> Iterator[ls_schemas.Run]:
        """List runs from the LangSmith API.

        Parameters
        ----------
        project_id : UUID or None, default=None
            The ID(s) of the project to filter by.
        project_name : str or None, default=None
            The name(s) of the project to filter by.
        run_type : str or None, default=None
            The type of the runs to filter by.
        trace_id : UUID or None, default=None
            The ID of the trace to filter by.
        reference_example_id : UUID or None, default=None
            The ID of the reference example to filter by.
        query : str or None, default=None
            The query string to filter by.
        filter : str or None, default=None
            The filter string to filter by.
        trace_filter : str or None, default=None
            Filter to apply to the ROOT run in the trace tree. This is meant to
            be used in conjunction with the regular `filter` parameter to let you
            filter runs by attributes of the root run within a trace.
        tree_filter : str or None, default=None
            Filter to apply to OTHER runs in the trace tree, including
            sibling and child runs. This is meant to be used in conjunction with
            the regular `filter` parameter to let you filter runs by attributes
            of any run within a trace.
        is_root : bool or None, default=None
            Whether to filter by root runs.
        parent_run_id : UUID or None, default=None
            The ID of the parent run to filter by.
        start_time : datetime or None, default=None
            The start time to filter by.
        error : bool or None, default=None
            Whether to filter by error status.
        run_ids : List[str or UUID] or None, default=None
            The IDs of the runs to filter by.
        select : Sequence[str] or None, default=None
            The fields to return per run; defaults to a broad built-in set.
        limit : int or None, default=None
            The maximum number of runs to return.
        **kwargs : Any
            Additional keyword arguments.

        Yields:
        ------
        Run
            The runs.

        Examples:
        --------
        .. code-block:: python

            # List all runs in a project
            project_runs = client.list_runs(project_name="<your_project>")

            # List LLM and Chat runs in the last 24 hours
            todays_llm_runs = client.list_runs(
                project_name="<your_project>",
                start_time=datetime.now() - timedelta(days=1),
                run_type="llm",
            )

            # List root traces in a project
            root_runs = client.list_runs(project_name="<your_project>", is_root=1)

            # List runs without errors
            correct_runs = client.list_runs(project_name="<your_project>", error=False)

            # List runs and only return their inputs/outputs (to speed up the query)
            input_output_runs = client.list_runs(
                project_name="<your_project>", select=["inputs", "outputs"]
            )

            # List runs by run ID
            run_ids = [
                "a36092d2-4ad5-4fb4-9c0d-0dba9a2ed836",
                "9398e6be-964f-4aa4-8ae9-ad78cd4b7074",
            ]
            selected_runs = client.list_runs(id=run_ids)

            # List all "chain" type runs that took more than 10 seconds and had
            # `total_tokens` greater than 5000
            chain_runs = client.list_runs(
                project_name="<your_project>",
                filter='and(eq(run_type, "chain"), gt(latency, 10), gt(total_tokens, 5000))',
            )

            # List all runs called "extractor" whose root of the trace was assigned feedback "user_score" score of 1
            good_extractor_runs = client.list_runs(
                project_name="<your_project>",
                filter='eq(name, "extractor")',
                trace_filter='and(eq(feedback_key, "user_score"), eq(feedback_score, 1))',
            )

            # List all runs that started after a specific timestamp and either have "error" not equal to null or a "Correctness" feedback score equal to 0
            complex_runs = client.list_runs(
                project_name="<your_project>",
                filter='and(gt(start_time, "2023-07-15T12:34:56Z"), or(neq(error, null), and(eq(feedback_key, "Correctness"), eq(feedback_score, 0.0))))',
            )

            # List all runs where `tags` include "experimental" or "beta" and `latency` is greater than 2 seconds
            tagged_runs = client.list_runs(
                project_name="<your_project>",
                filter='and(or(has(tags, "experimental"), has(tags, "beta")), gt(latency, 2))',
            )
        """  # noqa: E501
        # Resolve the project filter: IDs are used as-is, names are looked up.
        project_ids = []
        if isinstance(project_id, (uuid.UUID, str)):
            project_ids.append(project_id)
        elif isinstance(project_id, list):
            project_ids.extend(project_id)
        if project_name is not None:
            if isinstance(project_name, str):
                project_name = [project_name]
            project_ids.extend(
                [self.read_project(project_name=name).id for name in project_name]
            )
        # Fields returned per run unless the caller narrows the selection.
        default_select = [
            "app_path",
            "child_run_ids",
            "completion_cost",
            "completion_tokens",
            "dotted_order",
            "end_time",
            "error",
            "events",
            "extra",
            "feedback_stats",
            "first_token_time",
            "id",
            "inputs",
            "name",
            "outputs",
            "parent_run_id",
            "parent_run_ids",
            "prompt_cost",
            "prompt_tokens",
            "reference_example_id",
            "run_type",
            "session_id",
            "start_time",
            "status",
            "tags",
            "total_cost",
            "total_tokens",
            "trace_id",
        ]
        select = select or default_select
        body_query: Dict[str, Any] = {
            "session": project_ids if project_ids else None,
            "run_type": run_type,
            "reference_example": (
                [reference_example_id] if reference_example_id else None
            ),
            "query": query,
            "filter": filter,
            "trace_filter": trace_filter,
            "tree_filter": tree_filter,
            "is_root": is_root,
            "parent_run": parent_run_id,
            "start_time": start_time.isoformat() if start_time else None,
            "error": error,
            "id": run_ids,
            "trace": trace_id,
            "select": select,
            **kwargs,
        }
        # The API rejects explicit nulls; drop unset filters.
        body_query = {k: v for k, v in body_query.items() if v is not None}
        # Stream results, stopping client-side after `limit` runs are yielded.
        for i, run in enumerate(
            self._get_cursor_paginated_list("/runs/query", body=body_query)
        ):
            yield ls_schemas.Run(**run, _host_url=self._host_url)
            if limit is not None and i + 1 >= limit:
                break
def get_run_stats(
self,
*,
id: Optional[List[ID_TYPE]] = None,
trace: Optional[ID_TYPE] = None,
parent_run: Optional[ID_TYPE] = None,
run_type: Optional[str] = None,
project_names: Optional[List[str]] = None,
project_ids: Optional[List[ID_TYPE]] = None,
reference_example_ids: Optional[List[ID_TYPE]] = None,
start_time: Optional[str] = None,
end_time: Optional[str] = None,
error: Optional[bool] = None,
query: Optional[str] = None,
filter: Optional[str] = None,
trace_filter: Optional[str] = None,
tree_filter: Optional[str] = None,
is_root: Optional[bool] = None,
data_source_type: Optional[str] = None,
) -> Dict[str, Any]:
"""Get aggregate statistics over queried runs.
Takes in similar query parameters to `list_runs` and returns statistics
based on the runs that match the query.
Args:
id (Optional[List[ID_TYPE]]): List of run IDs to filter by.
trace (Optional[ID_TYPE]): Trace ID to filter by.
parent_run (Optional[ID_TYPE]): Parent run ID to filter by.
run_type (Optional[str]): Run type to filter by.
projects (Optional[List[ID_TYPE]]): List of session IDs to filter by.
reference_example (Optional[List[ID_TYPE]]): List of reference example IDs to filter by.
start_time (Optional[str]): Start time to filter by.
end_time (Optional[str]): End time to filter by.
error (Optional[bool]): Filter by error status.
query (Optional[str]): Query string to filter by.
filter (Optional[str]): Filter string to apply.
trace_filter (Optional[str]): Trace filter string to apply.
tree_filter (Optional[str]): Tree filter string to apply.
is_root (Optional[bool]): Filter by root run status.
data_source_type (Optional[str]): Data source type to filter by.
Returns:
Dict[str, Any]: A dictionary containing the run statistics.
""" # noqa: E501
from concurrent.futures import ThreadPoolExecutor, as_completed # type: ignore
project_ids = project_ids or []
if project_names:
with ThreadPoolExecutor() as executor:
futures = [
executor.submit(self.read_project, project_name=name)
for name in project_names
]
for future in as_completed(futures):
project_ids.append(future.result().id)
payload = {
"id": id,
"trace": trace,
"parent_run": parent_run,
"run_type": run_type,
"session": project_ids,
"reference_example": reference_example_ids,
"start_time": start_time,
"end_time": end_time,
"error": error,
"query": query,
"filter": filter,
"trace_filter": trace_filter,
"tree_filter": tree_filter,
"is_root": is_root,
"data_source_type": data_source_type,
}
# Remove None values from the payload
payload = {k: v for k, v in payload.items() if v is not None}
response = self.request_with_retries(
"POST",
"/runs/stats",
request_kwargs={
"data": _dumps_json(payload),
},
)
ls_utils.raise_for_status_with_text(response)
return response.json()
def get_run_url(
self,
*,
run: ls_schemas.RunBase,
project_name: Optional[str] = None,
project_id: Optional[ID_TYPE] = None,
) -> str:
"""Get the URL for a run.
Not recommended for use within your agent runtime.
More for use interacting with runs after the fact
for data analysis or ETL workloads.
Parameters
----------
run : Run
The run.
project_name : str or None, default=None
The name of the project.
project_id : UUID or None, default=None
The ID of the project.
Returns:
-------
str
The URL for the run.
"""
if session_id := getattr(run, "session_id", None):
pass
elif session_name := getattr(run, "session_name", None):
session_id = self.read_project(project_name=session_name).id
elif project_id is not None:
session_id = project_id
elif project_name is not None:
session_id = self.read_project(project_name=project_name).id
else:
project_name = ls_utils.get_tracer_project()
session_id = self.read_project(project_name=project_name).id
session_id_ = _as_uuid(session_id, "session_id")
return (
f"{self._host_url}/o/{self._get_tenant_id()}/projects/p/{session_id_}/"
f"r/{run.id}?poll=true"
)
def share_run(self, run_id: ID_TYPE, *, share_id: Optional[ID_TYPE] = None) -> str:
"""Get a share link for a run."""
run_id_ = _as_uuid(run_id, "run_id")
data = {
"run_id": str(run_id_),
"share_token": share_id or str(uuid.uuid4()),
}
response = self.request_with_retries(
"PUT",
f"/runs/{run_id_}/share",
headers=self._headers,
json=data,
)
ls_utils.raise_for_status_with_text(response)
share_token = response.json()["share_token"]
return f"{self._host_url}/public/{share_token}/r"
def unshare_run(self, run_id: ID_TYPE) -> None:
"""Delete share link for a run."""
response = self.request_with_retries(
"DELETE",
f"/runs/{_as_uuid(run_id, 'run_id')}/share",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
def read_run_shared_link(self, run_id: ID_TYPE) -> Optional[str]:
"""Retrieve the shared link for a specific run.
Args:
run_id (ID_TYPE): The ID of the run.
Returns:
Optional[str]: The shared link for the run, or None if the link is not
available.
"""
response = self.request_with_retries(
"GET",
f"/runs/{_as_uuid(run_id, 'run_id')}/share",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
result = response.json()
if result is None or "share_token" not in result:
return None
return f"{self._host_url}/public/{result['share_token']}/r"
def run_is_shared(self, run_id: ID_TYPE) -> bool:
"""Get share state for a run."""
link = self.read_run_shared_link(_as_uuid(run_id, "run_id"))
return link is not None
def read_shared_run(
self, share_token: Union[ID_TYPE, str], run_id: Optional[ID_TYPE] = None
) -> ls_schemas.Run:
"""Get shared runs."""
_, token_uuid = _parse_token_or_url(share_token, "", kind="run")
path = f"/public/{token_uuid}/run"
if run_id is not None:
path += f"/{_as_uuid(run_id, 'run_id')}"
response = self.request_with_retries(
"GET",
path,
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
return ls_schemas.Run(**response.json(), _host_url=self._host_url)
def list_shared_runs(
self, share_token: Union[ID_TYPE, str], run_ids: Optional[List[str]] = None
) -> Iterator[ls_schemas.Run]:
"""Get shared runs."""
body = {"id": run_ids} if run_ids else {}
_, token_uuid = _parse_token_or_url(share_token, "", kind="run")
for run in self._get_cursor_paginated_list(
f"/public/{token_uuid}/runs/query", body=body
):
yield ls_schemas.Run(**run, _host_url=self._host_url)
def read_dataset_shared_schema(
self,
dataset_id: Optional[ID_TYPE] = None,
*,
dataset_name: Optional[str] = None,
) -> ls_schemas.DatasetShareSchema:
"""Retrieve the shared schema of a dataset.
Args:
dataset_id (Optional[ID_TYPE]): The ID of the dataset.
Either `dataset_id` or `dataset_name` must be given.
dataset_name (Optional[str]): The name of the dataset.
Either `dataset_id` or `dataset_name` must be given.
Returns:
ls_schemas.DatasetShareSchema: The shared schema of the dataset.
Raises:
ValueError: If neither `dataset_id` nor `dataset_name` is given.
"""
if dataset_id is None and dataset_name is None:
raise ValueError("Either dataset_id or dataset_name must be given")
if dataset_id is None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
response = self.request_with_retries(
"GET",
f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
d = response.json()
return cast(
ls_schemas.DatasetShareSchema,
{
**d,
"url": f"{self._host_url}/public/"
f"{_as_uuid(d['share_token'], 'response.share_token')}/d",
},
)
def share_dataset(
self,
dataset_id: Optional[ID_TYPE] = None,
*,
dataset_name: Optional[str] = None,
) -> ls_schemas.DatasetShareSchema:
"""Get a share link for a dataset."""
if dataset_id is None and dataset_name is None:
raise ValueError("Either dataset_id or dataset_name must be given")
if dataset_id is None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
data = {
"dataset_id": str(dataset_id),
}
response = self.request_with_retries(
"PUT",
f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share",
headers=self._headers,
json=data,
)
ls_utils.raise_for_status_with_text(response)
d: dict = response.json()
return cast(
ls_schemas.DatasetShareSchema,
{**d, "url": f"{self._host_url}/public/{d['share_token']}/d"},
)
def unshare_dataset(self, dataset_id: ID_TYPE) -> None:
"""Delete share link for a dataset."""
response = self.request_with_retries(
"DELETE",
f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/share",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
def read_shared_dataset(
self,
share_token: str,
) -> ls_schemas.Dataset:
"""Get shared datasets."""
_, token_uuid = _parse_token_or_url(share_token, self.api_url)
response = self.request_with_retries(
"GET",
f"/public/{token_uuid}/datasets",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
return ls_schemas.Dataset(
**response.json(),
_host_url=self._host_url,
_public_path=f"/public/{share_token}/d",
)
def list_shared_examples(
self, share_token: str, *, example_ids: Optional[List[ID_TYPE]] = None
) -> List[ls_schemas.Example]:
"""Get shared examples."""
params = {}
if example_ids is not None:
params["id"] = [str(id) for id in example_ids]
response = self.request_with_retries(
"GET",
f"/public/{_as_uuid(share_token, 'share_token')}/examples",
headers=self._headers,
params=params,
)
ls_utils.raise_for_status_with_text(response)
return [
ls_schemas.Example(**dataset, _host_url=self._host_url)
for dataset in response.json()
]
def list_shared_projects(
self,
*,
dataset_share_token: str,
project_ids: Optional[List[ID_TYPE]] = None,
name: Optional[str] = None,
name_contains: Optional[str] = None,
limit: Optional[int] = None,
) -> Iterator[ls_schemas.TracerSessionResult]:
"""List shared projects.
Args:
dataset_share_token : str
The share token of the dataset.
project_ids : List[ID_TYPE], optional
List of project IDs to filter the results, by default None.
name : str, optional
Name of the project to filter the results, by default None.
name_contains : str, optional
Substring to search for in project names, by default None.
limit : int, optional
Yields:
TracerSessionResult: The shared projects.
"""
params = {"id": project_ids, "name": name, "name_contains": name_contains}
share_token = _as_uuid(dataset_share_token, "dataset_share_token")
for i, project in enumerate(
self._get_paginated_list(
f"/public/{share_token}/datasets/sessions",
params=params,
)
):
yield ls_schemas.TracerSessionResult(**project, _host_url=self._host_url)
if limit is not None and i + 1 >= limit:
break
def create_project(
self,
project_name: str,
*,
description: Optional[str] = None,
metadata: Optional[dict] = None,
upsert: bool = False,
project_extra: Optional[dict] = None,
reference_dataset_id: Optional[ID_TYPE] = None,
) -> ls_schemas.TracerSession:
"""Create a project on the LangSmith API.
Parameters
----------
project_name : str
The name of the project.
project_extra : dict or None, default=None
Additional project information.
metadata: dict or None, default=None
Additional metadata to associate with the project.
description : str or None, default=None
The description of the project.
upsert : bool, default=False
Whether to update the project if it already exists.
reference_dataset_id: UUID or None, default=None
The ID of the reference dataset to associate with the project.
Returns:
-------
TracerSession
The created project.
"""
endpoint = f"{self.api_url}/sessions"
extra = project_extra
if metadata:
extra = {**(extra or {}), "metadata": metadata}
body: Dict[str, Any] = {
"name": project_name,
"extra": extra,
"description": description,
"id": str(uuid.uuid4()),
}
params = {}
if upsert:
params["upsert"] = True
if reference_dataset_id is not None:
body["reference_dataset_id"] = reference_dataset_id
response = self.request_with_retries(
"POST",
endpoint,
headers={**self._headers, "Content-Type": "application/json"},
data=_dumps_json(body),
)
ls_utils.raise_for_status_with_text(response)
return ls_schemas.TracerSession(**response.json(), _host_url=self._host_url)
def update_project(
self,
project_id: ID_TYPE,
*,
name: Optional[str] = None,
description: Optional[str] = None,
metadata: Optional[dict] = None,
project_extra: Optional[dict] = None,
end_time: Optional[datetime.datetime] = None,
) -> ls_schemas.TracerSession:
"""Update a LangSmith project.
Parameters
----------
project_id : UUID
The ID of the project to update.
name : str or None, default=None
The new name to give the project. This is only valid if the project
has been assigned an end_time, meaning it has been completed/closed.
description : str or None, default=None
The new description to give the project.
metadata: dict or None, default=None
project_extra : dict or None, default=None
Additional project information.
Returns:
-------
TracerSession
The updated project.
"""
endpoint = f"{self.api_url}/sessions/{_as_uuid(project_id, 'project_id')}"
extra = project_extra
if metadata:
extra = {**(extra or {}), "metadata": metadata}
body: Dict[str, Any] = {
"name": name,
"extra": extra,
"description": description,
"end_time": end_time.isoformat() if end_time else None,
}
response = self.request_with_retries(
"PATCH",
endpoint,
headers={**self._headers, "Content-Type": "application/json"},
data=_dumps_json(body),
)
ls_utils.raise_for_status_with_text(response)
return ls_schemas.TracerSession(**response.json(), _host_url=self._host_url)
def _get_optional_tenant_id(self) -> Optional[uuid.UUID]:
if self._tenant_id is not None:
return self._tenant_id
try:
response = self.request_with_retries(
"GET", "/sessions", params={"limit": 1}
)
result = response.json()
if isinstance(result, list) and len(result) > 0:
tracer_session = ls_schemas.TracerSessionResult(
**result[0], _host_url=self._host_url
)
self._tenant_id = tracer_session.tenant_id
return self._tenant_id
except Exception as e:
logger.debug(
"Failed to get tenant ID from LangSmith: %s", repr(e), exc_info=True
)
return None
def _get_tenant_id(self) -> uuid.UUID:
tenant_id = self._get_optional_tenant_id()
if tenant_id is None:
raise ls_utils.LangSmithError("No tenant ID found")
return tenant_id
@ls_utils.xor_args(("project_id", "project_name"))
def read_project(
    self,
    *,
    project_id: Optional[str] = None,
    project_name: Optional[str] = None,
    include_stats: bool = False,
) -> ls_schemas.TracerSessionResult:
    """Read a project from the LangSmith API.

    Args:
        project_id: The ID of the project to read.
        project_name: The name of the project to read.
            Note: Only one of project_id or project_name may be given.
        include_stats: Whether to include the project's aggregate
            statistics in the response.

    Returns:
        TracerSessionResult: The project.

    Raises:
        ValueError: If neither project_id nor project_name is provided.
        ls_utils.LangSmithNotFoundError: If no project matches the name.
    """
    path = "/sessions"
    params: Dict[str, Any] = {"limit": 1}
    if project_id is not None:
        # Direct, ID-based lookup returns a single object.
        path += f"/{_as_uuid(project_id, 'project_id')}"
    elif project_name is not None:
        # Name-based lookup returns a (possibly empty) list.
        params["name"] = project_name
    else:
        raise ValueError("Must provide project_name or project_id")
    params["include_stats"] = include_stats
    response = self.request_with_retries("GET", path, params=params)
    result = response.json()
    if isinstance(result, list):
        if len(result) == 0:
            raise ls_utils.LangSmithNotFoundError(
                f"Project {project_name} not found"
            )
        return ls_schemas.TracerSessionResult(**result[0], _host_url=self._host_url)
    # Single-project lookup: reuse the already-parsed body instead of
    # calling response.json() a second time.
    return ls_schemas.TracerSessionResult(**result, _host_url=self._host_url)
def has_project(
    self, project_name: str, *, project_id: Optional[str] = None
) -> bool:
    """Check if a project exists.

    Args:
        project_name: The name of the project to check for.
        project_id: The ID of the project to check for. When provided, it
            is preferred over project_name for the lookup.

    Returns:
        bool: Whether the project exists.
    """
    try:
        if project_id is not None:
            # Previously project_id was accepted but silently ignored;
            # prefer it when given since it identifies the project exactly.
            self.read_project(project_id=project_id)
        else:
            self.read_project(project_name=project_name)
    except ls_utils.LangSmithNotFoundError:
        return False
    return True
def get_test_results(
    self,
    *,
    project_id: Optional[ID_TYPE] = None,
    project_name: Optional[str] = None,
) -> pd.DataFrame:
    """Read the record-level information from an experiment into a Pandas DF.

    Note: this will fetch whatever data exists in the DB. Results are not
    immediately available in the DB upon evaluation run completion.

    Returns:
    --------
    pd.DataFrame
        A dataframe containing the test results.
    """
    warnings.warn(
        "Function get_test_results is in beta.", UserWarning, stacklevel=2
    )
    # Imported lazily so pandas is only required when this method is used.
    from concurrent.futures import ThreadPoolExecutor, as_completed  # type: ignore

    import pandas as pd  # type: ignore

    # Only root runs are fetched; `select` trims the payload to the fields
    # needed to build the result table.
    runs = self.list_runs(
        project_id=project_id,
        project_name=project_name,
        is_root=True,
        select=[
            "id",
            "reference_example_id",
            "inputs",
            "outputs",
            "error",
            "feedback_stats",
            "start_time",
            "end_time",
        ],
    )
    results: list[dict] = []
    example_ids = []

    def fetch_examples(batch):
        # Fetch reference outputs for a batch of example IDs and flatten
        # them into "reference.<key>" columns.
        examples = self.list_examples(example_ids=batch)
        return [
            {
                "example_id": example.id,
                **{f"reference.{k}": v for k, v in (example.outputs or {}).items()},
            }
            for example in examples
        ]

    batch_size = 50
    cursor = 0
    with ThreadPoolExecutor() as executor:
        futures = []
        for r in runs:
            # Flatten run inputs/outputs into dotted column names; one row
            # per root run.
            row = {
                "example_id": r.reference_example_id,
                **{f"input.{k}": v for k, v in r.inputs.items()},
                **{f"outputs.{k}": v for k, v in (r.outputs or {}).items()},
                "execution_time": (
                    (r.end_time - r.start_time).total_seconds()
                    if r.end_time
                    else None
                ),
                "error": r.error,
                "id": r.id,
            }
            if r.feedback_stats:
                # One "feedback.<key>" column per feedback metric (average).
                row.update(
                    {
                        f"feedback.{k}": v.get("avg")
                        for k, v in r.feedback_stats.items()
                    }
                )
            if r.reference_example_id:
                example_ids.append(r.reference_example_id)
            else:
                logger.warning(f"Run {r.id} has no reference example ID.")
            # Every `batch_size` collected IDs, kick off a concurrent fetch
            # of their reference outputs while we keep iterating runs.
            if len(example_ids) % batch_size == 0:
                # Ensure not empty
                if batch := example_ids[cursor : cursor + batch_size]:
                    futures.append(executor.submit(fetch_examples, batch))
                    cursor += batch_size
            results.append(row)
        # Handle any remaining examples
        if example_ids[cursor:]:
            futures.append(executor.submit(fetch_examples, example_ids[cursor:]))
    result_df = pd.DataFrame(results).set_index("example_id")
    example_outputs = [
        output for future in as_completed(futures) for output in future.result()
    ]
    if example_outputs:
        # Join reference outputs onto the run rows by example_id.
        example_df = pd.DataFrame(example_outputs).set_index("example_id")
        result_df = example_df.merge(result_df, left_index=True, right_index=True)
    # Flatten dict columns into dot syntax for easier access
    return pd.json_normalize(result_df.to_dict(orient="records"))
def list_projects(
    self,
    project_ids: Optional[List[ID_TYPE]] = None,
    name: Optional[str] = None,
    name_contains: Optional[str] = None,
    reference_dataset_id: Optional[ID_TYPE] = None,
    reference_dataset_name: Optional[str] = None,
    reference_free: Optional[bool] = None,
    limit: Optional[int] = None,
    metadata: Optional[Dict[str, Any]] = None,
) -> Iterator[ls_schemas.TracerSession]:
    """List projects from the LangSmith API.

    Args:
        project_ids: A list of project IDs to filter by.
        name: Exact project name to filter by.
        name_contains: Substring to search for in project names.
        reference_dataset_id: A dataset ID to filter by. Mutually
            exclusive with reference_dataset_name.
        reference_dataset_name: Name of the reference dataset to filter by.
        reference_free: Whether to filter for only projects not associated
            with a dataset.
        limit: The maximum number of projects to yield.
        metadata: Metadata to filter by.

    Yields:
        TracerSession: The matching projects.

    Raises:
        ValueError: If both reference_dataset_id and
            reference_dataset_name are given.
    """
    # Page size is capped at 100; `limit` is enforced client-side below.
    params: Dict[str, Any] = {
        "limit": min(limit, 100) if limit is not None else 100
    }
    if project_ids is not None:
        params["id"] = project_ids
    if name is not None:
        params["name"] = name
    if name_contains is not None:
        params["name_contains"] = name_contains
    if reference_dataset_id is not None:
        if reference_dataset_name is not None:
            raise ValueError(
                "Only one of reference_dataset_id or"
                " reference_dataset_name may be given"
            )
        params["reference_dataset"] = reference_dataset_id
    elif reference_dataset_name is not None:
        # Resolve the dataset name to its ID before filtering.
        params["reference_dataset"] = self.read_dataset(
            dataset_name=reference_dataset_name
        ).id
    if reference_free is not None:
        params["reference_free"] = reference_free
    if metadata is not None:
        params["metadata"] = json.dumps(metadata)
    yielded = 0
    for project in self._get_paginated_list("/sessions", params=params):
        yield ls_schemas.TracerSession(**project, _host_url=self._host_url)
        yielded += 1
        if limit is not None and yielded >= limit:
            break
@ls_utils.xor_args(("project_name", "project_id"))
def delete_project(
    self, *, project_name: Optional[str] = None, project_id: Optional[str] = None
) -> None:
    """Delete a project from LangSmith.

    Exactly one of project_name or project_id must be provided.

    Args:
        project_name: The name of the project to delete.
        project_id: The ID of the project to delete.

    Raises:
        ValueError: If neither identifier is given.
    """
    if project_name is None and project_id is None:
        raise ValueError("Must provide project_name or project_id")
    if project_name is not None:
        # The DELETE endpoint is ID-based, so resolve the name first.
        project_id = str(self.read_project(project_name=project_name).id)
    response = self.request_with_retries(
        "DELETE",
        f"/sessions/{_as_uuid(project_id, 'project_id')}",
        headers=self._headers,
    )
    ls_utils.raise_for_status_with_text(response)
def create_dataset(
    self,
    dataset_name: str,
    *,
    description: Optional[str] = None,
    data_type: ls_schemas.DataType = ls_schemas.DataType.kv,
    inputs_schema: Optional[Dict[str, Any]] = None,
    outputs_schema: Optional[Dict[str, Any]] = None,
    transformations: Optional[List[ls_schemas.DatasetTransformation]] = None,
    metadata: Optional[dict] = None,
) -> ls_schemas.Dataset:
    """Create a dataset in the LangSmith API.

    Args:
        dataset_name: The name of the dataset.
        description: An optional description of the dataset.
        data_type: The data type of the dataset (defaults to key-value).
        inputs_schema: Optional schema definition for the dataset inputs.
        outputs_schema: Optional schema definition for the dataset outputs.
        transformations: Optional list of transformations to apply.
        metadata: Optional metadata to associate with the dataset.

    Returns:
        ls_schemas.Dataset: The created dataset.

    Raises:
        requests.HTTPError: If the request to create the dataset fails.
    """
    payload: Dict[str, Any] = {
        "name": dataset_name,
        "data_type": data_type.value,
        "created_at": datetime.datetime.now().isoformat(),
        "transformations": transformations,
        # Metadata travels inside the "extra" envelope when present.
        "extra": {"metadata": metadata} if metadata else None,
    }
    if description is not None:
        payload["description"] = description
    if inputs_schema is not None:
        payload["inputs_schema_definition"] = inputs_schema
    if outputs_schema is not None:
        payload["outputs_schema_definition"] = outputs_schema
    response = self.request_with_retries(
        "POST",
        "/datasets",
        headers={**self._headers, "Content-Type": "application/json"},
        data=_orjson.dumps(payload),
    )
    ls_utils.raise_for_status_with_text(response)
    return ls_schemas.Dataset(
        **response.json(),
        _host_url=self._host_url,
        _tenant_id=self._get_optional_tenant_id(),
    )
def has_dataset(
    self, *, dataset_name: Optional[str] = None, dataset_id: Optional[str] = None
) -> bool:
    """Check whether a dataset exists in your tenant.

    Args:
        dataset_name: The name of the dataset to check.
        dataset_id: The ID of the dataset to check.

    Returns:
        bool: True if the dataset exists, False otherwise.
    """
    try:
        # read_dataset raises LangSmithNotFoundError when absent.
        self.read_dataset(dataset_name=dataset_name, dataset_id=dataset_id)
    except ls_utils.LangSmithNotFoundError:
        return False
    return True
@ls_utils.xor_args(("dataset_name", "dataset_id"))
def read_dataset(
    self,
    *,
    dataset_name: Optional[str] = None,
    dataset_id: Optional[ID_TYPE] = None,
) -> ls_schemas.Dataset:
    """Read a dataset from the LangSmith API.

    Args:
        dataset_name: The name of the dataset to read.
        dataset_id: The ID of the dataset to read.

    Returns:
        Dataset: The requested dataset.

    Raises:
        ValueError: If neither dataset_name nor dataset_id is given.
        ls_utils.LangSmithNotFoundError: If no dataset matches the name.
    """
    params: Dict[str, Any] = {"limit": 1}
    if dataset_id is not None:
        # Direct lookup by ID returns a single object.
        path = f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}"
    elif dataset_name is not None:
        # Name lookup returns a (possibly empty) list.
        path = "/datasets"
        params["name"] = dataset_name
    else:
        raise ValueError("Must provide dataset_name or dataset_id")
    response = self.request_with_retries(
        "GET",
        path,
        params=params,
    )
    result = response.json()
    if isinstance(result, list):
        if not result:
            raise ls_utils.LangSmithNotFoundError(
                f"Dataset {dataset_name} not found"
            )
        result = result[0]
    return ls_schemas.Dataset(
        **result,
        _host_url=self._host_url,
        _tenant_id=self._get_optional_tenant_id(),
    )
def diff_dataset_versions(
    self,
    dataset_id: Optional[ID_TYPE] = None,
    *,
    dataset_name: Optional[str] = None,
    from_version: Union[str, datetime.datetime],
    to_version: Union[str, datetime.datetime],
) -> ls_schemas.DatasetDiffInfo:
    """Get the difference between two versions of a dataset.

    Args:
        dataset_id: The ID of the dataset.
        dataset_name: The name of the dataset (used when no ID is given).
        from_version: The starting version (tag string or timestamp).
        to_version: The ending version (tag string or timestamp).

    Returns:
        DatasetDiffInfo: The difference between the two versions.

    Examples:
        .. code-block:: python

            # Get the difference between two tagged versions of a dataset
            diff = client.diff_dataset_versions(
                dataset_name="my-dataset",
                from_version="prod",
                to_version="dev",
            )
            print(diff)

            # Get the difference between two timestamped versions
            diff = client.diff_dataset_versions(
                dataset_name="my-dataset",
                from_version=datetime.datetime(2024, 1, 1),
                to_version=datetime.datetime(2024, 2, 1),
            )
            print(diff)
    """
    if dataset_id is None:
        if dataset_name is None:
            raise ValueError("Must provide either dataset name or ID")
        dataset_id = self.read_dataset(dataset_name=dataset_name).id

    def serialize(version: Union[str, datetime.datetime]) -> str:
        # Datetimes are sent in ISO-8601; tag strings pass through as-is.
        if isinstance(version, datetime.datetime):
            return version.isoformat()
        return version

    dsid = _as_uuid(dataset_id, "dataset_id")
    response = self.request_with_retries(
        "GET",
        f"/datasets/{dsid}/versions/diff",
        headers=self._headers,
        params={
            "from_version": serialize(from_version),
            "to_version": serialize(to_version),
        },
    )
    ls_utils.raise_for_status_with_text(response)
    return ls_schemas.DatasetDiffInfo(**response.json())
def read_dataset_openai_finetuning(
    self, dataset_id: Optional[str] = None, *, dataset_name: Optional[str] = None
) -> list:
    """Download a dataset in OpenAI JSONL format and load it as a list of dicts.

    Args:
        dataset_id: The ID of the dataset to download. Takes precedence
            over dataset_name when both are provided.
        dataset_name: The name of the dataset to download.

    Returns:
        list: The dataset rows, one dict per JSONL line. An empty dataset
        yields an empty list.

    Raises:
        ValueError: If neither dataset_id nor dataset_name is provided.
    """
    if dataset_id is None:
        if dataset_name is None:
            raise ValueError("Must provide dataset_name or dataset_id")
        dataset_id = self.read_dataset(dataset_name=dataset_name).id
    response = self.request_with_retries(
        "GET",
        f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/openai_ft",
    )
    # splitlines + blank-line guard: an empty body now returns [] instead
    # of raising json.JSONDecodeError on json.loads("").
    return [
        json.loads(line)
        for line in response.text.splitlines()
        if line.strip()
    ]
def list_datasets(
    self,
    *,
    dataset_ids: Optional[List[ID_TYPE]] = None,
    data_type: Optional[str] = None,
    dataset_name: Optional[str] = None,
    dataset_name_contains: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    limit: Optional[int] = None,
) -> Iterator[ls_schemas.Dataset]:
    """List the datasets on the LangSmith API.

    Args:
        dataset_ids: Dataset IDs to filter by.
        data_type: Dataset data type to filter by.
        dataset_name: Exact dataset name to filter by.
        dataset_name_contains: Substring to search for in dataset names.
        metadata: Metadata to filter by (JSON-encoded for the query).
        limit: The maximum number of datasets to yield.

    Yields:
        Dataset: The matching datasets.
    """
    # Page size is capped at 100; `limit` is enforced client-side below.
    params: Dict[str, Any] = {
        "limit": min(limit, 100) if limit is not None else 100
    }
    filters = {
        "id": dataset_ids,
        "data_type": data_type,
        "name": dataset_name,
        "name_contains": dataset_name_contains,
    }
    params.update({k: v for k, v in filters.items() if v is not None})
    if metadata is not None:
        params["metadata"] = json.dumps(metadata)
    yielded = 0
    for raw in self._get_paginated_list("/datasets", params=params):
        yield ls_schemas.Dataset(
            **raw,
            _host_url=self._host_url,
            _tenant_id=self._get_optional_tenant_id(),
        )
        yielded += 1
        if limit is not None and yielded >= limit:
            break
@ls_utils.xor_args(("dataset_id", "dataset_name"))
def delete_dataset(
    self,
    *,
    dataset_id: Optional[ID_TYPE] = None,
    dataset_name: Optional[str] = None,
) -> None:
    """Delete a dataset from the LangSmith API.

    Args:
        dataset_id: The ID of the dataset to delete.
        dataset_name: The name of the dataset to delete.

    Raises:
        ValueError: If neither identifier is given.
    """
    # Deletion is ID-based; resolve a name to its ID when necessary.
    resolved_id = (
        self.read_dataset(dataset_name=dataset_name).id
        if dataset_name is not None
        else dataset_id
    )
    if resolved_id is None:
        raise ValueError("Must provide either dataset name or ID")
    response = self.request_with_retries(
        "DELETE",
        f"/datasets/{_as_uuid(resolved_id, 'dataset_id')}",
        headers=self._headers,
    )
    ls_utils.raise_for_status_with_text(response)
def update_dataset_tag(
    self,
    *,
    dataset_id: Optional[ID_TYPE] = None,
    dataset_name: Optional[str] = None,
    as_of: datetime.datetime,
    tag: str,
) -> None:
    """Assign a tag to the dataset version identified by `as_of`.

    If the tag is already assigned to a different version of this dataset,
    the tag will be moved to the new version. The `as_of` timestamp must
    exactly match an existing dataset version; use read_dataset_version
    to resolve one.

    Args:
        dataset_id: The ID of the dataset to update.
        dataset_name: The name of the dataset to update.
        as_of: The exact timestamp of the dataset version to tag.
        tag: The new tag to apply to the dataset.

    Examples:
        .. code-block:: python

            dataset_name = "my-dataset"
            # Get the version of a dataset <= a given timestamp
            dataset_version = client.read_dataset_version(
                dataset_name=dataset_name, as_of=datetime.datetime(2024, 1, 1)
            )
            # Assign that version a new tag
            client.update_dataset_tag(
                dataset_name="my-dataset",
                as_of=dataset_version.as_of,
                tag="prod",
            )
    """
    # Tagging is ID-based; resolve a name to its ID when necessary.
    resolved_id = (
        self.read_dataset(dataset_name=dataset_name).id
        if dataset_name is not None
        else dataset_id
    )
    if resolved_id is None:
        raise ValueError("Must provide either dataset name or ID")
    response = self.request_with_retries(
        "PUT",
        f"/datasets/{_as_uuid(resolved_id, 'dataset_id')}/tags",
        headers=self._headers,
        json={
            "as_of": as_of.isoformat(),
            "tag": tag,
        },
    )
    ls_utils.raise_for_status_with_text(response)
def list_dataset_versions(
    self,
    *,
    dataset_id: Optional[ID_TYPE] = None,
    dataset_name: Optional[str] = None,
    search: Optional[str] = None,
    limit: Optional[int] = None,
) -> Iterator[ls_schemas.DatasetVersion]:
    """List dataset versions.

    Args:
        dataset_id: The ID of the dataset.
        dataset_name: The name of the dataset (used when no ID is given).
        search: The search query.
        limit: The maximum number of versions to yield.

    Yields:
        ls_schemas.DatasetVersion: The dataset versions.
    """
    if dataset_id is None:
        dataset_id = self.read_dataset(dataset_name=dataset_name).id
    # Page size is capped at 100; `limit` is enforced client-side below.
    params = {
        "search": search,
        "limit": min(limit, 100) if limit is not None else 100,
    }
    versions = self._get_paginated_list(
        f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/versions",
        params=params,
    )
    yielded = 0
    for raw in versions:
        yield ls_schemas.DatasetVersion(**raw)
        yielded += 1
        if limit is not None and yielded >= limit:
            break
def read_dataset_version(
    self,
    *,
    dataset_id: Optional[ID_TYPE] = None,
    dataset_name: Optional[str] = None,
    as_of: Optional[datetime.datetime] = None,
    tag: Optional[str] = None,
) -> ls_schemas.DatasetVersion:
    """Get dataset version by as_of or exact tag.

    Use this to resolve the nearest version to a given timestamp or for a
    given tag.

    Args:
        dataset_id: The ID of the dataset.
        dataset_name: The name of the dataset (used when no ID is given).
        as_of: The timestamp of the dataset version to retrieve.
        tag: The tag of the dataset version to retrieve.

    Returns:
        ls_schemas.DatasetVersion: The dataset version.

    Raises:
        ValueError: If not exactly one of as_of and tag is specified.

    Examples:
        .. code-block:: python

            # Get the latest version of a dataset
            client.read_dataset_version(dataset_name="my-dataset", tag="latest")

            # Get the version of a dataset <= a given timestamp
            client.read_dataset_version(
                dataset_name="my-dataset",
                as_of=datetime.datetime(2024, 1, 1),
            )

            # Get the version of a dataset with a specific tag
            client.read_dataset_version(dataset_name="my-dataset", tag="prod")
    """
    # Validate before doing any network work, so an invalid as_of/tag
    # combination doesn't waste a request resolving the dataset name.
    if (as_of and tag) or (as_of is None and tag is None):
        raise ValueError("Exactly one of as_of and tag must be specified.")
    if dataset_id is None:
        dataset_id = self.read_dataset(dataset_name=dataset_name).id
    response = self.request_with_retries(
        "GET",
        f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/version",
        # Serialize the timestamp explicitly as ISO-8601, consistent with
        # the other dataset endpoints, instead of relying on the HTTP
        # layer's str() of a raw datetime.
        params={
            "as_of": as_of.isoformat() if as_of else None,
            "tag": tag,
        },
    )
    return ls_schemas.DatasetVersion(**response.json())
def clone_public_dataset(
    self,
    token_or_url: str,
    *,
    source_api_url: Optional[str] = None,
    dataset_name: Optional[str] = None,
) -> ls_schemas.Dataset:
    """Clone a public dataset to your own langsmith tenant.

    This operation is idempotent. If you already have a dataset with the given name,
    this function will do nothing.

    Args:
        token_or_url (str): The token of the public dataset to clone.
        source_api_url: The URL of the langsmith server where the data is hosted.
            Defaults to the API URL of your current client.
        dataset_name (str): The name of the dataset to create in your tenant.
            Defaults to the name of the public dataset.
    """
    source_api_url = source_api_url or self.api_url
    source_api_url, token_uuid = _parse_token_or_url(token_or_url, source_api_url)
    # A separate client is used to read from the (possibly different)
    # source server hosting the public dataset.
    source_client = Client(
        # Placeholder API key not needed anymore in most cases, but
        # some private deployments may have API key-based rate limiting
        # that would cause this to fail if we provide no value.
        api_url=source_api_url,
        api_key="placeholder",
    )
    ds = source_client.read_shared_dataset(token_uuid)
    dataset_name = dataset_name or ds.name
    try:
        # Idempotency check: if a dataset with this name already exists
        # in the current tenant, return it without cloning.
        ds = self.read_dataset(dataset_name=dataset_name)
        logger.info(
            f"Dataset {dataset_name} already exists in your tenant. Skipping."
        )
        return ds
    except ls_utils.LangSmithNotFoundError:
        pass
    try:
        # Fetch examples first
        examples = list(source_client.list_shared_examples(token_uuid))
        dataset = self.create_dataset(
            dataset_name=dataset_name,
            description=ds.description,
            data_type=ds.data_type or ls_schemas.DataType.kv,
            inputs_schema=ds.inputs_schema,
            outputs_schema=ds.outputs_schema,
            transformations=ds.transformations,
        )
        try:
            self.create_examples(
                inputs=[e.inputs for e in examples],
                outputs=[e.outputs for e in examples],
                dataset_id=dataset.id,
            )
        except BaseException as e:
            # Let's not do automatic clean up for now in case there might be
            # some other reasons why create_examples fails (i.e., not network issue
            # or keyboard interrupt).
            # The risk is that this is an existing dataset that has valid examples
            # populated from another source so we don't want to delete it.
            logger.error(
                f"An error occurred while creating dataset {dataset_name}. "
                "You should delete it manually."
            )
            raise e
    finally:
        # Always release the temporary source client, even on failure.
        del source_client
    return dataset
def _get_data_type(self, dataset_id: ID_TYPE) -> ls_schemas.DataType:
    """Look up the data type of the dataset with the given ID."""
    return self.read_dataset(dataset_id=dataset_id).data_type
@ls_utils.xor_args(("dataset_id", "dataset_name"))
def create_llm_example(
    self,
    prompt: str,
    generation: Optional[str] = None,
    dataset_id: Optional[ID_TYPE] = None,
    dataset_name: Optional[str] = None,
    created_at: Optional[datetime.datetime] = None,
) -> ls_schemas.Example:
    """Add an example (row) to an LLM-type dataset.

    The prompt is stored under the "input" key and the generation under
    the "output" key, matching the LLM dataset row shape.
    """
    return self.create_example(
        inputs={"input": prompt},
        outputs={"output": generation},
        dataset_id=dataset_id,
        dataset_name=dataset_name,
        created_at=created_at,
    )
@ls_utils.xor_args(("dataset_id", "dataset_name"))
def create_chat_example(
    self,
    messages: List[Union[Mapping[str, Any], ls_schemas.BaseMessageLike]],
    generations: Optional[
        Union[Mapping[str, Any], ls_schemas.BaseMessageLike]
    ] = None,
    dataset_id: Optional[ID_TYPE] = None,
    dataset_name: Optional[str] = None,
    created_at: Optional[datetime.datetime] = None,
) -> ls_schemas.Example:
    """Add an example (row) to a Chat-type dataset.

    LangChain-style message objects are converted to plain dicts before
    being stored; plain mappings are passed through unchanged.
    """

    def to_dict(
        msg: Union[Mapping[str, Any], ls_schemas.BaseMessageLike],
    ) -> dict:
        # Normalize a single message to its serializable dict form.
        if ls_utils.is_base_message_like(msg):
            return ls_utils.convert_langchain_message(
                cast(ls_schemas.BaseMessageLike, msg)
            )
        return cast(dict, msg)

    final_input = [to_dict(message) for message in messages]
    final_generations = (
        to_dict(generations) if generations is not None else None
    )
    return self.create_example(
        inputs={"input": final_input},
        outputs=(
            {"output": final_generations} if final_generations is not None else None
        ),
        dataset_id=dataset_id,
        dataset_name=dataset_name,
        created_at=created_at,
    )
def create_example_from_run(
    self,
    run: ls_schemas.Run,
    dataset_id: Optional[ID_TYPE] = None,
    dataset_name: Optional[str] = None,
    created_at: Optional[datetime.datetime] = None,
) -> ls_schemas.Example:
    """Add an example (row) to a dataset from a run.

    The run's inputs/outputs are coerced to the target dataset's data
    type: 'llm' datasets store prompt/generation strings, 'chat' datasets
    store message lists, and 'kv' datasets store the raw dicts.
    """
    if dataset_id is None:
        dataset_id = self.read_dataset(dataset_name=dataset_name).id
        dataset_name = None  # Nested call expects only 1 defined
    dataset_type = self._get_data_type_cached(dataset_id)
    if dataset_type == ls_schemas.DataType.llm:
        # LLM datasets only accept runs of type "llm".
        if run.run_type != "llm":
            raise ValueError(
                f"Run type {run.run_type} is not supported"
                " for dataset of type 'LLM'"
            )
        try:
            prompt = ls_utils.get_prompt_from_inputs(run.inputs)
        except ValueError:
            raise ValueError(
                "Error converting LLM run inputs to prompt for run"
                f" {run.id} with inputs {run.inputs}"
            )
        inputs: Dict[str, Any] = {"input": prompt}
        if not run.outputs:
            # Run produced no outputs -> example has no reference output.
            outputs: Optional[Dict[str, Any]] = None
        else:
            try:
                generation = ls_utils.get_llm_generation_from_outputs(run.outputs)
            except ValueError:
                raise ValueError(
                    "Error converting LLM run outputs to generation for run"
                    f" {run.id} with outputs {run.outputs}"
                )
            outputs = {"output": generation}
    elif dataset_type == ls_schemas.DataType.chat:
        # Chat datasets also only accept "llm"-type runs, but store
        # structured message lists rather than flat strings.
        if run.run_type != "llm":
            raise ValueError(
                f"Run type {run.run_type} is not supported"
                " for dataset of type 'chat'"
            )
        try:
            inputs = {"input": ls_utils.get_messages_from_inputs(run.inputs)}
        except ValueError:
            raise ValueError(
                "Error converting LLM run inputs to chat messages for run"
                f" {run.id} with inputs {run.inputs}"
            )
        if not run.outputs:
            outputs = None
        else:
            try:
                outputs = {
                    "output": ls_utils.get_message_generation_from_outputs(
                        run.outputs
                    )
                }
            except ValueError:
                raise ValueError(
                    "Error converting LLM run outputs to chat generations"
                    f" for run {run.id} with outputs {run.outputs}"
                )
    elif dataset_type == ls_schemas.DataType.kv:
        # Anything goes
        inputs = run.inputs
        outputs = run.outputs
    else:
        raise ValueError(f"Dataset type {dataset_type} not recognized.")
    return self.create_example(
        inputs=inputs,
        outputs=outputs,
        dataset_id=dataset_id,
        dataset_name=dataset_name,
        created_at=created_at,
    )
def create_examples(
    self,
    *,
    inputs: Sequence[Mapping[str, Any]],
    outputs: Optional[Sequence[Optional[Mapping[str, Any]]]] = None,
    metadata: Optional[Sequence[Optional[Mapping[str, Any]]]] = None,
    splits: Optional[Sequence[Optional[str | List[str]]]] = None,
    source_run_ids: Optional[Sequence[Optional[ID_TYPE]]] = None,
    ids: Optional[Sequence[Optional[ID_TYPE]]] = None,
    dataset_id: Optional[ID_TYPE] = None,
    dataset_name: Optional[str] = None,
    **kwargs: Any,
) -> None:
    """Create examples in a dataset.

    Args:
        inputs: The input values for the examples.
        outputs: The output values for the examples.
        metadata: The metadata for the examples.
        splits: The splits for the examples, which are divisions of your
            dataset such as 'train', 'test', or 'validation'.
        source_run_ids: The IDs of the source runs associated with the
            examples.
        ids: The IDs of the examples (generated when omitted).
        dataset_id: The ID of the dataset to create the examples in.
        dataset_name: The name of the dataset to create the examples in.

    Raises:
        ValueError: If neither dataset_id nor dataset_name is given, or if
            any optional sequence's length differs from len(inputs).
    """
    if dataset_id is None and dataset_name is None:
        raise ValueError("Either dataset_id or dataset_name must be provided.")
    if dataset_id is None:
        dataset_id = self.read_dataset(dataset_name=dataset_name).id
    input_len = len(inputs)
    # Every optional sequence must line up one-to-one with `inputs`.
    sequence_args = {
        "outputs": outputs,
        "metadata": metadata,
        "splits": splits,
        "ids": ids,
        "source_run_ids": source_run_ids,
    }
    for arg_name, arg_value in sequence_args.items():
        if arg_value is not None and len(arg_value) != input_len:
            raise ValueError(
                f"Length of {arg_name} ({len(arg_value)}) does not match"
                f" length of inputs ({input_len})"
            )

    def padded(seq):
        # Substitute a row of Nones for any sequence that was omitted.
        return seq if seq is not None else [None] * input_len

    examples = [
        {
            "inputs": in_,
            "outputs": out_,
            "dataset_id": dataset_id,
            "metadata": metadata_,
            "split": split_,
            # Generate an ID client-side when the caller didn't supply one.
            "id": id_ or str(uuid.uuid4()),
            "source_run_id": source_run_id_,
        }
        for in_, out_, metadata_, split_, id_, source_run_id_ in zip(
            inputs,
            padded(outputs),
            padded(metadata),
            padded(splits),
            padded(ids),
            padded(source_run_ids),
        )
    ]
    response = self.request_with_retries(
        "POST",
        "/examples/bulk",
        headers={**self._headers, "Content-Type": "application/json"},
        data=_dumps_json(examples),
    )
    ls_utils.raise_for_status_with_text(response)
@ls_utils.xor_args(("dataset_id", "dataset_name"))
def create_example(
    self,
    inputs: Mapping[str, Any],
    dataset_id: Optional[ID_TYPE] = None,
    dataset_name: Optional[str] = None,
    created_at: Optional[datetime.datetime] = None,
    outputs: Optional[Mapping[str, Any]] = None,
    metadata: Optional[Mapping[str, Any]] = None,
    split: Optional[str | List[str]] = None,
    example_id: Optional[ID_TYPE] = None,
    source_run_id: Optional[ID_TYPE] = None,
) -> ls_schemas.Example:
    """Create a dataset example in the LangSmith API.

    Examples are rows in a dataset, containing the inputs and expected
    outputs (or other reference information) for a model or chain.

    Args:
        inputs: The input values for the example.
        dataset_id: The ID of the dataset to create the example in.
        dataset_name: The name of the dataset to create the example in.
        created_at: The creation timestamp of the example.
        outputs: The output values for the example.
        metadata: The metadata for the example.
        split: The splits for the example, which are divisions of your
            dataset such as 'train', 'test', or 'validation'.
        example_id: The ID to assign the example; a new one is generated
            when omitted.
        source_run_id: The ID of the source run associated with the example.

    Returns:
        Example: The created example.
    """
    if dataset_id is None:
        dataset_id = self.read_dataset(dataset_name=dataset_name).id
    data = {
        "inputs": inputs,
        "outputs": outputs,
        "dataset_id": dataset_id,
        "metadata": metadata,
        "split": split,
        "source_run_id": source_run_id,
    }
    if created_at:
        data["created_at"] = created_at.isoformat()
    # Assign an ID client-side so the caller can reference it immediately.
    data["id"] = example_id or str(uuid.uuid4())
    # Drop unset fields so the server applies its own defaults.
    body = {k: v for k, v in data.items() if v is not None}
    response = self.request_with_retries(
        "POST",
        "/examples",
        headers={**self._headers, "Content-Type": "application/json"},
        data=_dumps_json(body),
    )
    ls_utils.raise_for_status_with_text(response)
    return ls_schemas.Example(
        **response.json(),
        _host_url=self._host_url,
        _tenant_id=self._get_optional_tenant_id(),
    )
def read_example(
    self, example_id: ID_TYPE, *, as_of: Optional[datetime.datetime] = None
) -> ls_schemas.Example:
    """Read an example from the LangSmith API.

    Args:
        example_id: The ID of the example to read.
        as_of: If given, retrieve the example as it existed at this
            dataset-version timestamp.

    Returns:
        Example: The example.
    """
    params = {"as_of": as_of.isoformat() if as_of else None}
    response = self.request_with_retries(
        "GET",
        f"/examples/{_as_uuid(example_id, 'example_id')}",
        params=params,
    )
    return ls_schemas.Example(
        **response.json(),
        _host_url=self._host_url,
        _tenant_id=self._get_optional_tenant_id(),
    )
def list_examples(
self,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
example_ids: Optional[Sequence[ID_TYPE]] = None,
as_of: Optional[Union[datetime.datetime, str]] = None,
splits: Optional[Sequence[str]] = None,
inline_s3_urls: bool = True,
*,
offset: int = 0,
limit: Optional[int] = None,
metadata: Optional[dict] = None,
filter: Optional[str] = None,
**kwargs: Any,
) -> Iterator[ls_schemas.Example]:
"""Retrieve the example rows of the specified dataset.
Args:
dataset_id (UUID, optional): The ID of the dataset to filter by.
Defaults to None.
dataset_name (str, optional): The name of the dataset to filter by.
Defaults to None.
example_ids (List[UUID], optional): The IDs of the examples to filter by.
Defaults to None.
as_of (datetime, str, or optional): The dataset version tag OR
timestamp to retrieve the examples as of.
Response examples will only be those that were present at the time
of the tagged (or timestamped) version.
splits (List[str], optional): A list of dataset splits, which are
divisions of your dataset such as 'train', 'test', or 'validation'.
Returns examples only from the specified splits.
inline_s3_urls (bool, optional): Whether to inline S3 URLs.
Defaults to True.
offset (int): The offset to start from. Defaults to 0.
limit (int, optional): The maximum number of examples to return.
filter (str, optional): A structured fileter string to apply to
the examples.
Yields:
Example: The examples.
"""
params: Dict[str, Any] = {
**kwargs,
"offset": offset,
"id": example_ids,
"as_of": (
as_of.isoformat() if isinstance(as_of, datetime.datetime) else as_of
),
"splits": splits,
"inline_s3_urls": inline_s3_urls,
"limit": min(limit, 100) if limit is not None else 100,
"filter": filter,
}
if metadata is not None:
params["metadata"] = _dumps_json(metadata)
if dataset_id is not None:
params["dataset"] = dataset_id
elif dataset_name is not None:
dataset_id = self.read_dataset(dataset_name=dataset_name).id
params["dataset"] = dataset_id
else:
pass
for i, example in enumerate(
self._get_paginated_list("/examples", params=params)
):
yield ls_schemas.Example(
**example,
_host_url=self._host_url,
_tenant_id=self._get_optional_tenant_id(),
)
if limit is not None and i + 1 >= limit:
break
@warn_beta
def index_dataset(
self,
*,
dataset_id: ID_TYPE,
tag: str = "latest",
**kwargs: Any,
) -> None:
"""Enable dataset indexing. Examples are indexed by their inputs.
This enables searching for similar examples by inputs with
``client.similar_examples()``.
Args:
dataset_id (UUID): The ID of the dataset to index.
tag (str, optional): The version of the dataset to index. If 'latest'
then any updates to the dataset (additions, updates, deletions of
examples) will be reflected in the index.
Returns:
None
Raises:
requests.HTTPError
""" # noqa: E501
dataset_id = _as_uuid(dataset_id, "dataset_id")
resp = self.request_with_retries(
"POST",
f"/datasets/{dataset_id}/index",
headers=self._headers,
data=json.dumps({"tag": tag, **kwargs}),
)
ls_utils.raise_for_status_with_text(resp)
# NOTE: dataset_name arg explicitly not supported to avoid extra API calls.
@warn_beta
def similar_examples(
self,
inputs: dict,
/,
*,
limit: int,
dataset_id: ID_TYPE,
filter: Optional[str] = None,
**kwargs: Any,
) -> List[ls_schemas.ExampleSearch]:
r"""Retrieve the dataset examples whose inputs best match the current inputs.
**Note**: Must have few-shot indexing enabled for the dataset. See
`client.index_dataset()`.
Args:
inputs (dict): The inputs to use as a search query. Must match the dataset
input schema. Must be JSON serializable.
limit (int): The maximum number of examples to return.
dataset_id (str or UUID): The ID of the dataset to search over.
filter (str, optional): A filter string to apply to the search results. Uses
the same syntax as the `filter` parameter in `list_runs()`. Only a subset
of operations are supported. Defaults to None.
For example, you can use ``and(eq(metadata.some_tag, 'some_value'), neq(metadata.env, 'dev'))``
to filter only examples where some_tag has some_value, and the environment is not dev.
kwargs (Any): Additional keyword args to pass as part of request body.
Examples:
.. code-block:: python
from langsmith import Client
client = Client()
client.similar_examples(
{"question": "When would i use the runnable generator"},
limit=3,
dataset_id="...",
)
.. code-block:: pycon
[
ExampleSearch(
inputs={'question': 'How do I cache a Chat model? What caches can I use?'},
outputs={'answer': 'You can use LangChain\'s caching layer for Chat Models. This can save you money by reducing the number of API calls you make to the LLM provider, if you\'re often requesting the same completion multiple times, and speed up your application.\n\nfrom langchain.cache import InMemoryCache\nlangchain.llm_cache = InMemoryCache()\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\')\n\nYou can also use SQLite Cache which uses a SQLite database:\n\nrm .langchain.db\n\nfrom langchain.cache import SQLiteCache\nlangchain.llm_cache = SQLiteCache(database_path=".langchain.db")\n\n# The first time, it is not yet in cache, so it should take longer\nllm.predict(\'Tell me a joke\') \n'},
metadata=None,
id=UUID('b2ddd1c4-dff6-49ae-8544-f48e39053398'),
dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40')
),
ExampleSearch(
inputs={'question': "What's a runnable lambda?"},
outputs={'answer': "A runnable lambda is an object that implements LangChain's `Runnable` interface and runs a callbale (i.e., a function). Note the function must accept a single argument."},
metadata=None,
id=UUID('f94104a7-2434-4ba7-8293-6a283f4860b4'),
dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40')
),
ExampleSearch(
inputs={'question': 'Show me how to use RecursiveURLLoader'},
outputs={'answer': 'The RecursiveURLLoader comes from the langchain.document_loaders.recursive_url_loader module. Here\'s an example of how to use it:\n\nfrom langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader\n\n# Create an instance of RecursiveUrlLoader with the URL you want to load\nloader = RecursiveUrlLoader(url="https://example.com")\n\n# Load all child links from the URL page\nchild_links = loader.load()\n\n# Print the child links\nfor link in child_links:\n print(link)\n\nMake sure to replace "https://example.com" with the actual URL you want to load. The load() method returns a list of child links found on the URL page. You can iterate over this list to access each child link.'},
metadata=None,
id=UUID('0308ea70-a803-4181-a37d-39e95f138f8c'),
dataset_id=UUID('01b6ce0f-bfb6-4f48-bbb8-f19272135d40')
),
]
"""
dataset_id = _as_uuid(dataset_id, "dataset_id")
req = {
"inputs": inputs,
"limit": limit,
**kwargs,
}
if filter is not None:
req["filter"] = filter
resp = self.request_with_retries(
"POST",
f"/datasets/{dataset_id}/search",
headers=self._headers,
data=json.dumps(req),
)
ls_utils.raise_for_status_with_text(resp)
examples = []
for ex in resp.json()["examples"]:
examples.append(ls_schemas.ExampleSearch(**ex, dataset_id=dataset_id))
return examples
def update_example(
self,
example_id: ID_TYPE,
*,
inputs: Optional[Dict[str, Any]] = None,
outputs: Optional[Mapping[str, Any]] = None,
metadata: Optional[Dict] = None,
split: Optional[str | List[str]] = None,
dataset_id: Optional[ID_TYPE] = None,
) -> Dict[str, Any]:
"""Update a specific example.
Parameters
----------
example_id : str or UUID
The ID of the example to update.
inputs : Dict[str, Any] or None, default=None
The input values to update.
outputs : Mapping[str, Any] or None, default=None
The output values to update.
metadata : Dict or None, default=None
The metadata to update.
split : str or List[str] or None, default=None
The dataset split to update, such as
'train', 'test', or 'validation'.
dataset_id : UUID or None, default=None
The ID of the dataset to update.
Returns:
-------
Dict[str, Any]
The updated example.
"""
example = dict(
inputs=inputs,
outputs=outputs,
dataset_id=dataset_id,
metadata=metadata,
split=split,
)
response = self.request_with_retries(
"PATCH",
f"/examples/{_as_uuid(example_id, 'example_id')}",
headers={**self._headers, "Content-Type": "application/json"},
data=_dumps_json({k: v for k, v in example.items() if v is not None}),
)
ls_utils.raise_for_status_with_text(response)
return response.json()
def update_examples(
self,
*,
example_ids: Sequence[ID_TYPE],
inputs: Optional[Sequence[Optional[Dict[str, Any]]]] = None,
outputs: Optional[Sequence[Optional[Mapping[str, Any]]]] = None,
metadata: Optional[Sequence[Optional[Dict]]] = None,
splits: Optional[Sequence[Optional[str | List[str]]]] = None,
dataset_ids: Optional[Sequence[Optional[ID_TYPE]]] = None,
) -> Dict[str, Any]:
"""Update multiple examples.
Parameters
----------
example_ids : Sequence[ID_TYPE]
The IDs of the examples to update.
inputs : Optional[Sequence[Optional[Dict[str, Any]]], default=None
The input values for the examples.
outputs : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None
The output values for the examples.
metadata : Optional[Sequence[Optional[Mapping[str, Any]]]], default=None
The metadata for the examples.
split : Optional[Sequence[Optional[str | List[str]]]], default=None
The splits for the examples, which are divisions
of your dataset such as 'train', 'test', or 'validation'.
dataset_ids : Optional[Sequence[Optional[ID_TYPE]]], default=None
The IDs of the datasets to move the examples to.
Returns:
-------
Dict[str, Any]
The response from the server (specifies the number of examples updated).
"""
sequence_args = {
"inputs": inputs,
"outputs": outputs,
"metadata": metadata,
"splits": splits,
"dataset_ids": dataset_ids,
}
# Since inputs are required, we will check against them
examples_len = len(example_ids)
for arg_name, arg_value in sequence_args.items():
if arg_value is not None and len(arg_value) != examples_len:
raise ValueError(
f"Length of {arg_name} ({len(arg_value)}) does not match"
f" length of examples ({examples_len})"
)
examples = [
{
"id": id_,
"inputs": in_,
"outputs": out_,
"dataset_id": dataset_id_,
"metadata": metadata_,
"split": split_,
}
for id_, in_, out_, metadata_, split_, dataset_id_ in zip(
example_ids,
inputs or [None] * len(example_ids),
outputs or [None] * len(example_ids),
metadata or [None] * len(example_ids),
splits or [None] * len(example_ids),
dataset_ids or [None] * len(example_ids),
)
]
response = self.request_with_retries(
"PATCH",
"/examples/bulk",
headers={**self._headers, "Content-Type": "application/json"},
data=(
_dumps_json(
[
{k: v for k, v in example.items() if v is not None}
for example in examples
]
)
),
)
ls_utils.raise_for_status_with_text(response)
return response.json()
def delete_example(self, example_id: ID_TYPE) -> None:
"""Delete an example by ID.
Parameters
----------
example_id : str or UUID
The ID of the example to delete.
"""
response = self.request_with_retries(
"DELETE",
f"/examples/{_as_uuid(example_id, 'example_id')}",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
def list_dataset_splits(
self,
*,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
as_of: Optional[Union[str, datetime.datetime]] = None,
) -> List[str]:
"""Get the splits for a dataset.
Args:
dataset_id (ID_TYPE): The ID of the dataset.
as_of (Optional[Union[str, datetime.datetime]], optional): The version
of the dataset to retrieve splits for. Can be a timestamp or a
string tag. Defaults to "latest".
Returns:
List[str]: The names of this dataset's.
"""
if dataset_id is None:
if dataset_name is None:
raise ValueError("Must provide dataset name or ID")
dataset_id = self.read_dataset(dataset_name=dataset_name).id
params = {}
if as_of is not None:
params["as_of"] = (
as_of.isoformat() if isinstance(as_of, datetime.datetime) else as_of
)
response = self.request_with_retries(
"GET",
f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/splits",
params=params,
)
ls_utils.raise_for_status_with_text(response)
return response.json()
def update_dataset_splits(
self,
*,
dataset_id: Optional[ID_TYPE] = None,
dataset_name: Optional[str] = None,
split_name: str,
example_ids: List[ID_TYPE],
remove: bool = False,
) -> None:
"""Update the splits for a dataset.
Args:
dataset_id (ID_TYPE): The ID of the dataset to update.
split_name (str): The name of the split to update.
example_ids (List[ID_TYPE]): The IDs of the examples to add to or
remove from the split.
remove (bool, optional): If True, remove the examples from the split.
If False, add the examples to the split. Defaults to False.
Returns:
None
"""
if dataset_id is None:
if dataset_name is None:
raise ValueError("Must provide dataset name or ID")
dataset_id = self.read_dataset(dataset_name=dataset_name).id
data = {
"split_name": split_name,
"examples": [
str(_as_uuid(id_, f"example_ids[{i}]"))
for i, id_ in enumerate(example_ids)
],
"remove": remove,
}
response = self.request_with_retries(
"PUT", f"/datasets/{_as_uuid(dataset_id, 'dataset_id')}/splits", json=data
)
ls_utils.raise_for_status_with_text(response)
def _resolve_run_id(
self,
run: Union[ls_schemas.Run, ls_schemas.RunBase, str, uuid.UUID],
load_child_runs: bool,
) -> ls_schemas.Run:
"""Resolve the run ID.
Parameters
----------
run : Run or RunBase or str or UUID
The run to resolve.
load_child_runs : bool
Whether to load child runs.
Returns:
-------
Run
The resolved run.
Raises:
------
TypeError
If the run type is invalid.
"""
if isinstance(run, (str, uuid.UUID)):
run_ = self.read_run(run, load_child_runs=load_child_runs)
else:
run_ = cast(ls_schemas.Run, run)
return run_
def _resolve_example_id(
self,
example: Union[ls_schemas.Example, str, uuid.UUID, dict, None],
run: ls_schemas.Run,
) -> Optional[ls_schemas.Example]:
"""Resolve the example ID.
Parameters
----------
example : Example or str or UUID or dict or None
The example to resolve.
run : Run
The run associated with the example.
Returns:
-------
Example or None
The resolved example.
"""
if isinstance(example, (str, uuid.UUID)):
reference_example_ = self.read_example(example)
elif isinstance(example, ls_schemas.Example):
reference_example_ = example
elif isinstance(example, dict):
reference_example_ = ls_schemas.Example(
**example,
_host_url=self._host_url,
_tenant_id=self._get_optional_tenant_id(),
)
elif run.reference_example_id is not None:
reference_example_ = self.read_example(run.reference_example_id)
else:
reference_example_ = None
return reference_example_
def _select_eval_results(
self,
results: Union[
ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults, dict
],
*,
fn_name: Optional[str] = None,
) -> List[ls_evaluator.EvaluationResult]:
from langsmith.evaluation import evaluator as ls_evaluator # noqa: F811
def _cast_result(
single_result: Union[ls_evaluator.EvaluationResult, dict],
) -> ls_evaluator.EvaluationResult:
if isinstance(single_result, dict):
return ls_evaluator.EvaluationResult(
**{
"key": fn_name,
"comment": single_result.get("reasoning"),
**single_result,
}
)
return single_result
def _is_eval_results(results: Any) -> TypeGuard[ls_evaluator.EvaluationResults]:
return isinstance(results, dict) and "results" in results
if isinstance(results, ls_evaluator.EvaluationResult):
results_ = [results]
elif _is_eval_results(results):
results_ = [_cast_result(r) for r in results["results"]]
elif isinstance(results, dict):
results_ = [_cast_result(cast(dict, results))]
else:
raise ValueError(
f"Invalid evaluation results type: {type(results)}."
" Must be EvaluationResult, EvaluationResults."
)
return results_
def evaluate_run(
self,
run: Union[ls_schemas.Run, ls_schemas.RunBase, str, uuid.UUID],
evaluator: ls_evaluator.RunEvaluator,
*,
source_info: Optional[Dict[str, Any]] = None,
reference_example: Optional[
Union[ls_schemas.Example, str, dict, uuid.UUID]
] = None,
load_child_runs: bool = False,
) -> ls_evaluator.EvaluationResult:
"""Evaluate a run.
Parameters
----------
run : Run or RunBase or str or UUID
The run to evaluate.
evaluator : RunEvaluator
The evaluator to use.
source_info : Dict[str, Any] or None, default=None
Additional information about the source of the evaluation to log
as feedback metadata.
reference_example : Example or str or dict or UUID or None, default=None
The example to use as a reference for the evaluation.
If not provided, the run's reference example will be used.
load_child_runs : bool, default=False
Whether to load child runs when resolving the run ID.
Returns:
-------
Feedback
The feedback object created by the evaluation.
"""
run_ = self._resolve_run_id(run, load_child_runs=load_child_runs)
reference_example_ = self._resolve_example_id(reference_example, run_)
evaluator_response = evaluator.evaluate_run(
run_,
example=reference_example_,
)
results = self._log_evaluation_feedback(
evaluator_response,
run_,
source_info=source_info,
)
# TODO: Return all results
return results[0]
    def _log_evaluation_feedback(
        self,
        evaluator_response: Union[
            ls_evaluator.EvaluationResult, ls_evaluator.EvaluationResults, dict
        ],
        run: Optional[ls_schemas.Run] = None,
        source_info: Optional[Dict[str, Any]] = None,
        project_id: Optional[ID_TYPE] = None,
        *,
        _executor: Optional[cf.ThreadPoolExecutor] = None,
    ) -> List[ls_evaluator.EvaluationResult]:
        """Normalize evaluator output and log each result as model feedback.

        Parameters
        ----------
        evaluator_response : EvaluationResult, EvaluationResults, or dict
            Raw evaluator output; normalized via ``_select_eval_results``.
        run : Run or None, default=None
            The evaluated run; supplies the fallback run/trace IDs for feedback.
        source_info : Dict[str, Any] or None, default=None
            Extra metadata merged into each result's evaluator_info.
        project_id : ID_TYPE or None, default=None
            Project to attach the feedback to, if any.
        _executor : ThreadPoolExecutor or None, default=None
            When provided, feedback creation is submitted asynchronously to
            this executor instead of being sent synchronously.

        Returns:
        -------
        List[EvaluationResult]
            The normalized results (returned regardless of async submission).
        """
        results = self._select_eval_results(evaluator_response)

        def _submit_feedback(**kwargs):
            # Fire-and-forget when an executor is supplied; otherwise block.
            if _executor:
                _executor.submit(self.create_feedback, **kwargs)
            else:
                self.create_feedback(**kwargs)

        for res in results:
            # Per-result evaluator_info takes lower precedence than the
            # caller-supplied source_info on key collisions.
            source_info_ = source_info or {}
            if res.evaluator_info:
                source_info_ = {**res.evaluator_info, **source_info_}
            # Prefer the result's explicit target run; fall back to the
            # evaluated run's ID.
            run_id_ = None
            if res.target_run_id:
                run_id_ = res.target_run_id
            elif run is not None:
                run_id_ = run.id
            _submit_feedback(
                run_id=run_id_,
                key=res.key,
                score=res.score,
                value=res.value,
                comment=res.comment,
                correction=res.correction,
                source_info=source_info_,
                source_run_id=res.source_run_id,
                feedback_config=cast(
                    Optional[ls_schemas.FeedbackConfig], res.feedback_config
                ),
                feedback_source_type=ls_schemas.FeedbackSourceType.MODEL,
                project_id=project_id,
                extra=res.extra,
                trace_id=run.trace_id if run else None,
            )
        return results
async def aevaluate_run(
self,
run: Union[ls_schemas.Run, str, uuid.UUID],
evaluator: ls_evaluator.RunEvaluator,
*,
source_info: Optional[Dict[str, Any]] = None,
reference_example: Optional[
Union[ls_schemas.Example, str, dict, uuid.UUID]
] = None,
load_child_runs: bool = False,
) -> ls_evaluator.EvaluationResult:
"""Evaluate a run asynchronously.
Parameters
----------
run : Run or str or UUID
The run to evaluate.
evaluator : RunEvaluator
The evaluator to use.
source_info : Dict[str, Any] or None, default=None
Additional information about the source of the evaluation to log
as feedback metadata.
reference_example : Optional Example or UUID, default=None
The example to use as a reference for the evaluation.
If not provided, the run's reference example will be used.
load_child_runs : bool, default=False
Whether to load child runs when resolving the run ID.
Returns:
-------
EvaluationResult
The evaluation result object created by the evaluation.
"""
run_ = self._resolve_run_id(run, load_child_runs=load_child_runs)
reference_example_ = self._resolve_example_id(reference_example, run_)
evaluator_response = await evaluator.aevaluate_run(
run_,
example=reference_example_,
)
# TODO: Return all results and use async API
results = self._log_evaluation_feedback(
evaluator_response,
run_,
source_info=source_info,
)
return results[0]
def create_feedback(
self,
run_id: Optional[ID_TYPE],
key: str,
*,
score: Union[float, int, bool, None] = None,
value: Union[str, dict, None] = None,
correction: Union[dict, None] = None,
comment: Union[str, None] = None,
source_info: Optional[Dict[str, Any]] = None,
feedback_source_type: Union[
ls_schemas.FeedbackSourceType, str
] = ls_schemas.FeedbackSourceType.API,
source_run_id: Optional[ID_TYPE] = None,
feedback_id: Optional[ID_TYPE] = None,
feedback_config: Optional[ls_schemas.FeedbackConfig] = None,
stop_after_attempt: int = 10,
project_id: Optional[ID_TYPE] = None,
comparative_experiment_id: Optional[ID_TYPE] = None,
feedback_group_id: Optional[ID_TYPE] = None,
extra: Optional[Dict] = None,
trace_id: Optional[ID_TYPE] = None,
**kwargs: Any,
) -> ls_schemas.Feedback:
"""Create a feedback in the LangSmith API.
Parameters
----------
run_id : str or UUID
The ID of the run to provide feedback for. Either the run_id OR
the project_id must be provided.
trace_id : str or UUID
The trace ID of the run to provide feedback for. This is optional.
key : str
The name of the metric or 'aspect' this feedback is about.
score : float or int or bool or None, default=None
The score to rate this run on the metric or aspect.
value : float or int or bool or str or dict or None, default=None
The display value or non-numeric value for this feedback.
correction : dict or None, default=None
The proper ground truth for this run.
comment : str or None, default=None
A comment about this feedback, such as a justification for the score or
chain-of-thought trajectory for an LLM judge.
source_info : Dict[str, Any] or None, default=None
Information about the source of this feedback.
feedback_source_type : FeedbackSourceType or str, default=FeedbackSourceType.API
The type of feedback source, such as model (for model-generated feedback)
or API.
source_run_id : str or UUID or None, default=None,
The ID of the run that generated this feedback, if a "model" type.
feedback_id : str or UUID or None, default=None
The ID of the feedback to create. If not provided, a random UUID will be
generated.
feedback_config: langsmith.schemas.FeedbackConfig or None, default=None,
The configuration specifying how to interpret feedback with this key.
Examples include continuous (with min/max bounds), categorical,
or freeform.
stop_after_attempt : int, default=10
The number of times to retry the request before giving up.
project_id : str or UUID
The ID of the project_id to provide feedback on. One - and only one - of
this and run_id must be provided.
comparative_experiment_id : str or UUID
If this feedback was logged as a part of a comparative experiment, this
associates the feedback with that experiment.
feedback_group_id : str or UUID
When logging preferences, ranking runs, or other comparative feedback,
this is used to group feedback together.
extra : dict
Metadata for the feedback.
trace_id: Optional[ID_TYPE] = The trace ID of the run to provide feedback for. Enables batch ingestion.
"""
if run_id is None and project_id is None:
raise ValueError("One of run_id and project_id must be provided")
if run_id is not None and project_id is not None:
raise ValueError("Only one of run_id and project_id must be provided")
if kwargs:
warnings.warn(
"The following arguments are no longer used in the create_feedback"
f" endpoint: {sorted(kwargs)}",
DeprecationWarning,
)
try:
if not isinstance(feedback_source_type, ls_schemas.FeedbackSourceType):
feedback_source_type = ls_schemas.FeedbackSourceType(
feedback_source_type
)
if feedback_source_type == ls_schemas.FeedbackSourceType.API:
feedback_source: ls_schemas.FeedbackSourceBase = (
ls_schemas.APIFeedbackSource(metadata=source_info)
)
elif feedback_source_type == ls_schemas.FeedbackSourceType.MODEL:
feedback_source = ls_schemas.ModelFeedbackSource(metadata=source_info)
else:
raise ValueError(f"Unknown feedback source type {feedback_source_type}")
feedback_source.metadata = (
feedback_source.metadata if feedback_source.metadata is not None else {}
)
if source_run_id is not None and "__run" not in feedback_source.metadata:
feedback_source.metadata["__run"] = {"run_id": str(source_run_id)}
if feedback_source.metadata and "__run" in feedback_source.metadata:
# Validate that the linked run ID is a valid UUID
# Run info may be a base model or dict.
_run_meta: Union[dict, Any] = feedback_source.metadata["__run"]
if hasattr(_run_meta, "dict") and callable(_run_meta):
_run_meta = _run_meta.dict()
if "run_id" in _run_meta:
_run_meta["run_id"] = str(
_as_uuid(
feedback_source.metadata["__run"]["run_id"],
"feedback_source.metadata['__run']['run_id']",
)
)
feedback_source.metadata["__run"] = _run_meta
feedback = ls_schemas.FeedbackCreate(
id=_ensure_uuid(feedback_id),
# If run_id is None, this is interpreted as session-level
# feedback.
run_id=_ensure_uuid(run_id, accept_null=True),
trace_id=_ensure_uuid(trace_id, accept_null=True),
key=key,
score=score,
value=value,
correction=correction,
comment=comment,
feedback_source=feedback_source,
created_at=datetime.datetime.now(datetime.timezone.utc),
modified_at=datetime.datetime.now(datetime.timezone.utc),
feedback_config=feedback_config,
session_id=_ensure_uuid(project_id, accept_null=True),
comparative_experiment_id=_ensure_uuid(
comparative_experiment_id, accept_null=True
),
feedback_group_id=_ensure_uuid(feedback_group_id, accept_null=True),
extra=extra,
)
use_multipart = (self.info.batch_ingest_config or {}).get(
"use_multipart_endpoint", False
)
if (
use_multipart
and self.info.version # TODO: Remove version check once versions have updated
and ls_utils.is_version_greater_or_equal(self.info.version, "0.8.10")
and self.tracing_queue is not None
and feedback.trace_id is not None
):
serialized_op = serialize_feedback_dict(feedback)
self.tracing_queue.put(
TracingQueueItem(str(feedback.id), serialized_op)
)
else:
feedback_block = _dumps_json(feedback.dict(exclude_none=True))
self.request_with_retries(
"POST",
"/feedback",
request_kwargs={
"data": feedback_block,
},
stop_after_attempt=stop_after_attempt,
retry_on=(ls_utils.LangSmithNotFoundError,),
)
return ls_schemas.Feedback(**feedback.dict())
except Exception as e:
logger.error("Error creating feedback", exc_info=True)
raise e
def update_feedback(
self,
feedback_id: ID_TYPE,
*,
score: Union[float, int, bool, None] = None,
value: Union[float, int, bool, str, dict, None] = None,
correction: Union[dict, None] = None,
comment: Union[str, None] = None,
) -> None:
"""Update a feedback in the LangSmith API.
Parameters
----------
feedback_id : str or UUID
The ID of the feedback to update.
score : float or int or bool or None, default=None
The score to update the feedback with.
value : float or int or bool or str or dict or None, default=None
The value to update the feedback with.
correction : dict or None, default=None
The correction to update the feedback with.
comment : str or None, default=None
The comment to update the feedback with.
"""
feedback_update: Dict[str, Any] = {}
if score is not None:
feedback_update["score"] = score
if value is not None:
feedback_update["value"] = value
if correction is not None:
feedback_update["correction"] = correction
if comment is not None:
feedback_update["comment"] = comment
response = self.request_with_retries(
"PATCH",
f"/feedback/{_as_uuid(feedback_id, 'feedback_id')}",
headers={**self._headers, "Content-Type": "application/json"},
data=_dumps_json(feedback_update),
)
ls_utils.raise_for_status_with_text(response)
def read_feedback(self, feedback_id: ID_TYPE) -> ls_schemas.Feedback:
"""Read a feedback from the LangSmith API.
Parameters
----------
feedback_id : str or UUID
The ID of the feedback to read.
Returns:
-------
Feedback
The feedback.
"""
response = self.request_with_retries(
"GET",
f"/feedback/{_as_uuid(feedback_id, 'feedback_id')}",
)
return ls_schemas.Feedback(**response.json())
def list_feedback(
self,
*,
run_ids: Optional[Sequence[ID_TYPE]] = None,
feedback_key: Optional[Sequence[str]] = None,
feedback_source_type: Optional[Sequence[ls_schemas.FeedbackSourceType]] = None,
limit: Optional[int] = None,
**kwargs: Any,
) -> Iterator[ls_schemas.Feedback]:
"""List the feedback objects on the LangSmith API.
Parameters
----------
run_ids : List[str or UUID] or None, default=None
The IDs of the runs to filter by.
feedback_key: List[str] or None, default=None
The feedback key(s) to filter by. Example: 'correctness'
The query performs a union of all feedback keys.
feedback_source_type: List[FeedbackSourceType] or None, default=None
The type of feedback source, such as model
(for model-generated feedback) or API.
limit : int or None, default=None
**kwargs : Any
Additional keyword arguments.
Yields:
------
Feedback
The feedback objects.
"""
params: dict = {
"run": run_ids,
"limit": min(limit, 100) if limit is not None else 100,
**kwargs,
}
if feedback_key is not None:
params["key"] = feedback_key
if feedback_source_type is not None:
params["source"] = feedback_source_type
for i, feedback in enumerate(
self._get_paginated_list("/feedback", params=params)
):
yield ls_schemas.Feedback(**feedback)
if limit is not None and i + 1 >= limit:
break
def delete_feedback(self, feedback_id: ID_TYPE) -> None:
"""Delete a feedback by ID.
Parameters
----------
feedback_id : str or UUID
The ID of the feedback to delete.
"""
response = self.request_with_retries(
"DELETE",
f"/feedback/{_as_uuid(feedback_id, 'feedback_id')}",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
def create_feedback_from_token(
self,
token_or_url: Union[str, uuid.UUID],
score: Union[float, int, bool, None] = None,
*,
value: Union[float, int, bool, str, dict, None] = None,
correction: Union[dict, None] = None,
comment: Union[str, None] = None,
metadata: Optional[dict] = None,
) -> None:
"""Create feedback from a presigned token or URL.
Args:
token_or_url (Union[str, uuid.UUID]): The token or URL from which to create
feedback.
score (Union[float, int, bool, None], optional): The score of the feedback.
Defaults to None.
value (Union[float, int, bool, str, dict, None], optional): The value of the
feedback. Defaults to None.
correction (Union[dict, None], optional): The correction of the feedback.
Defaults to None.
comment (Union[str, None], optional): The comment of the feedback. Defaults
to None.
metadata (Optional[dict], optional): Additional metadata for the feedback.
Defaults to None.
Raises:
ValueError: If the source API URL is invalid.
Returns:
None: This method does not return anything.
"""
source_api_url, token_uuid = _parse_token_or_url(
token_or_url, self.api_url, num_parts=1
)
if source_api_url != self.api_url:
raise ValueError(f"Invalid source API URL. {source_api_url}")
response = self.request_with_retries(
"POST",
f"/feedback/tokens/{_as_uuid(token_uuid)}",
data=_dumps_json(
{
"score": score,
"value": value,
"correction": correction,
"comment": comment,
"metadata": metadata,
# TODO: Add ID once the API supports it.
}
),
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
def create_presigned_feedback_token(
self,
run_id: ID_TYPE,
feedback_key: str,
*,
expiration: Optional[datetime.datetime | datetime.timedelta] = None,
feedback_config: Optional[ls_schemas.FeedbackConfig] = None,
feedback_id: Optional[ID_TYPE] = None,
) -> ls_schemas.FeedbackIngestToken:
"""Create a pre-signed URL to send feedback data to.
This is useful for giving browser-based clients a way to upload
feedback data directly to LangSmith without accessing the
API key.
Args:
run_id:
feedback_key:
expiration: The expiration time of the pre-signed URL.
Either a datetime or a timedelta offset from now.
Default to 3 hours.
feedback_config: FeedbackConfig or None.
If creating a feedback_key for the first time,
this defines how the metric should be interpreted,
such as a continuous score (w/ optional bounds),
or distribution over categorical values.
feedback_id: The ID of the feedback to create. If not provided, a new
feedback will be created.
Returns:
The pre-signed URL for uploading feedback data.
"""
body: Dict[str, Any] = {
"run_id": run_id,
"feedback_key": feedback_key,
"feedback_config": feedback_config,
"id": feedback_id or str(uuid.uuid4()),
}
if expiration is None:
body["expires_in"] = ls_schemas.TimeDeltaInput(
days=0,
hours=3,
minutes=0,
)
elif isinstance(expiration, datetime.datetime):
body["expires_at"] = expiration.isoformat()
elif isinstance(expiration, datetime.timedelta):
body["expires_in"] = ls_schemas.TimeDeltaInput(
days=expiration.days,
hours=expiration.seconds // 3600,
minutes=(expiration.seconds // 60) % 60,
)
else:
raise ValueError(f"Unknown expiration type: {type(expiration)}")
response = self.request_with_retries(
"POST",
"/feedback/tokens",
data=_dumps_json(body),
)
ls_utils.raise_for_status_with_text(response)
return ls_schemas.FeedbackIngestToken(**response.json())
def create_presigned_feedback_tokens(
self,
run_id: ID_TYPE,
feedback_keys: Sequence[str],
*,
expiration: Optional[datetime.datetime | datetime.timedelta] = None,
feedback_configs: Optional[
Sequence[Optional[ls_schemas.FeedbackConfig]]
] = None,
) -> Sequence[ls_schemas.FeedbackIngestToken]:
"""Create a pre-signed URL to send feedback data to.
This is useful for giving browser-based clients a way to upload
feedback data directly to LangSmith without accessing the
API key.
Args:
run_id:
feedback_key:
expiration: The expiration time of the pre-signed URL.
Either a datetime or a timedelta offset from now.
Default to 3 hours.
feedback_config: FeedbackConfig or None.
If creating a feedback_key for the first time,
this defines how the metric should be interpreted,
such as a continuous score (w/ optional bounds),
or distribution over categorical values.
Returns:
The pre-signed URL for uploading feedback data.
"""
# validate
if feedback_configs is not None and len(feedback_keys) != len(feedback_configs):
raise ValueError(
"The length of feedback_keys and feedback_configs must be the same."
)
if not feedback_configs:
feedback_configs = [None] * len(feedback_keys)
# build expiry option
expires_in, expires_at = None, None
if expiration is None:
expires_in = ls_schemas.TimeDeltaInput(
days=0,
hours=3,
minutes=0,
)
elif isinstance(expiration, datetime.datetime):
expires_at = expiration.isoformat()
elif isinstance(expiration, datetime.timedelta):
expires_in = ls_schemas.TimeDeltaInput(
days=expiration.days,
hours=expiration.seconds // 3600,
minutes=(expiration.seconds // 60) % 60,
)
else:
raise ValueError(f"Unknown expiration type: {type(expiration)}")
# assemble body, one entry per key
body = _dumps_json(
[
{
"run_id": run_id,
"feedback_key": feedback_key,
"feedback_config": feedback_config,
"expires_in": expires_in,
"expires_at": expires_at,
}
for feedback_key, feedback_config in zip(
feedback_keys, feedback_configs
)
]
)
def req(api_url: str, api_key: Optional[str]) -> list:
response = self.request_with_retries(
"POST",
f"{api_url}/feedback/tokens",
request_kwargs={
"data": body,
"headers": {
**self._headers,
X_API_KEY: api_key or self.api_key,
},
},
)
ls_utils.raise_for_status_with_text(response)
return response.json()
tokens = []
with cf.ThreadPoolExecutor(max_workers=len(self._write_api_urls)) as executor:
futs = [
executor.submit(req, api_url, api_key)
for api_url, api_key in self._write_api_urls.items()
]
for fut in cf.as_completed(futs):
response = fut.result()
tokens.extend(
[ls_schemas.FeedbackIngestToken(**part) for part in response]
)
return tokens
def list_presigned_feedback_tokens(
self,
run_id: ID_TYPE,
*,
limit: Optional[int] = None,
) -> Iterator[ls_schemas.FeedbackIngestToken]:
"""List the feedback ingest tokens for a run.
Args:
run_id: The ID of the run to filter by.
limit: The maximum number of tokens to return.
Yields:
FeedbackIngestToken
The feedback ingest tokens.
"""
params = {
"run_id": _as_uuid(run_id, "run_id"),
"limit": min(limit, 100) if limit is not None else 100,
}
for i, token in enumerate(
self._get_paginated_list("/feedback/tokens", params=params)
):
yield ls_schemas.FeedbackIngestToken(**token)
if limit is not None and i + 1 >= limit:
break
# Annotation Queue API
def list_annotation_queues(
self,
*,
queue_ids: Optional[List[ID_TYPE]] = None,
name: Optional[str] = None,
name_contains: Optional[str] = None,
limit: Optional[int] = None,
) -> Iterator[ls_schemas.AnnotationQueue]:
"""List the annotation queues on the LangSmith API.
Args:
queue_ids : List[str or UUID] or None, default=None
The IDs of the queues to filter by.
name : str or None, default=None
The name of the queue to filter by.
name_contains : str or None, default=None
The substring that the queue name should contain.
limit : int or None, default=None
Yields:
AnnotationQueue
The annotation queues.
"""
params: dict = {
"ids": (
[_as_uuid(id_, f"queue_ids[{i}]") for i, id_ in enumerate(queue_ids)]
if queue_ids is not None
else None
),
"name": name,
"name_contains": name_contains,
"limit": min(limit, 100) if limit is not None else 100,
}
for i, queue in enumerate(
self._get_paginated_list("/annotation-queues", params=params)
):
yield ls_schemas.AnnotationQueue(
**queue,
)
if limit is not None and i + 1 >= limit:
break
def create_annotation_queue(
self,
*,
name: str,
description: Optional[str] = None,
queue_id: Optional[ID_TYPE] = None,
) -> ls_schemas.AnnotationQueue:
"""Create an annotation queue on the LangSmith API.
Args:
name : str
The name of the annotation queue.
description : str, optional
The description of the annotation queue.
queue_id : str or UUID, optional
The ID of the annotation queue.
Returns:
AnnotationQueue
The created annotation queue object.
"""
body = {
"name": name,
"description": description,
"id": queue_id or str(uuid.uuid4()),
}
response = self.request_with_retries(
"POST",
"/annotation-queues",
json={k: v for k, v in body.items() if v is not None},
)
ls_utils.raise_for_status_with_text(response)
return ls_schemas.AnnotationQueue(
**response.json(),
)
def read_annotation_queue(self, queue_id: ID_TYPE) -> ls_schemas.AnnotationQueue:
"""Read an annotation queue with the specified queue ID.
Args:
queue_id (ID_TYPE): The ID of the annotation queue to read.
Returns:
ls_schemas.AnnotationQueue: The annotation queue object.
"""
# TODO: Replace when actual endpoint is added
return next(self.list_annotation_queues(queue_ids=[queue_id]))
def update_annotation_queue(
self, queue_id: ID_TYPE, *, name: str, description: Optional[str] = None
) -> None:
"""Update an annotation queue with the specified queue_id.
Args:
queue_id (ID_TYPE): The ID of the annotation queue to update.
name (str): The new name for the annotation queue.
description (Optional[str], optional): The new description for the
annotation queue. Defaults to None.
"""
response = self.request_with_retries(
"PATCH",
f"/annotation-queues/{_as_uuid(queue_id, 'queue_id')}",
json={
"name": name,
"description": description,
},
)
ls_utils.raise_for_status_with_text(response)
def delete_annotation_queue(self, queue_id: ID_TYPE) -> None:
"""Delete an annotation queue with the specified queue ID.
Args:
queue_id (ID_TYPE): The ID of the annotation queue to delete.
"""
response = self.request_with_retries(
"DELETE",
f"/annotation-queues/{_as_uuid(queue_id, 'queue_id')}",
headers={"Accept": "application/json", **self._headers},
)
ls_utils.raise_for_status_with_text(response)
def add_runs_to_annotation_queue(
self, queue_id: ID_TYPE, *, run_ids: List[ID_TYPE]
) -> None:
"""Add runs to an annotation queue with the specified queue ID.
Args:
queue_id (ID_TYPE): The ID of the annotation queue.
run_ids (List[ID_TYPE]): The IDs of the runs to be added to the annotation
queue.
"""
response = self.request_with_retries(
"POST",
f"/annotation-queues/{_as_uuid(queue_id, 'queue_id')}/runs",
json=[str(_as_uuid(id_, f"run_ids[{i}]")) for i, id_ in enumerate(run_ids)],
)
ls_utils.raise_for_status_with_text(response)
def delete_run_from_annotation_queue(
self, queue_id: ID_TYPE, *, run_id: ID_TYPE
) -> None:
"""Delete a run from an annotation queue with the specified queue ID and run ID.
Args:
queue_id (ID_TYPE): The ID of the annotation queue.
run_id (ID_TYPE): The ID of the run to be added to the annotation
queue.
"""
response = self.request_with_retries(
"DELETE",
f"/annotation-queues/{_as_uuid(queue_id, 'queue_id')}/runs/{_as_uuid(run_id, 'run_id')}",
)
ls_utils.raise_for_status_with_text(response)
def get_run_from_annotation_queue(
self, queue_id: ID_TYPE, *, index: int
) -> ls_schemas.RunWithAnnotationQueueInfo:
"""Get a run from an annotation queue at the specified index.
Args:
queue_id (ID_TYPE): The ID of the annotation queue.
index (int): The index of the run to retrieve.
Returns:
ls_schemas.RunWithAnnotationQueueInfo: The run at the specified index.
Raises:
ls_utils.LangSmithNotFoundError: If the run is not found at the given index.
ls_utils.LangSmithError: For other API-related errors.
"""
base_url = f"/annotation-queues/{_as_uuid(queue_id, 'queue_id')}/run"
response = self.request_with_retries(
"GET",
f"{base_url}/{index}",
headers=self._headers,
)
ls_utils.raise_for_status_with_text(response)
return ls_schemas.RunWithAnnotationQueueInfo(**response.json())
def create_comparative_experiment(
self,
name: str,
experiments: Sequence[ID_TYPE],
*,
reference_dataset: Optional[ID_TYPE] = None,
description: Optional[str] = None,
created_at: Optional[datetime.datetime] = None,
metadata: Optional[Dict[str, Any]] = None,
id: Optional[ID_TYPE] = None,
) -> ls_schemas.ComparativeExperiment:
"""Create a comparative experiment on the LangSmith API.
These experiments compare 2 or more experiment results over a shared dataset.
Args:
name: The name of the comparative experiment.
experiments: The IDs of the experiments to compare.
reference_dataset: The ID of the dataset these experiments are compared on.
description: The description of the comparative experiment.
created_at: The creation time of the comparative experiment.
metadata: Additional metadata for the comparative experiment.
Returns:
The created comparative experiment object.
"""
if not experiments:
raise ValueError("At least one experiment is required.")
if reference_dataset is None:
# Get one of the experiments' reference dataset
reference_dataset = self.read_project(
project_id=experiments[0]
).reference_dataset_id
if not reference_dataset:
raise ValueError("A reference dataset is required.")
body: Dict[str, Any] = {
"id": id or str(uuid.uuid4()),
"name": name,
"experiment_ids": experiments,
"reference_dataset_id": reference_dataset,
"description": description,
"created_at": created_at or datetime.datetime.now(datetime.timezone.utc),
"extra": {},
}
if metadata is not None:
body["extra"]["metadata"] = metadata
ser = _dumps_json({k: v for k, v in body.items()}) # if v is not None})
response = self.request_with_retries(
"POST",
"/datasets/comparative",
request_kwargs={
"data": ser,
},
)
ls_utils.raise_for_status_with_text(response)
response_d = response.json()
return ls_schemas.ComparativeExperiment(**response_d)
async def arun_on_dataset(
self,
dataset_name: str,
llm_or_chain_factory: Any,
*,
evaluation: Optional[Any] = None,
concurrency_level: int = 5,
project_name: Optional[str] = None,
project_metadata: Optional[Dict[str, Any]] = None,
dataset_version: Optional[Union[datetime.datetime, str]] = None,
verbose: bool = False,
input_mapper: Optional[Callable[[Dict], Any]] = None,
revision_id: Optional[str] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Asynchronously run the Chain or language model on a dataset.
.. deprecated:: 0.1.0
This method is deprecated. Use :func:`langsmith.aevaluate` instead.
""" # noqa: E501
warnings.warn(
"The `arun_on_dataset` method is deprecated and"
" will be removed in a future version."
"Please use the `aevaluate` method instead.",
DeprecationWarning,
)
try:
from langchain.smith import arun_on_dataset as _arun_on_dataset
except ImportError:
raise ImportError(
"The client.arun_on_dataset function requires the langchain"
"package to run.\nInstall with pip install langchain"
)
return await _arun_on_dataset(
dataset_name=dataset_name,
llm_or_chain_factory=llm_or_chain_factory,
client=self,
evaluation=evaluation,
concurrency_level=concurrency_level,
project_name=project_name,
project_metadata=project_metadata,
verbose=verbose,
input_mapper=input_mapper,
revision_id=revision_id,
dataset_version=dataset_version,
**kwargs,
)
def run_on_dataset(
self,
dataset_name: str,
llm_or_chain_factory: Any,
*,
evaluation: Optional[Any] = None,
concurrency_level: int = 5,
project_name: Optional[str] = None,
project_metadata: Optional[Dict[str, Any]] = None,
dataset_version: Optional[Union[datetime.datetime, str]] = None,
verbose: bool = False,
input_mapper: Optional[Callable[[Dict], Any]] = None,
revision_id: Optional[str] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Run the Chain or language model on a dataset.
.. deprecated:: 0.1.0
This method is deprecated. Use :func:`langsmith.aevaluate` instead.
""" # noqa: E501 # noqa: E501
warnings.warn(
"The `run_on_dataset` method is deprecated and"
" will be removed in a future version."
"Please use the `evaluate` method instead.",
DeprecationWarning,
)
try:
from langchain.smith import (
run_on_dataset as _run_on_dataset, # type: ignore
)
except ImportError:
raise ImportError(
"The client.run_on_dataset function requires the langchain"
"package to run.\nInstall with pip install langchain"
)
return _run_on_dataset(
dataset_name=dataset_name,
llm_or_chain_factory=llm_or_chain_factory,
concurrency_level=concurrency_level,
client=self,
evaluation=evaluation,
project_name=project_name,
project_metadata=project_metadata,
verbose=verbose,
input_mapper=input_mapper,
revision_id=revision_id,
dataset_version=dataset_version,
**kwargs,
)
def _current_tenant_is_owner(self, owner: str) -> bool:
"""Check if the current workspace has the same handle as owner.
Args:
owner (str): The owner to check against.
Returns:
bool: True if the current tenant is the owner, False otherwise.
"""
settings = self._get_settings()
return owner == "-" or settings.tenant_handle == owner
def _owner_conflict_error(
self, action: str, owner: str
) -> ls_utils.LangSmithUserError:
return ls_utils.LangSmithUserError(
f"Cannot {action} for another tenant.\n"
f"Current tenant: {self._get_settings().tenant_handle},\n"
f"Requested tenant: {owner}"
)
def _get_latest_commit_hash(
self, prompt_owner_and_name: str, limit: int = 1, offset: int = 0
) -> Optional[str]:
"""Get the latest commit hash for a prompt.
Args:
prompt_owner_and_name (str): The owner and name of the prompt.
limit (int): The maximum number of commits to fetch. Defaults to 1.
offset (int): The number of commits to skip. Defaults to 0.
Returns:
Optional[str]: The latest commit hash, or None if no commits are found.
"""
response = self.request_with_retries(
"GET",
f"/commits/{prompt_owner_and_name}/",
params={"limit": limit, "offset": offset},
)
commits = response.json()["commits"]
return commits[0]["commit_hash"] if commits else None
def _like_or_unlike_prompt(
self, prompt_identifier: str, like: bool
) -> Dict[str, int]:
"""Like or unlike a prompt.
Args:
prompt_identifier (str): The identifier of the prompt.
like (bool): True to like the prompt, False to unlike it.
Returns:
A dictionary with the key 'likes' and the count of likes as the value.
Raises:
requests.exceptions.HTTPError: If the prompt is not found or
another error occurs.
"""
owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
response = self.request_with_retries(
"POST", f"/likes/{owner}/{prompt_name}", json={"like": like}
)
response.raise_for_status()
return response.json()
def _get_prompt_url(self, prompt_identifier: str) -> str:
"""Get a URL for a prompt.
Args:
prompt_identifier (str): The identifier of the prompt.
Returns:
str: The URL for the prompt.
"""
owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier(
prompt_identifier
)
if not self._current_tenant_is_owner(owner):
return f"{self._host_url}/hub/{owner}/{prompt_name}:{commit_hash[:8]}"
settings = self._get_settings()
return (
f"{self._host_url}/prompts/{prompt_name}/{commit_hash[:8]}"
f"?organizationId={settings.id}"
)
def _prompt_exists(self, prompt_identifier: str) -> bool:
"""Check if a prompt exists.
Args:
prompt_identifier (str): The identifier of the prompt.
Returns:
bool: True if the prompt exists, False otherwise.
"""
prompt = self.get_prompt(prompt_identifier)
return True if prompt else False
    def like_prompt(self, prompt_identifier: str) -> Dict[str, int]:
        """Like a prompt.

        Thin wrapper around :meth:`_like_or_unlike_prompt` with ``like=True``.

        Args:
            prompt_identifier (str): The identifier of the prompt.

        Returns:
            A dictionary with the key 'likes' and the count of likes as the value.
        """
        return self._like_or_unlike_prompt(prompt_identifier, like=True)
    def unlike_prompt(self, prompt_identifier: str) -> Dict[str, int]:
        """Unlike a prompt.

        Thin wrapper around :meth:`_like_or_unlike_prompt` with ``like=False``.

        Args:
            prompt_identifier (str): The identifier of the prompt.

        Returns:
            A dictionary with the key 'likes' and the count of likes as the value.
        """
        return self._like_or_unlike_prompt(prompt_identifier, like=False)
def list_prompts(
self,
*,
limit: int = 100,
offset: int = 0,
is_public: Optional[bool] = None,
is_archived: Optional[bool] = False,
sort_field: ls_schemas.PromptSortField = ls_schemas.PromptSortField.updated_at,
sort_direction: Literal["desc", "asc"] = "desc",
query: Optional[str] = None,
) -> ls_schemas.ListPromptsResponse:
"""List prompts with pagination.
Args:
limit (int): The maximum number of prompts to return. Defaults to 100.
offset (int): The number of prompts to skip. Defaults to 0.
is_public (Optional[bool]): Filter prompts by if they are public.
is_archived (Optional[bool]): Filter prompts by if they are archived.
sort_field (ls_schemas.PromptsSortField): The field to sort by.
Defaults to "updated_at".
sort_direction (Literal["desc", "asc"]): The order to sort by.
Defaults to "desc".
query (Optional[str]): Filter prompts by a search query.
Returns:
ls_schemas.ListPromptsResponse: A response object containing
the list of prompts.
"""
params = {
"limit": limit,
"offset": offset,
"is_public": (
"true" if is_public else "false" if is_public is not None else None
),
"is_archived": "true" if is_archived else "false",
"sort_field": sort_field,
"sort_direction": sort_direction,
"query": query,
"match_prefix": "true" if query else None,
}
response = self.request_with_retries("GET", "/repos/", params=params)
return ls_schemas.ListPromptsResponse(**response.json())
def get_prompt(self, prompt_identifier: str) -> Optional[ls_schemas.Prompt]:
"""Get a specific prompt by its identifier.
Args:
prompt_identifier (str): The identifier of the prompt.
The identifier should be in the format "prompt_name" or "owner/prompt_name".
Returns:
Optional[ls_schemas.Prompt]: The prompt object.
Raises:
requests.exceptions.HTTPError: If the prompt is not found or
another error occurs.
"""
owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
try:
response = self.request_with_retries("GET", f"/repos/{owner}/{prompt_name}")
return ls_schemas.Prompt(**response.json()["repo"])
except ls_utils.LangSmithNotFoundError:
return None
    def create_prompt(
        self,
        prompt_identifier: str,
        *,
        description: Optional[str] = None,
        readme: Optional[str] = None,
        tags: Optional[Sequence[str]] = None,
        is_public: bool = False,
    ) -> ls_schemas.Prompt:
        """Create a new prompt.

        Does not attach prompt object, just creates an empty prompt.

        Args:
            prompt_identifier (str): The identifier of the prompt, in the
                format "prompt_name" or "owner/prompt_name".
            description (Optional[str]): A description of the prompt.
            readme (Optional[str]): A readme for the prompt.
            tags (Optional[Sequence[str]]): A list of tags for the prompt.
            is_public (bool): Whether the prompt should be public. Defaults to False.

        Returns:
            ls_schemas.Prompt: The created prompt object.

        Raises:
            ValueError: If the current tenant is not the owner.
            HTTPError: If the server request fails.
        """
        settings = self._get_settings()
        # Public prompts require a tenant handle to be addressable on the hub.
        if is_public and not settings.tenant_handle:
            raise ls_utils.LangSmithUserError(
                "Cannot create a public prompt without first\n"
                "creating a LangChain Hub handle. "
                "You can add a handle by creating a public prompt at:\n"
                "https://smith.langchain.com/prompts"
            )
        owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
        if not self._current_tenant_is_owner(owner=owner):
            raise self._owner_conflict_error("create a prompt", owner)
        json: Dict[str, Union[str, bool, Sequence[str]]] = {
            "repo_handle": prompt_name,
            "description": description or "",
            "readme": readme or "",
            "tags": tags or [],
            "is_public": is_public,
        }
        response = self.request_with_retries("POST", "/repos/", json=json)
        response.raise_for_status()
        return ls_schemas.Prompt(**response.json()["repo"])
def create_commit(
self,
prompt_identifier: str,
object: Any,
*,
parent_commit_hash: Optional[str] = None,
) -> str:
"""Create a commit for an existing prompt.
Args:
prompt_identifier (str): The identifier of the prompt.
object (Any): The LangChain object to commit.
parent_commit_hash (Optional[str]): The hash of the parent commit.
Defaults to latest commit.
Returns:
str: The url of the prompt commit.
Raises:
HTTPError: If the server request fails.
ValueError: If the prompt does not exist.
"""
if not self._prompt_exists(prompt_identifier):
raise ls_utils.LangSmithNotFoundError(
"Prompt does not exist, you must create it first."
)
try:
from langchain_core.load.dump import dumps
except ImportError:
raise ImportError(
"The client.create_commit function requires the langchain_core"
"package to run.\nInstall with `pip install langchain_core`"
)
json_object = dumps(object)
manifest_dict = json.loads(json_object)
owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
prompt_owner_and_name = f"{owner}/{prompt_name}"
if parent_commit_hash == "latest" or parent_commit_hash is None:
parent_commit_hash = self._get_latest_commit_hash(prompt_owner_and_name)
request_dict = {"parent_commit": parent_commit_hash, "manifest": manifest_dict}
response = self.request_with_retries(
"POST", f"/commits/{prompt_owner_and_name}", json=request_dict
)
commit_hash = response.json()["commit"]["commit_hash"]
return self._get_prompt_url(f"{prompt_owner_and_name}:{commit_hash}")
def update_prompt(
self,
prompt_identifier: str,
*,
description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
is_public: Optional[bool] = None,
is_archived: Optional[bool] = None,
) -> Dict[str, Any]:
"""Update a prompt's metadata.
To update the content of a prompt, use push_prompt or create_commit instead.
Args:
prompt_identifier (str): The identifier of the prompt to update.
description (Optional[str]): New description for the prompt.
readme (Optional[str]): New readme for the prompt.
tags (Optional[Sequence[str]]): New list of tags for the prompt.
is_public (Optional[bool]): New public status for the prompt.
is_archived (Optional[bool]): New archived status for the prompt.
Returns:
Dict[str, Any]: The updated prompt data as returned by the server.
Raises:
ValueError: If the prompt_identifier is empty.
HTTPError: If the server request fails.
"""
settings = self._get_settings()
if is_public and not settings.tenant_handle:
raise ValueError(
"Cannot create a public prompt without first\n"
"creating a LangChain Hub handle. "
"You can add a handle by creating a public prompt at:\n"
"https://smith.langchain.com/prompts"
)
json: Dict[str, Union[str, bool, Sequence[str]]] = {}
if description is not None:
json["description"] = description
if readme is not None:
json["readme"] = readme
if is_public is not None:
json["is_public"] = is_public
if is_archived is not None:
json["is_archived"] = is_archived
if tags is not None:
json["tags"] = tags
owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
response = self.request_with_retries(
"PATCH", f"/repos/{owner}/{prompt_name}", json=json
)
response.raise_for_status()
return response.json()
    def delete_prompt(self, prompt_identifier: str) -> None:
        """Delete a prompt.

        Args:
            prompt_identifier (str): The identifier of the prompt to delete.

        Raises:
            ValueError: If the current tenant is not the owner of the prompt.
        """
        owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
        # Only the owning tenant may delete a prompt.
        if not self._current_tenant_is_owner(owner):
            raise self._owner_conflict_error("delete a prompt", owner)
        response = self.request_with_retries("DELETE", f"/repos/{owner}/{prompt_name}")
        response.raise_for_status()
    def pull_prompt_commit(
        self,
        prompt_identifier: str,
        *,
        include_model: Optional[bool] = False,
    ) -> ls_schemas.PromptCommit:
        """Pull a prompt object from the LangSmith API.

        Args:
            prompt_identifier (str): The identifier of the prompt, optionally
                including a commit hash (otherwise "latest" is used).
            include_model (Optional[bool]): Whether to request the associated
                model manifest as well. Defaults to False.

        Returns:
            ls_schemas.PromptCommit: The prompt commit object.

        Raises:
            ValueError: If no commits are found for the prompt.
        """
        owner, prompt_name, commit_hash = ls_utils.parse_prompt_identifier(
            prompt_identifier
        )
        # Servers >= 0.5.23 can resolve the "latest" alias themselves
        # ("optimization"); older servers need the client to look up the
        # newest commit hash first.
        try:
            use_optimization = ls_utils.is_version_greater_or_equal(
                self.info.version, "0.5.23"
            )
        except ValueError:
            logger.exception(
                "Failed to parse LangSmith API version. Defaulting to using optimization."
            )
            use_optimization = True
        if not use_optimization and commit_hash == "latest":
            latest_commit_hash = self._get_latest_commit_hash(f"{owner}/{prompt_name}")
            if latest_commit_hash is None:
                raise ValueError("No commits found")
            else:
                commit_hash = latest_commit_hash
        response = self.request_with_retries(
            "GET",
            (
                f"/commits/{owner}/{prompt_name}/{commit_hash}"
                f"{'?include_model=true' if include_model else ''}"
            ),
        )
        # Merge owner/repo into the payload so the schema is fully populated.
        return ls_schemas.PromptCommit(
            **{"owner": owner, "repo": prompt_name, **response.json()}
        )
    def list_prompt_commits(
        self,
        prompt_identifier: str,
        *,
        limit: Optional[int] = None,
        offset: int = 0,
        include_model: bool = False,
    ) -> Iterator[ls_schemas.ListedPromptCommit]:
        """List commits for a given prompt.

        Args:
            prompt_identifier (str): The identifier of the prompt in the format 'owner/repo_name'.
            limit (Optional[int], optional): The maximum number of commits to return. If None, returns all commits. Defaults to None.
            offset (int, optional): The number of commits to skip before starting to return results. Defaults to 0.
            include_model (bool, optional): Whether to include the model information in the commit data. Defaults to False.

        Returns:
            Iterator[ls_schemas.ListedPromptCommit]: An iterator of ListedPromptCommit objects representing the commits.

        Yields:
            ls_schemas.ListedPromptCommit: A ListedPromptCommit object for each commit.

        Note:
            This method uses pagination to retrieve commits. It will make multiple API calls if necessary to retrieve all commits
            or up to the specified limit.
        """
        owner, prompt_name, _ = ls_utils.parse_prompt_identifier(prompt_identifier)
        # Page size is capped at 100; larger limits are satisfied by paging.
        params = {
            "limit": min(100, limit) if limit is not None else limit,
            "offset": offset,
            "include_model": include_model,
        }
        i = 0  # number of commits yielded so far, across pages
        while True:
            params["offset"] = offset
            response = self.request_with_retries(
                "GET",
                f"/commits/{owner}/{prompt_name}/",
                params=params,
            )
            val = response.json()
            items = val["commits"]
            total = val["total"]
            if not items:
                break
            for it in items:
                if limit is not None and i >= limit:
                    return  # Stop iteration if we've reached the limit
                yield ls_schemas.ListedPromptCommit(
                    **{"owner": owner, "repo": prompt_name, **it}
                )
                i += 1
            # Advance past this page; stop once the reported total is covered.
            offset += len(items)
            if offset >= total:
                break
def pull_prompt(
self, prompt_identifier: str, *, include_model: Optional[bool] = False
) -> Any:
"""Pull a prompt and return it as a LangChain PromptTemplate.
This method requires `langchain_core`.
Args:
prompt_identifier (str): The identifier of the prompt.
Returns:
Any: The prompt object in the specified format.
"""
try:
from langchain_core.language_models.base import BaseLanguageModel
from langchain_core.load.load import loads
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import RunnableBinding, RunnableSequence
except ImportError:
raise ImportError(
"The client.pull_prompt function requires the langchain_core"
"package to run.\nInstall with `pip install langchain_core`"
)
try:
from langchain_core._api import suppress_langchain_beta_warning
except ImportError:
@contextlib.contextmanager
def suppress_langchain_beta_warning():
yield
prompt_object = self.pull_prompt_commit(
prompt_identifier, include_model=include_model
)
with suppress_langchain_beta_warning():
prompt = loads(json.dumps(prompt_object.manifest))
if (
isinstance(prompt, BasePromptTemplate)
or isinstance(prompt, RunnableSequence)
and isinstance(prompt.first, BasePromptTemplate)
):
prompt_template = (
prompt
if isinstance(prompt, BasePromptTemplate)
else (
prompt.first
if isinstance(prompt, RunnableSequence)
and isinstance(prompt.first, BasePromptTemplate)
else None
)
)
if prompt_template is None:
raise ls_utils.LangSmithError(
"Prompt object is not a valid prompt template."
)
if prompt_template.metadata is None:
prompt_template.metadata = {}
prompt_template.metadata.update(
{
"lc_hub_owner": prompt_object.owner,
"lc_hub_repo": prompt_object.repo,
"lc_hub_commit_hash": prompt_object.commit_hash,
}
)
if (
include_model
and isinstance(prompt, RunnableSequence)
and isinstance(prompt.first, StructuredPrompt)
# Make forward-compatible in case we let update the response type
and (
len(prompt.steps) == 2 and not isinstance(prompt.last, BaseOutputParser)
)
):
if isinstance(prompt.last, RunnableBinding) and isinstance(
prompt.last.bound, BaseLanguageModel
):
seq = cast(RunnableSequence, prompt.first | prompt.last.bound)
if len(seq.steps) == 3: # prompt | bound llm | output parser
rebound_llm = seq.steps[1]
prompt = RunnableSequence(
prompt.first,
rebound_llm.bind(**{**prompt.last.kwargs}),
seq.last,
)
else:
prompt = seq # Not sure
elif isinstance(prompt.last, BaseLanguageModel):
prompt: RunnableSequence = prompt.first | prompt.last # type: ignore[no-redef, assignment]
else:
pass
return prompt
def push_prompt(
self,
prompt_identifier: str,
*,
object: Optional[Any] = None,
parent_commit_hash: str = "latest",
is_public: Optional[bool] = None,
description: Optional[str] = None,
readme: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
) -> str:
"""Push a prompt to the LangSmith API.
Can be used to update prompt metadata or prompt content.
If the prompt does not exist, it will be created.
If the prompt exists, it will be updated.
Args:
prompt_identifier (str): The identifier of the prompt.
object (Optional[Any]): The LangChain object to push.
parent_commit_hash (str): The parent commit hash.
Defaults to "latest".
is_public (Optional[bool]): Whether the prompt should be public.
If None (default), the current visibility status is maintained for existing prompts.
For new prompts, None defaults to private.
Set to True to make public, or False to make private.
description (Optional[str]): A description of the prompt.
Defaults to an empty string.
readme (Optional[str]): A readme for the prompt.
Defaults to an empty string.
tags (Optional[Sequence[str]]): A list of tags for the prompt.
Defaults to an empty list.
Returns:
str: The URL of the prompt.
"""
# Create or update prompt metadata
if self._prompt_exists(prompt_identifier):
if any(
param is not None for param in [is_public, description, readme, tags]
):
self.update_prompt(
prompt_identifier,
description=description,
readme=readme,
tags=tags,
is_public=is_public,
)
else:
self.create_prompt(
prompt_identifier,
is_public=is_public if is_public is not None else False,
description=description,
readme=readme,
tags=tags,
)
if object is None:
return self._get_prompt_url(prompt_identifier=prompt_identifier)
# Create a commit with the new manifest
url = self.create_commit(
prompt_identifier,
object,
parent_commit_hash=parent_commit_hash,
)
return url
    def cleanup(self) -> None:
        """Manually trigger cleanup of the background thread."""
        # Flag is polled elsewhere; setting it requests the cleanup.
        self._manual_cleanup = True
@overload
def evaluate(
self,
target: Union[TARGET_T, Runnable, EXPERIMENT_T],
/,
data: Optional[DATA_T] = None,
evaluators: Optional[Sequence[EVALUATOR_T]] = None,
summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
metadata: Optional[dict] = None,
experiment_prefix: Optional[str] = None,
description: Optional[str] = None,
max_concurrency: Optional[int] = 0,
num_repetitions: int = 1,
blocking: bool = True,
experiment: Optional[EXPERIMENT_T] = None,
upload_results: bool = True,
**kwargs: Any,
) -> ExperimentResults: ...
@overload
def evaluate(
self,
target: Union[Tuple[EXPERIMENT_T, EXPERIMENT_T]],
/,
data: Optional[DATA_T] = None,
evaluators: Optional[Sequence[COMPARATIVE_EVALUATOR_T]] = None,
summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
metadata: Optional[dict] = None,
experiment_prefix: Optional[str] = None,
description: Optional[str] = None,
max_concurrency: Optional[int] = 0,
num_repetitions: int = 1,
blocking: bool = True,
experiment: Optional[EXPERIMENT_T] = None,
upload_results: bool = True,
**kwargs: Any,
) -> ComparativeExperimentResults: ...
    def evaluate(
        self,
        target: Union[
            TARGET_T, Runnable, EXPERIMENT_T, Tuple[EXPERIMENT_T, EXPERIMENT_T]
        ],
        /,
        data: Optional[DATA_T] = None,
        evaluators: Optional[
            Union[Sequence[EVALUATOR_T], Sequence[COMPARATIVE_EVALUATOR_T]]
        ] = None,
        summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
        metadata: Optional[dict] = None,
        experiment_prefix: Optional[str] = None,
        description: Optional[str] = None,
        max_concurrency: Optional[int] = 0,
        num_repetitions: int = 1,
        blocking: bool = True,
        experiment: Optional[EXPERIMENT_T] = None,
        upload_results: bool = True,
        **kwargs: Any,
    ) -> Union[ExperimentResults, ComparativeExperimentResults]:
        r"""Evaluate a target system on a given dataset.

        Args:
            target (TARGET_T | Runnable | EXPERIMENT_T | Tuple[EXPERIMENT_T, EXPERIMENT_T]):
                The target system or experiment(s) to evaluate. Can be a function
                that takes a dict and returns a dict, a langchain Runnable, an
                existing experiment ID, or a two-tuple of experiment IDs.
            data (DATA_T): The dataset to evaluate on. Can be a dataset name, a list of
                examples, or a generator of examples.
            evaluators (Sequence[EVALUATOR_T] | Sequence[COMPARATIVE_EVALUATOR_T] | None):
                A list of evaluators to run on each example. The evaluator signature
                depends on the target type. Default to None.
            summary_evaluators (Sequence[SUMMARY_EVALUATOR_T] | None): A list of summary
                evaluators to run on the entire dataset. Should not be specified if
                comparing two existing experiments. Defaults to None.
            metadata (dict | None): Metadata to attach to the experiment.
                Defaults to None.
            experiment_prefix (str | None): A prefix to provide for your experiment name.
                Defaults to None.
            description (str | None): A free-form text description for the experiment.
            max_concurrency (int | None): The maximum number of concurrent
                evaluations to run. If None then no limit is set. If 0 then no concurrency.
                Defaults to 0.
            blocking (bool): Whether to block until the evaluation is complete.
                Defaults to True.
            num_repetitions (int): The number of times to run the evaluation.
                Each item in the dataset will be run and evaluated this many times.
                Defaults to 1.
            experiment (schemas.TracerSession | None): An existing experiment to
                extend. If provided, experiment_prefix is ignored. For advanced
                usage only. Should not be specified if target is an existing experiment or
                two-tuple of experiments.
            load_nested (bool): Whether to load all child runs for the experiment.
                Default is to only load the top-level root runs. Should only be specified
                when target is an existing experiment or two-tuple of experiments.
            randomize_order (bool): Whether to randomize the order of the outputs for each
                evaluation. Default is False. Should only be specified when target is a
                two-tuple of existing experiments.

        Returns:
            ExperimentResults: If target is a function, Runnable, or existing experiment.
            ComparativeExperimentResults: If target is a two-tuple of existing experiments.

        Examples:
            Prepare the dataset:

            >>> from langsmith import Client
            >>> client = Client()
            >>> dataset = client.clone_public_dataset(
            ...     "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
            ... )
            >>> dataset_name = "Evaluate Examples"

            Basic usage:

            >>> def accuracy(outputs: dict, reference_outputs: dict) -> dict:
            ...     # Row-level evaluator for accuracy.
            ...     pred = outputs["response"]
            ...     expected = reference_outputs["answer"]
            ...     return {"score": expected.lower() == pred.lower()}

            >>> def precision(outputs: list[dict], reference_outputs: list[dict]) -> dict:
            ...     # Experiment-level evaluator for precision.
            ...     # TP / (TP + FP)
            ...     predictions = [out["response"].lower() for out in outputs]
            ...     expected = [ref["answer"].lower() for ref in reference_outputs]
            ...     # yes and no are the only possible answers
            ...     tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
            ...     fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
            ...     return {"score": tp / (tp + fp)}

            >>> def predict(inputs: dict) -> dict:
            ...     # This can be any function or just an API call to your app.
            ...     return {"response": "Yes"}

            >>> results = client.evaluate(
            ...     predict,
            ...     data=dataset_name,
            ...     evaluators=[accuracy],
            ...     summary_evaluators=[precision],
            ...     experiment_prefix="My Experiment",
            ...     description="Evaluating the accuracy of a simple prediction model.",
            ...     metadata={
            ...         "my-prompt-version": "abcd-1234",
            ...     },
            ... )  # doctest: +ELLIPSIS
            View the evaluation results for experiment:...

            Evaluating over only a subset of the examples

            >>> experiment_name = results.experiment_name
            >>> examples = client.list_examples(dataset_name=dataset_name, limit=5)
            >>> results = client.evaluate(
            ...     predict,
            ...     data=examples,
            ...     evaluators=[accuracy],
            ...     summary_evaluators=[precision],
            ...     experiment_prefix="My Experiment",
            ...     description="Just testing a subset synchronously.",
            ... )  # doctest: +ELLIPSIS
            View the evaluation results for experiment:...

            Streaming each prediction to more easily + eagerly debug.

            >>> results = client.evaluate(
            ...     predict,
            ...     data=dataset_name,
            ...     evaluators=[accuracy],
            ...     summary_evaluators=[precision],
            ...     description="I don't even have to block!",
            ...     blocking=False,
            ... )  # doctest: +ELLIPSIS
            View the evaluation results for experiment:...
            >>> for i, result in enumerate(results):  # doctest: +ELLIPSIS
            ...     pass

            Using the `evaluate` API with an off-the-shelf LangChain evaluator:

            >>> from langsmith.evaluation import LangChainStringEvaluator
            >>> from langchain.chat_models import init_chat_model
            >>> def prepare_criteria_data(run: Run, example: Example):
            ...     return {
            ...         "prediction": run.outputs["output"],
            ...         "reference": example.outputs["answer"],
            ...         "input": str(example.inputs),
            ...     }

            >>> results = client.evaluate(
            ...     predict,
            ...     data=dataset_name,
            ...     evaluators=[
            ...         accuracy,
            ...         LangChainStringEvaluator("embedding_distance"),
            ...         LangChainStringEvaluator(
            ...             "labeled_criteria",
            ...             config={
            ...                 "criteria": {
            ...                     "usefulness": "The prediction is useful if it is correct"
            ...                     " and/or asks a useful followup question."
            ...                 },
            ...                 "llm": init_chat_model("gpt-4o"),
            ...             },
            ...             prepare_data=prepare_criteria_data,
            ...         ),
            ...     ],
            ...     description="Evaluating with off-the-shelf LangChain evaluators.",
            ...     summary_evaluators=[precision],
            ... )  # doctest: +ELLIPSIS
            View the evaluation results for experiment:...

            Evaluating a LangChain object:

            >>> from langchain_core.runnables import chain as as_runnable
            >>> @as_runnable
            ... def nested_predict(inputs):
            ...     return {"response": "Yes"}

            >>> @as_runnable
            ... def lc_predict(inputs):
            ...     return nested_predict.invoke(inputs)

            >>> results = client.evaluate(
            ...     lc_predict,
            ...     data=dataset_name,
            ...     evaluators=[accuracy],
            ...     description="This time we're evaluating a LangChain object.",
            ...     summary_evaluators=[precision],
            ... )  # doctest: +ELLIPSIS
            View the evaluation results for experiment:...

        .. versionadded:: 0.2.0
        """  # noqa: E501
        from langsmith.evaluation._runner import evaluate as evaluate_

        # Need to ignore because it fails when there are too many union types +
        # overloads.
        return evaluate_(  # type: ignore[misc]
            target,  # type: ignore[arg-type]
            data=data,
            evaluators=evaluators,  # type: ignore[arg-type]
            summary_evaluators=summary_evaluators,
            metadata=metadata,
            experiment_prefix=experiment_prefix,
            description=description,
            max_concurrency=max_concurrency,
            num_repetitions=num_repetitions,
            client=self,
            blocking=blocking,
            experiment=experiment,
            upload_results=upload_results,
            **kwargs,
        )
    async def aevaluate(
        self,
        target: Union[
            ATARGET_T,
            AsyncIterable[dict],
            Runnable,
            str,
            uuid.UUID,
            schemas.TracerSession,
        ],
        /,
        data: Union[
            DATA_T, AsyncIterable[schemas.Example], Iterable[schemas.Example], None
        ] = None,
        evaluators: Optional[Sequence[Union[EVALUATOR_T, AEVALUATOR_T]]] = None,
        summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
        metadata: Optional[dict] = None,
        experiment_prefix: Optional[str] = None,
        description: Optional[str] = None,
        max_concurrency: Optional[int] = 0,
        num_repetitions: int = 1,
        blocking: bool = True,
        experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None,
        upload_results: bool = True,
        **kwargs: Any,
    ) -> AsyncExperimentResults:
        r"""Evaluate an async target system on a given dataset.

        Args:
            target (AsyncCallable[[dict], dict] | AsyncIterable[dict] | Runnable | EXPERIMENT_T | Tuple[EXPERIMENT_T, EXPERIMENT_T]):
                The target system or experiment(s) to evaluate. Can be an async function
                that takes a dict and returns a dict, a langchain Runnable, an
                existing experiment ID, or a two-tuple of experiment IDs.
            data (Union[DATA_T, AsyncIterable[schemas.Example]]): The dataset to evaluate on. Can be a dataset name, a list of
                examples, an async generator of examples, or an async iterable of examples.
            evaluators (Optional[Sequence[EVALUATOR_T]]): A list of evaluators to run
                on each example. Defaults to None.
            summary_evaluators (Optional[Sequence[SUMMARY_EVALUATOR_T]]): A list of summary
                evaluators to run on the entire dataset. Defaults to None.
            metadata (Optional[dict]): Metadata to attach to the experiment.
                Defaults to None.
            experiment_prefix (Optional[str]): A prefix to provide for your experiment name.
                Defaults to None.
            description (Optional[str]): A description of the experiment.
            max_concurrency (int | None): The maximum number of concurrent
                evaluations to run. If None then no limit is set. If 0 then no concurrency.
                Defaults to 0.
            num_repetitions (int): The number of times to run the evaluation.
                Each item in the dataset will be run and evaluated this many times.
                Defaults to 1.
            blocking (bool): Whether to block until the evaluation is complete.
                Defaults to True.
            experiment (Optional[schemas.TracerSession]): An existing experiment to
                extend. If provided, experiment_prefix is ignored. For advanced
                usage only.
            load_nested: Whether to load all child runs for the experiment.
                Default is to only load the top-level root runs. Should only be specified
                when evaluating an existing experiment.

        Returns:
            AsyncIterator[ExperimentResultRow]: An async iterator over the experiment results.

        Environment:
            - LANGSMITH_TEST_CACHE: If set, API calls will be cached to disk to save time and
                cost during testing. Recommended to commit the cache files to your repository
                for faster CI/CD runs.
                Requires the 'langsmith[vcr]' package to be installed.

        Examples:
            >>> import asyncio
            >>> from langsmith import Client
            >>> client = Client()
            >>> dataset = client.clone_public_dataset(
            ...     "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
            ... )
            >>> dataset_name = "Evaluate Examples"

            Basic usage:

            >>> def accuracy(outputs: dict, reference_outputs: dict) -> dict:
            ...     # Row-level evaluator for accuracy.
            ...     pred = outputs["response"]
            ...     expected = reference_outputs["answer"]
            ...     return {"score": expected.lower() == pred.lower()}

            >>> def precision(outputs: list[dict], reference_outputs: list[dict]) -> dict:
            ...     # Experiment-level evaluator for precision.
            ...     # TP / (TP + FP)
            ...     predictions = [out["response"].lower() for out in outputs]
            ...     expected = [ref["answer"].lower() for ref in reference_outputs]
            ...     # yes and no are the only possible answers
            ...     tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
            ...     fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
            ...     return {"score": tp / (tp + fp)}

            >>> async def apredict(inputs: dict) -> dict:
            ...     # This can be any async function or just an API call to your app.
            ...     await asyncio.sleep(0.1)
            ...     return {"response": "Yes"}

            >>> results = asyncio.run(
            ...     client.aevaluate(
            ...         apredict,
            ...         data=dataset_name,
            ...         evaluators=[accuracy],
            ...         summary_evaluators=[precision],
            ...         experiment_prefix="My Experiment",
            ...         description="Evaluate the accuracy of the model asynchronously.",
            ...         metadata={
            ...             "my-prompt-version": "abcd-1234",
            ...         },
            ...     )
            ... )  # doctest: +ELLIPSIS
            View the evaluation results for experiment:...

            Evaluating over only a subset of the examples using an async generator:

            >>> async def example_generator():
            ...     examples = client.list_examples(dataset_name=dataset_name, limit=5)
            ...     for example in examples:
            ...         yield example

            >>> results = asyncio.run(
            ...     client.aevaluate(
            ...         apredict,
            ...         data=example_generator(),
            ...         evaluators=[accuracy],
            ...         summary_evaluators=[precision],
            ...         experiment_prefix="My Subset Experiment",
            ...         description="Evaluate a subset of examples asynchronously.",
            ...     )
            ... )  # doctest: +ELLIPSIS
            View the evaluation results for experiment:...

            Streaming each prediction to more easily + eagerly debug.

            >>> results = asyncio.run(
            ...     client.aevaluate(
            ...         apredict,
            ...         data=dataset_name,
            ...         evaluators=[accuracy],
            ...         summary_evaluators=[precision],
            ...         experiment_prefix="My Streaming Experiment",
            ...         description="Streaming predictions for debugging.",
            ...         blocking=False,
            ...     )
            ... )  # doctest: +ELLIPSIS
            View the evaluation results for experiment:...
            >>> async def aenumerate(iterable):
            ...     async for elem in iterable:
            ...         print(elem)
            >>> asyncio.run(aenumerate(results))

            Running without concurrency:

            >>> results = asyncio.run(
            ...     client.aevaluate(
            ...         apredict,
            ...         data=dataset_name,
            ...         evaluators=[accuracy],
            ...         summary_evaluators=[precision],
            ...         experiment_prefix="My Experiment Without Concurrency",
            ...         description="This was run without concurrency.",
            ...         max_concurrency=0,
            ...     )
            ... )  # doctest: +ELLIPSIS
            View the evaluation results for experiment:...

            Using Async evaluators:

            >>> async def helpfulness(outputs: dict) -> dict:
            ...     # Row-level evaluator for helpfulness.
            ...     await asyncio.sleep(5)  # Replace with your LLM API call
            ...     return {"score": outputs["output"] == "Yes"}

            >>> results = asyncio.run(
            ...     client.aevaluate(
            ...         apredict,
            ...         data=dataset_name,
            ...         evaluators=[helpfulness],
            ...         summary_evaluators=[precision],
            ...         experiment_prefix="My Helpful Experiment",
            ...         description="Applying async evaluators example.",
            ...     )
            ... )  # doctest: +ELLIPSIS
            View the evaluation results for experiment:...

        .. versionadded:: 0.2.0
        """  # noqa: E501
        from langsmith.evaluation._arunner import aevaluate as aevaluate_

        return await aevaluate_(
            target,
            data=data,
            evaluators=evaluators,
            summary_evaluators=summary_evaluators,
            metadata=metadata,
            experiment_prefix=experiment_prefix,
            description=description,
            max_concurrency=max_concurrency,
            num_repetitions=num_repetitions,
            client=self,
            blocking=blocking,
            experiment=experiment,
            upload_results=upload_results,
            **kwargs,
        )
def convert_prompt_to_openai_format(
    messages: Any,
    model_kwargs: Optional[Dict[str, Any]] = None,
) -> dict:
    """Convert a prompt to OpenAI format.

    Requires the `langchain_openai` package to be installed.

    Args:
        messages (Any): The messages to convert.
        model_kwargs (Optional[Dict[str, Any]]): Model configuration arguments including
            `stop` and any other required arguments. Defaults to None.

    Returns:
        dict: The prompt in OpenAI format.

    Raises:
        ImportError: If the `langchain_openai` package is not installed.
        ls_utils.LangSmithError: If there is an error during the conversion process.
    """
    try:
        from langchain_openai import ChatOpenAI  # type: ignore
    except ImportError:
        # Fixed missing space: the two adjacent literals previously fused into
        # "langchain_openaipackage".
        raise ImportError(
            "The convert_prompt_to_openai_format function requires the"
            " langchain_openai package to run.\n"
            "Install with `pip install langchain_openai`"
        )

    openai = ChatOpenAI()

    model_kwargs = model_kwargs or {}
    # `stop` is passed separately to the payload builder, not as a model kwarg.
    stop = model_kwargs.pop("stop", None)

    try:
        return openai._get_request_payload(messages, stop=stop, **model_kwargs)
    except Exception as e:
        # Chain the original exception so the root cause is preserved.
        raise ls_utils.LangSmithError(f"Error converting to OpenAI format: {e}") from e
def convert_prompt_to_anthropic_format(
    messages: Any,
    model_kwargs: Optional[Dict[str, Any]] = None,
) -> dict:
    """Convert a prompt to Anthropic format.

    Requires the `langchain_anthropic` package to be installed.

    Args:
        messages (Any): The messages to convert.
        model_kwargs (Optional[Dict[str, Any]]):
            Model configuration arguments including `model_name` and `stop`.
            Defaults to None.

    Returns:
        dict: The prompt in Anthropic format.

    Raises:
        ImportError: If the `langchain_anthropic` package is not installed.
        ls_utils.LangSmithError: If there is an error during the conversion process.
    """
    try:
        from langchain_anthropic import ChatAnthropic  # type: ignore
    except ImportError:
        raise ImportError(
            "The convert_prompt_to_anthropic_format function requires the "
            "langchain_anthropic package to run.\n"
            "Install with `pip install langchain_anthropic`"
        )

    model_kwargs = model_kwargs or {}
    model_name = model_kwargs.pop("model_name", "claude-3-haiku-20240307")
    stop = model_kwargs.pop("stop", None)
    timeout = model_kwargs.pop("timeout", None)

    anthropic = ChatAnthropic(
        model_name=model_name, timeout=timeout, stop=stop, **model_kwargs
    )

    try:
        return anthropic._get_request_payload(messages, stop=stop)
    except Exception as e:
        # Chain the original exception so the root cause is preserved
        # (consistent with convert_prompt_to_openai_format).
        raise ls_utils.LangSmithError(
            f"Error converting to Anthropic format: {e}"
        ) from e
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/anonymizer.py | import re # noqa
import inspect
from abc import abstractmethod
from collections import defaultdict
from typing import Any, Callable, List, Optional, Tuple, TypedDict, Union
class _ExtractOptions(TypedDict):
    """Options controlling string-node extraction."""

    max_depth: Optional[int]
    """
    Maximum depth to traverse to extract string nodes.
    Falsy/missing values fall back to a default of 10 in _extract_string_nodes.
    """
class StringNode(TypedDict):
    """String node extracted from the data."""

    value: str
    """String value."""

    path: List[Union[str, int]]
    """Path to the string node in the data (dict keys and list indices, root-first)."""
def _extract_string_nodes(data: Any, options: _ExtractOptions) -> List[StringNode]:
max_depth = options.get("max_depth") or 10
queue: List[Tuple[Any, int, List[Union[str, int]]]] = [(data, 0, [])]
result: List[StringNode] = []
while queue:
task = queue.pop(0)
if task is None:
continue
value, depth, path = task
if isinstance(value, (dict, defaultdict)):
if depth >= max_depth:
continue
for key, nested_value in value.items():
queue.append((nested_value, depth + 1, path + [key]))
elif isinstance(value, list):
if depth >= max_depth:
continue
for i, item in enumerate(value):
queue.append((item, depth + 1, path + [i]))
elif isinstance(value, str):
result.append(StringNode(value=value, path=path))
return result
class StringNodeProcessor:
    """Processes a list of string nodes for masking."""

    @abstractmethod
    def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]:
        """Accept and return a list of string nodes to be masked.

        Implementations in this module return only the nodes whose value
        changed; unchanged nodes are omitted from the result.
        """
class ReplacerOptions(TypedDict):
    """Configuration options for replacing sensitive data."""

    max_depth: Optional[int]
    """Maximum depth to traverse to extract string nodes."""

    deep_clone: Optional[bool]
    """Deep clone the data before replacing.
    NOTE(review): not consumed anywhere visible in this module — confirm usage.
    """
class StringNodeRule(TypedDict):
    """Declarative rule used for replacing sensitive data."""

    pattern: re.Pattern
    """Regex pattern to match. RuleNodeProcessor also accepts a plain string
    and compiles it."""

    replace: Optional[str]
    """Replacement value. Defaults to `[redacted]` if not specified."""
class RuleNodeProcessor(StringNodeProcessor):
    """String node processor that replaces sensitive data via declarative regex rules."""

    rules: List[StringNodeRule]
    """Normalized rules: each entry holds a compiled pattern and a replacement
    string (defaulted to "[redacted]" when the input rule omitted one).
    """

    def __init__(self, rules: List[StringNodeRule]):
        """Normalize the given rules: compile string patterns, default replacements."""
        normalized = []
        for rule in rules:
            pattern = rule["pattern"]
            if not isinstance(pattern, re.Pattern):
                # Accept raw pattern strings for convenience.
                pattern = re.compile(pattern)
            replacement = rule.get("replace")
            if not isinstance(replacement, str):
                replacement = "[redacted]"
            normalized.append({"pattern": pattern, "replace": replacement})
        self.rules = normalized

    def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]:
        """Apply every rule to every node; return only the nodes that changed."""
        masked = []
        for node in nodes:
            candidate = node["value"]
            for rule in self.rules:
                candidate = rule["pattern"].sub(rule["replace"], candidate)
            if candidate != node["value"]:
                masked.append(StringNode(value=candidate, path=node["path"]))
        return masked
class CallableNodeProcessor(StringNodeProcessor):
    """String node processor that uses a callable function to replace sensitive data."""

    func: Union[Callable[[str], str], Callable[[str, List[Union[str, int]]], str]]
    """The callable function used to replace sensitive data.

    Either a one-argument function (value -> masked value) or a two-argument
    function (value, path -> masked value)."""

    accepts_path: bool
    """True when the callable takes two parameters, in which case the node's
    path is passed as the second argument."""

    def __init__(
        self,
        func: Union[Callable[[str], str], Callable[[str, List[Union[str, int]]], str]],
    ):
        """Initialize the processor with a callable function."""
        self.func = func
        # Inspect the signature once up front rather than per node.
        self.accepts_path = len(inspect.signature(func).parameters) == 2

    def mask_nodes(self, nodes: List[StringNode]) -> List[StringNode]:
        """Run the callable over each node; return only the nodes that changed."""
        masked: List[StringNode] = []
        for node in nodes:
            original = node["value"]
            if self.accepts_path:
                candidate = self.func(original, node["path"])  # type: ignore[call-arg]
            else:
                candidate = self.func(original)  # type: ignore[call-arg]
            if candidate != original:
                masked.append(StringNode(value=candidate, path=node["path"]))
        return masked
# A replacer may be given as: a callable (value, path) -> masked value,
# a list of declarative regex rules, or a ready-made StringNodeProcessor.
# _get_node_processor normalizes all three into a StringNodeProcessor.
ReplacerType = Union[
    Callable[[str, List[Union[str, int]]], str],
    List[StringNodeRule],
    StringNodeProcessor,
]
def _get_node_processor(replacer: ReplacerType) -> StringNodeProcessor:
    """Normalize any supported replacer form into a StringNodeProcessor."""
    if isinstance(replacer, list):
        # Declarative regex rules.
        return RuleNodeProcessor(rules=replacer)
    if callable(replacer):
        # Bare masking function; wrap it.
        return CallableNodeProcessor(func=replacer)
    # Already a processor instance.
    return replacer
def create_anonymizer(
    replacer: ReplacerType,
    *,
    max_depth: Optional[int] = None,
) -> Callable[[Any], Any]:
    """Create an anonymizer function.

    The returned callable extracts string leaves from nested data, masks them
    via the given replacer, and writes the masked values back (nested
    containers are updated in place).
    """
    processor = _get_node_processor(replacer)

    def anonymizer(data: Any) -> Any:
        # Gather candidate strings, then write back only those that changed.
        nodes = _extract_string_nodes(data, {"max_depth": max_depth or 10})
        updated = processor.mask_nodes(nodes)
        result = data
        for node in updated:
            path = node["path"]
            if not path:
                # The root itself was a string; replace it wholesale.
                result = node["value"]
                continue
            target = result
            for step in path[:-1]:
                target = target[step]
            target[path[-1]] = node["value"]
        return result

    return anonymizer
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/middleware.py | """Middleware for making it easier to do distributed tracing."""
class TracingMiddleware:
    """Middleware for propagating distributed tracing context using LangSmith.

    This middleware checks for the 'langsmith-trace' header and propagates the
    tracing context if present. It does not start new traces by default.
    It is designed to work with ASGI applications.

    Attributes:
        app: The ASGI application being wrapped.
    """

    def __init__(self, app):
        """Initialize the middleware, binding the tracing-context manager."""
        from langsmith.run_helpers import tracing_context  # type: ignore

        self._with_headers = tracing_context
        self.app = app

    async def __call__(self, scope: dict, receive, send):
        """Handle incoming requests and propagate tracing context if applicable.

        Args:
            scope: A dict containing ASGI connection scope.
            receive: An awaitable callable for receiving ASGI events.
            send: An awaitable callable for sending ASGI events.

        If the request is HTTP and contains the 'langsmith-trace' header,
        it propagates the tracing context before calling the wrapped application.
        Otherwise, it calls the application directly without modifying the context.
        """
        headers = (
            dict(scope["headers"])
            if scope["type"] == "http" and "headers" in scope
            else None
        )
        if headers is not None and b"langsmith-trace" in headers:
            # Distributed-trace headers present: run the app inside the
            # propagated tracing context.
            with self._with_headers(parent=headers):
                await self.app(scope, receive, send)
            return
        await self.app(scope, receive, send)
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/run_helpers.py | """Decorator for creating a run tree from functions."""
from __future__ import annotations
import asyncio
import contextlib
import contextvars
import datetime
import functools
import inspect
import logging
import uuid
import warnings
from contextvars import copy_context
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
AsyncIterator,
Awaitable,
Callable,
Dict,
Generator,
Generic,
Iterator,
List,
Literal,
Mapping,
Optional,
Protocol,
Sequence,
Set,
Tuple,
Type,
TypedDict,
TypeVar,
Union,
cast,
overload,
runtime_checkable,
)
from typing_extensions import Annotated, ParamSpec, TypeGuard, get_args, get_origin
from langsmith import client as ls_client
from langsmith import run_trees, schemas, utils
from langsmith._internal import _aiter as aitertools
from langsmith.env import _runtime_env
if TYPE_CHECKING:
from types import TracebackType
from langchain_core.runnables import Runnable
LOGGER = logging.getLogger(__name__)

# Ambient tracing state. Stored in ContextVars so values are scoped to the
# current thread / async task context.
_PARENT_RUN_TREE = contextvars.ContextVar[Optional[run_trees.RunTree]](
    "_PARENT_RUN_TREE", default=None
)
_PROJECT_NAME = contextvars.ContextVar[Optional[str]]("_PROJECT_NAME", default=None)
_TAGS = contextvars.ContextVar[Optional[List[str]]]("_TAGS", default=None)
_METADATA = contextvars.ContextVar[Optional[Dict[str, Any]]]("_METADATA", default=None)
_TRACING_ENABLED = contextvars.ContextVar[Optional[Union[bool, Literal["local"]]]](
    "_TRACING_ENABLED", default=None
)
_CLIENT = contextvars.ContextVar[Optional[ls_client.Client]]("_CLIENT", default=None)

# Public context-key names mapped to their backing ContextVars; used by
# get_tracing_context / _set_tracing_context.
_CONTEXT_KEYS: Dict[str, contextvars.ContextVar] = {
    "parent": _PARENT_RUN_TREE,
    "project_name": _PROJECT_NAME,
    "tags": _TAGS,
    "metadata": _METADATA,
    "enabled": _TRACING_ENABLED,
    "client": _CLIENT,
}
def get_current_run_tree() -> Optional[run_trees.RunTree]:
    """Get the current run tree.

    Returns:
        The parent RunTree stored in the ambient tracing context, or None
        when no traced run is active.
    """
    return _PARENT_RUN_TREE.get()
def get_tracing_context(
    context: Optional[contextvars.Context] = None,
) -> Dict[str, Any]:
    """Get the current tracing context.

    Args:
        context: An explicit contextvars.Context to read from; when None,
            values are read from the current context.

    Returns:
        A dict keyed by the tracing context-key names ("parent",
        "project_name", "tags", "metadata", "enabled", "client").
    """
    if context is not None:
        return {k: context.get(v) for k, v in _CONTEXT_KEYS.items()}
    # No explicit context: read each backing var from the ambient context.
    return {key: var.get() for key, var in _CONTEXT_KEYS.items()}
@contextlib.contextmanager
def tracing_context(
    *,
    project_name: Optional[str] = None,
    tags: Optional[List[str]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    parent: Optional[Union[run_trees.RunTree, Mapping, str]] = None,
    enabled: Optional[Union[bool, Literal["local"]]] = None,
    client: Optional[ls_client.Client] = None,
    **kwargs: Any,
) -> Generator[None, None, None]:
    """Set the tracing context for a block of code.

    Args:
        project_name: The name of the project to log the run to. Defaults to None.
        tags: The tags to add to the run. Defaults to None.
        metadata: The metadata to add to the run. Defaults to None.
        parent: The parent run to use for the context. Can be a Run/RunTree object,
            request headers (for distributed tracing), or the dotted order string.
            Defaults to None.
        client: The client to use for logging the run to LangSmith. Defaults to None,
        enabled: Whether tracing is enabled. Defaults to None, meaning it will use the
            current context value or environment variables.
    """
    if kwargs:
        # Extra keyword arguments are deprecated; warn but keep going.
        warnings.warn(
            f"Unrecognized keyword arguments: {kwargs}.",
            DeprecationWarning,
        )
    saved_context = get_tracing_context()
    resolved_parent = _get_parent_run({"parent": parent or kwargs.get("parent_run")})
    if resolved_parent is not None:
        # Merge the parent's tags/metadata into the explicitly provided ones.
        tags = sorted(set(tags or []) | set(resolved_parent.tags or []))
        metadata = {**resolved_parent.metadata, **(metadata or {})}
    if enabled is None:
        # Fall back to whatever was in effect before entering this block.
        enabled = saved_context.get("enabled")
    _set_tracing_context(
        {
            "parent": resolved_parent,
            "project_name": project_name,
            "tags": tags,
            "metadata": metadata,
            "enabled": enabled,
            "client": client,
        }
    )
    try:
        yield
    finally:
        # Always restore the outer context, even if the body raised.
        _set_tracing_context(saved_context)
# Alias for backwards compatibility with earlier SDK versions.
get_run_tree_context = get_current_run_tree
def is_traceable_function(func: Any) -> TypeGuard[SupportsLangsmithExtra[P, R]]:
    """Check if a function is @traceable decorated.

    Also recognizes functools.partial wrappers around traceable functions and
    callable objects whose __call__ is traceable.
    """
    if _is_traceable_function(func):
        return True
    if isinstance(func, functools.partial) and _is_traceable_function(func.func):
        return True
    return hasattr(func, "__call__") and _is_traceable_function(func.__call__)
def ensure_traceable(
    func: Callable[P, R],
    *,
    name: Optional[str] = None,
    metadata: Optional[Mapping[str, Any]] = None,
    tags: Optional[List[str]] = None,
    client: Optional[ls_client.Client] = None,
    reduce_fn: Optional[Callable[[Sequence], dict]] = None,
    project_name: Optional[str] = None,
    process_inputs: Optional[Callable[[dict], dict]] = None,
    process_outputs: Optional[Callable[..., dict]] = None,
) -> SupportsLangsmithExtra[P, R]:
    """Ensure that a function is traceable.

    Already-traceable functions are returned unchanged; anything else is
    wrapped with @traceable using the provided options.
    """
    if is_traceable_function(func):
        # Nothing to do; hand the function back untouched.
        return func
    decorator = traceable(
        name=name,
        metadata=metadata,
        tags=tags,
        client=client,
        reduce_fn=reduce_fn,
        project_name=project_name,
        process_inputs=process_inputs,
        process_outputs=process_outputs,
    )
    return decorator(func)
def is_async(func: Callable) -> bool:
    """Inspect function or wrapped function to see if it is async.

    Checks the function itself first, then the `__wrapped__` attribute set by
    functools.wraps-style decorators.
    """
    if inspect.iscoroutinefunction(func):
        return True
    wrapped = getattr(func, "__wrapped__", None)
    return wrapped is not None and inspect.iscoroutinefunction(wrapped)
class LangSmithExtra(TypedDict, total=False):
    """Any additional info to be injected into the run dynamically.

    Passed per-call to @traceable functions via the `langsmith_extra` kwarg;
    all keys are optional (total=False).
    """

    name: Optional[str]
    """Optional name for the run."""
    reference_example_id: Optional[ls_client.ID_TYPE]
    """Optional ID of a reference example."""
    run_extra: Optional[Dict]
    """Optional additional run information."""
    parent: Optional[Union[run_trees.RunTree, str, Mapping]]
    """Optional parent run, can be a RunTree, string, or mapping."""
    run_tree: Optional[run_trees.RunTree]  # TODO: Deprecate
    """Optional run tree (deprecated)."""
    project_name: Optional[str]
    """Optional name of the project."""
    metadata: Optional[Dict[str, Any]]
    """Optional metadata for the run."""
    tags: Optional[List[str]]
    """Optional list of tags for the run."""
    run_id: Optional[ls_client.ID_TYPE]
    """Optional ID for the run."""
    client: Optional[ls_client.Client]
    """Optional LangSmith client."""
    on_end: Optional[Callable[[run_trees.RunTree], Any]]
    """Optional callback function to be called when the run ends."""
# Return type (covariant) and parameter spec used to type traceable wrappers.
R = TypeVar("R", covariant=True)
P = ParamSpec("P")
@runtime_checkable
class SupportsLangsmithExtra(Protocol, Generic[P, R]):
    """Implementations of this Protocol accept an optional langsmith_extra parameter.

    Args:
        *args: Variable length arguments.
        langsmith_extra (Optional[LangSmithExtra]): Optional dictionary of
            additional parameters for Langsmith.
        **kwargs: Keyword arguments.

    Returns:
        R: The return type of the callable.
    """

    def __call__(
        self,
        *args: P.args,
        langsmith_extra: Optional[LangSmithExtra] = None,
        **kwargs: P.kwargs,
    ) -> R:
        """Call the instance when it is called as a function.

        Args:
            *args: Variable length argument list.
            langsmith_extra: Optional dictionary containing additional
                parameters specific to Langsmith.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            R: The return value of the method.
        """
        ...
# Overload: bare @traceable usage — decorates the function directly.
@overload
def traceable(
    func: Callable[P, R],
) -> SupportsLangsmithExtra[P, R]: ...
# Overload: @traceable(...) usage with options — returns a decorator.
@overload
def traceable(
    run_type: ls_client.RUN_TYPE_T = "chain",
    *,
    name: Optional[str] = None,
    metadata: Optional[Mapping[str, Any]] = None,
    tags: Optional[List[str]] = None,
    client: Optional[ls_client.Client] = None,
    reduce_fn: Optional[Callable[[Sequence], dict]] = None,
    project_name: Optional[str] = None,
    process_inputs: Optional[Callable[[dict], dict]] = None,
    process_outputs: Optional[Callable[..., dict]] = None,
    _invocation_params_fn: Optional[Callable[[dict], dict]] = None,
) -> Callable[[Callable[P, R]], SupportsLangsmithExtra[P, R]]: ...
def traceable(
    *args: Any,
    **kwargs: Any,
) -> Union[Callable, Callable[[Callable], Callable]]:
    """Trace a function with langsmith.
    Args:
        run_type: The type of run (span) to create. Examples: llm, chain, tool, prompt,
            retriever, etc. Defaults to "chain".
        name: The name of the run. Defaults to the function name.
        metadata: The metadata to add to the run. Defaults to None.
        tags: The tags to add to the run. Defaults to None.
        client: The client to use for logging the run to LangSmith. Defaults to
            None, which will use the default client.
        reduce_fn: A function to reduce the output of the function if the function
            returns a generator. Defaults to None, which means the values will be
            logged as a list. Note: if the iterator is never exhausted (e.g.
            the function returns an infinite generator), this will never be
            called, and the run itself will be stuck in a pending state.
        project_name: The name of the project to log the run to. Defaults to None,
            which will use the default project.
        process_inputs: Custom serialization / processing function for inputs.
            Defaults to None.
        process_outputs: Custom serialization / processing function for outputs.
            Defaults to None.
    Returns:
        Union[Callable, Callable[[Callable], Callable]]: The decorated function.
    Note:
        - Requires that LANGSMITH_TRACING_V2 be set to 'true' in the environment.
    Examples:
        Basic usage:
        .. code-block:: python
            @traceable
            def my_function(x: float, y: float) -> float:
                return x + y
            my_function(5, 6)
            @traceable
            async def my_async_function(query_params: dict) -> dict:
                async with httpx.AsyncClient() as http_client:
                    response = await http_client.get(
                        "https://api.example.com/data",
                        params=query_params,
                    )
                    return response.json()
            asyncio.run(my_async_function({"param": "value"}))
        Streaming data with a generator:
        .. code-block:: python
            @traceable
            def my_generator(n: int) -> Iterable:
                for i in range(n):
                    yield i
            for item in my_generator(5):
                print(item)
        Async streaming data:
        .. code-block:: python
            @traceable
            async def my_async_generator(query_params: dict) -> Iterable:
                async with httpx.AsyncClient() as http_client:
                    response = await http_client.get(
                        "https://api.example.com/data",
                        params=query_params,
                    )
                    for item in response.json():
                        yield item
            async def async_code():
                async for item in my_async_generator({"param": "value"}):
                    print(item)
            asyncio.run(async_code())
        Specifying a run type and name:
        .. code-block:: python
            @traceable(name="CustomName", run_type="tool")
            def another_function(a: float, b: float) -> float:
                return a * b
            another_function(5, 6)
        Logging with custom metadata and tags:
        .. code-block:: python
            @traceable(
                metadata={"version": "1.0", "author": "John Doe"}, tags=["beta", "test"]
            )
            def tagged_function(x):
                return x**2
            tagged_function(5)
        Specifying a custom client and project name:
        .. code-block:: python
            custom_client = Client(api_key="your_api_key")
            @traceable(client=custom_client, project_name="My Special Project")
            def project_specific_function(data):
                return data
            project_specific_function({"data": "to process"})
        Manually passing langsmith_extra:
        .. code-block:: python
            @traceable
            def manual_extra_function(x):
                return x**2
            manual_extra_function(5, langsmith_extra={"metadata": {"version": "1.0"}})
    """
    # A single positional string argument is interpreted as the run_type
    # (e.g. @traceable("tool")); otherwise run_type comes from kwargs.
    run_type = cast(
        ls_client.RUN_TYPE_T,
        (
            args[0]
            if args and isinstance(args[0], str)
            else (kwargs.pop("run_type", None) or "chain")
        ),
    )
    # Unknown run types only warn; the value is still forwarded as-is.
    if run_type not in _VALID_RUN_TYPES:
        warnings.warn(
            f"Unrecognized run_type: {run_type}. Must be one of: {_VALID_RUN_TYPES}."
            f" Did you mean @traceable(name='{run_type}')?"
        )
    if len(args) > 1:
        warnings.warn(
            "The `traceable()` decorator only accepts one positional argument, "
            "which should be the run_type. All other arguments should be passed "
            "as keyword arguments."
        )
    if "extra" in kwargs:
        warnings.warn(
            "The `extra` keyword argument is deprecated. Please use `metadata` "
            "instead.",
            DeprecationWarning,
        )
    reduce_fn = kwargs.pop("reduce_fn", None)
    # Capture all decoration-time options; the wrappers below close over this.
    container_input = _ContainerInput(
        # TODO: Deprecate raw extra
        extra_outer=kwargs.pop("extra", None),
        name=kwargs.pop("name", None),
        metadata=kwargs.pop("metadata", None),
        tags=kwargs.pop("tags", None),
        client=kwargs.pop("client", None),
        project_name=kwargs.pop("project_name", None),
        run_type=run_type,
        process_inputs=kwargs.pop("process_inputs", None),
        invocation_params_fn=kwargs.pop("_invocation_params_fn", None),
    )
    outputs_processor = kwargs.pop("process_outputs", None)
    _on_run_end = functools.partial(
        _handle_container_end, outputs_processor=outputs_processor
    )
    # Anything left in kwargs at this point was not consumed above.
    if kwargs:
        warnings.warn(
            f"The following keyword arguments are not recognized and will be ignored: "
            f"{sorted(kwargs.keys())}.",
            DeprecationWarning,
        )
    # Builds the traced wrapper for ``func``; one of several wrapper flavors
    # is chosen near the bottom based on func's calling convention.
    def decorator(func: Callable):
        func_sig = inspect.signature(func)
        func_accepts_parent_run = func_sig.parameters.get("run_tree", None) is not None
        func_accepts_config = func_sig.parameters.get("config", None) is not None
        @functools.wraps(func)
        async def async_wrapper(
            *args: Any,
            langsmith_extra: Optional[LangSmithExtra] = None,
            **kwargs: Any,
        ) -> Any:
            """Async version of wrapper function."""
            # _setup_run does (potentially) blocking work; run it off-loop.
            run_container = await aitertools.aio_to_thread(
                _setup_run,
                func,
                container_input=container_input,
                langsmith_extra=langsmith_extra,
                args=args,
                kwargs=kwargs,
            )
            try:
                accepts_context = aitertools.asyncio_accepts_context()
                if func_accepts_parent_run:
                    kwargs["run_tree"] = run_container["new_run"]
                if not func_accepts_config:
                    kwargs.pop("config", None)
                fr_coro = func(*args, **kwargs)
                if accepts_context:
                    function_result = await asyncio.create_task(  # type: ignore[call-arg]
                        fr_coro, context=run_container["context"]
                    )
                else:
                    # Python < 3.11
                    with tracing_context(
                        **get_tracing_context(run_container["context"])
                    ):
                        function_result = await fr_coro
            except BaseException as e:
                # shield from cancellation, given we're catching all exceptions
                await asyncio.shield(
                    aitertools.aio_to_thread(_on_run_end, run_container, error=e)
                )
                raise e
            await aitertools.aio_to_thread(
                _on_run_end, run_container, outputs=function_result
            )
            return function_result
        @functools.wraps(func)
        async def async_generator_wrapper(
            *args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any
        ) -> AsyncGenerator:
            run_container = await aitertools.aio_to_thread(
                _setup_run,
                func,
                container_input=container_input,
                langsmith_extra=langsmith_extra,
                args=args,
                kwargs=kwargs,
            )
            # Yielded items are accumulated here and reduced at the end.
            results: List[Any] = []
            try:
                if func_accepts_parent_run:
                    kwargs["run_tree"] = run_container["new_run"]
                # TODO: Nesting is ambiguous if a nested traceable function is only
                # called mid-generation. Need to explicitly accept run_tree to get
                # around this.
                if not func_accepts_config:
                    kwargs.pop("config", None)
                async_gen_result = func(*args, **kwargs)
                # Can't iterate through if it's a coroutine
                accepts_context = aitertools.asyncio_accepts_context()
                if inspect.iscoroutine(async_gen_result):
                    if accepts_context:
                        async_gen_result = await asyncio.create_task(
                            async_gen_result, context=run_container["context"]
                        )  # type: ignore
                    else:
                        # Python < 3.11
                        with tracing_context(
                            **get_tracing_context(run_container["context"])
                        ):
                            async_gen_result = await async_gen_result
                async for item in _process_async_iterator(
                    generator=async_gen_result,
                    run_container=run_container,
                    is_llm_run=(
                        run_container["new_run"].run_type == "llm"
                        if run_container["new_run"]
                        else False
                    ),
                    accepts_context=accepts_context,
                    results=results,
                ):
                    yield item
            except BaseException as e:
                # Shield finalization from cancellation so the run isn't lost.
                await asyncio.shield(
                    aitertools.aio_to_thread(
                        _on_run_end,
                        run_container,
                        error=e,
                        outputs=_get_function_result(results, reduce_fn),
                    )
                )
                raise e
            await aitertools.aio_to_thread(
                _on_run_end,
                run_container,
                outputs=_get_function_result(results, reduce_fn),
            )
        @functools.wraps(func)
        def wrapper(
            *args: Any,
            langsmith_extra: Optional[LangSmithExtra] = None,
            **kwargs: Any,
        ) -> Any:
            """Create a new run or create_child() if run is passed in kwargs."""
            run_container = _setup_run(
                func,
                container_input=container_input,
                langsmith_extra=langsmith_extra,
                args=args,
                kwargs=kwargs,
            )
            func_accepts_parent_run = (
                inspect.signature(func).parameters.get("run_tree", None) is not None
            )
            try:
                if func_accepts_parent_run:
                    kwargs["run_tree"] = run_container["new_run"]
                if not func_accepts_config:
                    kwargs.pop("config", None)
                # Run the user function inside the copied tracing context.
                function_result = run_container["context"].run(func, *args, **kwargs)
            except BaseException as e:
                _on_run_end(run_container, error=e)
                raise e
            _on_run_end(run_container, outputs=function_result)
            return function_result
        @functools.wraps(func)
        def generator_wrapper(
            *args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any
        ) -> Any:
            run_container = _setup_run(
                func,
                container_input=container_input,
                langsmith_extra=langsmith_extra,
                args=args,
                kwargs=kwargs,
            )
            func_accepts_parent_run = (
                inspect.signature(func).parameters.get("run_tree", None) is not None
            )
            results: List[Any] = []
            function_return: Any = None
            try:
                if func_accepts_parent_run:
                    kwargs["run_tree"] = run_container["new_run"]
                if not func_accepts_config:
                    kwargs.pop("config", None)
                generator_result = run_container["context"].run(func, *args, **kwargs)
                function_return = yield from _process_iterator(
                    generator_result,
                    run_container,
                    is_llm_run=run_type == "llm",
                    results=results,
                )
                # A generator's explicit return value is logged as a final item.
                if function_return is not None:
                    results.append(function_return)
            except BaseException as e:
                _on_run_end(
                    run_container,
                    error=e,
                    outputs=_get_function_result(results, reduce_fn),
                )
                raise e
            _on_run_end(run_container, outputs=_get_function_result(results, reduce_fn))
            return function_return
        # "Stream" functions (used in methods like OpenAI/Anthropic's SDKs)
        # are functions that return iterable responses and should not be
        # considered complete until the streaming is completed
        @functools.wraps(func)
        def stream_wrapper(
            *args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any
        ) -> Any:
            trace_container = _setup_run(
                func,
                container_input=container_input,
                langsmith_extra=langsmith_extra,
                args=args,
                kwargs=kwargs,
            )
            try:
                if func_accepts_parent_run:
                    kwargs["run_tree"] = trace_container["new_run"]
                if not func_accepts_config:
                    kwargs.pop("config", None)
                stream = trace_container["context"].run(func, *args, **kwargs)
            except Exception as e:
                _on_run_end(trace_container, error=e)
                raise
            if hasattr(stream, "__iter__"):
                return _TracedStream(stream, trace_container, reduce_fn)
            elif hasattr(stream, "__aiter__"):
                # sync function -> async iterable (unexpected)
                return _TracedAsyncStream(stream, trace_container, reduce_fn)
            # If it's not iterable, end the trace immediately
            _on_run_end(trace_container, outputs=stream)
            return stream
        @functools.wraps(func)
        async def async_stream_wrapper(
            *args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any
        ) -> Any:
            trace_container = await aitertools.aio_to_thread(
                _setup_run,
                func,
                container_input=container_input,
                langsmith_extra=langsmith_extra,
                args=args,
                kwargs=kwargs,
            )
            try:
                if func_accepts_parent_run:
                    kwargs["run_tree"] = trace_container["new_run"]
                if not func_accepts_config:
                    kwargs.pop("config", None)
                stream = await func(*args, **kwargs)
            except Exception as e:
                await aitertools.aio_to_thread(_on_run_end, trace_container, error=e)
                raise
            if hasattr(stream, "__aiter__"):
                return _TracedAsyncStream(stream, trace_container, reduce_fn)
            elif hasattr(stream, "__iter__"):
                # Async function -> sync iterable
                return _TracedStream(stream, trace_container, reduce_fn)
            # If it's not iterable, end the trace immediately
            await aitertools.aio_to_thread(_on_run_end, trace_container, outputs=stream)
            return stream
        # Pick the wrapper matching func's calling convention:
        # async generator / sync generator / coroutine / plain function,
        # with the stream variants selected when a reduce_fn is supplied.
        if inspect.isasyncgenfunction(func):
            selected_wrapper: Callable = async_generator_wrapper
        elif inspect.isgeneratorfunction(func):
            selected_wrapper = generator_wrapper
        elif is_async(func):
            if reduce_fn:
                selected_wrapper = async_stream_wrapper
            else:
                selected_wrapper = async_wrapper
        else:
            if reduce_fn:
                selected_wrapper = stream_wrapper
            else:
                selected_wrapper = wrapper
        setattr(selected_wrapper, "__langsmith_traceable__", True)
        # Advertise a keyword-only ``config`` parameter on the wrapper's
        # signature (presumably for RunnableConfig interop — confirm), keeping
        # any **kwargs parameter last.
        sig = inspect.signature(selected_wrapper)
        if not sig.parameters.get("config"):
            sig = sig.replace(
                parameters=[
                    *(
                        param
                        for param in sig.parameters.values()
                        if param.kind != inspect.Parameter.VAR_KEYWORD
                    ),
                    inspect.Parameter(
                        "config", inspect.Parameter.KEYWORD_ONLY, default=None
                    ),
                    *(
                        param
                        for param in sig.parameters.values()
                        if param.kind == inspect.Parameter.VAR_KEYWORD
                    ),
                ]
            )
        selected_wrapper.__signature__ = sig  # type: ignore[attr-defined]
        return selected_wrapper
    # If the decorator is called with no arguments, then it's being used as a
    # decorator, so we return the decorator function
    if len(args) == 1 and callable(args[0]) and not kwargs:
        return decorator(args[0])
    # Else it's being used as a decorator factory, so we return the decorator
    return decorator
class trace:
    """Manage a LangSmith run in context.
    This class can be used as both a synchronous and asynchronous context manager.
    Args:
        name (str): Name of the run.
        run_type (ls_client.RUN_TYPE_T, optional): Type of run (e.g., "chain", "llm", "tool"). Defaults to "chain".
        inputs (Optional[Dict], optional): Initial input data for the run. Defaults to None.
        project_name (Optional[str], optional): Project name to associate the run with. Defaults to None.
        parent (Optional[Union[run_trees.RunTree, str, Mapping]], optional): Parent run. Can be a RunTree, dotted order string, or tracing headers. Defaults to None.
        tags (Optional[List[str]], optional): List of tags for the run. Defaults to None.
        metadata (Optional[Mapping[str, Any]], optional): Additional metadata for the run. Defaults to None.
        client (Optional[ls_client.Client], optional): LangSmith client for custom settings. Defaults to None.
        run_id (Optional[ls_client.ID_TYPE], optional): Preset identifier for the run. Defaults to None.
        reference_example_id (Optional[ls_client.ID_TYPE], optional): Associates run with a dataset example. Only for root runs in evaluation. Defaults to None.
        exceptions_to_handle (Optional[Tuple[Type[BaseException], ...]], optional): Exception types to ignore. Defaults to None.
        extra (Optional[Dict], optional): Extra data to send to LangSmith. Use 'metadata' instead. Defaults to None.
    Examples:
        Synchronous usage:
        .. code-block:: python
            >>> with trace("My Operation", run_type="tool", tags=["important"]) as run:
            ...     result = "foo"  # Perform operation
            ...     run.metadata["some-key"] = "some-value"
            ...     run.end(outputs={"result": result})
        Asynchronous usage:
        .. code-block:: python
            >>> async def main():
            ...     async with trace("Async Operation", run_type="tool", tags=["async"]) as run:
            ...         result = "foo"  # Await async operation
            ...         run.metadata["some-key"] = "some-value"
            ...         # "end" just adds the outputs and sets error to None
            ...         # The actual patching of the run happens when the context exits
            ...         run.end(outputs={"result": result})
            >>> asyncio.run(main())
        Handling specific exceptions:
        .. code-block:: python
            >>> import pytest
            >>> import sys
            >>> with trace("Test", exceptions_to_handle=(pytest.skip.Exception,)):
            ...     if sys.platform == "win32":  # Just an example
            ...         pytest.skip("Skipping test for windows")
            ...     result = "foo"  # Perform test operation
    """
    def __init__(
        self,
        name: str,
        run_type: ls_client.RUN_TYPE_T = "chain",
        *,
        inputs: Optional[Dict] = None,
        extra: Optional[Dict] = None,
        project_name: Optional[str] = None,
        parent: Optional[Union[run_trees.RunTree, str, Mapping]] = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Mapping[str, Any]] = None,
        client: Optional[ls_client.Client] = None,
        run_id: Optional[ls_client.ID_TYPE] = None,
        reference_example_id: Optional[ls_client.ID_TYPE] = None,
        exceptions_to_handle: Optional[Tuple[Type[BaseException], ...]] = None,
        attachments: Optional[schemas.Attachments] = None,
        **kwargs: Any,
    ):
        """Initialize the trace context manager.
        Warns if unsupported kwargs are passed.
        """
        if kwargs:
            warnings.warn(
                "The `trace` context manager no longer supports the following kwargs: "
                f"{sorted(kwargs.keys())}.",
                DeprecationWarning,
            )
        self.name = name
        self.run_type = run_type
        self.inputs = inputs
        self.attachments = attachments
        self.extra = extra
        self.project_name = project_name
        self.parent = parent
        # The run tree is deprecated. Keeping for backwards compat.
        # Will fully merge within parent later.
        self.run_tree = kwargs.get("run_tree")
        self.tags = tags
        self.metadata = metadata
        self.client = client
        self.run_id = run_id
        self.reference_example_id = reference_example_id
        self.exceptions_to_handle = exceptions_to_handle
        # Populated by _setup(); None until the context is entered.
        self.new_run: Optional[run_trees.RunTree] = None
        # Snapshot of the tracing context taken on enter, restored on exit.
        self.old_ctx: Optional[dict] = None
    def _setup(self) -> run_trees.RunTree:
        """Set up the tracing context and create a new run.
        This method initializes the tracing context, merges tags and metadata,
        creates a new run (either as a child of an existing run or as a new root run),
        and sets up the necessary context variables.
        Returns:
            run_trees.RunTree: The newly created run.
        """
        self.old_ctx = get_tracing_context()
        enabled = utils.tracing_is_enabled(self.old_ctx)
        outer_tags = _TAGS.get()
        outer_metadata = _METADATA.get()
        client_ = self.client or self.old_ctx.get("client")
        parent_run_ = _get_parent_run(
            {
                "parent": self.parent,
                "run_tree": self.run_tree,
                "client": client_,
            }
        )
        # Merge context tags/metadata with the ones passed to this trace.
        tags_ = sorted(set((self.tags or []) + (outer_tags or [])))
        metadata = {
            **(self.metadata or {}),
            **(outer_metadata or {}),
            "ls_method": "trace",
        }
        # NOTE(review): when `extra` is provided this mutates the caller's
        # dict in place ("metadata" key is overwritten) — confirm intended.
        extra_outer = self.extra or {}
        extra_outer["metadata"] = metadata
        project_name_ = _get_project_name(self.project_name)
        if parent_run_ is not None and enabled:
            self.new_run = parent_run_.create_child(
                name=self.name,
                run_id=self.run_id,
                run_type=self.run_type,
                extra=extra_outer,
                inputs=self.inputs,
                tags=tags_,
                attachments=self.attachments,
            )
        else:
            self.new_run = run_trees.RunTree(
                name=self.name,
                id=ls_client._ensure_uuid(self.run_id),
                reference_example_id=ls_client._ensure_uuid(
                    self.reference_example_id, accept_null=True
                ),
                run_type=self.run_type,
                extra=extra_outer,
                project_name=project_name_ or "default",
                inputs=self.inputs or {},
                tags=tags_,
                client=client_,  # type: ignore
                attachments=self.attachments or {},
            )
        # post() only when tracing is exactly True; the context vars below are
        # set for any truthy `enabled` value.
        if enabled is True:
            self.new_run.post()
        if enabled:
            _TAGS.set(tags_)
            _METADATA.set(metadata)
            _PARENT_RUN_TREE.set(self.new_run)
            _PROJECT_NAME.set(project_name_)
            _CLIENT.set(client_)
        return self.new_run
    def _teardown(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        """Clean up the tracing context and finalize the run.
        This method handles exceptions, ends the run if necessary,
        patches the run if it's not disabled, and resets the tracing context.
        Args:
            exc_type: The type of the exception that occurred, if any.
            exc_value: The exception instance that occurred, if any.
            traceback: The traceback object associated with the exception, if any.
        """
        if self.new_run is None:
            return
        if exc_type is not None:
            # Exceptions listed in exceptions_to_handle end the run without an
            # error payload; everything else records a formatted traceback.
            if self.exceptions_to_handle and issubclass(
                exc_type, self.exceptions_to_handle
            ):
                tb = None
            else:
                tb = utils._format_exc()
                tb = f"{exc_type.__name__}: {exc_value}\n\n{tb}"
            self.new_run.end(error=tb)
        if self.old_ctx is not None:
            enabled = utils.tracing_is_enabled(self.old_ctx)
            if enabled is True:
                self.new_run.patch()
            _set_tracing_context(self.old_ctx)
        else:
            warnings.warn("Tracing context was not set up properly.", RuntimeWarning)
    def __enter__(self) -> run_trees.RunTree:
        """Enter the context manager synchronously.
        Returns:
            run_trees.RunTree: The newly created run.
        """
        return self._setup()
    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]] = None,
        exc_value: Optional[BaseException] = None,
        traceback: Optional[TracebackType] = None,
    ) -> None:
        """Exit the context manager synchronously.
        Args:
            exc_type: The type of the exception that occurred, if any.
            exc_value: The exception instance that occurred, if any.
            traceback: The traceback object associated with the exception, if any.
        """
        self._teardown(exc_type, exc_value, traceback)
    async def __aenter__(self) -> run_trees.RunTree:
        """Enter the context manager asynchronously.
        Returns:
            run_trees.RunTree: The newly created run.
        """
        ctx = copy_context()
        result = await aitertools.aio_to_thread(self._setup, __ctx=ctx)
        # Set the context for the current thread
        _set_tracing_context(get_tracing_context(ctx))
        return result
    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]] = None,
        exc_value: Optional[BaseException] = None,
        traceback: Optional[TracebackType] = None,
    ) -> None:
        """Exit the context manager asynchronously.
        Args:
            exc_type: The type of the exception that occurred, if any.
            exc_value: The exception instance that occurred, if any.
            traceback: The traceback object associated with the exception, if any.
        """
        ctx = copy_context()
        if exc_type is not None:
            # Shield teardown from cancellation so the error is still recorded.
            await asyncio.shield(
                aitertools.aio_to_thread(
                    self._teardown, exc_type, exc_value, traceback, __ctx=ctx
                )
            )
        else:
            await aitertools.aio_to_thread(
                self._teardown, exc_type, exc_value, traceback, __ctx=ctx
            )
        _set_tracing_context(get_tracing_context(ctx))
def _get_project_name(project_name: Optional[str]) -> Optional[str]:
    """Resolve the project name for a new run.

    Precedence: the project-name contextvar (to keep the run tree
    consistent), then the parent run tree's session name, then the value
    passed in, then the environment default.
    """
    parent = _PARENT_RUN_TREE.get()
    candidates = (
        _PROJECT_NAME.get(),
        parent.session_name if parent else None,
        project_name,
    )
    for candidate in candidates:
        if candidate:
            return candidate
    return utils.get_tracer_project()
def as_runnable(traceable_fn: Callable) -> Runnable:
    """Convert a function wrapped by the LangSmith @traceable decorator to a Runnable.
    Args:
        traceable_fn (Callable): The function wrapped by the @traceable decorator.
    Returns:
        Runnable: A Runnable object that maintains a consistent LangSmith
            tracing context.
    Raises:
        ImportError: If langchain module is not installed.
        ValueError: If the provided function is not wrapped by the @traceable decorator.
    Example:
        >>> @traceable
        ... def my_function(input_data):
        ...     # Function implementation
        ...     pass
        >>> runnable = as_runnable(my_function)
    """
    try:
        from langchain_core.runnables import RunnableConfig, RunnableLambda
        from langchain_core.runnables.utils import Input, Output
    except ImportError as e:
        raise ImportError(
            "as_runnable requires langchain-core to be installed. "
            "You can install it with `pip install langchain-core`."
        ) from e
    if not is_traceable_function(traceable_fn):
        # Include the function's source in the error to help identify
        # which callable was passed; best-effort only.
        try:
            fn_src = inspect.getsource(traceable_fn)
        except Exception:
            fn_src = "<source unavailable>"
        raise ValueError(
            f"as_runnable expects a function wrapped by the LangSmith"
            f" @traceable decorator. Got {traceable_fn} defined as:\n{fn_src}"
        )
    class RunnableTraceable(RunnableLambda):
        """Converts a @traceable decorated function to a Runnable.
        This helps maintain a consistent LangSmith tracing context.
        """
        def __init__(
            self,
            func: Callable,
            afunc: Optional[Callable[..., Awaitable[Output]]] = None,
        ) -> None:
            wrapped: Optional[Callable[[Input], Output]] = None
            awrapped = self._wrap_async(afunc)
            # A coroutine `func` is routed to the async wrapper; providing
            # both a coroutine func and afunc is ambiguous and rejected.
            if is_async(func):
                if awrapped is not None:
                    raise TypeError(
                        "Func was provided as a coroutine function, but afunc was "
                        "also provided. If providing both, func should be a regular "
                        "function to avoid ambiguity."
                    )
                wrapped = cast(Callable[[Input], Output], self._wrap_async(func))
            elif is_traceable_function(func):
                wrapped = cast(Callable[[Input], Output], self._wrap_sync(func))
            if wrapped is None:
                raise ValueError(
                    f"{self.__class__.__name__} expects a function wrapped by"
                    " the LangSmith"
                    f" @traceable decorator. Got {func}"
                )
            super().__init__(
                wrapped,
                cast(
                    Optional[Callable[[Input], Awaitable[Output]]],
                    awrapped,
                ),
            )
        @staticmethod
        def _wrap_sync(
            func: Callable[..., Output],
        ) -> Callable[[Input, RunnableConfig], Output]:
            """Wrap a sync traceable so it receives the run tree from the RunnableConfig."""
            def wrap_traceable(inputs: dict, config: RunnableConfig) -> Any:
                run_tree = run_trees.RunTree.from_runnable_config(cast(dict, config))
                return func(**inputs, langsmith_extra={"run_tree": run_tree})
            return cast(Callable[[Input, RunnableConfig], Output], wrap_traceable)
        @staticmethod
        def _wrap_async(
            afunc: Optional[Callable[..., Awaitable[Output]]],
        ) -> Optional[Callable[[Input, RunnableConfig], Awaitable[Output]]]:
            """Wrap an async traceable so it receives the run tree from the RunnableConfig."""
            if afunc is None:
                return None
            if not is_traceable_function(afunc):
                raise ValueError(
                    "RunnableTraceable expects a function wrapped by the LangSmith"
                    f" @traceable decorator. Got {afunc}"
                )
            afunc_ = cast(Callable[..., Awaitable[Output]], afunc)
            async def awrap_traceable(inputs: dict, config: RunnableConfig) -> Any:
                run_tree = run_trees.RunTree.from_runnable_config(cast(dict, config))
                return await afunc_(**inputs, langsmith_extra={"run_tree": run_tree})
            return cast(
                Callable[[Input, RunnableConfig], Awaitable[Output]], awrap_traceable
            )
    return RunnableTraceable(traceable_fn)
## Private Methods and Objects
# Known run (span) types accepted by @traceable. An unknown value only
# triggers a warning in `traceable` and is still passed through unchanged.
_VALID_RUN_TYPES = {
    "tool",
    "chain",
    "llm",
    "retriever",
    "embedding",
    "prompt",
    "parser",
}
class _TraceableContainer(TypedDict, total=False):
    """Per-invocation state for a traceable run.

    Holds the newly created run (None when tracing is disabled) along with
    the outer context values captured when the run was set up.
    """
    # The run created for this invocation; None when tracing is disabled.
    new_run: Optional[run_trees.RunTree]
    # Project the run is (or would be) logged to.
    project_name: Optional[str]
    # Project/metadata/tags that were active before this run started.
    outer_project: Optional[str]
    outer_metadata: Optional[Dict[str, Any]]
    outer_tags: Optional[List[str]]
    # Optional callback invoked with the finished run tree (see _container_end).
    on_end: Optional[Callable[[run_trees.RunTree], Any]]
    # Copied contextvars.Context the traced function is executed in.
    context: contextvars.Context
class _ContainerInput(TypedDict, total=False):
    """Configuration captured at @traceable decoration time."""
    # Raw `extra` dict (deprecated in favor of `metadata`).
    extra_outer: Optional[Dict]
    name: Optional[str]
    metadata: Optional[Dict[str, Any]]
    tags: Optional[List[str]]
    client: Optional[ls_client.Client]
    reduce_fn: Optional[Callable]
    project_name: Optional[str]
    run_type: ls_client.RUN_TYPE_T
    # Hook to transform inputs before they are logged.
    process_inputs: Optional[Callable[[dict], dict]]
    # Hook to derive invocation params (merged into metadata) from inputs.
    invocation_params_fn: Optional[Callable[[dict], dict]]
def _container_end(
    container: _TraceableContainer,
    outputs: Optional[Any] = None,
    error: Optional[BaseException] = None,
) -> None:
    """Finalize the container's run: record outputs/error, patch, fire on_end."""
    run_tree = container.get("new_run")
    if run_tree is None:
        # Tracing was disabled for this invocation; nothing to finalize.
        return
    if isinstance(outputs, dict):
        outputs_ = outputs
    else:
        # Non-dict results are wrapped under a single "output" key.
        outputs_ = {"output": outputs}
    if error:
        error_ = f"{repr(error)}\n\n{utils._format_exc()}"
    else:
        error_ = None
    run_tree.end(outputs=outputs_, error=error_)
    if utils.tracing_is_enabled() is True:
        run_tree.patch()
    on_end = container.get("on_end")
    if callable(on_end):
        try:
            on_end(run_tree)
        except BaseException as e:
            LOGGER.warning(f"Failed to run on_end function: {e}")
def _collect_extra(extra_outer: dict, langsmith_extra: LangSmithExtra) -> dict:
    """Merge decorator-time extra with any call-time ``run_extra`` override.

    Returns ``extra_outer`` unchanged when no override is present; otherwise
    a new dict where ``run_extra`` keys win.
    """
    run_extra = langsmith_extra.get("run_extra", None)
    if not run_extra:
        return extra_outer
    return {**extra_outer, **run_extra}
def _get_parent_run(
    langsmith_extra: LangSmithExtra,
    config: Optional[dict] = None,
) -> Optional[run_trees.RunTree]:
    """Resolve the parent run for a new traceable run, if any.

    Precedence: an explicit ``parent`` (RunTree, tracing-headers dict, or
    dotted-order string), then the legacy ``run_tree``, then a run tree
    derived from the LangChain RunnableConfig, falling back to the current
    context run tree.
    """
    parent = langsmith_extra.get("parent")
    if isinstance(parent, run_trees.RunTree):
        return parent
    if isinstance(parent, dict):
        # Parent passed as tracing headers (e.g. from a distributed caller).
        return run_trees.RunTree.from_headers(
            parent,
            client=langsmith_extra.get("client"),
            # Precedence: headers -> cvar -> explicit -> env var
            project_name=_get_project_name(langsmith_extra.get("project_name")),
        )
    if isinstance(parent, str):
        # Parent passed as a dotted-order string.
        dort = run_trees.RunTree.from_dotted_order(
            parent,
            client=langsmith_extra.get("client"),
            # Precedence: cvar -> explicit -> env var
            project_name=_get_project_name(langsmith_extra.get("project_name")),
        )
        return dort
    run_tree = langsmith_extra.get("run_tree")
    if run_tree:
        return run_tree
    crt = get_current_run_tree()
    if _runtime_env.get_langchain_core_version() is not None:
        if rt := run_trees.RunTree.from_runnable_config(
            config, client=langsmith_extra.get("client")
        ):
            # Still need to break ties when alternating between traceable and
            # LangChain code.
            # Nesting: LC -> LS -> LS, we want to still use LS as the parent
            # Otherwise would look like LC -> {LS, LS} (siblings)
            if (
                not crt  # Simple LC -> LS
                # Let user override if manually passed in or invoked in a
                # RunnableSequence. This is a naive check.
                or (config is not None and config.get("callbacks"))
                # If the LangChain dotted order is more nested than the LangSmith
                # dotted order, use the LangChain run as the parent.
                # Note that this condition shouldn't be triggered in later
                # versions of core, since we also update the run_tree context
                # vars when updating the RunnableConfig context var.
                or rt.dotted_order > crt.dotted_order
            ):
                return rt
    return crt
def _setup_run(
    func: Callable,
    container_input: _ContainerInput,
    langsmith_extra: Optional[LangSmithExtra] = None,
    args: Any = None,
    kwargs: Any = None,
) -> _TraceableContainer:
    """Create a new run or create_child() if run is passed in kwargs.

    Args:
        func: The user function being traced; its name, signature, and
            docstring are recorded on the run.
        container_input: Options captured at decoration time.
        langsmith_extra: Call-time overrides (name, metadata, tags, client,
            parent, run_id, on_end, ...).
        args: Positional arguments of the traced call, if any.
        kwargs: Keyword arguments of the traced call, if any.

    Returns:
        A _TraceableContainer holding the new run (None when tracing is
        disabled and no parent exists) plus the context needed to run and
        finalize the traced function.
    """
    # Fix: honor the documented defaults. `kwargs.get("config")` and the
    # `*args` unpack below crash with None, so substitute empty containers.
    args = args if args is not None else ()
    kwargs = kwargs if kwargs is not None else {}
    extra_outer = container_input.get("extra_outer") or {}
    metadata = container_input.get("metadata")
    tags = container_input.get("tags")
    client = container_input.get("client")
    run_type = container_input.get("run_type") or "chain"
    outer_project = _PROJECT_NAME.get()
    langsmith_extra = langsmith_extra or LangSmithExtra()
    name = langsmith_extra.get("name") or container_input.get("name")
    client_ = langsmith_extra.get("client", client) or _CLIENT.get()
    parent_run_ = _get_parent_run(
        {**langsmith_extra, "client": client_}, kwargs.get("config")
    )
    project_cv = _PROJECT_NAME.get()
    # Project precedence: active trace context, then the parent run's session,
    # then the call-time override, then the decorator-time value, then the
    # environment default.
    selected_project = (
        project_cv  # From parent trace
        or (
            parent_run_.session_name if parent_run_ else None
        )  # from parent run attempt 2 (not managed by traceable)
        or langsmith_extra.get("project_name")  # at invocation time
        # Fix: _ContainerInput is total=False, so the key may be absent.
        or container_input.get("project_name")  # at decorator time
        or utils.get_tracer_project()  # default
    )
    reference_example_id = langsmith_extra.get("reference_example_id")
    id_ = langsmith_extra.get("run_id")
    if not parent_run_ and not utils.tracing_is_enabled():
        utils.log_once(
            logging.DEBUG,
            "LangSmith tracing is not enabled, returning original function.",
        )
        # Tracing disabled: return a container with no run so wrappers can
        # still execute the function in a copied context.
        return _TraceableContainer(
            new_run=None,
            project_name=selected_project,
            outer_project=outer_project,
            outer_metadata=None,
            outer_tags=None,
            on_end=langsmith_extra.get("on_end"),
            context=copy_context(),
        )
    id_ = id_ or str(uuid.uuid4())
    signature = inspect.signature(func)
    name_ = name or utils._get_function_name(func)
    docstring = func.__doc__
    extra_inner = _collect_extra(extra_outer, langsmith_extra)
    outer_metadata = _METADATA.get()
    outer_tags = _TAGS.get()
    context = copy_context()
    # Call-time metadata wins over the outer context's metadata.
    metadata_ = {
        **(langsmith_extra.get("metadata") or {}),
        **(outer_metadata or {}),
    }
    context.run(_METADATA.set, metadata_)
    metadata_.update(metadata or {})
    metadata_["ls_method"] = "traceable"
    extra_inner["metadata"] = metadata_
    inputs, attachments = _get_inputs_and_attachments_safe(signature, *args, **kwargs)
    invocation_params_fn = container_input.get("invocation_params_fn")
    if invocation_params_fn:
        # Best effort: derived invocation params are merged into metadata;
        # failures are logged and ignored.
        try:
            invocation_params = {
                k: v for k, v in invocation_params_fn(inputs).items() if v is not None
            }
            if invocation_params and isinstance(invocation_params, dict):
                metadata_.update(invocation_params)
        except BaseException as e:
            LOGGER.error(f"Failed to infer invocation params for {name_}: {e}")
    process_inputs = container_input.get("process_inputs")
    if process_inputs:
        # Best effort: a failing input processor leaves inputs unprocessed.
        try:
            inputs = process_inputs(inputs)
        except BaseException as e:
            LOGGER.error(f"Failed to filter inputs for {name_}: {e}")
    tags_ = (langsmith_extra.get("tags") or []) + (outer_tags or [])
    context.run(_TAGS.set, tags_)
    # Decorator-time tags are recorded on the run but not propagated into
    # the context (they were appended after _TAGS.set above).
    tags_ += tags or []
    if parent_run_ is not None:
        new_run = parent_run_.create_child(
            name=name_,
            run_type=run_type,
            serialized={
                "name": name,
                "signature": str(signature),
                "doc": docstring,
            },
            inputs=inputs,
            tags=tags_,
            extra=extra_inner,
            run_id=id_,
            attachments=attachments,
        )
    else:
        new_run = run_trees.RunTree(
            id=ls_client._ensure_uuid(id_),
            name=name_,
            serialized={
                "name": name,
                "signature": str(signature),
                "doc": docstring,
            },
            inputs=inputs,
            run_type=run_type,
            reference_example_id=ls_client._ensure_uuid(
                reference_example_id, accept_null=True
            ),
            project_name=selected_project,  # type: ignore[arg-type]
            extra=extra_inner,
            tags=tags_,
            client=client_,  # type: ignore
            attachments=attachments,
        )
    if utils.tracing_is_enabled() is True:
        try:
            new_run.post()
        except BaseException as e:
            LOGGER.error(f"Failed to post run {new_run.id}: {e}")
    response_container = _TraceableContainer(
        new_run=new_run,
        project_name=selected_project,
        outer_project=outer_project,
        outer_metadata=outer_metadata,
        outer_tags=outer_tags,
        on_end=langsmith_extra.get("on_end"),
        context=context,
    )
    context.run(_PROJECT_NAME.set, response_container["project_name"])
    context.run(_PARENT_RUN_TREE.set, response_container["new_run"])
    return response_container
def _handle_container_end(
    container: _TraceableContainer,
    outputs: Optional[Any] = None,
    error: Optional[BaseException] = None,
    outputs_processor: Optional[Callable[..., dict]] = None,
) -> None:
    """Finalize a run, optionally post-processing outputs first.

    Any failure (including in the processor) is logged and swallowed so
    tracing never breaks the traced function's caller.
    """
    try:
        if outputs_processor is not None:
            processed = outputs_processor(outputs)
        else:
            processed = outputs
        _container_end(container, outputs=processed, error=error)
    except BaseException as e:
        LOGGER.warning(f"Unable to process trace outputs: {repr(e)}")
def _is_traceable_function(func: Any) -> bool:
return getattr(func, "__langsmith_traceable__", False)
def _get_inputs(
signature: inspect.Signature, *args: Any, **kwargs: Any
) -> Dict[str, Any]:
"""Return a dictionary of inputs from the function signature."""
bound = signature.bind_partial(*args, **kwargs)
bound.apply_defaults()
arguments = dict(bound.arguments)
arguments.pop("self", None)
arguments.pop("cls", None)
for param_name, param in signature.parameters.items():
if param.kind == inspect.Parameter.VAR_KEYWORD:
# Update with the **kwargs, and remove the original entry
# This is to help flatten out keyword arguments
if param_name in arguments:
arguments.update(arguments[param_name])
arguments.pop(param_name)
return arguments
def _get_inputs_safe(
    signature: inspect.Signature, *args: Any, **kwargs: Any
) -> Dict[str, Any]:
    """Like ``_get_inputs`` but never raises; falls back to raw args/kwargs."""
    try:
        inputs = _get_inputs(signature, *args, **kwargs)
    except BaseException as e:
        LOGGER.debug(f"Failed to get inputs for {signature}: {e}")
        inputs = {"args": args, "kwargs": kwargs}
    return inputs
@functools.lru_cache(maxsize=1000)
def _attachment_args(signature: inspect.Signature) -> Set[str]:
    """Return names of parameters annotated as ``schemas.Attachment``.

    Both direct annotations and ``Annotated[..., Attachment]`` forms are
    recognized. Results are cached per signature object.
    """

    def _is_attachment(param: inspect.Parameter) -> bool:
        annotation = param.annotation
        if annotation == schemas.Attachment:
            return True
        return get_origin(annotation) == Annotated and any(
            arg == schemas.Attachment for arg in get_args(annotation)
        )

    return {
        name
        for name, param in signature.parameters.items()
        if _is_attachment(param)
    }
def _get_inputs_and_attachments_safe(
    signature: inspect.Signature, *args: Any, **kwargs: Any
) -> Tuple[dict, schemas.Attachments]:
    """Split call arguments into regular trace inputs and attachments.

    Parameters annotated as attachments are routed into the second
    mapping; everything else becomes a regular input. Never raises —
    falls back to raw args/kwargs on any failure.
    """
    try:
        inferred = _get_inputs(signature, *args, **kwargs)
        attachment_names = _attachment_args(signature)
        if not attachment_names:
            return inferred, {}
        inputs: dict = {}
        attachments: dict = {}
        for name, value in inferred.items():
            target = attachments if name in attachment_names else inputs
            target[name] = value
        return inputs, attachments
    except BaseException as e:
        LOGGER.debug(f"Failed to get inputs for {signature}: {e}")
        return {"args": args, "kwargs": kwargs}, {}
def _set_tracing_context(context: Dict[str, Any]):
    """Set the tracing context.

    Each key must exist in the module-level ``_CONTEXT_KEYS`` registry of
    ContextVars; the corresponding var is set to the provided value.
    """
    for key, value in context.items():
        _CONTEXT_KEYS[key].set(value)
def _process_iterator(
    generator: Iterator[T],
    run_container: _TraceableContainer,
    is_llm_run: bool,
    # Results is mutated
    results: List[Any],
) -> Generator[T, None, Any]:
    """Re-yield items from ``generator``, recording each for tracing.

    Every item is appended to ``results`` (mutated in place); for LLM
    runs a ``new_token`` event is also recorded on the run. The wrapped
    generator's return value is propagated via StopIteration.
    """
    try:
        while True:
            # Advance the generator inside the trace's context so any
            # context-dependent code in the generator sees it.
            item: T = run_container["context"].run(next, generator)  # type: ignore[arg-type]
            new_run = run_container["new_run"]
            if is_llm_run and new_run:
                new_run.add_event(
                    {
                        "name": "new_token",
                        "time": datetime.datetime.now(
                            datetime.timezone.utc
                        ).isoformat(),
                        "kwargs": {"token": item},
                    }
                )
            results.append(item)
            yield item
    except StopIteration as e:
        return e.value
async def _process_async_iterator(
    generator: AsyncIterator[T],
    run_container: _TraceableContainer,
    *,
    is_llm_run: bool,
    accepts_context: bool,
    results: List[Any],
) -> AsyncGenerator[T, None]:
    """Re-yield items from an async ``generator``, recording each for tracing.

    Every item is appended to ``results`` (mutated in place); for LLM runs
    a ``new_token`` event is also recorded per item. ``accepts_context``
    selects how the trace context is propagated to the generator.
    """
    try:
        while True:
            if accepts_context:
                # Pass the context directly to the task; the comment below
                # and the fallback branch indicate this form needs a newer
                # asyncio (create_task(..., context=...)).
                item = await asyncio.create_task(  # type: ignore[call-arg, var-annotated]
                    aitertools.py_anext(generator),  # type: ignore[arg-type]
                    context=run_container["context"],
                )
            else:
                # Python < 3.11
                # Re-enter the tracing context around each anext() call.
                with tracing_context(**get_tracing_context(run_container["context"])):
                    item = await aitertools.py_anext(generator)
            if is_llm_run and run_container["new_run"]:
                run_container["new_run"].add_event(
                    {
                        "name": "new_token",
                        "time": datetime.datetime.now(
                            datetime.timezone.utc
                        ).isoformat(),
                        "kwargs": {"token": item},
                    }
                )
            results.append(item)
            yield item
    except StopAsyncIteration:
        # Exhaustion is the normal termination path; nothing to propagate.
        pass
# Generic item type for the traced stream wrappers below.
T = TypeVar("T")
class _TracedStreamBase(Generic[T]):
    """Base class for traced stream objects.

    Wraps a sync or async stream so yielded items are accumulated and the
    trace is finalized exactly once — on completion, error, or garbage
    collection of the wrapper.
    """

    def __init__(
        self,
        stream: Union[Iterator[T], AsyncIterator[T]],
        trace_container: _TraceableContainer,
        reduce_fn: Optional[Callable] = None,
    ):
        # Dunder-style attribute names reduce the chance of colliding with
        # attributes proxied from the wrapped stream via __getattr__.
        self.__ls_stream__ = stream
        self.__ls_trace_container__ = trace_container
        # Set once the trace has been closed; guards double-finalization.
        self.__ls_completed__ = False
        # Optional aggregator applied to the accumulated items at trace end.
        self.__ls_reduce_fn__ = reduce_fn
        self.__ls_accumulated_output__: list[T] = []
        self.__is_llm_run__ = (
            trace_container["new_run"].run_type == "llm"
            if trace_container["new_run"]
            else False
        )

    def __getattr__(self, name: str):
        # Delegate unknown attribute lookups to the wrapped stream.
        return getattr(self.__ls_stream__, name)

    def __dir__(self):
        # Merge our attributes with the wrapped stream's for introspection.
        return list(set(dir(self.__class__) + dir(self.__ls_stream__)))

    def __repr__(self):
        return f"Traceable({self.__ls_stream__!r})"

    def __str__(self):
        return str(self.__ls_stream__)

    def __del__(self):
        # Best-effort finalization if the wrapper is dropped before the
        # stream is fully consumed; failures during interpreter teardown
        # are deliberately swallowed.
        try:
            if not self.__ls_completed__:
                self._end_trace()
        except BaseException:
            pass
        try:
            # Forward deletion to the wrapped stream if it defines __del__.
            self.__ls_stream__.__del__()
        except BaseException:
            pass

    def _end_trace(self, error: Optional[BaseException] = None):
        # Close the trace once, reducing the accumulated output when a
        # reducer was configured.
        if self.__ls_completed__:
            return
        try:
            if self.__ls_reduce_fn__:
                reduced_output = self.__ls_reduce_fn__(self.__ls_accumulated_output__)
            else:
                reduced_output = self.__ls_accumulated_output__
            _container_end(
                self.__ls_trace_container__, outputs=reduced_output, error=error
            )
        finally:
            # Mark complete even if ending the container raised.
            self.__ls_completed__ = True
class _TracedStream(_TracedStreamBase, Generic[T]):
    """A wrapper for synchronous stream objects that handles tracing."""

    def __init__(
        self,
        stream: Iterator[T],
        trace_container: _TraceableContainer,
        reduce_fn: Optional[Callable] = None,
    ):
        super().__init__(
            stream=stream, trace_container=trace_container, reduce_fn=reduce_fn
        )
        self.__ls_stream__ = stream
        # NOTE(review): attribute is spelled ``__ls__gen__`` (extra
        # underscore after "ls"), unlike the async twin's ``__ls_gen`` —
        # looks unintentional but is internal-only; confirm before renaming.
        self.__ls__gen__ = _process_iterator(
            self.__ls_stream__,
            self.__ls_trace_container__,
            is_llm_run=self.__is_llm_run__,
            results=self.__ls_accumulated_output__,
        )

    def __next__(self) -> T:
        try:
            return next(self.__ls__gen__)
        except StopIteration:
            # Natural exhaustion: finalize the trace before re-raising.
            self._end_trace()
            raise

    def __iter__(self) -> Iterator[T]:
        try:
            yield from self.__ls__gen__
        except BaseException as e:
            # Record the failure on the trace, then propagate.
            self._end_trace(error=e)
            raise
        else:
            self._end_trace()

    def __enter__(self):
        # Enter the wrapped stream's context, returning whatever it returns.
        return self.__ls_stream__.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            return self.__ls_stream__.__exit__(exc_type, exc_val, exc_tb)
        finally:
            # Always end the trace, attaching the exception if one occurred.
            self._end_trace(error=exc_val if exc_type else None)
class _TracedAsyncStream(_TracedStreamBase, Generic[T]):
    """A wrapper for asynchronous stream objects that handles tracing."""

    def __init__(
        self,
        stream: AsyncIterator[T],
        trace_container: _TraceableContainer,
        reduce_fn: Optional[Callable] = None,
    ):
        super().__init__(
            stream=stream, trace_container=trace_container, reduce_fn=reduce_fn
        )
        self.__ls_stream__ = stream
        # No trailing underscores, so this name is mangled to
        # _TracedAsyncStream__ls_gen (private to this class).
        self.__ls_gen = _process_async_iterator(
            generator=self.__ls_stream__,
            run_container=self.__ls_trace_container__,
            is_llm_run=self.__is_llm_run__,
            accepts_context=aitertools.asyncio_accepts_context(),
            results=self.__ls_accumulated_output__,
        )

    async def _aend_trace(self, error: Optional[BaseException] = None):
        # Run the (synchronous) trace finalizer off-thread, shielded so
        # task cancellation cannot leave the trace half-closed, then
        # restore the tracing context captured before the hop.
        ctx = copy_context()
        await asyncio.shield(
            aitertools.aio_to_thread(self._end_trace, error, __ctx=ctx)
        )
        _set_tracing_context(get_tracing_context(ctx))

    async def __anext__(self) -> T:
        try:
            return cast(T, await aitertools.py_anext(self.__ls_gen))
        except StopAsyncIteration:
            # Natural exhaustion: finalize the trace before re-raising.
            await self._aend_trace()
            raise

    async def __aiter__(self) -> AsyncIterator[T]:
        try:
            async for item in self.__ls_gen:
                yield item
        except BaseException:
            await self._aend_trace()
            raise
        else:
            await self._aend_trace()

    async def __aenter__(self):
        # Enter the wrapped stream's async context.
        return await self.__ls_stream__.__aenter__()

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        try:
            return await self.__ls_stream__.__aexit__(exc_type, exc_val, exc_tb)
        finally:
            # Always close the trace when leaving the async context.
            await self._aend_trace()
def _get_function_result(results: list, reduce_fn: Callable) -> Any:
if results:
if reduce_fn is not None:
try:
return reduce_fn(results)
except BaseException as e:
LOGGER.error(e)
return results
else:
return results
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/utils.py | """Generic utility functions."""
from __future__ import annotations
import contextlib
import contextvars
import copy
import enum
import functools
import logging
import os
import pathlib
import socket
import subprocess
import sys
import threading
import traceback
from concurrent.futures import Future, ThreadPoolExecutor
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
from urllib import parse as urllib_parse
import httpx
import requests
from typing_extensions import ParamSpec
from urllib3.util import Retry # type: ignore[import-untyped]
from langsmith import schemas as ls_schemas
_LOGGER = logging.getLogger(__name__)
# Exception hierarchy: every client-facing error derives from
# LangSmithError so callers can catch the whole family in one clause.
class LangSmithError(Exception):
    """An error occurred while communicating with the LangSmith API."""


class LangSmithAPIError(LangSmithError):
    """Internal server error while communicating with LangSmith."""


class LangSmithRequestTimeout(LangSmithError):
    """Client took too long to send request body."""


class LangSmithUserError(LangSmithError):
    """User error caused an exception when communicating with LangSmith."""


class LangSmithRateLimitError(LangSmithError):
    """You have exceeded the rate limit for the LangSmith API."""


class LangSmithAuthError(LangSmithError):
    """Couldn't authenticate with the LangSmith API."""


class LangSmithNotFoundError(LangSmithError):
    """Couldn't find the requested resource."""


class LangSmithConflictError(LangSmithError):
    """The resource already exists."""


class LangSmithConnectionError(LangSmithError):
    """Couldn't connect to the LangSmith API."""


## Warning classes
class LangSmithWarning(UserWarning):
    """Base class for warnings."""


class LangSmithMissingAPIKeyWarning(LangSmithWarning):
    """Warning for missing API key."""
def tracing_is_enabled(ctx: Optional[dict] = None) -> Union[bool, Literal["local"]]:
    """Return True if tracing is enabled."""
    from langsmith.run_helpers import get_current_run_tree, get_tracing_context

    tc = ctx or get_tracing_context()
    # An explicit override set via context vars wins over everything else;
    # checking it before the run tree lets a branch within a trace be
    # disabled independently.
    override = tc["enabled"]
    if override is not None:
        return override
    # Mid-trace calls inherit tracing from the active run tree.
    if get_current_run_tree():
        return True
    # Finally, defer to the environment configuration.
    env_value = get_env_var("TRACING_V2", default=get_env_var("TRACING", default=""))
    return env_value == "true"
def test_tracking_is_disabled() -> bool:
    """Return True if testing is enabled."""
    value = get_env_var("TEST_TRACKING", default="")
    return value == "false"
def xor_args(*arg_groups: Tuple[str, ...]) -> Callable:
    """Validate specified keyword args are mutually exclusive.

    For each group, exactly one of its arguments must be passed as a
    non-None keyword argument; otherwise a ValueError is raised. Note:
    only keyword arguments are inspected, not positional ones.
    """

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            """Validate exactly one arg in each group is not None."""
            bad_groups = []
            for group in arg_groups:
                provided = sum(kwargs.get(arg) is not None for arg in group)
                if provided != 1:
                    bad_groups.append(", ".join(group))
            if bad_groups:
                raise ValueError(
                    "Exactly one argument in each of the following"
                    " groups must be defined:"
                    f" {', '.join(bad_groups)}"
                )
            return func(*args, **kwargs)

        return wrapper

    return decorator
def raise_for_status_with_text(
    response: Union[requests.Response, httpx.Response],
) -> None:
    """Raise an error with the response text."""
    try:
        response.raise_for_status()
    except requests.HTTPError as e:
        # Re-raise with the response body attached for easier debugging.
        # NOTE(review): only requests.HTTPError is caught here; an httpx
        # response raises a different exception type that would propagate
        # without the body text — confirm whether that is intended.
        raise requests.HTTPError(str(e), response.text) from e  # type: ignore[call-arg]
def get_enum_value(enu: Union[enum.Enum, str]) -> str:
    """Get the value of a string enum; plain strings pass through."""
    return enu.value if isinstance(enu, enum.Enum) else enu
@functools.lru_cache(maxsize=1)
def log_once(level: int, message: str) -> None:
    """Log a message at the specified level, but only once.

    The lru_cache makes repeat calls with the same (level, message) pair
    no-ops; with maxsize=1 only the most recent message is deduplicated.
    """
    _LOGGER.log(level, message)
def _get_message_type(message: Mapping[str, Any]) -> str:
if not message:
raise ValueError("Message is empty.")
if "lc" in message:
if "id" not in message:
raise ValueError(
f"Unexpected format for serialized message: {message}"
" Message does not have an id."
)
return message["id"][-1].replace("Message", "").lower()
else:
if "type" not in message:
raise ValueError(
f"Unexpected format for stored message: {message}"
" Message does not have a type."
)
return message["type"]
def _get_message_fields(message: Mapping[str, Any]) -> Mapping[str, Any]:
if not message:
raise ValueError("Message is empty.")
if "lc" in message:
if "kwargs" not in message:
raise ValueError(
f"Unexpected format for serialized message: {message}"
" Message does not have kwargs."
)
return message["kwargs"]
else:
if "data" not in message:
raise ValueError(
f"Unexpected format for stored message: {message}"
" Message does not have data."
)
return message["data"]
def _convert_message(message: Mapping[str, Any]) -> Dict[str, Any]:
    """Extract message from a message object."""
    return {
        "type": _get_message_type(message),
        "data": _get_message_fields(message),
    }
def get_messages_from_inputs(inputs: Mapping[str, Any]) -> List[Dict[str, Any]]:
    """Extract messages from the given inputs dictionary.

    Args:
        inputs (Mapping[str, Any]): The inputs dictionary.

    Returns:
        List[Dict[str, Any]]: A list of dictionaries representing
            the extracted messages.

    Raises:
        ValueError: If no message(s) are found in the inputs dictionary.
    """
    # Prefer the plural "messages" key; fall back to a single "message".
    if "messages" in inputs:
        return [_convert_message(m) for m in inputs["messages"]]
    if "message" in inputs:
        return [_convert_message(inputs["message"])]
    raise ValueError(f"Could not find message(s) in run with inputs {inputs}.")
def get_message_generation_from_outputs(outputs: Mapping[str, Any]) -> Dict[str, Any]:
    """Retrieve the message generation from the given outputs.

    Args:
        outputs (Mapping[str, Any]): The outputs dictionary.

    Returns:
        Dict[str, Any]: The message generation.

    Raises:
        ValueError: If no generations are found or if multiple generations are present.
    """
    if "generations" not in outputs:
        raise ValueError(f"No generations found in in run with output: {outputs}.")
    generations = outputs["generations"]
    # Chat examples require exactly one generation.
    if len(generations) != 1:
        raise ValueError(
            "Chat examples expect exactly one generation."
            f" Found {len(generations)} generations: {generations}."
        )
    generation = generations[0]
    if "message" not in generation:
        raise ValueError(
            f"Unexpected format for generation: {generation}."
            " Generation does not have a message."
        )
    return _convert_message(generation["message"])
def get_prompt_from_inputs(inputs: Mapping[str, Any]) -> str:
    """Retrieve the prompt from the given inputs.

    Args:
        inputs (Mapping[str, Any]): The inputs dictionary.

    Returns:
        str: The prompt.

    Raises:
        ValueError: If the prompt is not found or if multiple prompts are present.
    """
    if "prompt" in inputs:
        return inputs["prompt"]
    if "prompts" in inputs:
        prompts = inputs["prompts"]
        if len(prompts) != 1:
            raise ValueError(
                f"Multiple prompts in run with inputs {inputs}."
                " Please create example manually."
            )
        return prompts[0]
    raise ValueError(f"Could not find prompt in run with inputs {inputs}.")
def get_llm_generation_from_outputs(outputs: Mapping[str, Any]) -> str:
    """Get the LLM generation from the outputs."""
    if "generations" not in outputs:
        raise ValueError(f"No generations found in in run with output: {outputs}.")
    generations = outputs["generations"]
    # Exactly one generation with a "text" field is expected.
    if len(generations) != 1:
        raise ValueError(f"Multiple generations in run: {generations}")
    generation = generations[0]
    if "text" not in generation:
        raise ValueError(f"No text in generation: {generation}")
    return generation["text"]
@functools.lru_cache(maxsize=1)
def get_docker_compose_command() -> List[str]:
    """Get the correct docker compose command for this system.

    Prefers the v2 plugin form ("docker compose"), then the legacy
    standalone binary ("docker-compose"). The result is cached.
    """
    for candidate in (["docker", "compose"], ["docker-compose"]):
        try:
            subprocess.check_call(
                candidate + ["--version"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
            return candidate
        except (subprocess.CalledProcessError, FileNotFoundError):
            continue
    raise ValueError(
        "Neither 'docker compose' nor 'docker-compose'"
        " commands are available. Please install the Docker"
        " server following the instructions for your operating"
        " system at https://docs.docker.com/engine/install/"
    )
def convert_langchain_message(message: ls_schemas.BaseMessageLike) -> dict:
    """Convert a LangChain message to an example."""
    converted: Dict[str, Any] = {
        "type": message.type,
        "data": {"content": message.content},
    }
    # Preserve any extra kwargs on the message when present.
    extra = message.additional_kwargs
    if extra and len(extra) > 0:
        converted["data"]["additional_kwargs"] = {**extra}
    return converted
def is_base_message_like(obj: object) -> bool:
    """Check if the given object is similar to BaseMessage.

    Args:
        obj (object): The object to check.

    Returns:
        bool: True if the object is similar to BaseMessage, False otherwise.
    """
    has_content = isinstance(getattr(obj, "content", None), str)
    has_kwargs = isinstance(getattr(obj, "additional_kwargs", None), dict)
    has_type = hasattr(obj, "type") and isinstance(getattr(obj, "type"), str)
    return has_content and has_kwargs and has_type
@functools.lru_cache(maxsize=100)
def get_env_var(
    name: str,
    default: Optional[str] = None,
    *,
    namespaces: Tuple = ("LANGSMITH", "LANGCHAIN"),
) -> Optional[str]:
    """Retrieve an environment variable from a list of namespaces.

    Args:
        name (str): The name of the environment variable.
        default (Optional[str], optional): The default value to return if the
            environment variable is not found. Defaults to None.
        namespaces (Tuple, optional): A tuple of namespaces to search for the
            environment variable. Defaults to ("LANGSMITH", "LANGCHAIN").

    Returns:
        Optional[str]: The value of the environment variable if found,
            otherwise the default value.
    """
    # Earlier namespaces take precedence. The loop variable no longer
    # shadows the ``name`` parameter (the original reused ``name`` as the
    # loop variable, clobbering the argument).
    for namespace in namespaces:
        value = os.environ.get(f"{namespace}_{name}")
        if value is not None:
            return value
    return default
@functools.lru_cache(maxsize=1)
def get_tracer_project(return_default_value=True) -> Optional[str]:
    """Get the project name for a LangSmith tracer."""
    fallback = "default" if return_default_value else None
    # Precedence: hosted LangServe project > PROJECT > legacy SESSION.
    # Hosted LangServe deployments must always use their associated
    # project even if the customer sets some other project name in
    # their environment.
    configured = get_env_var(
        "PROJECT",
        # SESSION is the legacy name for LANGCHAIN_PROJECT, so it has
        # lower precedence than PROJECT.
        default=get_env_var("SESSION", default=fallback),
    )
    return os.environ.get("HOSTED_LANGSERVE_PROJECT_NAME", configured)
class FilterPoolFullWarning(logging.Filter):
    """Filter urrllib3 warnings logged when the connection pool isn't reused."""

    def __init__(self, name: str = "", host: str = "") -> None:
        """Initialize the FilterPoolFullWarning filter.

        Args:
            name (str, optional): The name of the filter. Defaults to "".
            host (str, optional): The host to filter. Defaults to "".
        """
        super().__init__(name)
        self._host = host

    def filter(self, record) -> bool:
        """urllib3.connectionpool:Connection pool is full, discarding connection: ..."""
        message = record.getMessage()
        is_pool_full = "Connection pool is full, discarding connection" in message
        # Pass everything through except pool-full warnings about our host.
        return (not is_pool_full) or (self._host not in message)
class FilterLangSmithRetry(logging.Filter):
    """Filter for retries from this lib."""

    def filter(self, record) -> bool:
        """Filter retries from this library."""
        # Retries are re-raised/logged manually elsewhere; suppress the
        # library's own retry log lines.
        return "LangSmithRetry" not in record.getMessage()
class LangSmithRetry(Retry):
    """Wrapper to filter logs with this name.

    Subclassing urllib3's Retry gives retry-related log lines a
    distinctive class name; presumably FilterLangSmithRetry matches on
    the "LangSmithRetry" substring — confirm against urllib3's logging.
    """
_FILTER_LOCK = threading.RLock()


@contextlib.contextmanager
def filter_logs(
    logger: logging.Logger, filters: Sequence[logging.Filter]
) -> Generator[None, None, None]:
    """Temporarily adds specified filters to a logger.

    Parameters:
    - logger: The logger to which the filters will be added.
    - filters: A sequence of logging.Filter objects to be temporarily added
        to the logger.
    """
    with _FILTER_LOCK:
        for log_filter in filters:
            logger.addFilter(log_filter)
    # Not actually perfectly thread-safe, but it's only log filters
    try:
        yield
    finally:
        with _FILTER_LOCK:
            for log_filter in filters:
                try:
                    logger.removeFilter(log_filter)
                except BaseException:
                    _LOGGER.warning("Failed to remove filter")
def get_cache_dir(cache: Optional[str]) -> Optional[str]:
    """Get the testing cache directory.

    Args:
        cache (Optional[str]): The cache path.

    Returns:
        Optional[str]: The cache path if provided, otherwise the value
            from the LANGSMITH_TEST_CACHE environment variable.
    """
    return cache if cache is not None else get_env_var("TEST_CACHE", default=None)
@contextlib.contextmanager
def with_cache(
    path: Union[str, pathlib.Path], ignore_hosts: Optional[Sequence[str]] = None
) -> Generator[None, None, None]:
    """Use a cache for requests.

    Args:
        path: Cassette file path; a .yaml/.yml extension selects the YAML
            serializer, anything else uses JSON.
        ignore_hosts: Request URL prefixes that bypass recording entirely.

    Raises:
        ImportError: If vcrpy is not installed.
    """
    try:
        import vcr  # type: ignore[import-untyped]
    except ImportError:
        # Bug fix: the two string literals previously concatenated with no
        # separator, rendering as "...Install with:pip install...".
        raise ImportError(
            "vcrpy is required to use caching. Install with:"
            ' pip install -U "langsmith[vcr]"'
        )
    # Fix concurrency issue in vcrpy's patching
    from langsmith._internal import _patch as patch_urllib3

    patch_urllib3.patch_urllib3()

    def _filter_request_headers(request: Any) -> Any:
        # Returning None tells vcr to skip recording this request entirely;
        # otherwise scrub all headers before persisting.
        if ignore_hosts and any(request.url.startswith(host) for host in ignore_hosts):
            return None
        request.headers = {}
        return request

    cache_dir, cache_file = os.path.split(path)
    ls_vcr = vcr.VCR(
        serializer=(
            "yaml"
            if cache_file.endswith(".yaml") or cache_file.endswith(".yml")
            else "json"
        ),
        cassette_library_dir=cache_dir,
        # Replay previous requests, record new ones
        # TODO: Support other modes
        record_mode="new_episodes",
        match_on=["uri", "method", "path", "body"],
        filter_headers=["authorization", "Set-Cookie"],
        before_record_request=_filter_request_headers,
    )
    with ls_vcr.use_cassette(cache_file):
        yield
@contextlib.contextmanager
def with_optional_cache(
    path: Optional[Union[str, pathlib.Path]],
    ignore_hosts: Optional[Sequence[str]] = None,
) -> Generator[None, None, None]:
    """Use a cache for requests."""
    if path is None:
        # No cache configured: behave as a transparent no-op context.
        yield
        return
    with with_cache(path, ignore_hosts):
        yield
def _format_exc() -> str:
# Used internally to format exceptions without cluttering the traceback
tb_lines = traceback.format_exception(*sys.exc_info())
filtered_lines = [line for line in tb_lines if "langsmith/" not in line]
return "".join(filtered_lines)
# Generic type parameter used by the copy helpers below.
T = TypeVar("T")
def _middle_copy(
val: T, memo: Dict[int, Any], max_depth: int = 4, _depth: int = 0
) -> T:
cls = type(val)
copier = getattr(cls, "__deepcopy__", None)
if copier is not None:
try:
return copier(memo)
except BaseException:
pass
if _depth >= max_depth:
return val
if isinstance(val, dict):
return { # type: ignore[return-value]
_middle_copy(k, memo, max_depth, _depth + 1): _middle_copy(
v, memo, max_depth, _depth + 1
)
for k, v in val.items()
}
if isinstance(val, list):
return [_middle_copy(item, memo, max_depth, _depth + 1) for item in val] # type: ignore[return-value]
if isinstance(val, tuple):
return tuple(_middle_copy(item, memo, max_depth, _depth + 1) for item in val) # type: ignore[return-value]
if isinstance(val, set):
return {_middle_copy(item, memo, max_depth, _depth + 1) for item in val} # type: ignore[return-value]
return val
def deepish_copy(val: T) -> T:
    """Deep copy a value with a compromise for uncopyable objects.

    Args:
        val: The value to be deep copied.

    Returns:
        The deep copied value.
    """
    memo: Dict[int, Any] = {}
    try:
        return copy.deepcopy(val, memo)
    except BaseException as e:
        # Generators, locks, etc. cannot be copied and raise a TypeError
        # (the pickling dunder methods are reused for copying); fall back
        # to a bounded, best-effort copy of what we can.
        _LOGGER.debug("Failed to deepcopy input: %s", repr(e))
        return _middle_copy(val, memo)
def is_version_greater_or_equal(current_version: str, target_version: str) -> bool:
    """Check if the current version is greater or equal to the target version."""
    from packaging import version

    return version.parse(current_version) >= version.parse(target_version)
def parse_prompt_identifier(identifier: str) -> Tuple[str, str, str]:
    """Parse a string in the format of owner/name:hash, name:hash, owner/name, or name.

    Args:
        identifier (str): The prompt identifier to parse.

    Returns:
        Tuple[str, str, str]: A tuple containing (owner, name, hash).

    Raises:
        ValueError: If the identifier doesn't match the expected formats.
    """
    malformed = (
        not identifier
        or identifier.count("/") > 1
        or identifier.startswith("/")
        or identifier.endswith("/")
    )
    if malformed:
        raise ValueError(f"Invalid identifier format: {identifier}")
    owner_name, sep, tail = identifier.partition(":")
    # No ":" at all -> implicit "latest"; "name:" keeps the empty commit.
    commit = tail if sep else "latest"
    if "/" not in owner_name:
        if not owner_name:
            raise ValueError(f"Invalid identifier format: {identifier}")
        return "-", owner_name, commit
    owner, name = owner_name.split("/", 1)
    if not owner or not name:
        raise ValueError(f"Invalid identifier format: {identifier}")
    return owner, name, commit
# Parameter-spec used to preserve submitted-callable signatures below.
P = ParamSpec("P")
class ContextThreadPoolExecutor(ThreadPoolExecutor):
    """ThreadPoolExecutor that copies the context to the child thread."""

    def submit(  # type: ignore[override]
        self,
        func: Callable[P, T],
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> Future[T]:
        """Submit a function to the executor.

        Args:
            func (Callable[..., T]): The function to submit.
            *args (Any): The positional arguments to the function.
            **kwargs (Any): The keyword arguments to the function.

        Returns:
            Future[T]: The future for the function.
        """
        # Capture the caller's contextvars at submit time so the worker
        # thread observes the same (tracing) context state.
        return super().submit(
            cast(
                Callable[..., T],
                functools.partial(
                    contextvars.copy_context().run, func, *args, **kwargs
                ),
            )
        )

    def map(
        self,
        fn: Callable[..., T],
        *iterables: Iterable[Any],
        timeout: Optional[float] = None,
        chunksize: int = 1,
    ) -> Iterator[T]:
        """Return an iterator equivalent to stdlib map.

        Each function will receive its own copy of the context from the parent thread.

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: The size of the chunks the iterable will be broken into
                before being passed to a child process. This argument is only
                used by ProcessPoolExecutor; it is ignored by
                ThreadPoolExecutor.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        # One fresh context copy per input item. All copies snapshot the
        # same parent context, so the pop() order being unrelated to item
        # order is harmless — each call just needs *a* copy to run in.
        # NOTE(review): sized from the first iterable only; assumes it
        # supports len() (a sequence, not a lazy iterator) — confirm.
        contexts = [contextvars.copy_context() for _ in range(len(iterables[0]))]  # type: ignore[arg-type]

        def _wrapped_fn(*args: Any) -> T:
            return contexts.pop().run(fn, *args)

        return super().map(
            _wrapped_fn,
            *iterables,
            timeout=timeout,
            chunksize=chunksize,
        )
def get_api_url(api_url: Optional[str]) -> str:
    """Get the LangSmith API URL from the environment or the given value."""
    resolved = api_url or cast(
        str,
        get_env_var(
            "ENDPOINT",
            default="https://api.smith.langchain.com",
        ),
    )
    if not resolved.strip():
        raise LangSmithUserError("LangSmith API URL cannot be empty")
    # Normalize: drop whitespace, surrounding quotes, and a trailing slash.
    return resolved.strip().strip('"').strip("'").rstrip("/")
def get_api_key(api_key: Optional[str]) -> Optional[str]:
    """Get the API key from the environment or the given value."""
    candidate = (
        api_key if api_key is not None else get_env_var("API_KEY", default=None)
    )
    # Blank or missing keys normalize to None.
    if candidate is None or not candidate.strip():
        return None
    return candidate.strip().strip('"').strip("'")
def _is_localhost(url: str) -> bool:
"""Check if the URL is localhost.
Parameters
----------
url : str
The URL to check.
Returns:
-------
bool
True if the URL is localhost, False otherwise.
"""
try:
netloc = urllib_parse.urlsplit(url).netloc.split(":")[0]
ip = socket.gethostbyname(netloc)
return ip == "127.0.0.1" or ip.startswith("0.0.0.0") or ip.startswith("::")
except socket.gaierror:
return False
@functools.lru_cache(maxsize=2)
def get_host_url(web_url: Optional[str], api_url: str):
    """Get the host URL based on the web URL or API URL."""
    if web_url:
        return web_url
    parsed = urllib_parse.urlparse(api_url)
    if _is_localhost(api_url):
        return "http://localhost"
    path = str(parsed.path)
    if path.endswith("/api"):
        # Self-hosted: the web UI lives one level above the /api path.
        trimmed = path.rsplit("/api", 1)[0]
        return urllib_parse.urlunparse(parsed._replace(path=trimmed))
    host = str(parsed.netloc)
    if host.startswith("eu."):
        return "https://eu.smith.langchain.com"
    if host.startswith("dev."):
        return "https://dev.smith.langchain.com"
    return "https://smith.langchain.com"
def _get_function_name(fn: Callable, depth: int = 0) -> str:
if depth > 2 or not callable(fn):
return str(fn)
if hasattr(fn, "__name__"):
return fn.__name__
if isinstance(fn, functools.partial):
return _get_function_name(fn.func, depth + 1)
if hasattr(fn, "__call__"):
if hasattr(fn, "__class__") and hasattr(fn.__class__, "__name__"):
return fn.__class__.__name__
return _get_function_name(fn.__call__, depth + 1)
return str(fn)
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/run_trees.py | """Schemas for the LangSmith API."""
from __future__ import annotations
import json
import logging
import sys
from datetime import datetime, timezone
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union, cast
from uuid import UUID, uuid4
try:
from pydantic.v1 import Field, root_validator # type: ignore[import]
except ImportError:
from pydantic import ( # type: ignore[assignment, no-redef]
Field,
root_validator,
)
import threading
import urllib.parse
from langsmith import schemas as ls_schemas
from langsmith import utils
from langsmith.client import ID_TYPE, RUN_TYPE_T, Client, _dumps_json, _ensure_uuid
logger = logging.getLogger(__name__)
LANGSMITH_PREFIX = "langsmith-"
LANGSMITH_DOTTED_ORDER = sys.intern(f"{LANGSMITH_PREFIX}trace")
LANGSMITH_DOTTED_ORDER_BYTES = LANGSMITH_DOTTED_ORDER.encode("utf-8")
LANGSMITH_METADATA = sys.intern(f"{LANGSMITH_PREFIX}metadata")
LANGSMITH_TAGS = sys.intern(f"{LANGSMITH_PREFIX}tags")
LANGSMITH_PROJECT = sys.intern(f"{LANGSMITH_PREFIX}project")
_CLIENT: Optional[Client] = None
_LOCK = threading.Lock() # Keeping around for a while for backwards compat
# Note, this is called directly by langchain. Do not remove.
def get_cached_client(**init_kwargs: Any) -> Client:
    """Return the process-wide cached Client, creating it on first use.

    ``init_kwargs`` are only applied when the client is first constructed;
    later calls return the existing instance unchanged.
    """
    global _CLIENT
    if _CLIENT is None:
        # Double-checked locking: the second check under _LOCK prevents two
        # threads from constructing duplicate clients. (The original
        # repeated the check without acquiring the lock, which guarded
        # nothing; _LOCK already exists at module level.)
        with _LOCK:
            if _CLIENT is None:
                _CLIENT = Client(**init_kwargs)
    return _CLIENT
class RunTree(ls_schemas.RunBase):
    """Run Schema with back-references for posting runs."""

    name: str
    # Generated when the caller does not supply an id.
    id: UUID = Field(default_factory=uuid4)
    run_type: str = Field(default="chain")
    start_time: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    # Back-reference to the parent run; excluded from serialization.
    parent_run: Optional[RunTree] = Field(default=None, exclude=True)
    child_runs: List[RunTree] = Field(
        default_factory=list,
        exclude={"__all__": {"parent_run_id"}},
    )
    # Exposed to callers under the "project_name" alias.
    session_name: str = Field(
        default_factory=lambda: utils.get_tracer_project() or "default",
        alias="project_name",
    )
    session_id: Optional[UUID] = Field(default=None, alias="project_id")
    extra: Dict = Field(default_factory=dict)
    tags: Optional[List[str]] = Field(default_factory=list)
    events: List[Dict] = Field(default_factory=list)
    """List of events associated with the run, like
    start and end events."""
    # Lazily-initialized Client handle; excluded from serialization.
    ls_client: Optional[Any] = Field(default=None, exclude=True)
    dotted_order: str = Field(
        default="", description="The order of the run in the tree."
    )
    # Default "" is replaced by the validators below.
    trace_id: UUID = Field(default="", description="The trace id of the run.")  # type: ignore

    class Config:
        """Pydantic model configuration."""

        arbitrary_types_allowed = True
        allow_population_by_field_name = True
        extra = "ignore"
@root_validator(pre=True)
def infer_defaults(cls, values: dict) -> dict:
"""Assign name to the run."""
if values.get("name") is None and values.get("serialized") is not None:
if "name" in values["serialized"]:
values["name"] = values["serialized"]["name"]
elif "id" in values["serialized"]:
values["name"] = values["serialized"]["id"][-1]
if values.get("name") is None:
values["name"] = "Unnamed"
if "client" in values: # Handle user-constructed clients
values["ls_client"] = values.pop("client")
elif "_client" in values:
values["ls_client"] = values.pop("_client")
if not values.get("ls_client"):
values["ls_client"] = None
if values.get("parent_run") is not None:
values["parent_run_id"] = values["parent_run"].id
if "id" not in values:
values["id"] = uuid4()
if "trace_id" not in values:
if "parent_run" in values:
values["trace_id"] = values["parent_run"].trace_id
else:
values["trace_id"] = values["id"]
cast(dict, values.setdefault("extra", {}))
if values.get("events") is None:
values["events"] = []
if values.get("tags") is None:
values["tags"] = []
if values.get("outputs") is None:
values["outputs"] = {}
if values.get("attachments") is None:
values["attachments"] = {}
return values
    @root_validator(pre=False)
    def ensure_dotted_order(cls, values: dict) -> dict:
        """Ensure the dotted order of the run."""
        current_dotted_order = values.get("dotted_order")
        # Respect an explicitly provided (non-blank) dotted order.
        if current_dotted_order and current_dotted_order.strip():
            return values
        current_dotted_order = _create_current_dotted_order(
            values["start_time"], values["id"]
        )
        if values["parent_run"]:
            # Child runs append their own segment to the parent's order.
            values["dotted_order"] = (
                values["parent_run"].dotted_order + "." + current_dotted_order
            )
        else:
            values["dotted_order"] = current_dotted_order
        return values
@property
def client(self) -> Client:
    """Return the client, constructing a cached one on first use."""
    # Deferred: a run that never makes API calls never builds a client.
    ls_client = self.ls_client
    if ls_client is None:
        ls_client = get_cached_client()
        self.ls_client = ls_client
    return ls_client
@property
def _client(self) -> Optional[Client]:
    # For backwards compat: older callers accessed `_client` directly.
    return self.ls_client
def __setattr__(self, name, value):
    """Set the _client specially."""
    # Backwards compat: writes to `_client` are routed to the ls_client
    # field; all other attributes use the normal pydantic machinery.
    if name != "_client":
        return super().__setattr__(name, value)
    self.ls_client = value
def add_tags(self, tags: Union[Sequence[str], str]) -> None:
    """Add one tag or a sequence of tags to the run."""
    new_tags = [tags] if isinstance(tags, str) else tags
    if self.tags is None:
        self.tags = []
    self.tags.extend(new_tags)
def add_metadata(self, metadata: Dict[str, Any]) -> None:
    """Merge the given entries into the run's extra["metadata"] dict."""
    if self.extra is None:
        self.extra = {}
    existing = cast(dict, self.extra).setdefault("metadata", {})
    existing.update(metadata)
def add_outputs(self, outputs: Dict[str, Any]) -> None:
    """Upsert the given outputs into the run.

    Args:
        outputs (Dict[str, Any]): A dictionary containing the outputs to be added.

    Returns:
        None
    """
    current = self.outputs
    if current is None:
        current = {}
        self.outputs = current
    current.update(outputs)
def add_event(
    self,
    events: Union[
        ls_schemas.RunEvent,
        Sequence[ls_schemas.RunEvent],
        Sequence[dict],
        dict,
        str,
    ],
) -> None:
    """Add an event to the list of events.

    Args:
        events: A single event dict, a sequence of event dicts, or a bare
            string message that is wrapped into a generic "event" record.

    Returns:
        None
    """
    if self.events is None:
        self.events = []
    if isinstance(events, str):
        # Promote a bare message into a structured, timestamped record.
        self.events.append(
            {
                "name": "event",
                "time": datetime.now(timezone.utc).isoformat(),
                "message": events,
            }
        )
    elif isinstance(events, dict):
        self.events.append(events)  # type: ignore[arg-type]
    else:
        self.events.extend(events)  # type: ignore[arg-type]
def end(
    self,
    *,
    outputs: Optional[Dict] = None,
    error: Optional[str] = None,
    end_time: Optional[datetime] = None,
    events: Optional[Sequence[ls_schemas.RunEvent]] = None,
    metadata: Optional[Dict[str, Any]] = None,
) -> None:
    """Set the end time of the run and all child runs."""
    self.end_time = end_time if end_time else datetime.now(timezone.utc)
    if outputs is not None:
        # Merge into existing outputs rather than clobbering them.
        if self.outputs:
            self.outputs.update(outputs)
        else:
            self.outputs = outputs
    if error is not None:
        self.error = error
    if events is not None:
        self.add_event(events)
    if metadata is not None:
        self.add_metadata(metadata)
def create_child(
    self,
    name: str,
    run_type: RUN_TYPE_T = "chain",
    *,
    run_id: Optional[ID_TYPE] = None,
    serialized: Optional[Dict] = None,
    inputs: Optional[Dict] = None,
    outputs: Optional[Dict] = None,
    error: Optional[str] = None,
    reference_example_id: Optional[UUID] = None,
    start_time: Optional[datetime] = None,
    end_time: Optional[datetime] = None,
    tags: Optional[List[str]] = None,
    extra: Optional[Dict] = None,
    attachments: Optional[ls_schemas.Attachments] = None,
) -> RunTree:
    """Create a child run, append it to child_runs, and return it."""
    # The child inherits project, client, and parent linkage from self.
    child = RunTree(
        name=name,
        id=_ensure_uuid(run_id),
        serialized=serialized or {"name": name},
        inputs=inputs or {},
        outputs=outputs or {},
        error=error,
        run_type=run_type,
        reference_example_id=reference_example_id,
        start_time=start_time or datetime.now(timezone.utc),
        end_time=end_time,
        extra=extra or {},
        parent_run=self,
        project_name=self.session_name,
        ls_client=self.ls_client,
        tags=tags,
        attachments=attachments or {},
    )
    self.child_runs.append(child)
    return child
def _get_dicts_safe(self):
    """Serialize the run, excluding fields that may hold uncopyable values."""
    # Things like generators cannot be copied, so inputs/outputs are
    # excluded from pydantic serialization and re-attached below.
    self_dict = self.dict(
        exclude={"child_runs", "inputs", "outputs"}, exclude_none=True
    )
    for field in ("inputs", "outputs"):
        value = getattr(self, field)
        if value is not None:
            # Shallow copy only; deep copying happens in the client.
            self_dict[field] = value.copy()
    return self_dict
def post(self, exclude_child_runs: bool = True) -> None:
    """Post the run tree to the API asynchronously."""
    kwargs = self._get_dicts_safe()
    self.client.create_run(**kwargs)
    attachments = kwargs.get("attachments")
    if attachments:
        # Record which attachment names were sent so patch() can avoid
        # re-uploading the same ones later.
        self.events.append(
            {
                "name": "uploaded_attachment",
                "time": datetime.now(timezone.utc).isoformat(),
                "message": {str(name) for name in attachments},
            }
        )
    if not exclude_child_runs:
        for child_run in self.child_runs:
            child_run.post(exclude_child_runs=False)
def patch(self) -> None:
    """Patch the run tree to the API in a background thread."""
    # Ensure the run is marked as finished before sending the update.
    if not self.end_time:
        self.end()
    attachments = self.attachments
    try:
        # Avoid loading the same attachment twice
        if attachments:
            # post() records uploaded attachment names in an
            # "uploaded_attachment" event; drop those from this update.
            uploaded = next(
                (
                    ev
                    for ev in self.events
                    if ev.get("name") == "uploaded_attachment"
                ),
                None,
            )
            if uploaded:
                attachments = {
                    a: v
                    for a, v in attachments.items()
                    if a not in uploaded["message"]
                }
    except Exception as e:
        # Best-effort filtering only; on failure, send everything.
        logger.warning(f"Error filtering attachments to upload: {e}")
    self.client.update_run(
        name=self.name,
        run_id=self.id,
        outputs=self.outputs.copy() if self.outputs else None,
        error=self.error,
        parent_run_id=self.parent_run_id,
        reference_example_id=self.reference_example_id,
        end_time=self.end_time,
        dotted_order=self.dotted_order,
        trace_id=self.trace_id,
        events=self.events,
        tags=self.tags,
        extra=self.extra,
        attachments=attachments,
    )
def wait(self) -> None:
    """Wait for all _futures to complete."""
    # No-op here: this RunTree variant tracks no background futures;
    # presumably kept for interface compatibility — TODO confirm.
    pass
def get_url(self) -> str:
    """Return the URL of the run."""
    # Delegates to the client, which knows the host/tenant for the link.
    return self.client.get_run_url(run=self)
@classmethod
def from_dotted_order(
    cls,
    dotted_order: str,
    **kwargs: Any,
) -> RunTree:
    """Create a new 'child' span from the provided dotted order.

    Returns:
        RunTree: The new span.
    """
    # Delegate to from_headers with a synthetic trace header.
    synthetic_headers = {LANGSMITH_DOTTED_ORDER: dotted_order}
    return cast(RunTree, cls.from_headers(synthetic_headers, **kwargs))  # type: ignore[arg-type]
@classmethod
def from_runnable_config(
    cls,
    config: Optional[dict],
    **kwargs: Any,
) -> Optional[RunTree]:
    """Create a new 'child' span from the provided runnable config.

    Requires langchain to be installed.

    Returns:
        Optional[RunTree]: The new span or None if
            no parent span information is found.
    """
    try:
        from langchain_core.callbacks.manager import (
            AsyncCallbackManager,
            CallbackManager,
        )
        from langchain_core.runnables import RunnableConfig, ensure_config
        from langchain_core.tracers.langchain import LangChainTracer
    except ImportError as e:
        raise ImportError(
            "RunTree.from_runnable_config requires langchain-core to be installed. "
            "You can install it with `pip install langchain-core`."
        ) from e
    if config is None:
        config_ = ensure_config(
            cast(RunnableConfig, config) if isinstance(config, dict) else None
        )
    else:
        config_ = cast(RunnableConfig, config)

    # Only proceed when the config carries a callback manager with a parent
    # run and a LangChainTracer handler to read it from.
    if (
        (cb := config_.get("callbacks"))
        and isinstance(cb, (CallbackManager, AsyncCallbackManager))
        and cb.parent_run_id
        and (
            tracer := next(
                (t for t in cb.handlers if isinstance(t, LangChainTracer)),
                None,
            )
        )
    ):
        if (run := tracer.run_map.get(str(cb.parent_run_id))) and run.dotted_order:
            # Mirror the parent run's attributes onto the new span.
            dotted_order = run.dotted_order
            kwargs["run_type"] = run.run_type
            kwargs["inputs"] = run.inputs
            kwargs["outputs"] = run.outputs
            kwargs["start_time"] = run.start_time
            kwargs["end_time"] = run.end_time
            # BUGFIX: parenthesize (run.tags or []). Previously `+` bound
            # tighter than `or`, so caller-supplied tags were silently
            # discarded whenever the run already had tags.
            kwargs["tags"] = sorted(
                set((run.tags or []) + kwargs.get("tags", []))
            )
            kwargs["name"] = run.name
            extra_ = kwargs.setdefault("extra", {})
            metadata_ = extra_.setdefault("metadata", {})
            metadata_.update(run.metadata)
        elif hasattr(tracer, "order_map") and cb.parent_run_id in tracer.order_map:
            dotted_order = tracer.order_map[cb.parent_run_id][1]
        else:
            return None
        kwargs["client"] = tracer.client
        kwargs["project_name"] = tracer.project_name
        return RunTree.from_dotted_order(dotted_order, **kwargs)
    return None
@classmethod
def from_headers(
    cls, headers: Mapping[Union[str, bytes], Union[str, bytes]], **kwargs: Any
) -> Optional[RunTree]:
    """Create a new 'parent' span from the provided headers.

    Extracts parent span information from the headers and creates a new span.
    Metadata and tags are extracted from the baggage header.
    The dotted order and trace id are extracted from the trace header.

    Returns:
        Optional[RunTree]: The new span or None if
            no parent span information is found.
    """
    init_args = kwargs.copy()
    langsmith_trace = cast(Optional[str], headers.get(LANGSMITH_DOTTED_ORDER))
    if not langsmith_trace:
        # Fall back to the bytes-keyed header variant.
        langsmith_trace_bytes = cast(
            Optional[bytes], headers.get(LANGSMITH_DOTTED_ORDER_BYTES)
        )
        if not langsmith_trace_bytes:
            # No trace header at all: nothing to build a span from.
            return  # type: ignore[return-value]
        langsmith_trace = langsmith_trace_bytes.decode("utf-8")
    parent_dotted_order = langsmith_trace.strip()
    parsed_dotted_order = _parse_dotted_order(parent_dotted_order)
    # First segment is the trace root; last segment is this span's id.
    trace_id = parsed_dotted_order[0][1]
    init_args["trace_id"] = trace_id
    init_args["id"] = parsed_dotted_order[-1][1]
    init_args["dotted_order"] = parent_dotted_order
    if len(parsed_dotted_order) >= 2:
        # Has a parent
        init_args["parent_run_id"] = parsed_dotted_order[-2][1]
    # All placeholders. We assume the source process
    # handles the life-cycle of the run.
    init_args["start_time"] = init_args.get("start_time") or datetime.now(
        timezone.utc
    )
    init_args["run_type"] = init_args.get("run_type") or "chain"
    init_args["name"] = init_args.get("name") or "parent"
    baggage = _Baggage.from_headers(headers)
    if baggage.metadata or baggage.tags:
        # Merge baggage metadata under caller-supplied metadata
        # (explicit kwargs win on key conflicts).
        init_args["extra"] = init_args.setdefault("extra", {})
        init_args["extra"]["metadata"] = init_args["extra"].setdefault(
            "metadata", {}
        )
        metadata = {**baggage.metadata, **init_args["extra"]["metadata"]}
        init_args["extra"]["metadata"] = metadata
        tags = sorted(set(baggage.tags + init_args.get("tags", [])))
        init_args["tags"] = tags
    if baggage.project_name:
        init_args["project_name"] = baggage.project_name
    return RunTree(**init_args)
def to_headers(self) -> Dict[str, str]:
    """Return the RunTree as a dictionary of headers."""
    headers: Dict[str, str] = {}
    if self.trace_id:
        headers[f"{LANGSMITH_DOTTED_ORDER}"] = self.dotted_order
    # Metadata, tags, and project travel in the baggage header.
    headers["baggage"] = _Baggage(
        metadata=self.extra.get("metadata", {}),
        tags=self.tags,
        project_name=self.session_name,
    ).to_header()
    return headers
def __repr__(self):
    """Return a string representation of the RunTree object."""
    fields = (
        f"id={self.id}",
        f"name='{self.name}'",
        f"run_type='{self.run_type}'",
        f"dotted_order='{self.dotted_order}'",
    )
    return "RunTree(" + ", ".join(fields) + ")"
class _Baggage:
    """Parsed contents of a ``baggage`` header (metadata, tags, project)."""

    def __init__(
        self,
        metadata: Optional[Dict[str, str]] = None,
        tags: Optional[List[str]] = None,
        project_name: Optional[str] = None,
    ):
        """Initialize the Baggage object."""
        self.metadata = metadata or {}
        self.tags = tags or []
        self.project_name = project_name

    @classmethod
    def from_header(cls, header_value: Optional[str]) -> _Baggage:
        """Create a Baggage object from the given header value."""
        if not header_value:
            return cls()
        metadata: Dict[str, str] = {}
        tags: List[str] = []
        project_name = None
        try:
            for entry in header_value.split(","):
                key, raw_value = entry.split("=", 1)
                if key == LANGSMITH_METADATA:
                    metadata = json.loads(urllib.parse.unquote(raw_value))
                elif key == LANGSMITH_TAGS:
                    tags = urllib.parse.unquote(raw_value).split(",")
                elif key == LANGSMITH_PROJECT:
                    project_name = urllib.parse.unquote(raw_value)
        except Exception as e:
            # Malformed baggage is non-fatal; keep whatever parsed so far.
            logger.warning(f"Error parsing baggage header: {e}")
        return cls(metadata=metadata, tags=tags, project_name=project_name)

    @classmethod
    def from_headers(cls, headers: Mapping[Union[str, bytes], Any]) -> _Baggage:
        """Parse baggage from a headers mapping with str or bytes keys."""
        if "baggage" in headers:
            raw = headers["baggage"]
        elif b"baggage" in headers:
            raw = cast(bytes, headers[b"baggage"]).decode("utf-8")
        else:
            raw = None
        return cls.from_header(raw)

    def to_header(self) -> str:
        """Return the Baggage object as a header value."""
        segments = []
        if self.metadata:
            serialized_metadata = _dumps_json(self.metadata)
            segments.append(
                f"{LANGSMITH_PREFIX}metadata={urllib.parse.quote(serialized_metadata)}"
            )
        if self.tags:
            serialized_tags = ",".join(self.tags)
            segments.append(
                f"{LANGSMITH_PREFIX}tags={urllib.parse.quote(serialized_tags)}"
            )
        if self.project_name:
            segments.append(
                f"{LANGSMITH_PREFIX}project={urllib.parse.quote(self.project_name)}"
            )
        return ",".join(segments)
def _parse_dotted_order(dotted_order: str) -> List[Tuple[datetime, UUID]]:
"""Parse the dotted order string."""
parts = dotted_order.split(".")
return [
(datetime.strptime(part[:-36], "%Y%m%dT%H%M%S%fZ"), UUID(part[-36:]))
for part in parts
]
def _create_current_dotted_order(
start_time: Optional[datetime], run_id: Optional[UUID]
) -> str:
"""Create the current dotted order."""
st = start_time or datetime.now(timezone.utc)
id_ = run_id or uuid4()
return st.strftime("%Y%m%dT%H%M%S%fZ") + str(id_)
# BUGFIX: "RunTree" was listed twice; duplicates in __all__ are redundant.
__all__ = ["RunTree"]
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/__init__.py | """LangSmith Client."""
from importlib import metadata
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langsmith._expect import expect
from langsmith._testing import test, unit
from langsmith.async_client import AsyncClient
from langsmith.client import Client
from langsmith.evaluation import aevaluate, evaluate
from langsmith.evaluation.evaluator import EvaluationResult, RunEvaluator
from langsmith.run_helpers import (
get_current_run_tree,
get_tracing_context,
trace,
traceable,
tracing_context,
)
from langsmith.run_trees import RunTree
from langsmith.utils import (
ContextThreadPoolExecutor,
)
# Avoid calling into importlib on every call to __version__
version = ""
try:
    version = metadata.version(__package__)
except metadata.PackageNotFoundError:
    # Not installed as a distribution (e.g. running from a source checkout).
    pass
def __getattr__(name: str) -> Any:
if name == "__version__":
return version
elif name == "Client":
from langsmith.client import Client
return Client
elif name == "AsyncClient":
from langsmith.async_client import AsyncClient
return AsyncClient
elif name == "RunTree":
from langsmith.run_trees import RunTree
return RunTree
elif name == "EvaluationResult":
from langsmith.evaluation.evaluator import EvaluationResult
return EvaluationResult
elif name == "RunEvaluator":
from langsmith.evaluation.evaluator import RunEvaluator
return RunEvaluator
elif name == "trace":
from langsmith.run_helpers import trace
return trace
elif name == "traceable":
from langsmith.run_helpers import traceable
return traceable
elif name == "test":
from langsmith._testing import test
return test
elif name == "expect":
from langsmith._expect import expect
return expect
elif name == "evaluate":
from langsmith.evaluation import evaluate
return evaluate
elif name == "evaluate_existing":
from langsmith.evaluation import evaluate_existing
return evaluate_existing
elif name == "aevaluate":
from langsmith.evaluation import aevaluate
return aevaluate
elif name == "aevaluate_existing":
from langsmith.evaluation import aevaluate_existing
return aevaluate_existing
elif name == "tracing_context":
from langsmith.run_helpers import tracing_context
return tracing_context
elif name == "get_tracing_context":
from langsmith.run_helpers import get_tracing_context
return get_tracing_context
elif name == "get_current_run_tree":
from langsmith.run_helpers import get_current_run_tree
return get_current_run_tree
elif name == "unit":
from langsmith._testing import unit
return unit
elif name == "ContextThreadPoolExecutor":
from langsmith.utils import (
ContextThreadPoolExecutor,
)
return ContextThreadPoolExecutor
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
# Names considered public; most are resolved lazily via module __getattr__.
__all__ = [
    "Client",
    "RunTree",
    "__version__",
    "EvaluationResult",
    "RunEvaluator",
    "anonymizer",
    "traceable",
    "trace",
    "unit",
    "test",
    "expect",
    "evaluate",
    "aevaluate",
    "tracing_context",
    "get_tracing_context",
    "get_current_run_tree",
    "ContextThreadPoolExecutor",
    "AsyncClient",
]
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/langsmith/_testing.py | from __future__ import annotations
import atexit
import datetime
import functools
import inspect
import logging
import threading
import uuid
import warnings
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable, Optional, Sequence, Tuple, TypeVar, overload
from typing_extensions import TypedDict
from langsmith import client as ls_client
from langsmith import env as ls_env
from langsmith import run_helpers as rh
from langsmith import run_trees as rt
from langsmith import schemas as ls_schemas
from langsmith import utils as ls_utils
from langsmith._internal import _orjson
# pytest is optional: when available, use its Skip exception so skipped
# tests are recognized; otherwise fall back to a local stand-in class.
try:
    import pytest  # type: ignore

    SkipException = pytest.skip.Exception
except ImportError:

    class SkipException(Exception):  # type: ignore[no-redef]
        pass
logger = logging.getLogger(__name__)

# Generic type variables used for typing in this module.
T = TypeVar("T")
U = TypeVar("U")
# Overload 1: bare usage — @test applied directly to the function.
@overload
def test(
    func: Callable,
) -> Callable: ...


# Overload 2: parameterized usage — @test(id=..., output_keys=[...], ...).
@overload
def test(
    *,
    id: Optional[uuid.UUID] = None,
    output_keys: Optional[Sequence[str]] = None,
    client: Optional[ls_client.Client] = None,
    test_suite_name: Optional[str] = None,
) -> Callable[[Callable], Callable]: ...
def test(*args: Any, **kwargs: Any) -> Callable:
    """Create a test case in LangSmith.

    This decorator is used to mark a function as a test case for LangSmith. It ensures
    that the necessary example data is created and associated with the test function.
    The decorated function will be executed as a test case, and the results will be
    recorded and reported by LangSmith.

    Args:
        - id (Optional[uuid.UUID]): A unique identifier for the test case. If not
            provided, an ID will be generated based on the test function's module
            and name.
        - output_keys (Optional[Sequence[str]]): A list of keys to be considered as
            the output keys for the test case. These keys will be extracted from the
            test function's inputs and stored as the expected outputs.
        - client (Optional[ls_client.Client]): An instance of the LangSmith client
            to be used for communication with the LangSmith service. If not provided,
            a default client will be used.
        - test_suite_name (Optional[str]): The name of the test suite to which the
            test case belongs. If not provided, the test suite name will be determined
            based on the environment or the package name.

    Returns:
        Callable: The decorated test function.

    Environment:
        - LANGSMITH_TEST_CACHE: If set, API calls will be cached to disk to
            save time and costs during testing. Recommended to commit the
            cache files to your repository for faster CI/CD runs.
            Requires the 'langsmith[vcr]' package to be installed.
        - LANGSMITH_TEST_TRACKING: Set this variable to the path of a directory
            to enable caching of test results. This is useful for re-running tests
            without re-executing the code. Requires the 'langsmith[vcr]' package.

    Example:
        For basic usage, simply decorate a test function with `@test`:

        >>> @test
        ... def test_addition():
        ...     assert 3 + 4 == 7

        Any code that is traced (such as those traced using `@traceable`
        or `wrap_*` functions) will be traced within the test case for
        improved visibility and debugging.

        >>> from langsmith import traceable
        >>> @traceable
        ... def generate_numbers():
        ...     return 3, 4

        >>> @test
        ... def test_nested():
        ...     # Traced code will be included in the test case
        ...     a, b = generate_numbers()
        ...     assert a + b == 7

        LLM calls are expensive! Cache requests by setting
        `LANGSMITH_TEST_CACHE=path/to/cache`. Check in these files to speed up
        CI/CD pipelines, so your results only change when your prompt or requested
        model changes.

        Note that this will require that you install langsmith with the `vcr` extra:

        `pip install -U "langsmith[vcr]"`

        Caching is faster if you install libyaml. See
        https://vcrpy.readthedocs.io/en/latest/installation.html#speed for more details.

        >>> # os.environ["LANGSMITH_TEST_CACHE"] = "tests/cassettes"
        >>> import openai
        >>> from langsmith.wrappers import wrap_openai
        >>> oai_client = wrap_openai(openai.Client())
        >>> @test
        ... def test_openai_says_hello():
        ...     # Traced code will be included in the test case
        ...     response = oai_client.chat.completions.create(
        ...         model="gpt-3.5-turbo",
        ...         messages=[
        ...             {"role": "system", "content": "You are a helpful assistant."},
        ...             {"role": "user", "content": "Say hello!"},
        ...         ],
        ...     )
        ...     assert "hello" in response.choices[0].message.content.lower()

        LLMs are stochastic. Naive assertions are flakey. You can use langsmith's
        `expect` to score and make approximate assertions on your results.

        >>> from langsmith import expect
        >>> @test
        ... def test_output_semantically_close():
        ...     response = oai_client.chat.completions.create(
        ...         model="gpt-3.5-turbo",
        ...         messages=[
        ...             {"role": "system", "content": "You are a helpful assistant."},
        ...             {"role": "user", "content": "Say hello!"},
        ...         ],
        ...     )
        ...     # The embedding_distance call logs the embedding distance to LangSmith
        ...     expect.embedding_distance(
        ...         prediction=response.choices[0].message.content,
        ...         reference="Hello!",
        ...         # The following optional assertion logs a
        ...         # pass/fail score to LangSmith
        ...         # and raises an AssertionError if the assertion fails.
        ...     ).to_be_less_than(1.0)
        ...     # Compute damerau_levenshtein distance
        ...     expect.edit_distance(
        ...         prediction=response.choices[0].message.content,
        ...         reference="Hello!",
        ...         # And then log a pass/fail score to LangSmith
        ...     ).to_be_less_than(1.0)

        The `@test` decorator works natively with pytest fixtures.
        The values will populate the "inputs" of the corresponding example in LangSmith.

        >>> import pytest
        >>> @pytest.fixture
        ... def some_input():
        ...     return "Some input"
        >>>
        >>> @test
        ... def test_with_fixture(some_input: str):
        ...     assert "input" in some_input
        >>>

        You can still use pytest.parametrize() as usual to run multiple test cases
        using the same test function.

        >>> @test(output_keys=["expected"])
        ... @pytest.mark.parametrize(
        ...     "a, b, expected",
        ...     [
        ...         (1, 2, 3),
        ...         (3, 4, 7),
        ...     ],
        ... )
        ... def test_addition_with_multiple_inputs(a: int, b: int, expected: int):
        ...     assert a + b == expected

        By default, each test case will be assigned a consistent, unique identifier
        based on the function name and module. You can also provide a custom identifier
        using the `id` argument:

        >>> @test(id="1a77e4b5-1d38-4081-b829-b0442cf3f145")
        ... def test_multiplication():
        ...     assert 3 * 4 == 12

        By default, all test test inputs are saved as "inputs" to a dataset.
        You can specify the `output_keys` argument to persist those keys
        within the dataset's "outputs" fields.

        >>> @pytest.fixture
        ... def expected_output():
        ...     return "input"
        >>> @test(output_keys=["expected_output"])
        ... def test_with_expected_output(some_input: str, expected_output: str):
        ...     assert expected_output in some_input

        To run these tests, use the pytest CLI. Or directly run the test functions.

        >>> test_output_semantically_close()
        >>> test_addition()
        >>> test_nested()
        >>> test_with_fixture("Some input")
        >>> test_with_expected_output("Some input", "Some")
        >>> test_multiplication()
        >>> test_openai_says_hello()
        >>> test_addition_with_multiple_inputs(1, 2, 3)
    """
    # Pull the decorator's own options out of kwargs; anything left over
    # is unexpected and only warned about.
    langtest_extra = _UTExtra(
        id=kwargs.pop("id", None),
        output_keys=kwargs.pop("output_keys", None),
        client=kwargs.pop("client", None),
        test_suite_name=kwargs.pop("test_suite_name", None),
        cache=ls_utils.get_cache_dir(kwargs.pop("cache", None)),
    )
    if kwargs:
        warnings.warn(f"Unexpected keyword arguments: {kwargs.keys()}")
    disable_tracking = ls_utils.test_tracking_is_disabled()
    if disable_tracking:
        warnings.warn(
            "LANGSMITH_TEST_TRACKING is set to 'false'."
            " Skipping LangSmith test tracking."
        )

    def decorator(func: Callable) -> Callable:
        # Async test functions get an async wrapper so they can be awaited.
        if inspect.iscoroutinefunction(func):

            @functools.wraps(func)
            async def async_wrapper(*test_args: Any, **test_kwargs: Any):
                if disable_tracking:
                    return await func(*test_args, **test_kwargs)
                await _arun_test(
                    func, *test_args, **test_kwargs, langtest_extra=langtest_extra
                )

            return async_wrapper

        @functools.wraps(func)
        def wrapper(*test_args: Any, **test_kwargs: Any):
            if disable_tracking:
                return func(*test_args, **test_kwargs)
            _run_test(func, *test_args, **test_kwargs, langtest_extra=langtest_extra)

        return wrapper

    # Support both bare @test and parameterized @test(...) usage.
    if args and callable(args[0]):
        return decorator(args[0])
    return decorator
## Private functions
def _get_experiment_name() -> str:
    """Build a unique experiment name from the tracer project + random hex."""
    # TODO Make more easily configurable
    prefix = ls_utils.get_tracer_project(False) or "TestSuiteResult"
    return f"{prefix}:{uuid.uuid4().hex[:8]}"
def _get_test_suite_name(func: Callable) -> str:
    """Resolve the test suite name: env var first, then repo + module."""
    explicit = ls_utils.get_env_var("TEST_SUITE")
    if explicit:
        return explicit
    repo_name = ls_env.get_git_info()["repo_name"]
    try:
        mod = inspect.getmodule(func)
        if mod:
            return f"{repo_name}.{mod.__name__}"
    except BaseException:
        logger.debug("Could not determine test suite name from file path.")
    raise ValueError("Please set the LANGSMITH_TEST_SUITE environment variable.")
def _get_test_suite(
    client: ls_client.Client, test_suite_name: str
) -> ls_schemas.Dataset:
    """Fetch the dataset backing this test suite, creating it if missing."""
    if client.has_dataset(dataset_name=test_suite_name):
        return client.read_dataset(dataset_name=test_suite_name)
    # Include the git remote in the description when available.
    repo = ls_env.get_git_info().get("remote_url") or ""
    description = "Test suite" + (f" for {repo}" if repo else "")
    return client.create_dataset(
        dataset_name=test_suite_name, description=description
    )
def _start_experiment(
    client: ls_client.Client,
    test_suite: ls_schemas.Dataset,
) -> ls_schemas.TracerSession:
    """Create (or reuse) the tracer project that holds this run's results."""
    experiment_name = _get_experiment_name()
    metadata = {
        "revision_id": ls_env.get_langchain_env_var_metadata().get("revision_id")
    }
    try:
        return client.create_project(
            experiment_name,
            reference_dataset_id=test_suite.id,
            description="Test Suite Results.",
            metadata=metadata,
        )
    except ls_utils.LangSmithConflictError:
        # Name collision: the project already exists, so reuse it.
        return client.read_project(project_name=experiment_name)
# Track the number of times a parameter has been used in a test
# This is to ensure that we can uniquely identify each test case
# defined using pytest.mark.parametrize
_param_dict: dict = defaultdict(lambda: defaultdict(int))
def _get_id(func: Callable, inputs: dict, suite_id: uuid.UUID) -> Tuple[uuid.UUID, str]:
global _param_dict
try:
file_path = str(Path(inspect.getfile(func)).relative_to(Path.cwd()))
except ValueError:
# Fall back to module name if file path is not available
file_path = func.__module__
identifier = f"{suite_id}{file_path}::{func.__name__}"
input_keys = tuple(sorted(inputs.keys()))
arg_indices = []
for key in input_keys:
_param_dict[identifier][key] += 1
arg_indices.append(f"{key}{_param_dict[identifier][key]}")
if arg_indices:
identifier += f"[{'-'.join(arg_indices)}]"
return uuid.uuid5(uuid.NAMESPACE_DNS, identifier), identifier[len(str(suite_id)) :]
def _end_tests(
    test_suite: _LangSmithTestSuite,
):
    """Finalize the experiment: stamp the end time, then flush pending work."""
    metadata = {
        **(ls_env.get_git_info() or {}),
        "dataset_version": test_suite.get_version(),
        "revision_id": ls_env.get_langchain_env_var_metadata().get("revision_id"),
    }
    test_suite.client.update_project(
        test_suite.experiment_id,
        end_time=datetime.datetime.now(datetime.timezone.utc),
        metadata=metadata,
    )
    # Block until queued example syncs / feedback submissions complete.
    test_suite.wait()
# Bound TypeVar so the function returns the same optional-dict type it takes.
VT = TypeVar("VT", bound=Optional[dict])


def _serde_example_values(values: VT) -> VT:
    # Round-trip through the client's JSON serializer so stored examples
    # contain only JSON-representable values — exact normalization rules
    # live in ls_client._dumps_json (defined elsewhere).
    if values is None:
        return values
    bts = ls_client._dumps_json(values)
    return _orjson.loads(bts)
class _LangSmithTestSuite:
    """Shared per-suite state: the dataset, its experiment project, and a
    single-threaded executor that serializes example syncs and feedback."""

    # Process-wide registry of suites keyed by suite name, guarded by _lock.
    _instances: Optional[dict] = None
    _lock = threading.RLock()

    def __init__(
        self,
        client: Optional[ls_client.Client],
        experiment: ls_schemas.TracerSession,
        dataset: ls_schemas.Dataset,
    ):
        self.client = client or rt.get_cached_client()
        self._experiment = experiment
        self._dataset = dataset
        # Latest example modification time seen; reported as dataset_version.
        self._version: Optional[datetime.datetime] = None
        # One worker so submitted tasks run in submission order.
        self._executor = ls_utils.ContextThreadPoolExecutor(max_workers=1)
        # Finalize the experiment when the interpreter exits.
        atexit.register(_end_tests, self)

    @property
    def id(self):
        # Dataset (test suite) id.
        return self._dataset.id

    @property
    def experiment_id(self):
        return self._experiment.id

    @property
    def experiment(self):
        return self._experiment

    @classmethod
    def from_test(
        cls,
        client: Optional[ls_client.Client],
        func: Callable,
        test_suite_name: Optional[str] = None,
    ) -> _LangSmithTestSuite:
        """Return the singleton suite for this test, creating it on first use."""
        client = client or rt.get_cached_client()
        test_suite_name = test_suite_name or _get_test_suite_name(func)
        with cls._lock:
            if not cls._instances:
                cls._instances = {}
            if test_suite_name not in cls._instances:
                test_suite = _get_test_suite(client, test_suite_name)
                experiment = _start_experiment(client, test_suite)
                cls._instances[test_suite_name] = cls(client, experiment, test_suite)
            return cls._instances[test_suite_name]

    @property
    def name(self):
        return self._experiment.name

    def update_version(self, version: datetime.datetime) -> None:
        # Keep the maximum modification time across all synced examples.
        with self._lock:
            if self._version is None or version > self._version:
                self._version = version

    def get_version(self) -> Optional[datetime.datetime]:
        with self._lock:
            return self._version

    def submit_result(
        self, run_id: uuid.UUID, error: Optional[str] = None, skipped: bool = False
    ) -> None:
        # Fire-and-forget: feedback is created on the background worker.
        self._executor.submit(self._submit_result, run_id, error, skipped=skipped)

    def _submit_result(
        self, run_id: uuid.UUID, error: Optional[str] = None, skipped: bool = False
    ) -> None:
        if error:
            if skipped:
                self.client.create_feedback(
                    run_id,
                    key="pass",
                    # Don't factor into aggregate score
                    score=None,
                    comment=f"Skipped: {repr(error)}",
                )
            else:
                self.client.create_feedback(
                    run_id, key="pass", score=0, comment=f"Error: {repr(error)}"
                )
        else:
            self.client.create_feedback(
                run_id,
                key="pass",
                score=1,
            )

    def sync_example(
        self, example_id: uuid.UUID, inputs: dict, outputs: dict, metadata: dict
    ) -> None:
        # Copy metadata so later caller mutations don't leak into the worker.
        self._executor.submit(
            self._sync_example, example_id, inputs, outputs, metadata.copy()
        )

    def _sync_example(
        self, example_id: uuid.UUID, inputs: dict, outputs: dict, metadata: dict
    ) -> None:
        inputs_ = _serde_example_values(inputs)
        outputs_ = _serde_example_values(outputs)
        try:
            example = self.client.read_example(example_id=example_id)
            # Only update when content or dataset membership changed.
            if (
                inputs_ != example.inputs
                or outputs_ != example.outputs
                or str(example.dataset_id) != str(self.id)
            ):
                self.client.update_example(
                    example_id=example.id,
                    inputs=inputs_,
                    outputs=outputs_,
                    metadata=metadata,
                    dataset_id=self.id,
                )
        except ls_utils.LangSmithNotFoundError:
            # First run of this test case: create the example.
            example = self.client.create_example(
                example_id=example_id,
                inputs=inputs_,
                outputs=outputs_,
                dataset_id=self.id,
                metadata=metadata,
                created_at=self._experiment.start_time,
            )
        if example.modified_at:
            self.update_version(example.modified_at)

    def wait(self):
        # Drain the background queue; called at process exit.
        self._executor.shutdown(wait=True)
class _UTExtra(TypedDict, total=False):
    """Options collected from @test(...) keyword arguments."""

    client: Optional[ls_client.Client]  # explicit client override
    id: Optional[uuid.UUID]  # fixed example id
    output_keys: Optional[Sequence[str]]  # input keys stored as expected outputs
    test_suite_name: Optional[str]  # dataset name override
    cache: Optional[str]  # directory for HTTP request caching
def _get_test_repr(func: Callable, sig: inspect.Signature) -> str:
name = getattr(func, "__name__", None) or ""
description = getattr(func, "__doc__", None) or ""
if description:
description = f" - {description.strip()}"
return f"{name}{sig}{description}"
def _ensure_example(
    func: Callable, *args: Any, langtest_extra: _UTExtra, **kwargs: Any
) -> Tuple[_LangSmithTestSuite, uuid.UUID]:
    """Sync the dataset example for this test; return (suite, example_id)."""
    client = langtest_extra["client"] or rt.get_cached_client()
    signature = inspect.signature(func)
    inputs: dict = rh._get_inputs_safe(signature, *args, **kwargs)
    outputs = {}
    # Requested output keys are moved from inputs into expected outputs.
    for k in langtest_extra["output_keys"] or ():
        outputs[k] = inputs.pop(k, None)
    test_suite = _LangSmithTestSuite.from_test(
        client, func, langtest_extra.get("test_suite_name")
    )
    example_id, example_name = _get_id(func, inputs, test_suite.id)
    # An explicit user-supplied id wins over the derived one.
    example_id = langtest_extra["id"] or example_id
    test_suite.sync_example(
        example_id,
        inputs,
        outputs,
        metadata={"signature": _get_test_repr(func, signature), "name": example_name},
    )
    return test_suite, example_id
def _run_test(
    func: Callable, *test_args: Any, langtest_extra: _UTExtra, **test_kwargs: Any
) -> None:
    """Run a sync test case under tracing and report pass/fail feedback."""
    test_suite, example_id = _ensure_example(
        func, *test_args, **test_kwargs, langtest_extra=langtest_extra
    )
    run_id = uuid.uuid4()

    def _test():
        func_inputs = rh._get_inputs_safe(
            inspect.signature(func), *test_args, **test_kwargs
        )
        with rh.trace(
            name=getattr(func, "__name__", "Test"),
            run_id=run_id,
            reference_example_id=example_id,
            inputs=func_inputs,
            project_name=test_suite.name,
            exceptions_to_handle=(SkipException,),
        ) as run_tree:
            try:
                result = func(*test_args, **test_kwargs)
                # Non-dict results are wrapped under an "output" key.
                run_tree.end(
                    outputs=(
                        result
                        if result is None or isinstance(result, dict)
                        else {"output": result}
                    )
                )
            except SkipException as e:
                # Skips are recorded but excluded from the aggregate score.
                test_suite.submit_result(run_id, error=repr(e), skipped=True)
                run_tree.end(
                    outputs={"skipped_reason": repr(e)},
                )
                raise e
            except BaseException as e:
                test_suite.submit_result(run_id, error=repr(e))
                raise e
        # Reached only on success (exceptions propagate above).
        try:
            test_suite.submit_result(run_id, error=None)
        except BaseException as e:
            logger.warning(f"Failed to create feedback for run_id {run_id}: {e}")

    cache_path = (
        Path(langtest_extra["cache"]) / f"{test_suite.id}.yaml"
        if langtest_extra["cache"]
        else None
    )
    # Tag the trace with the experiment and example it belongs to.
    current_context = rh.get_tracing_context()
    metadata = {
        **(current_context["metadata"] or {}),
        **{
            "experiment": test_suite.experiment.name,
            "reference_example_id": str(example_id),
        },
    }
    # Optionally record/replay HTTP via the cache, excluding LangSmith itself.
    with rh.tracing_context(
        **{**current_context, "metadata": metadata}
    ), ls_utils.with_optional_cache(
        cache_path, ignore_hosts=[test_suite.client.api_url]
    ):
        _test()
async def _arun_test(
    func: Callable, *test_args: Any, langtest_extra: _UTExtra, **test_kwargs: Any
) -> None:
    """Async counterpart of ``_run_test``: run a coroutine test case.

    Syncs the example to the test suite, traces the awaited call against it
    inside a tracing context (with optional response caching), and submits
    pass/fail/skip feedback for the run.
    """
    test_suite, example_id = _ensure_example(
        func, *test_args, **test_kwargs, langtest_extra=langtest_extra
    )
    run_id = uuid.uuid4()

    async def _test():
        # Trace the test coroutine against the synced example so the run is
        # linked to it inside the test suite's project.
        func_inputs = rh._get_inputs_safe(
            inspect.signature(func), *test_args, **test_kwargs
        )
        with rh.trace(
            name=getattr(func, "__name__", "Test"),
            run_id=run_id,
            reference_example_id=example_id,
            inputs=func_inputs,
            project_name=test_suite.name,
            exceptions_to_handle=(SkipException,),
        ) as run_tree:
            try:
                result = await func(*test_args, **test_kwargs)
                # Non-dict results are wrapped so outputs are always a mapping.
                run_tree.end(
                    outputs=(
                        result
                        if result is None or isinstance(result, dict)
                        else {"output": result}
                    )
                )
            except SkipException as e:
                # Record the skip as feedback, end the trace cleanly, then
                # re-raise so the host test framework sees the skip.
                test_suite.submit_result(run_id, error=repr(e), skipped=True)
                run_tree.end(
                    outputs={"skipped_reason": repr(e)},
                )
                raise e
            except BaseException as e:
                test_suite.submit_result(run_id, error=repr(e))
                raise e
            # Success path: record passing feedback, but never let feedback
            # submission failures fail the test itself.
            try:
                test_suite.submit_result(run_id, error=None)
            except BaseException as e:
                logger.warning(f"Failed to create feedback for run_id {run_id}: {e}")

    # Optional on-disk cache of HTTP responses, keyed per test suite.
    cache_path = (
        Path(langtest_extra["cache"]) / f"{test_suite.id}.yaml"
        if langtest_extra["cache"]
        else None
    )
    current_context = rh.get_tracing_context()
    metadata = {
        **(current_context["metadata"] or {}),
        **{
            "experiment": test_suite.experiment.name,
            "reference_example_id": str(example_id),
        },
    }
    with rh.tracing_context(
        **{**current_context, "metadata": metadata}
    ), ls_utils.with_optional_cache(
        cache_path, ignore_hosts=[test_suite.client.api_url]
    ):
        await _test()
# For backwards compatibility
# (`unit` is the legacy alias of the `test` decorator; kept exported so
# existing imports of `unit` keep working.)
unit = test
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_multipart.py | from __future__ import annotations
from typing import Dict, Iterable, Tuple
# One multipart form part: (field_name, (filename, payload_bytes, content_type,
# extra_headers)). ``filename`` is fixed to ``None`` for these in-memory parts.
MultipartPart = Tuple[str, Tuple[None, bytes, str, Dict[str, str]]]
class MultipartPartsAndContext:
    """Pairs a batch of multipart form parts with a provenance string.

    Attributes:
        parts: The multipart form parts to send.
        context: Provenance string; joined with "; " when batches are merged
            (see ``join_multipart_parts_and_context``).
    """

    parts: list[MultipartPart]
    context: str

    __slots__ = ("parts", "context")

    def __init__(self, parts: list[MultipartPart], context: str) -> None:
        self.parts = parts
        self.context = context
def join_multipart_parts_and_context(
    parts_and_contexts: Iterable[MultipartPartsAndContext],
) -> MultipartPartsAndContext:
    """Merge several part bundles into a single one.

    All parts are concatenated in input order and the context strings are
    joined with ``"; "``.
    """
    all_parts: list[MultipartPart] = []
    contexts: list[str] = []
    for bundle in parts_and_contexts:
        all_parts.extend(bundle.parts)
        contexts.append(bundle.context)
    return MultipartPartsAndContext(all_parts, "; ".join(contexts))
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_aiter.py | """Adapted.
Original source:
https://github.com/maxfischer2781/asyncstdlib/blob/master/asyncstdlib/itertools.py
MIT License
"""
import asyncio
import contextvars
import functools
import inspect
from collections import deque
from typing import (
Any,
AsyncContextManager,
AsyncGenerator,
AsyncIterable,
AsyncIterator,
Awaitable,
Callable,
Coroutine,
Deque,
Generic,
Iterable,
Iterator,
List,
Optional,
Tuple,
TypeVar,
Union,
cast,
overload,
)
T = TypeVar("T")
# Sentinel distinguishing "no default supplied" from an explicit default=None.
_no_default = object()


# https://github.com/python/cpython/blob/main/Lib/test/test_asyncgen.py#L54
# before 3.10, the builtin anext() was not available
def py_anext(
    iterator: AsyncIterator[T], default: Union[T, Any] = _no_default
) -> Awaitable[Union[T, None, Any]]:
    """Pure-Python implementation of the builtin ``anext``.

    Returns an awaitable producing the next item of ``iterator``. When a
    ``default`` is given, exhaustion (``StopAsyncIteration``) resolves to that
    default instead of raising.

    Raises:
        TypeError: If ``iterator`` does not implement ``__anext__``.
    """
    anext_method = getattr(type(iterator), "__anext__", None)
    if anext_method is None:
        raise TypeError(f"{iterator!r} is not an async iterator")
    anext_method = cast(Callable[[AsyncIterator[T]], Awaitable[T]], anext_method)

    if default is _no_default:
        # No default: hand back the raw awaitable unchanged.
        return anext_method(iterator)

    async def anext_with_default() -> Union[T, Any]:
        # Wrap the low-level __anext__ so exhaustion yields the default.
        try:
            return await anext_method(iterator)
        except StopAsyncIteration:
            return default

    return anext_with_default()
class NoLock:
    """No-op async context manager with a lock-like interface.

    Used wherever a real lock is optional: it provides ``async with`` support
    but performs no synchronization.
    """

    async def __aenter__(self) -> None:
        return None

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> bool:
        # False: never suppress exceptions raised in the managed block.
        return False
async def tee_peer(
    iterator: AsyncIterator[T],
    # the buffer specific to this peer
    buffer: Deque[T],
    # the buffers of all peers, including our own
    peers: List[Deque[T]],
    lock: AsyncContextManager[Any],
) -> AsyncGenerator[T, None]:
    """Iterate over :py:func:`~.tee`.

    One such generator exists per child of a :py:class:`Tee`. Each pulls from
    the shared ``iterator`` only when its own ``buffer`` is empty, fanning
    every new item out to all peers' buffers; ``lock`` serializes access to
    the shared iterator.
    """
    try:
        while True:
            if not buffer:
                async with lock:
                    # Another peer produced an item while we were waiting for the lock.
                    # Proceed with the next loop iteration to yield the item.
                    if buffer:
                        continue
                    try:
                        item = await iterator.__anext__()
                    except StopAsyncIteration:
                        break
                    else:
                        # Append to all buffers, including our own. We'll fetch our
                        # item from the buffer again, instead of yielding it directly.
                        # This ensures the proper item ordering if any of our peers
                        # are fetching items concurrently. They may have buffered their
                        # item already.
                        for peer_buffer in peers:
                            peer_buffer.append(item)
            yield buffer.popleft()
    finally:
        async with lock:
            # this peer is done – remove its buffer
            for idx, peer_buffer in enumerate(peers):  # pragma: no branch
                if peer_buffer is buffer:
                    peers.pop(idx)
                    break
            # if we are the last peer, try and close the iterator
            if not peers and hasattr(iterator, "aclose"):
                await iterator.aclose()
class Tee(Generic[T]):
    """Create ``n`` separate asynchronous iterators over ``iterable``.

    This splits a single ``iterable`` into multiple iterators, each providing
    the same items in the same order.
    All child iterators may advance separately but share the same items
    from ``iterable`` -- when the most advanced iterator retrieves an item,
    it is buffered until the least advanced iterator has yielded it as well.
    A ``tee`` works lazily and can handle an infinite ``iterable``, provided
    that all iterators advance.

    .. code-block:: python3

        async def derivative(sensor_data):
            previous, current = a.tee(sensor_data, n=2)
            await a.anext(previous)  # advance one iterator
            return a.map(operator.sub, previous, current)

    Unlike :py:func:`itertools.tee`, :py:func:`~.tee` returns a custom type instead
    of a :py:class:`tuple`. Like a tuple, it can be indexed, iterated and unpacked
    to get the child iterators. In addition, its :py:meth:`~.tee.aclose` method
    immediately closes all children, and it can be used in an ``async with`` context
    for the same effect.

    If ``iterable`` is an iterator and read elsewhere, ``tee`` will *not*
    provide these items. Also, ``tee`` must internally buffer each item until the
    last iterator has yielded it; if the most and least advanced iterator differ
    by most data, using a :py:class:`list` is more efficient (but not lazy).

    If the underlying iterable is concurrency safe (``anext`` may be awaited
    concurrently) the resulting iterators are concurrency safe as well. Otherwise,
    the iterators are safe if there is only ever one single "most advanced" iterator.
    To enforce sequential use of ``anext``, provide a ``lock``
    - e.g. an :py:class:`asyncio.Lock` instance in an :py:mod:`asyncio` application -
    and access is automatically synchronised.
    """

    def __init__(
        self,
        iterable: AsyncIterator[T],
        n: int = 2,
        *,
        lock: Optional[AsyncContextManager[Any]] = None,
    ):
        self._iterator = iterable.__aiter__()  # before 3.10 aiter() doesn't exist
        # One buffer per child; each tee_peer removes its own buffer on close.
        self._buffers: List[Deque[T]] = [deque() for _ in range(n)]
        self._children = tuple(
            tee_peer(
                iterator=self._iterator,
                buffer=buffer,
                peers=self._buffers,
                lock=lock if lock is not None else NoLock(),
            )
            for buffer in self._buffers
        )

    def __len__(self) -> int:
        return len(self._children)

    @overload
    def __getitem__(self, item: int) -> AsyncIterator[T]: ...

    @overload
    def __getitem__(self, item: slice) -> Tuple[AsyncIterator[T], ...]: ...

    def __getitem__(
        self, item: Union[int, slice]
    ) -> Union[AsyncIterator[T], Tuple[AsyncIterator[T], ...]]:
        return self._children[item]

    def __iter__(self) -> Iterator[AsyncIterator[T]]:
        yield from self._children

    async def __aenter__(self) -> "Tee[T]":
        return self

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> bool:
        await self.aclose()
        # False: exceptions from the ``async with`` body propagate normally.
        return False

    async def aclose(self) -> None:
        # Closing every child also closes the source iterator once the last
        # peer unregisters itself (see tee_peer's finally block).
        for child in self._children:
            await child.aclose()


# Public alias of Tee.
atee = Tee
async def async_zip(*async_iterables):
    """Async version of the builtin ``zip``.

    Yields tuples containing one item from each async iterable, stopping as
    soon as any of them is exhausted. Like ``zip()``, yields nothing when
    called with no arguments.
    """
    # Before Python 3.10, aiter() was not available
    iterators = [iterable.__aiter__() for iterable in async_iterables]
    if not iterators:
        # BUGFIX: ``asyncio.gather()`` with no awaitables returns [] right
        # away, so without this guard the loop below would yield ``()``
        # forever instead of terminating like ``zip()`` does.
        return
    while True:
        try:
            items = await asyncio.gather(
                *(py_anext(iterator) for iterator in iterators)
            )
            yield tuple(items)
        except StopAsyncIteration:
            break
def ensure_async_iterator(
    iterable: Union[Iterable, AsyncIterable],
) -> AsyncIterator:
    """Coerce a sync or async iterable into an async iterator.

    Async iterators are returned unchanged; async iterables are advanced via
    ``__aiter__``; plain sync iterables are wrapped in an adapter.
    """
    if hasattr(iterable, "__anext__"):
        # Already an async iterator.
        return cast(AsyncIterator, iterable)
    if hasattr(iterable, "__aiter__"):
        return cast(AsyncIterator, iterable.__aiter__())

    class _SyncToAsyncAdapter:
        """Expose a synchronous iterable through the async-iterator protocol."""

        def __init__(self, it: Iterable):
            self._iterator = iter(it)

        def __aiter__(self):
            return self

        async def __anext__(self):
            try:
                return next(self._iterator)
            except StopIteration:
                raise StopAsyncIteration

    return _SyncToAsyncAdapter(iterable)
def aiter_with_concurrency(
    n: Optional[int],
    generator: AsyncIterator[Coroutine[None, None, T]],
    *,
    _eager_consumption_timeout: float = 0,
) -> AsyncGenerator[T, None]:
    """Process async generator with max parallelism.

    Args:
        n: The number of tasks to run concurrently. ``0`` means fully
            sequential (await each item inline); ``None`` means unbounded.
        generator: The async generator to process.
        _eager_consumption_timeout: If set, check for completed tasks after
            each iteration and yield their results. This can be used to
            consume the generator eagerly while still respecting the concurrency
            limit.

    Yields:
        The processed items yielded by the async generator.
    """
    if n == 0:
        # Sequential fast path: no tasks, no semaphore.

        async def consume():
            async for item in generator:
                yield await item

        return consume()
    # NoLock stands in for the semaphore when concurrency is unbounded; the
    # cast only satisfies the type checker.
    semaphore = cast(
        asyncio.Semaphore, asyncio.Semaphore(n) if n is not None else NoLock()
    )

    async def process_item(ix: int, item):
        # The index travels with the result so completed tasks can be
        # removed from the pending-task map.
        async with semaphore:
            res = await item
            return (ix, res)

    async def process_generator():
        tasks = {}
        # asyncio.create_task accepts `context=` only on newer Pythons.
        accepts_context = asyncio_accepts_context()
        ix = 0
        async for item in generator:
            if accepts_context:
                # Propagate the current contextvars into the task.
                context = contextvars.copy_context()
                task = asyncio.create_task(process_item(ix, item), context=context)
            else:
                task = asyncio.create_task(process_item(ix, item))
            tasks[ix] = task
            ix += 1
            if _eager_consumption_timeout > 0:
                # Opportunistically yield whatever has already finished,
                # bounded by the timeout so intake is not stalled.
                try:
                    for _fut in asyncio.as_completed(
                        tasks.values(),
                        timeout=_eager_consumption_timeout,
                    ):
                        task_idx, res = await _fut
                        yield res
                        del tasks[task_idx]
                except asyncio.TimeoutError:
                    pass
            if n is not None and len(tasks) >= n:
                # At capacity: block until at least one task finishes.
                done, _ = await asyncio.wait(
                    tasks.values(), return_when=asyncio.FIRST_COMPLETED
                )
                for task in done:
                    task_idx, res = task.result()
                    yield res
                    del tasks[task_idx]
        # Source exhausted: flush all remaining in-flight tasks.
        for task in asyncio.as_completed(tasks.values()):
            _, res = await task
            yield res

    return process_generator()
def accepts_context(callable: Callable[..., Any]) -> bool:
    """Return True iff ``callable``'s signature includes a ``context`` parameter.

    Objects whose signature cannot be introspected (``inspect.signature``
    raises ``ValueError``, e.g. some builtins) are reported as False.
    """
    try:
        sig = inspect.signature(callable)
    except ValueError:
        return False
    return "context" in sig.parameters
# Backport of asyncio.to_thread (Python 3.9+) so Python 3.8 is supported.
async def aio_to_thread(
    func, /, *args, __ctx: Optional[contextvars.Context] = None, **kwargs
):
    """Asynchronously run function *func* in a separate thread.

    Any *args and **kwargs supplied for this function are directly passed
    to *func*. The caller's :class:`contextvars.Context` (or the explicitly
    supplied ``__ctx``) is active inside the worker thread, so context
    variables from the main thread remain accessible there.

    Return a coroutine that can be awaited to get the eventual result of *func*.
    """
    # Note: a falsy (empty) __ctx intentionally falls back to copying the
    # current context, matching the original `or` semantics.
    context = __ctx or contextvars.copy_context()
    bound_call = functools.partial(context.run, func, *args, **kwargs)
    return await asyncio.get_running_loop().run_in_executor(None, bound_call)
@functools.lru_cache(maxsize=1)
def asyncio_accepts_context():
    """Whether ``asyncio.create_task`` supports the ``context`` keyword argument.

    Cached for the lifetime of the process, as the answer cannot change.
    """
    return accepts_context(asyncio.create_task)
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_beta_decorator.py | import functools
import warnings
from typing import Callable
class LangSmithBetaWarning(UserWarning):
"""This is a warning specific to the LangSmithBeta module."""
@functools.lru_cache(maxsize=100)
def _warn_once(message: str) -> None:
warnings.warn(message, LangSmithBetaWarning, stacklevel=2)
def warn_beta(func: Callable) -> Callable:
    """Decorator that flags ``func`` as beta.

    The wrapped function emits a single ``LangSmithBetaWarning`` (per process)
    on first invocation, then behaves exactly like ``func``.
    """

    @functools.wraps(func)
    def beta_wrapper(*args, **kwargs):
        _warn_once(f"Function {func.__name__} is in beta.")
        return func(*args, **kwargs)

    return beta_wrapper
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_background_thread.py | from __future__ import annotations
import functools
import logging
import sys
import threading
import weakref
from queue import Empty, Queue
from typing import (
TYPE_CHECKING,
List,
Union,
cast,
)
from langsmith import schemas as ls_schemas
from langsmith._internal._constants import (
_AUTO_SCALE_DOWN_NEMPTY_TRIGGER,
_AUTO_SCALE_UP_NTHREADS_LIMIT,
_AUTO_SCALE_UP_QSIZE_TRIGGER,
)
from langsmith._internal._operations import (
SerializedFeedbackOperation,
SerializedRunOperation,
combine_serialized_queue_operations,
)
if TYPE_CHECKING:
from langsmith.client import Client
logger = logging.getLogger("langsmith.client")
@functools.total_ordering
class TracingQueueItem:
    """A prioritized entry in the tracing queue.

    Attributes:
        priority: Sort key; lexicographically smaller values sort first.
        item: The serialized run or feedback operation to ship.
    """

    priority: str
    item: Union[SerializedRunOperation, SerializedFeedbackOperation]

    __slots__ = ("priority", "item")

    def __init__(
        self,
        priority: str,
        item: Union[SerializedRunOperation, SerializedFeedbackOperation],
    ) -> None:
        self.priority = priority
        self.item = item

    def _sort_key(self):
        # Ordering/equality are defined purely by (priority, operation type);
        # the payload itself is never compared.
        return (self.priority, self.item.__class__)

    def __lt__(self, other: TracingQueueItem) -> bool:
        return self._sort_key() < other._sort_key()

    def __eq__(self, other: object) -> bool:
        return (
            isinstance(other, TracingQueueItem)
            and self._sort_key() == other._sort_key()
        )
def _tracing_thread_drain_queue(
tracing_queue: Queue, limit: int = 100, block: bool = True
) -> List[TracingQueueItem]:
next_batch: List[TracingQueueItem] = []
try:
# wait 250ms for the first item, then
# - drain the queue with a 50ms block timeout
# - stop draining if we hit the limit
# shorter drain timeout is used instead of non-blocking calls to
# avoid creating too many small batches
if item := tracing_queue.get(block=block, timeout=0.25):
next_batch.append(item)
while item := tracing_queue.get(block=block, timeout=0.05):
next_batch.append(item)
if limit and len(next_batch) >= limit:
break
except Empty:
pass
return next_batch
def _tracing_thread_handle_batch(
    client: Client,
    tracing_queue: Queue,
    batch: List[TracingQueueItem],
    use_multipart: bool,
) -> None:
    """Combine and ship one batch of queued operations to LangSmith.

    Errors are logged and swallowed so the background worker keeps running;
    every queue item is marked done regardless of outcome.
    """
    try:
        ops = combine_serialized_queue_operations([item.item for item in batch])
        if use_multipart:
            client._multipart_ingest_ops(ops)
        else:
            if any(isinstance(op, SerializedFeedbackOperation) for op in ops):
                # FIX: Logger.warn is a deprecated alias of Logger.warning.
                logger.warning(
                    "Feedback operations are not supported in non-multipart mode"
                )
                ops = [
                    op for op in ops if not isinstance(op, SerializedFeedbackOperation)
                ]
            client._batch_ingest_run_ops(cast(List[SerializedRunOperation], ops))
    except Exception:
        # exceptions are logged elsewhere, but we need to make sure the
        # background thread continues to run
        logger.error("Error in tracing queue", exc_info=True)
    finally:
        # Always balance the queue's unfinished-task counter.
        for _ in batch:
            tracing_queue.task_done()
def _ensure_ingest_config(
    info: ls_schemas.LangSmithInfo,
) -> ls_schemas.BatchIngestConfig:
    """Return the server-provided batch ingest config, or safe defaults.

    Any problem reading the server info (missing, falsy, raising on access)
    yields the default configuration instead of propagating an error.
    """
    default_config = ls_schemas.BatchIngestConfig(
        use_multipart_endpoint=False,
        size_limit_bytes=None,  # Note this field is not used here
        size_limit=100,
        scale_up_nthreads_limit=_AUTO_SCALE_UP_NTHREADS_LIMIT,
        scale_up_qsize_trigger=_AUTO_SCALE_UP_QSIZE_TRIGGER,
        scale_down_nempty_trigger=_AUTO_SCALE_DOWN_NEMPTY_TRIGGER,
    )
    try:
        return (info and info.batch_ingest_config) or default_config
    except BaseException:
        return default_config
def tracing_control_thread_func(client_ref: weakref.ref[Client]) -> None:
    """Main loop of the background tracing thread.

    Repeatedly drains the client's tracing queue into batches, spawning helper
    sub-threads when the queue backs up, until the client is cleaned up, the
    main thread dies, or this thread holds the last client reference. Drains
    any remaining items before exiting.
    """
    client = client_ref()
    if client is None:
        return
    tracing_queue = client.tracing_queue
    assert tracing_queue is not None
    batch_ingest_config = _ensure_ingest_config(client.info)
    size_limit: int = batch_ingest_config["size_limit"]
    scale_up_nthreads_limit: int = batch_ingest_config["scale_up_nthreads_limit"]
    scale_up_qsize_trigger: int = batch_ingest_config["scale_up_qsize_trigger"]
    use_multipart = batch_ingest_config.get("use_multipart_endpoint", False)

    sub_threads: List[threading.Thread] = []
    # 1 for this func, 1 for getrefcount, 1 for _get_data_type_cached
    num_known_refs = 3

    def keep_thread_active() -> bool:
        # if `client.cleanup()` was called, stop thread
        if not client or (
            hasattr(client, "_manual_cleanup") and client._manual_cleanup
        ):
            return False
        if not threading.main_thread().is_alive():
            # main thread is dead. should not be active
            return False
        if hasattr(sys, "getrefcount"):
            # check if client refs count indicates we're the only remaining
            # reference to the client
            return sys.getrefcount(client) > num_known_refs + len(sub_threads)
        else:
            # in PyPy, there is no sys.getrefcount attribute
            # for now, keep thread alive
            return True

    while keep_thread_active():
        # FIX: iterate over a snapshot — removing from a list while iterating
        # it directly skips the element after each removal.
        for thread in list(sub_threads):
            if not thread.is_alive():
                sub_threads.remove(thread)
        if (
            len(sub_threads) < scale_up_nthreads_limit
            and tracing_queue.qsize() > scale_up_qsize_trigger
        ):
            # Queue is backing up: add a helper thread (bounded by the limit).
            new_thread = threading.Thread(
                target=_tracing_sub_thread_func,
                args=(weakref.ref(client), use_multipart),
            )
            sub_threads.append(new_thread)
            new_thread.start()
        if next_batch := _tracing_thread_drain_queue(tracing_queue, limit=size_limit):
            _tracing_thread_handle_batch(
                client, tracing_queue, next_batch, use_multipart
            )
    # drain the queue on exit
    while next_batch := _tracing_thread_drain_queue(
        tracing_queue, limit=size_limit, block=False
    ):
        _tracing_thread_handle_batch(client, tracing_queue, next_batch, use_multipart)
def _tracing_sub_thread_func(
    client_ref: weakref.ref[Client],
    use_multipart: bool,
) -> None:
    """Helper worker spawned by the control thread to drain the tracing queue.

    Runs until the queue has been seen empty more than
    ``scale_down_nempty_trigger`` consecutive times or the main thread dies,
    then drains any remaining items before returning.
    """
    client = client_ref()
    if client is None:
        return
    try:
        if not client.info:
            return
    except BaseException as e:
        logger.debug("Error in tracing control thread: %s", e)
        return
    tracing_queue = client.tracing_queue
    assert tracing_queue is not None
    batch_ingest_config = _ensure_ingest_config(client.info)
    size_limit = batch_ingest_config.get("size_limit", 100)
    seen_successive_empty_queues = 0

    # loop until
    while (
        # the main thread dies
        threading.main_thread().is_alive()
        # or we've seen the queue empty scale_down_nempty_trigger times in a row
        and seen_successive_empty_queues
        <= batch_ingest_config["scale_down_nempty_trigger"]
    ):
        if next_batch := _tracing_thread_drain_queue(tracing_queue, limit=size_limit):
            # Got work: reset the idle counter and ship the batch.
            seen_successive_empty_queues = 0
            _tracing_thread_handle_batch(
                client, tracing_queue, next_batch, use_multipart
            )
        else:
            seen_successive_empty_queues += 1

    # drain the queue on exit
    while next_batch := _tracing_thread_drain_queue(
        tracing_queue, limit=size_limit, block=False
    ):
        _tracing_thread_handle_batch(client, tracing_queue, next_batch, use_multipart)
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_serde.py | from __future__ import annotations
import base64
import collections
import datetime
import decimal
import ipaddress
import json
import logging
import pathlib
import re
import uuid
from typing import Any
from langsmith._internal import _orjson
try:
    from zoneinfo import ZoneInfo  # type: ignore[import-not-found]
except ImportError:
    # Python < 3.9 has no zoneinfo: define an empty placeholder class so the
    # isinstance checks in _simple_default still work (and never match).

    class ZoneInfo:  # type: ignore[no-redef]
        """Introduced in python 3.9."""


logger = logging.getLogger(__name__)
def _simple_default(obj):
try:
# Only need to handle types that orjson doesn't serialize by default
# https://github.com/ijl/orjson#serialize
if isinstance(obj, datetime.datetime):
return obj.isoformat()
elif isinstance(obj, uuid.UUID):
return str(obj)
elif isinstance(obj, BaseException):
return {"error": type(obj).__name__, "message": str(obj)}
elif isinstance(obj, (set, frozenset, collections.deque)):
return list(obj)
elif isinstance(obj, (datetime.timezone, ZoneInfo)):
return obj.tzname(None)
elif isinstance(obj, datetime.timedelta):
return obj.total_seconds()
elif isinstance(obj, decimal.Decimal):
if obj.as_tuple().exponent >= 0:
return int(obj)
else:
return float(obj)
elif isinstance(
obj,
(
ipaddress.IPv4Address,
ipaddress.IPv4Interface,
ipaddress.IPv4Network,
ipaddress.IPv6Address,
ipaddress.IPv6Interface,
ipaddress.IPv6Network,
pathlib.Path,
),
):
return str(obj)
elif isinstance(obj, re.Pattern):
return obj.pattern
elif isinstance(obj, (bytes, bytearray)):
return base64.b64encode(obj).decode()
return str(obj)
except BaseException as e:
logger.debug(f"Failed to serialize {type(obj)} to JSON: {e}")
return str(obj)
_serialization_methods = [
(
"model_dump",
{"exclude_none": True, "mode": "json"},
), # Pydantic V2 with non-serializable fields
("dict", {}), # Pydantic V1 with non-serializable field
("to_dict", {}), # dataclasses-json
]
def _serialize_json(obj: Any) -> Any:
try:
if isinstance(obj, (set, tuple)):
if hasattr(obj, "_asdict") and callable(obj._asdict):
# NamedTuple
return obj._asdict()
return list(obj)
for attr, kwargs in _serialization_methods:
if (
hasattr(obj, attr)
and callable(getattr(obj, attr))
and not isinstance(obj, type)
):
try:
method = getattr(obj, attr)
response = method(**kwargs)
if not isinstance(response, dict):
return str(response)
return response
except Exception as e:
logger.error(
f"Failed to use {attr} to serialize {type(obj)} to"
f" JSON: {repr(e)}"
)
pass
return _simple_default(obj)
except BaseException as e:
logger.debug(f"Failed to serialize {type(obj)} to JSON: {e}")
return str(obj)
def _elide_surrogates(s: bytes) -> bytes:
pattern = re.compile(rb"\\ud[89a-f][0-9a-f]{2}", re.IGNORECASE)
result = pattern.sub(b"", s)
return result
def dumps_json(obj: Any) -> bytes:
    """Serialize an object to JSON-formatted bytes.

    Uses orjson with ``_serialize_json`` handling non-native types. If orjson
    raises ``TypeError`` (typically UTF surrogate characters), falls back to
    the stdlib ``json`` module and then elides any surrogate escapes that
    still cannot be round-tripped through orjson.

    Parameters
    ----------
    obj : Any
        The object to serialize.

    Returns:
    -------
    bytes
        The JSON formatted bytes.
    """
    try:
        return _orjson.dumps(
            obj,
            default=_serialize_json,
            option=_orjson.OPT_SERIALIZE_NUMPY
            | _orjson.OPT_SERIALIZE_DATACLASS
            | _orjson.OPT_SERIALIZE_UUID
            | _orjson.OPT_NON_STR_KEYS,
        )
    except TypeError as e:
        # Usually caused by UTF surrogate characters
        logger.debug(f"Orjson serialization failed: {repr(e)}. Falling back to json.")
        result = json.dumps(
            obj,
            default=_simple_default,
            ensure_ascii=True,
        ).encode("utf-8")
        try:
            # Round-trip through orjson after surrogateescape-decoding; if the
            # payload still cannot be parsed, strip the surrogates entirely.
            result = _orjson.dumps(
                _orjson.loads(result.decode("utf-8", errors="surrogateescape"))
            )
        except _orjson.JSONDecodeError:
            result = _elide_surrogates(result)
        return result
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_orjson.py | """Stubs for orjson operations, compatible with PyPy via a json fallback."""
try:
    # Fast path: the real orjson C extension (unavailable on PyPy).
    from orjson import (
        OPT_NON_STR_KEYS,
        OPT_SERIALIZE_DATACLASS,
        OPT_SERIALIZE_NUMPY,
        OPT_SERIALIZE_UUID,
        Fragment,
        JSONDecodeError,
        dumps,
        loads,
    )
except ImportError:
    import dataclasses
    import json
    import uuid
    from typing import Any, Callable, Optional

    # Mirror orjson's option bit flags so callers can OR them together.
    OPT_NON_STR_KEYS = 1
    OPT_SERIALIZE_DATACLASS = 2
    OPT_SERIALIZE_NUMPY = 4
    OPT_SERIALIZE_UUID = 8

    class Fragment:  # type: ignore
        """Pre-serialized JSON bytes to splice verbatim into the output."""

        def __init__(self, payloadb: bytes):
            self.payloadb = payloadb

    from json import JSONDecodeError  # type: ignore

    def dumps(  # type: ignore
        obj: Any,
        /,
        default: Optional[Callable[[Any], Any]] = None,
        option: int = 0,
    ) -> bytes:  # type: ignore
        """json-based stand-in for ``orjson.dumps`` honoring a subset of options."""
        # for now, don't do anything for this case because `json.dumps`
        # automatically encodes non-str keys as str by default, unlike orjson
        # enable_non_str_keys = bool(option & OPT_NON_STR_KEYS)
        enable_serialize_numpy = bool(option & OPT_SERIALIZE_NUMPY)
        enable_serialize_dataclass = bool(option & OPT_SERIALIZE_DATACLASS)
        enable_serialize_uuid = bool(option & OPT_SERIALIZE_UUID)

        class CustomEncoder(json.JSONEncoder):  # type: ignore
            def encode(self, o: Any) -> str:
                # NOTE(review): Fragment is only special-cased here at the top
                # level, not when nested inside containers — confirm callers
                # never nest Fragments on the PyPy path.
                if isinstance(o, Fragment):
                    return o.payloadb.decode("utf-8")  # type: ignore
                return super().encode(o)

            def default(self, o: Any) -> Any:
                if enable_serialize_uuid and isinstance(o, uuid.UUID):
                    return str(o)
                if enable_serialize_numpy and hasattr(o, "tolist"):
                    # even objects like np.uint16(15) have a .tolist() function
                    return o.tolist()
                if (
                    enable_serialize_dataclass
                    and dataclasses.is_dataclass(o)
                    and not isinstance(o, type)
                ):
                    return dataclasses.asdict(o)
                if default is not None:
                    return default(o)
                return super().default(o)

        return json.dumps(obj, cls=CustomEncoder).encode("utf-8")

    def loads(payload: bytes, /) -> Any:  # type: ignore
        """json-based stand-in for ``orjson.loads``."""
        return json.loads(payload)


__all__ = [
    "loads",
    "dumps",
    "Fragment",
    "JSONDecodeError",
    "OPT_SERIALIZE_NUMPY",
    "OPT_SERIALIZE_DATACLASS",
    "OPT_SERIALIZE_UUID",
    "OPT_NON_STR_KEYS",
]
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_patch.py | import functools
from urllib3 import __version__ as urllib3version # type: ignore[import-untyped]
from urllib3 import connection # type: ignore[import-untyped]
def _ensure_str(s, encoding="utf-8", errors="strict") -> str:
if isinstance(s, str):
return s
if isinstance(s, bytes):
return s.decode(encoding, errors)
return str(s)
# Copied from https://github.com/urllib3/urllib3/blob/1c994dfc8c5d5ecaee8ed3eb585d4785f5febf6e/src/urllib3/connection.py#L231
def request(self, method, url, body=None, headers=None):
    """Make the request.

    This function is based on the urllib3 request method, with modifications
    to handle potential issues when using vcrpy in concurrent workloads: the
    final call is delegated to ``self._parent_request`` (bound by
    ``patch_urllib3``) instead of ``super().request``.

    Args:
        self: The HTTPConnection instance.
        method (str): The HTTP method (e.g., 'GET', 'POST').
        url (str): The URL for the request.
        body (Optional[Any]): The body of the request.
        headers (Optional[dict]): Headers to send with the request.

    Returns:
        The result of calling the parent request method.
    """
    # Update the inner socket's timeout value to send the request.
    # This only triggers if the connection is re-used.
    if getattr(self, "sock", None) is not None:
        self.sock.settimeout(self.timeout)
    if headers is None:
        headers = {}
    else:
        # Avoid modifying the headers passed into .request()
        headers = headers.copy()
    # Case-insensitive check so an existing User-Agent is never overwritten.
    if "user-agent" not in (_ensure_str(k.lower()) for k in headers):
        headers["User-Agent"] = connection._get_default_user_agent()
    # The above is all the same ^^^
    # The following is different: delegate via the pre-bound parent method
    # rather than super(), which breaks under vcrpy's class swapping.
    return self._parent_request(method, url, body=body, headers=headers)
# Module-level guard so the monkey-patch is applied at most once per process.
_PATCHED = False


def patch_urllib3():
    """Patch the request method of urllib3 to avoid type errors when using vcrpy.

    In concurrent workloads (such as the tracing background queue), the
    connection pool can get in a state where an HTTPConnection is created
    before vcrpy patches the HTTPConnection class. In urllib3 >= 2.0 this isn't
    a problem since they use the proper super().request(...) syntax, but in older
    versions, super(HTTPConnection, self).request is used, resulting in a TypeError
    since self is no longer a subclass of "HTTPConnection" (which at this point
    is vcr.stubs.VCRConnection).

    This method patches the class to fix the super() syntax to avoid mixed inheritance.
    In the case of the LangSmith tracing logic, it doesn't really matter since we always
    exclude cache checks for calls to LangSmith.

    The patch is only applied for urllib3 versions older than 2.0.
    """
    global _PATCHED
    if _PATCHED:
        return
    from packaging import version

    if version.parse(urllib3version) >= version.parse("2.0"):
        # urllib3 >= 2.0 already uses `super().request(...)`; nothing to patch.
        _PATCHED = True
        return
    # Lookup the parent class and its request method
    parent_class = connection.HTTPConnection.__bases__[0]
    parent_request = parent_class.request

    def new_request(self, *args, **kwargs):
        """Handle parent request.

        This method binds the parent's request method to self and then
        calls our modified request function.
        """
        self._parent_request = functools.partial(parent_request, self)
        return request(self, *args, **kwargs)

    connection.HTTPConnection.request = new_request
    _PATCHED = True
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_edit_distance.py | from typing import Any, Callable, Dict, Literal, Optional
from typing_extensions import TypedDict
# Names of the supported rapidfuzz distance metrics.
METRICS = Literal[
    "damerau_levenshtein",
    "levenshtein",
    "jaro",
    "jaro_winkler",
    "hamming",
    "indel",
]


class EditDistanceConfig(TypedDict, total=False):
    # Distance metric name; EditDistance defaults to "damerau_levenshtein".
    metric: METRICS
    # If True (EditDistance's default), uses rapidfuzz's normalized_distance
    # instead of the raw distance.
    normalize_score: bool
class EditDistance:
    """String-similarity scorer backed by rapidfuzz edit-distance metrics."""

    def __init__(
        self,
        config: Optional[EditDistanceConfig] = None,
    ):
        """Resolve the configured metric into a distance callable."""
        cfg = config or {}
        chosen = cfg.get("metric") or "damerau_levenshtein"
        self.metric = self._get_metric(
            chosen, normalize_score=cfg.get("normalize_score", True)
        )

    def evaluate(
        self,
        prediction: str,
        reference: Optional[str] = None,
    ) -> float:
        """Score ``prediction`` against ``reference`` with the configured metric."""
        return self.metric(prediction, reference)

    @staticmethod
    def _get_metric(distance: str, normalize_score: bool = True) -> Callable:
        """Resolve a metric name into a rapidfuzz distance callable.

        Raises:
            ImportError: If rapidfuzz is not installed.
            ValueError: If ``distance`` is not a supported metric name.
        """
        try:
            from rapidfuzz import (  # type: ignore[import-not-found]
                distance as rf_distance,
            )
        except ImportError:
            raise ImportError(
                "This operation requires the rapidfuzz library to use."
                "Please install it with `pip install -U rapidfuzz`."
            )
        module_map: Dict[str, Any] = {
            "damerau_levenshtein": rf_distance.DamerauLevenshtein,
            "levenshtein": rf_distance.Levenshtein,
            "jaro": rf_distance.Jaro,
            "jaro_winkler": rf_distance.JaroWinkler,
            "hamming": rf_distance.Hamming,
            "indel": rf_distance.Indel,
        }
        if distance not in module_map:
            raise ValueError(
                f"Invalid distance metric: {distance}"
                f"\nMust be one of: {list(module_map)}"
            )
        module = module_map[distance]
        return module.normalized_distance if normalize_score else module.distance
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_embedding_distance.py | from __future__ import annotations
import logging
from typing import (
TYPE_CHECKING,
Any,
Callable,
List,
Literal,
Optional,
Sequence,
Union,
)
from typing_extensions import TypedDict
if TYPE_CHECKING:
import numpy as np # type: ignore
logger = logging.getLogger(__name__)

# A matrix-like value: nested float lists, a list of vectors, or an ndarray.
Matrix = Union[List[List[float]], List[Any], Any]


def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
    """Row-wise cosine similarity between two equal-width matrices.

    Uses simsimd when available, otherwise a NumPy fallback. Rows with zero
    norm produce a similarity of 0.0 rather than nan/inf. Either input being
    empty yields an empty array.

    Raises:
        ValueError: If X and Y have a different number of columns.
    """
    import numpy as np

    if len(X) == 0 or len(Y) == 0:
        return np.array([])

    X = np.array(X)
    Y = np.array(Y)
    if X.shape[1] != Y.shape[1]:
        raise ValueError(
            f"Number of columns in X and Y must be the same. X has shape {X.shape} "
            f"and Y has shape {Y.shape}."
        )
    try:
        import simsimd as simd  # type: ignore

        X = np.array(X, dtype=np.float32)
        Y = np.array(Y, dtype=np.float32)
        Z = 1 - simd.cdist(X, Y, metric="cosine")
        return np.array([Z]) if isinstance(Z, float) else np.array(Z)
    except ImportError:
        logger.debug(
            "Unable to import simsimd, defaulting to NumPy implementation. If you want "
            "to use simsimd please install with `pip install simsimd`."
        )
        x_norms = np.linalg.norm(X, axis=1)
        y_norms = np.linalg.norm(Y, axis=1)
        # Ignore divide by zero errors run time warnings as those are handled below.
        with np.errstate(divide="ignore", invalid="ignore"):
            sim = np.dot(X, Y.T) / np.outer(x_norms, y_norms)
        sim[np.isnan(sim) | np.isinf(sim)] = 0.0
        return sim
def _get_openai_encoder() -> Callable[[Sequence[str]], Sequence[Sequence[float]]]:
"""Get the OpenAI GPT-3 encoder."""
try:
from openai import Client as OpenAIClient
except ImportError:
raise ImportError(
"THe default encoder for the EmbeddingDistance class uses the OpenAI API. "
"Please either install the openai library with `pip install openai` or "
"provide a custom encoder function (Callable[[str], Sequence[float]])."
)
def encode_text(texts: Sequence[str]) -> Sequence[Sequence[float]]:
client = OpenAIClient()
response = client.embeddings.create(
input=list(texts), model="text-embedding-3-small"
)
return [d.embedding for d in response.data]
return encode_text
class EmbeddingConfig(TypedDict, total=False):
    """Optional configuration for ``EmbeddingDistance`` (all keys optional)."""

    # Maps a batch of texts to their embedding vectors; when omitted,
    # EmbeddingDistance falls back to the OpenAI encoder.
    encoder: Callable[[List[str]], Sequence[Sequence[float]]]
    # Distance metric applied to the embedded vectors; defaults to "cosine".
    metric: Literal["cosine", "euclidean", "manhattan", "chebyshev", "hamming"]
class EmbeddingDistance:
    """Compute an embedding-space distance between a prediction and a reference.

    Texts are embedded with the configured ``encoder`` and compared with the
    configured ``metric`` (cosine by default).
    """

    def __init__(
        self,
        config: Optional[EmbeddingConfig] = None,
    ):
        """Initialize from an optional :class:`EmbeddingConfig`.

        Defaults to cosine distance and the OpenAI encoder when unset.
        """
        config = config or {}
        # Selects which branch of _compute_distance is used.
        self.distance = config.get("metric") or "cosine"
        # `or` short-circuits, so _get_openai_encoder() (and hence the openai
        # dependency) is only touched when no custom encoder is supplied.
        self.encoder = config.get("encoder") or _get_openai_encoder()

    def evaluate(
        self,
        prediction: str,
        reference: str,
    ) -> float:
        """Embed both texts and return the configured distance as a float.

        Raises:
            ImportError: If NumPy is not installed.
        """
        try:
            import numpy as np
        except ImportError:
            raise ImportError(
                "The EmbeddingDistance class requires NumPy. Please install it with "
                "`pip install numpy`."
            )
        embeddings = self.encoder([prediction, reference])
        vector = np.array(embeddings)
        return self._compute_distance(vector[0], vector[1]).item()

    def _compute_distance(self, a: np.ndarray, b: np.ndarray) -> np.floating:
        """Dispatch to the metric selected at construction time."""
        if self.distance == "cosine":
            return self._cosine_distance(a, b)  # type: ignore
        elif self.distance == "euclidean":
            return self._euclidean_distance(a, b)
        elif self.distance == "manhattan":
            return self._manhattan_distance(a, b)
        elif self.distance == "chebyshev":
            return self._chebyshev_distance(a, b)
        elif self.distance == "hamming":
            return self._hamming_distance(a, b)
        else:
            raise ValueError(f"Invalid distance metric: {self.distance}")

    @staticmethod
    def _cosine_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
        """Compute the cosine distance between two vectors.

        Args:
            a (np.ndarray): The first vector.
            b (np.ndarray): The second vector.

        Returns:
            np.ndarray: The cosine distance.
        """
        return 1.0 - cosine_similarity([a], [b])

    @staticmethod
    def _euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.floating:
        """Compute the Euclidean distance between two vectors.

        Args:
            a (np.ndarray): The first vector.
            b (np.ndarray): The second vector.

        Returns:
            np.floating: The Euclidean distance.
        """
        # Fix: the module-level `np` exists only under TYPE_CHECKING, so the
        # previous bare `np.linalg.norm` raised NameError at runtime.
        import numpy as np

        return np.linalg.norm(a - b)

    @staticmethod
    def _manhattan_distance(a: np.ndarray, b: np.ndarray) -> np.floating:
        """Compute the Manhattan distance between two vectors.

        Args:
            a (np.ndarray): The first vector.
            b (np.ndarray): The second vector.

        Returns:
            np.floating: The Manhattan distance.
        """
        # Local import: np is a type-checking-only name at module level.
        import numpy as np

        return np.sum(np.abs(a - b))

    @staticmethod
    def _chebyshev_distance(a: np.ndarray, b: np.ndarray) -> np.floating:
        """Compute the Chebyshev distance between two vectors.

        Args:
            a (np.ndarray): The first vector.
            b (np.ndarray): The second vector.

        Returns:
            np.floating: The Chebyshev distance.
        """
        # Local import: np is a type-checking-only name at module level.
        import numpy as np

        return np.max(np.abs(a - b))

    @staticmethod
    def _hamming_distance(a: np.ndarray, b: np.ndarray) -> np.floating:
        """Compute the Hamming distance between two vectors.

        Args:
            a (np.ndarray): The first vector.
            b (np.ndarray): The second vector.

        Returns:
            np.floating: The fraction of positions at which a and b differ.
        """
        # Local import: np is a type-checking-only name at module level.
        import numpy as np

        return np.mean(a != b)
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_constants.py | _SIZE_LIMIT_BYTES = 20_971_520 # 20MB by default
# NOTE(review): these thresholds appear to tune the background upload worker
# pool (queue depth / thread count) — confirm against the consumers of this
# module before relying on the descriptions below.
_AUTO_SCALE_UP_QSIZE_TRIGGER = 200  # queue depth that presumably triggers scaling up workers
_AUTO_SCALE_UP_NTHREADS_LIMIT = 32  # hard cap on worker threads
_AUTO_SCALE_DOWN_NEMPTY_TRIGGER = 4  # consecutive empty polls before scaling down (presumably)
_BLOCKSIZE_BYTES = 1024 * 1024  # 1MB
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/_internal/_operations.py | from __future__ import annotations
import itertools
import logging
import uuid
from typing import Literal, Optional, Union, cast
from langsmith import schemas as ls_schemas
from langsmith._internal import _orjson
from langsmith._internal._multipart import MultipartPart, MultipartPartsAndContext
from langsmith._internal._serde import dumps_json as _dumps_json
logger = logging.getLogger(__name__)
class SerializedRunOperation:
    """A run create/update whose payload fields are pre-serialized to bytes.

    ``_none`` holds the serialized run body with the large fields
    (inputs/outputs/events/attachments) split out into their own slots.
    """

    operation: Literal["post", "patch"]
    id: uuid.UUID
    trace_id: uuid.UUID
    # this is the whole object, minus the other fields which
    # are popped (inputs/outputs/events/attachments)
    _none: bytes
    inputs: Optional[bytes]
    outputs: Optional[bytes]
    events: Optional[bytes]
    attachments: Optional[ls_schemas.Attachments]

    __slots__ = (
        "operation",
        "id",
        "trace_id",
        "_none",
        "inputs",
        "outputs",
        "events",
        "attachments",
    )

    def __init__(
        self,
        operation: Literal["post", "patch"],
        id: uuid.UUID,
        trace_id: uuid.UUID,
        _none: bytes,
        inputs: Optional[bytes] = None,
        outputs: Optional[bytes] = None,
        events: Optional[bytes] = None,
        attachments: Optional[ls_schemas.Attachments] = None,
    ) -> None:
        # __slots__ is declared in the same order as the parameters, so the
        # two sequences zip together one-to-one.
        values = (operation, id, trace_id, _none, inputs, outputs, events, attachments)
        for slot, value in zip(self.__slots__, values):
            setattr(self, slot, value)

    def __eq__(self, other: object) -> bool:
        # Field-by-field comparison over the slot list, short-circuiting on
        # the first mismatch.
        if not isinstance(other, SerializedRunOperation):
            return False
        return all(
            getattr(self, slot) == getattr(other, slot) for slot in self.__slots__
        )
class SerializedFeedbackOperation:
    """A feedback record pre-serialized to bytes, queued for upload."""

    id: uuid.UUID
    trace_id: uuid.UUID
    feedback: bytes

    __slots__ = ("id", "trace_id", "feedback")

    def __init__(self, id: uuid.UUID, trace_id: uuid.UUID, feedback: bytes) -> None:
        # Slots are declared in the same order as the parameters.
        for slot, value in zip(self.__slots__, (id, trace_id, feedback)):
            setattr(self, slot, value)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, SerializedFeedbackOperation):
            return False
        return all(
            getattr(self, slot) == getattr(other, slot) for slot in self.__slots__
        )
def serialize_feedback_dict(
    feedback: Union[ls_schemas.FeedbackCreate, dict],
) -> SerializedFeedbackOperation:
    """Normalize a feedback payload and serialize it for the upload queue.

    Accepts either a pydantic-style model (anything with a callable ``dict``)
    or a plain dict. Guarantees ``id`` and ``trace_id`` are ``uuid.UUID``
    values: missing keys get fresh UUIDs, string values are parsed.
    """
    dict_method = getattr(feedback, "dict", None)
    if callable(dict_method):
        feedback_create: dict = dict_method()  # type: ignore
    else:
        feedback_create = cast(dict, feedback)

    for key in ("id", "trace_id"):
        if key not in feedback_create:
            feedback_create[key] = uuid.uuid4()
        elif isinstance(feedback_create[key], str):
            feedback_create[key] = uuid.UUID(feedback_create[key])

    return SerializedFeedbackOperation(
        id=feedback_create["id"],
        trace_id=feedback_create["trace_id"],
        feedback=_dumps_json(feedback_create),
    )
def serialize_run_dict(
    operation: Literal["post", "patch"], payload: dict
) -> SerializedRunOperation:
    """Split a run payload into independently-uploaded parts and serialize it.

    ``inputs``/``outputs``/``events``/``attachments`` are popped off the
    payload (they are sent as separate multipart parts); the remainder is
    serialized as the run's main body.

    Args:
        operation: Whether this is a run creation ("post") or update ("patch").
        payload: The run dict; must contain "id" and "trace_id". Note it is
            mutated in place by the pops below.

    Returns:
        The serialized operation, ready for queueing.
    """
    inputs = payload.pop("inputs", None)
    outputs = payload.pop("outputs", None)
    events = payload.pop("events", None)
    attachments = payload.pop("attachments", None)
    return SerializedRunOperation(
        operation=operation,
        id=payload["id"],
        trace_id=payload["trace_id"],
        _none=_dumps_json(payload),
        inputs=_dumps_json(inputs) if inputs is not None else None,
        outputs=_dumps_json(outputs) if outputs is not None else None,
        events=_dumps_json(events) if events is not None else None,
        # The original `attachments if attachments is not None else None`
        # was a no-op conditional; the value passes through unchanged.
        attachments=attachments,
    )
def combine_serialized_queue_operations(
    ops: list[Union[SerializedRunOperation, SerializedFeedbackOperation]],
) -> list[Union[SerializedRunOperation, SerializedFeedbackOperation]]:
    """Collapse queued run operations: merge each "patch" into its matching
    "post" (by run id) so each run is uploaded once, fully populated.

    Feedback operations and patches without a matching post pass through
    unchanged. Returns merged posts first, then the passthrough ops.
    """
    # Index every creation ("post") op by run id; these objects are mutated
    # in place below as patches are folded in.
    create_ops_by_id = {
        op.id: op
        for op in ops
        if isinstance(op, SerializedRunOperation) and op.operation == "post"
    }
    passthrough_ops: list[
        Union[SerializedRunOperation, SerializedFeedbackOperation]
    ] = []
    for op in ops:
        if isinstance(op, SerializedRunOperation):
            if op.operation == "post":
                # Already captured in create_ops_by_id above.
                continue
            # must be patch
            create_op = create_ops_by_id.get(op.id)
            if create_op is None:
                # Patch for a run not created in this batch — send as-is.
                passthrough_ops.append(op)
                continue
            # Merge the patch's top-level fields into the post body, dropping
            # None values so a patch never erases data from the post.
            # (_none is typed as bytes and should never be None; the None
            # check here looks defensive — confirm before removing.)
            if op._none is not None and op._none != create_op._none:
                # TODO optimize this more - this would currently be slowest
                # for large payloads
                create_op_dict = _orjson.loads(create_op._none)
                op_dict = {
                    k: v for k, v in _orjson.loads(op._none).items() if v is not None
                }
                create_op_dict.update(op_dict)
                create_op._none = _orjson.dumps(create_op_dict)
            # For the large payload fields, the patch's value replaces the
            # post's wholesale when present.
            if op.inputs is not None:
                create_op.inputs = op.inputs
            if op.outputs is not None:
                create_op.outputs = op.outputs
            if op.events is not None:
                create_op.events = op.events
            if op.attachments is not None:
                if create_op.attachments is None:
                    create_op.attachments = {}
                create_op.attachments.update(op.attachments)
        else:
            # Feedback operations are never merged.
            passthrough_ops.append(op)
    return list(itertools.chain(create_ops_by_id.values(), passthrough_ops))
def serialized_feedback_operation_to_multipart_parts_and_context(
    op: SerializedFeedbackOperation,
) -> MultipartPartsAndContext:
    """Wrap a serialized feedback op as one multipart part plus its context."""
    part_name = f"feedback.{op.id}"
    headers = {"Content-Length": str(len(op.feedback))}
    part = (part_name, (None, op.feedback, "application/json", headers))
    return MultipartPartsAndContext([part], f"trace={op.trace_id},id={op.id}")
def serialized_run_operation_to_multipart_parts_and_context(
    op: SerializedRunOperation,
) -> MultipartPartsAndContext:
    """Expand a serialized run op into its multipart form-data parts.

    Emits one part for the main run body, one per present payload field
    (inputs/outputs/events), and one per attachment, plus the run's
    trace/id context string. Part names encode operation, run id, and
    field name, separated by periods.
    """
    acc_parts: list[MultipartPart] = []

    # this is main object, minus inputs/outputs/events/attachments
    acc_parts.append(
        (
            f"{op.operation}.{op.id}",
            (
                None,
                op._none,
                "application/json",
                {"Content-Length": str(len(op._none))},
            ),
        )
    )
    # Optional pre-serialized JSON fields — one part each, skipped when absent.
    for key, value in (
        ("inputs", op.inputs),
        ("outputs", op.outputs),
        ("events", op.events),
    ):
        if value is None:
            continue
        valb = value
        acc_parts.append(
            (
                f"{op.operation}.{op.id}.{key}",
                (
                    None,
                    valb,
                    "application/json",
                    {"Content-Length": str(len(valb))},
                ),
            ),
        )
    if op.attachments:
        for n, (content_type, valb) in op.attachments.items():
            # Periods delimit the part-name segments above, so attachment
            # names containing them cannot be encoded — warn and skip.
            if "." in n:
                logger.warning(
                    f"Skipping logging of attachment '{n}' "
                    f"for run {op.id}:"
                    " Invalid attachment name. Attachment names must not contain"
                    " periods ('.'). Please rename the attachment and try again."
                )
                continue
            acc_parts.append(
                (
                    f"attachment.{op.id}.{n}",
                    (
                        None,
                        valb,
                        content_type,
                        {"Content-Length": str(len(valb))},
                    ),
                )
            )
    return MultipartPartsAndContext(
        acc_parts,
        f"trace={op.trace_id},id={op.id}",
    )
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/beta/_evals.py | """Beta utility functions to assist in common eval workflows.
These functions may change in the future.
"""
import collections
import datetime
import itertools
import uuid
from typing import DefaultDict, List, Optional, Sequence, Tuple, TypeVar
import langsmith.run_trees as rt
import langsmith.schemas as ls_schemas
from langsmith import evaluation as ls_eval
from langsmith._internal._beta_decorator import warn_beta
from langsmith.client import Client
def _convert_ids(run_dict: dict, id_map: dict):
"""Convert the IDs in the run dictionary using the provided ID map.
Parameters:
- run_dict (dict): The dictionary representing a run.
- id_map (dict): The dictionary mapping old IDs to new IDs.
Returns:
- dict: The updated run dictionary.
"""
do = run_dict["dotted_order"]
for k, v in id_map.items():
do = do.replace(str(k), str(v))
run_dict["dotted_order"] = do
if run_dict.get("parent_run_id"):
run_dict["parent_run_id"] = id_map[run_dict["parent_run_id"]]
if not run_dict.get("extra"):
run_dict["extra"] = {}
return run_dict
def _convert_root_run(root: ls_schemas.Run, run_to_example_map: dict) -> List[dict]:
    """Clone a trace tree under fresh UUIDs and link the root to its example.

    Parameters:
    - root (ls_schemas.Run): The root run to convert.
    - run_to_example_map (dict): Mapping of run IDs to example IDs.

    Returns:
    - List[dict]: The converted run dictionaries (root first).
    """
    new_trace_id = uuid.uuid4()
    id_map = {root.trace_id: new_trace_id}
    stack = [root]
    converted = []
    # Depth-first walk over the tree, assigning each run a fresh id.
    while stack:
        run = stack.pop()
        run_dict = run.dict(exclude={"parent_run_ids", "child_run_ids", "session_id"})
        run_dict["id"] = id_map.setdefault(run_dict["id"], uuid.uuid4())
        run_dict["trace_id"] = id_map[run_dict["trace_id"]]
        if run.child_runs:
            stack.extend(run.child_runs)
        converted.append(run_dict)
    # Second pass: rewrite dotted orders / parent ids now that every id
    # has its replacement.
    remapped = [_convert_ids(d, id_map) for d in converted]
    remapped[0]["reference_example_id"] = run_to_example_map[root.id]
    return remapped
@warn_beta
def convert_runs_to_test(
    runs: Sequence[ls_schemas.Run],
    *,
    dataset_name: str,
    test_project_name: Optional[str] = None,
    client: Optional[Client] = None,
    load_child_runs: bool = False,
    include_outputs: bool = False,
) -> ls_schemas.TracerSession:
    """Convert the following runs to a dataset + test.

    This makes it easy to sample prod runs into a new regression testing
    workflow and compare against a candidate system.

    Internally, this function does the following:
    1. Create a dataset from the provided production run inputs.
    2. Create a new test project.
    3. Clone the production runs and re-upload against the dataset.

    Parameters:
    - runs (Sequence[ls_schemas.Run]): A sequence of runs to be executed as a test.
    - dataset_name (str): The name of the dataset to associate with the test runs.
    - test_project_name (Optional[str]): Name for the cloned test project.
      Defaults to a generated "prod-baseline-<hex>" name.
    - client (Optional[Client]): An optional LangSmith client instance. If not provided,
      a cached client will be used.
    - load_child_runs (bool): Whether to load child runs when copying runs.
      Defaults to False.
    - include_outputs (bool): Whether to copy run outputs onto the created
      examples. Defaults to False.

    Returns:
    - ls_schemas.TracerSession: The project containing the cloned runs.

    Examples:
    --------
    .. code-block:: python

        import langsmith
        import random

        client = langsmith.Client()

        # Randomly sample 100 runs from a prod project
        runs = list(client.list_runs(project_name="My Project", execution_order=1))
        sampled_runs = random.sample(runs, min(len(runs), 100))
        runs_as_test(runs, dataset_name="Random Runs")

        # Select runs named "extractor" whose root traces received good feedback
        runs = client.list_runs(
            project_name="<your_project>",
            filter='eq(name, "extractor")',
            trace_filter='and(eq(feedback_key, "user_score"), eq(feedback_score, 1))',
        )
        runs_as_test(runs, dataset_name="Extraction Good")
    """
    if not runs:
        raise ValueError(f"""Expected a non-empty sequence of runs. Received: {runs}""")
    client = client or rt.get_cached_client()
    # 1. Dataset: one example per run, keyed back to its source run id.
    ds = client.create_dataset(dataset_name=dataset_name)
    outputs = [r.outputs for r in runs] if include_outputs else None
    client.create_examples(
        inputs=[r.inputs for r in runs],
        outputs=outputs,
        source_run_ids=[r.id for r in runs],
        dataset_id=ds.id,
    )
    if not load_child_runs:
        runs_to_copy = runs
    else:
        # Re-read each run so child runs come back attached.
        runs_to_copy = [
            client.read_run(r.id, load_child_runs=load_child_runs) for r in runs
        ]
    test_project_name = test_project_name or f"prod-baseline-{uuid.uuid4().hex[:6]}"
    examples = list(client.list_examples(dataset_name=dataset_name))
    run_to_example_map = {e.source_run_id: e.id for e in examples}
    # Pin the project metadata to the dataset's version at creation time.
    dataset_version = (
        examples[0].modified_at if examples[0].modified_at else examples[0].created_at
    )
    # 3. Clone every trace tree under fresh ids, referencing the examples.
    to_create = [
        run_dict
        for root_run in runs_to_copy
        for run_dict in _convert_root_run(root_run, run_to_example_map)
    ]
    project = client.create_project(
        project_name=test_project_name,
        reference_dataset_id=ds.id,
        metadata={
            "which": "prod-baseline",
            "dataset_version": dataset_version.isoformat(),
        },
    )
    for new_run in to_create:
        client.create_run(**new_run, project_name=test_project_name)
    # Mark the cloned project as finished.
    _ = client.update_project(
        project.id, end_time=datetime.datetime.now(tz=datetime.timezone.utc)
    )
    return project
def _load_nested_traces(project_name: str, client: Client) -> List[ls_schemas.Run]:
    """Fetch all runs in a project and stitch them into root-run trees.

    Children are attached to their parent's ``child_runs`` (sorted by dotted
    order); the list of root runs is returned.
    """
    children_by_parent: DefaultDict[uuid.UUID, List[ls_schemas.Run]] = (
        collections.defaultdict(list)
    )
    roots = []
    runs_by_id = {}
    for run in client.list_runs(project_name=project_name):
        if run.parent_run_id is None:
            roots.append(run)
        else:
            children_by_parent[run.parent_run_id].append(run)
        runs_by_id[run.id] = run
    for parent_id, children in children_by_parent.items():
        runs_by_id[parent_id].child_runs = sorted(
            children, key=lambda r: r.dotted_order
        )
    return roots
T = TypeVar("T")
U = TypeVar("U")
def _outer_product(list1: List[T], list2: List[U]) -> List[Tuple[T, U]]:
return list(itertools.product(list1, list2))
@warn_beta
def compute_test_metrics(
    project_name: str,
    *,
    evaluators: list,
    max_concurrency: Optional[int] = 10,
    client: Optional[Client] = None,
) -> None:
    """Compute test metrics for a given test name using a list of evaluators.

    Args:
        project_name (str): The name of the test project to evaluate.
        evaluators (list): A list of evaluators to compute metrics with.
            Entries may be RunEvaluator instances or plain callables
            (callables are wrapped via run_evaluator).
        max_concurrency (Optional[int], optional): The maximum number of concurrent
            evaluations. Defaults to 10.
        client (Optional[Client], optional): The client to use for evaluations.
            Defaults to None.

    Returns:
        None: This function does not return any value.
    """
    from langsmith import ContextThreadPoolExecutor

    # Normalize every evaluator up front so bad inputs fail before any
    # network work starts.
    evaluators_: List[ls_eval.RunEvaluator] = []
    for func in evaluators:
        if isinstance(func, ls_eval.RunEvaluator):
            evaluators_.append(func)
        elif callable(func):
            evaluators_.append(ls_eval.run_evaluator(func))
        else:
            raise NotImplementedError(
                f"Evaluation not yet implemented for evaluator of type {type(func)}"
            )
    client = client or rt.get_cached_client()
    traces = _load_nested_traces(project_name, client)
    # Evaluate every (trace, evaluator) pair concurrently; the zip/unzip
    # turns the pair list into the two parallel argument iterables that
    # executor.map expects.
    with ContextThreadPoolExecutor(max_workers=max_concurrency) as executor:
        results = executor.map(
            client.evaluate_run, *zip(*_outer_product(traces, evaluators_))
        )
    # Drain the lazy map iterator so every evaluation completes and any
    # raised exception is surfaced here.
    for _ in results:
        pass
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/beta/__init__.py | """Beta functionality prone to change."""
from langsmith._internal._beta_decorator import warn_beta
from langsmith.beta._evals import compute_test_metrics, convert_runs_to_test
__all__ = ["convert_runs_to_test", "compute_test_metrics", "warn_beta"]
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/cli/main.py | import argparse
import json
import logging
import os
import subprocess
from pathlib import Path
from typing import Dict, List, Mapping, Optional, Union, cast
from langsmith import env as ls_env
from langsmith import utils as ls_utils
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)
_DIR = Path(__file__).parent
def pprint_services(services_status: List[Mapping[str, Union[str, List[str]]]]) -> None:
    """Log a formatted table of docker-compose service statuses.

    Each row shows the service name, status, and any published ports,
    followed by connection instructions for LangChain applications.
    """
    # Collect Service, Status, and published ports for each service.
    rows = []
    for raw in services_status:
        row: Dict[str, str] = {
            "Service": str(raw["Service"]),
            "Status": str(raw["Status"]),
        }
        publishers = cast(List[Dict], raw.get("Publishers", []))
        if publishers:
            row["PublishedPorts"] = ", ".join(
                [str(publisher["PublishedPort"]) for publisher in publishers]
            )
        rows.append(row)

    # Column widths are sized to the longest entry plus two spaces padding.
    name_width = max(len(row["Service"]) for row in rows) + 2
    status_width = max(len(row["Status"]) for row in rows) + 2
    lines = [
        "\n"
        + "Service".ljust(name_width)
        + "Status".ljust(status_width)
        + "Published Ports"
    ]
    for row in rows:
        lines.append(
            row["Service"].ljust(name_width)
            + row["Status"].ljust(status_width)
            + row.get("PublishedPorts", "")
        )
    lines.append(
        "\nTo connect, set the following environment variables"
        " in your LangChain application:"
        "\nLANGSMITH_TRACING_V2=true"
        "\nLANGSMITH_ENDPOINT=http://localhost:80/api"
    )
    logger.info("\n".join(lines))
class LangSmithCommand:
    """Manage the LangSmith Tracing server."""

    def __init__(self) -> None:
        # The bundled compose file next to this module defines every
        # LangSmith service used by the commands below.
        self.docker_compose_file = (
            Path(__file__).absolute().parent / "docker-compose.yaml"
        )

    @property
    def docker_compose_command(self) -> List[str]:
        # Resolved per access so the docker/compose binary is discovered at
        # invocation time.
        return ls_utils.get_docker_compose_command()

    def _open_browser(self, url: str) -> None:
        """Best-effort attempt to open ``url`` via the ``open`` command."""
        try:
            subprocess.run(["open", url])
        except FileNotFoundError:
            # `open` is unavailable (e.g. not macOS) — silently skip.
            pass

    def _start_local(self) -> None:
        """Bring the compose stack up and print connection instructions."""
        command = [
            *self.docker_compose_command,
            "-f",
            str(self.docker_compose_file),
        ]
        # --wait blocks until services report healthy.
        subprocess.run(
            [
                *command,
                "up",
                "--quiet-pull",
                "--wait",
            ]
        )
        logger.info(
            "LangSmith server is running at http://localhost:80/api.\n"
            "To view the app, navigate your browser to http://localhost:80"
            "\n\nTo connect your LangChain application to the server"
            " locally,\nset the following environment variable"
            " when running your LangChain application.\n"
        )
        logger.info("\tLANGSMITH_TRACING=true")
        logger.info("\tLANGSMITH_ENDPOINT=http://localhost:80/api\n")
        self._open_browser("http://localhost")

    def pull(
        self,
        *,
        version: str = "0.5.7",
    ) -> None:
        """Pull the latest LangSmith images.

        Args:
            version: The LangSmith version to use for LangSmith. Defaults to 0.5.7
        """
        # The compose file reads this variable to select image tags.
        os.environ["_LANGSMITH_IMAGE_VERSION"] = version
        subprocess.run(
            [
                *self.docker_compose_command,
                "-f",
                str(self.docker_compose_file),
                "pull",
            ]
        )

    def start(
        self,
        *,
        openai_api_key: Optional[str] = None,
        langsmith_license_key: str,
        version: str = "0.5.7",
    ) -> None:
        """Run the LangSmith server locally.

        Args:
            openai_api_key: The OpenAI API key to use for LangSmith
                If not provided, the OpenAI API Key will be read from the
                OPENAI_API_KEY environment variable. If neither are provided,
                some features of LangSmith will not be available.
            langsmith_license_key: The LangSmith license key to use for LangSmith
                If not provided, the LangSmith license key will be read from the
                LANGSMITH_LICENSE_KEY environment variable. If neither are provided,
                Langsmith will not start up.
            version: The LangSmith version to use for LangSmith. Defaults to latest.
        """
        # Keys are passed to the compose stack via the environment.
        if openai_api_key is not None:
            os.environ["OPENAI_API_KEY"] = openai_api_key
        if langsmith_license_key is not None:
            os.environ["LANGSMITH_LICENSE_KEY"] = langsmith_license_key
        self.pull(version=version)
        self._start_local()

    def stop(self, clear_volumes: bool = False) -> None:
        """Stop the LangSmith server."""
        cmd = [
            *self.docker_compose_command,
            "-f",
            str(self.docker_compose_file),
            "down",
        ]
        if clear_volumes:
            # Destructive: require interactive confirmation before wiping
            # cached containers and volumes.
            confirm = input(
                "You are about to delete all the locally cached "
                "LangSmith containers and volumes. "
                "This operation cannot be undone. Are you sure? [y/N]"
            )
            if confirm.lower() != "y":
                print("Aborting.")
                return
            cmd.append("--volumes")

        subprocess.run(cmd)

    def logs(self) -> None:
        """Print the logs from the LangSmith server."""
        subprocess.run(
            [
                *self.docker_compose_command,
                "-f",
                str(self.docker_compose_file),
                "logs",
            ]
        )

    def status(self) -> None:
        """Provide information about the status of the LangSmith server."""
        command = [
            *self.docker_compose_command,
            "-f",
            str(self.docker_compose_file),
            "ps",
            "--format",
            "json",
        ]

        result = subprocess.run(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        try:
            command_stdout = result.stdout.decode("utf-8")
            services_status = json.loads(command_stdout)
        except json.JSONDecodeError:
            logger.error("Error checking LangSmith server status.")
            return
        if services_status:
            logger.info("The LangSmith server is currently running.")
            pprint_services(services_status)
        else:
            logger.info("The LangSmith server is not running.")
            return
def env() -> None:
    """Print the runtime environment information."""
    info = ls_env.get_runtime_environment()
    info.update(ls_env.get_docker_environment())
    info.update(ls_env.get_langchain_env_vars())

    # Pad keys to the widest one so values line up in a single column.
    max_key_length = max(len(key) for key in info.keys())
    logger.info("LangChain Environment:")
    for k, v in info.items():
        logger.info(f"{k:{max_key_length}}: {v}")
def main() -> None:
    """Main entrypoint for the CLI."""
    print("BY USING THIS SOFTWARE YOU AGREE TO THE TERMS OF SERVICE AT:")
    print("https://smith.langchain.com/terms-of-service.pdf")
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(description="LangSmith CLI commands")

    # One command object backs every subcommand below.
    server_command = LangSmithCommand()

    # start: pull images for the requested version and bring the stack up.
    server_start_parser = subparsers.add_parser(
        "start", description="Start the LangSmith server."
    )
    server_start_parser.add_argument(
        "--openai-api-key",
        default=os.getenv("OPENAI_API_KEY"),
        help="The OpenAI API key to use for LangSmith."
        " If not provided, the OpenAI API Key will be read from the"
        " OPENAI_API_KEY environment variable. If neither are provided,"
        " some features of LangSmith will not be available.",
    )
    server_start_parser.add_argument(
        "--langsmith-license-key",
        default=os.getenv("LANGSMITH_LICENSE_KEY"),
        help="The LangSmith license key to use for LangSmith."
        " If not provided, the LangSmith License Key will be read from the"
        " LANGSMITH_LICENSE_KEY environment variable. If neither are provided,"
        " the Langsmith application will not spin up.",
    )
    server_start_parser.add_argument(
        "--version",
        default="0.5.7",
        help="The LangSmith version to use for LangSmith. Defaults to 0.5.7.",
    )
    server_start_parser.set_defaults(
        func=lambda args: server_command.start(
            openai_api_key=args.openai_api_key,
            langsmith_license_key=args.langsmith_license_key,
            version=args.version,
        )
    )

    # stop: tear the stack down, optionally removing cached volumes.
    server_stop_parser = subparsers.add_parser(
        "stop", description="Stop the LangSmith server."
    )
    server_stop_parser.add_argument(
        "--clear-volumes",
        action="store_true",
        help="Delete all the locally cached LangSmith containers and volumes.",
    )
    server_stop_parser.set_defaults(
        func=lambda args: server_command.stop(clear_volumes=args.clear_volumes)
    )

    # pull: fetch images without starting anything.
    server_pull_parser = subparsers.add_parser(
        "pull", description="Pull the latest LangSmith images."
    )
    server_pull_parser.add_argument(
        "--version",
        default="0.5.7",
        help="The LangSmith version to use for LangSmith. Defaults to 0.5.7.",
    )
    server_pull_parser.set_defaults(
        func=lambda args: server_command.pull(version=args.version)
    )

    # logs / status: inspect the running stack.
    server_logs_parser = subparsers.add_parser(
        "logs", description="Show the LangSmith server logs."
    )
    server_logs_parser.set_defaults(func=lambda args: server_command.logs())
    server_status_parser = subparsers.add_parser(
        "status", description="Show the LangSmith server status."
    )
    server_status_parser.set_defaults(func=lambda args: server_command.status())

    # env: dump the local runtime environment info.
    env_parser = subparsers.add_parser("env")
    env_parser.set_defaults(func=lambda args: env())

    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No subcommand supplied — print usage instead of crashing.
        parser.print_help()
        return
    args.func(args)
# Allow the CLI to be run directly as a script.
if __name__ == "__main__":
    main()
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/cli/users.xml | <clickhouse>
<users>
<default>
<access_management>1</access_management>
<named_collection_control>1</named_collection_control>
<show_named_collections>1</show_named_collections>
<show_named_collections_secrets>1</show_named_collections_secrets>
<profile>default</profile>
</default>
</users>
<profiles>
<default>
<async_insert>1</async_insert>
<async_insert_max_data_size>2000000</async_insert_max_data_size>
<wait_for_async_insert>0</wait_for_async_insert>
<parallel_view_processing>1</parallel_view_processing>
<allow_simdjson>0</allow_simdjson>
<lightweight_deletes_sync>0</lightweight_deletes_sync>
</default>
</profiles>
</clickhouse>
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/cli/.env.example | # Don't change this file. Instead, copy it to .env and change the values there. The default values will work out of the box as long as you provide your license key.
_LANGSMITH_IMAGE_VERSION=0.8.12 # Change to the desired Langsmith image version
LANGSMITH_LICENSE_KEY=your-license-key # Change to your Langsmith license key
AUTH_TYPE=none # Set to oauth if you want to use OAuth2.0 with PKCE. Set to mixed for basic auth or OAuth2.0 with OAuth2.0 client secret
OAUTH_CLIENT_ID=your-client-id # Required if AUTH_TYPE=oauth or mixed with OAuth2.0 with OAuth2.0 client secret
OAUTH_ISSUER_URL=https://your-issuer-url # Required if AUTH_TYPE=oauth or mixed with OAuth2.0 with OAuth2.0 client secret
OAUTH_CLIENT_SECRET=your-client-secret # Required if AUTH_TYPE=mixed with OAuth2.0 with OAuth2.0 client secret
LANGSMITH_URL=http://localhost:1980 # Change to your hosted Langsmith URL. Required if AUTH_TYPE=mixed with OAuth2.0 client secret
API_KEY_SALT=super # Change to your desired API key salt. Can be any random value. Must be set if AUTH_TYPE=oauth
POSTGRES_DATABASE_URI=postgres:postgres@langchain-db:5432/postgres # Change to your database URI if using external postgres. Otherwise, leave it as is
REDIS_DATABASE_URI=redis://langchain-redis:6379 # Change to your Redis URI if using external Redis. Otherwise, leave it as is
LOG_LEVEL=warning # Change to your desired log level
MAX_ASYNC_JOBS_PER_WORKER=10 # Change to your desired maximum async jobs per worker. We recommend 10/suggest spinning up more replicas of the queue worker if you need more throughput
ASYNCPG_POOL_MAX_SIZE=3 # Change the PG pool size based off your pg instance/requirements.
CLICKHOUSE_HOST=langchain-clickhouse # Change to your Clickhouse host if using external Clickhouse. Otherwise, leave it as is
CLICKHOUSE_USER=default # Change to your Clickhouse user if needed
CLICKHOUSE_DB=default # Change to your Clickhouse database if needed
CLICKHOUSE_PORT=8123 # Change to your Clickhouse port if needed
CLICKHOUSE_TLS=false # Change to true if you are using TLS to connect to Clickhouse. Otherwise, leave it as is
CLICKHOUSE_PASSWORD=password # Change to your Clickhouse password if needed
CLICKHOUSE_NATIVE_PORT=9000 # Change to your Clickhouse native port if needed
ORG_CREATION_DISABLED=false # Set to true if you want to disable org creation
WORKSPACE_SCOPE_ORG_INVITES_ENABLED=false # Set to true if you want to enable workspace-scoped org invites
PERSONAL_ORGS_DISABLED=false # Set to true if you want to disable personal orgs
TTL_ENABLED=true # Set to true if you want to enable TTL for your data
SHORT_LIVED_TTL_SECONDS=1209600 # Set to your desired TTL for short-lived traces. Default is 14 days
LONG_LIVED_TTL_SECONDS=34560000 # Set to your desired TTL for long-lived traces. Default is 400 days
BLOB_STORAGE_ENABLED=false # Set to true if you want to enable blob storage
BLOB_STORAGE_BUCKET_NAME=langsmith-blob-storage # Change to your desired blob storage bucket name
BLOB_STORAGE_API_URL=https://s3.us-west-2.amazonaws.com # Change to your desired blob storage API URL
BLOB_STORAGE_ACCESS_KEY=your-access-key # Change to your desired blob storage access key
BLOB_STORAGE_ACCESS_KEY_SECRET=your-access-key-secret # Change to your desired blob storage access key secret
CH_SEARCH_ENABLED=true # Set to false if you do not want to store tokenized inputs/outputs in clickhouse
BASIC_AUTH_ENABLED=false # Set to true if you want to enable basic auth
BASIC_AUTH_JWT_SECRET=your-jwt-secret # Change to your desired basic auth JWT secret
INITIAL_ORG_ADMIN_EMAIL=your-email # Change to your desired initial org admin email. Only used if BASIC_AUTH_ENABLED=true
INITIAL_ORG_ADMIN_PASSWORD=your-password # Change to your desired initial org admin password. Only used if BASIC_AUTH_ENABLED=true
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/cli/docker-compose.yaml | services:
langchain-playground:
image: langchain/langsmith-playground:${_LANGSMITH_IMAGE_VERSION:-0.8.12}
ports:
- 3001:3001
environment:
- PORT=3001
- LANGCHAIN_ENV=local_docker
- LOG_LEVEL=${LOG_LEVEL:-info}
langchain-frontend:
image: langchain/langsmith-frontend:${_LANGSMITH_IMAGE_VERSION:-0.8.12}
environment:
- VITE_BACKEND_AUTH_TYPE=${AUTH_TYPE:-none}
- VITE_BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false}
- VITE_OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID}
- VITE_OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL}
ports:
- 1980:1980
depends_on:
- langchain-backend
- langchain-playground
langchain-ace-backend:
image: langchain/langsmith-ace-backend:${_LANGSMITH_IMAGE_VERSION:-0.8.12}
ports:
- 1987:1987
environment:
- PORT=1987
command:
- "deno"
- "run"
- "--unstable-worker-options"
- "--allow-env"
- "--allow-net=0.0.0.0:1987"
- "--node-modules-dir"
- "-R"
- "src/main.ts"
- "-R"
- "src/python_worker.ts"
langchain-backend:
image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.8.12}
environment:
- PORT=1984
- LANGCHAIN_ENV=local_docker
- LANGSMITH_URL=${LANGSMITH_URL:-http://langchain-frontend:1980}
- GO_ENDPOINT=http://langchain-platform-backend:1986
- SMITH_BACKEND_ENDPOINT=${SMITH_BACKEND_ENDPOINT:-http://langchain-backend:1984}
- LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY}
- LOG_LEVEL=${LOG_LEVEL:-info}
- AUTH_TYPE=${AUTH_TYPE:-none}
- OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID}
- OAUTH_CLIENT_SECRET=${OAUTH_CLIENT_SECRET}
- OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL}
- API_KEY_SALT=${API_KEY_SALT}
- X_SERVICE_AUTH_JWT_SECRET=${API_KEY_SALT}
- POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres}
- REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379}
- CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse}
- CLICKHOUSE_USER=${CLICKHOUSE_USER:-default}
- CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password}
- CLICKHOUSE_DB=${CLICKHOUSE_DB:-default}
- CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123}
- CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false}
- FF_ORG_CREATION_DISABLED=${ORG_CREATION_DISABLED:-false}
- FF_TRACE_TIERS_ENABLED=${TTL_ENABLED:-true}
- FF_UPGRADE_TRACE_TIER_ENABLED=${TTL_ENABLED:-true}
- FF_S3_STORAGE_ENABLED=${BLOB_STORAGE_ENABLED:-false}
- S3_BUCKET_NAME=${BLOB_STORAGE_BUCKET_NAME:-langsmith-s3-assets}
- S3_RUN_MANIFEST_BUCKET_NAME=${BLOB_STORAGE_BUCKET_NAME:-langsmith-s3-assets}
- S3_API_URL=${BLOB_STORAGE_API_URL:-https://s3.us-west-2.amazonaws.com}
- S3_ACCESS_KEY=${BLOB_STORAGE_ACCESS_KEY}
- S3_ACCESS_KEY_SECRET=${BLOB_STORAGE_ACCESS_KEY_SECRET}
- FF_CH_SEARCH_ENABLED=${CH_SEARCH_ENABLED:-true}
- BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false}
- BASIC_AUTH_JWT_SECRET=${BASIC_AUTH_JWT_SECRET}
- INITIAL_ORG_ADMIN_EMAIL=${INITIAL_ORG_ADMIN_EMAIL}
- INITIAL_ORG_ADMIN_PASSWORD=${INITIAL_ORG_ADMIN_PASSWORD}
ports:
- 1984:1984
depends_on:
langchain-db:
condition: service_healthy
langchain-redis:
condition: service_healthy
clickhouse-setup:
condition: service_completed_successfully
postgres-setup:
condition: service_completed_successfully
restart: always
langchain-platform-backend:
image: langchain/langsmith-go-backend:${_LANGSMITH_IMAGE_VERSION:-0.8.12}
environment:
- PORT=1986
- LANGCHAIN_ENV=local_docker
- LANGSMITH_URL=${LANGSMITH_URL:-http://langchain-frontend:1980}
- SMITH_BACKEND_ENDPOINT=${SMITH_BACKEND_ENDPOINT:-http://langchain-backend:1984}
- LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY}
- OPENAI_API_KEY=${OPENAI_API_KEY}
- LOG_LEVEL=${LOG_LEVEL:-warning}
- AUTH_TYPE=${AUTH_TYPE:-none}
- OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID}
- OAUTH_CLIENT_SECRET=${OAUTH_CLIENT_SECRET}
- OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL}
- API_KEY_SALT=${API_KEY_SALT}
- X_SERVICE_AUTH_JWT_SECRET=${API_KEY_SALT}
- POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres}
- REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379}
- BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false}
- BASIC_AUTH_JWT_SECRET=${BASIC_AUTH_JWT_SECRET}
ports:
- 1986:1986
depends_on:
langchain-db:
condition: service_healthy
langchain-redis:
condition: service_healthy
clickhouse-setup:
condition: service_completed_successfully
postgres-setup:
condition: service_completed_successfully
restart: always
langchain-queue:
image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.8.12}
environment:
- LANGCHAIN_ENV=local_docker
- GO_ENDPOINT=http://langchain-platform-backend:1986
- SMITH_BACKEND_ENDPOINT=http://langchain-backend:1984
- LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY}
- LOG_LEVEL=${LOG_LEVEL:-info}
- AUTH_TYPE=${AUTH_TYPE:-none}
- OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID}
- OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL}
- API_KEY_SALT=${API_KEY_SALT}
- X_SERVICE_AUTH_JWT_SECRET=${API_KEY_SALT}
- POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres}
- REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379}
- CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse}
- CLICKHOUSE_USER=${CLICKHOUSE_USER:-default}
- CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password}
- CLICKHOUSE_DB=${CLICKHOUSE_DB:-default}
- CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123}
- CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false}
- FF_ORG_CREATION_DISABLED=${ORG_CREATION_DISABLED:-false}
- FF_TRACE_TIERS_ENABLED=${TTL_ENABLED:-true}
- FF_UPGRADE_TRACE_TIER_ENABLED=${TTL_ENABLED:-true}
- FF_S3_STORAGE_ENABLED=${BLOB_STORAGE_ENABLED:-false}
- S3_BUCKET_NAME=${BLOB_STORAGE_BUCKET_NAME:-langsmith-s3-assets}
- S3_RUN_MANIFEST_BUCKET_NAME=${BLOB_STORAGE_BUCKET_NAME:-langsmith-s3-assets}
- S3_API_URL=${BLOB_STORAGE_API_URL:-https://s3.us-west-2.amazonaws.com}
- S3_ACCESS_KEY=${BLOB_STORAGE_ACCESS_KEY}
- S3_ACCESS_KEY_SECRET=${BLOB_STORAGE_ACCESS_KEY_SECRET}
- FF_CH_SEARCH_ENABLED=${CH_SEARCH_ENABLED:-true}
- BASIC_AUTH_ENABLED=${BASIC_AUTH_ENABLED:-false}
- BASIC_AUTH_JWT_SECRET=${BASIC_AUTH_JWT_SECRET}
command:
- "saq"
- "app.workers.queues.single_queue_worker.settings"
- "--quiet"
depends_on:
langchain-db:
condition: service_healthy
langchain-redis:
condition: service_healthy
clickhouse-setup:
condition: service_completed_successfully
postgres-setup:
condition: service_completed_successfully
restart: always
langchain-db:
image: postgres:14.7
command:
[
"postgres",
"-c",
"log_min_messages=WARNING",
"-c",
"client_min_messages=WARNING",
]
environment:
- POSTGRES_PASSWORD=postgres
- POSTGRES_USER=postgres
- POSTGRES_DB=postgres
volumes:
- langchain-db-data:/var/lib/postgresql/data
ports:
- 5433:5432
healthcheck:
test: ["CMD", "pg_isready", "-U", "postgres"]
interval: 2s
timeout: 2s
retries: 30
langchain-redis:
image: redis:7
ports:
- 63791:6379
volumes:
- langchain-redis-data:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 2s
timeout: 2s
retries: 30
langchain-clickhouse:
image: clickhouse/clickhouse-server:24.5
user: "101:101"
restart: always
environment:
- CLICKHOUSE_DB=${CLICKHOUSE_DB:-default}
- CLICKHOUSE_USER=${CLICKHOUSE_USER:-default}
- CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password}
volumes:
- langchain-clickhouse-data:/var/lib/clickhouse
- ./users.xml:/etc/clickhouse-server/users.d/users.xml
ports:
- 8124:8123
- 9001:9000
healthcheck:
test: ["CMD", "clickhouse-client", "--query", "SELECT 1"]
interval: 2s
timeout: 2s
retries: 30
clickhouse-setup:
image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.8.12}
depends_on:
langchain-clickhouse:
condition: service_healthy
restart: "on-failure:10"
environment:
- CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse}
- CLICKHOUSE_USER=${CLICKHOUSE_USER:-default}
- CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password}
- CLICKHOUSE_DB=${CLICKHOUSE_DB:-default}
- CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123}
- CLICKHOUSE_NATIVE_PORT=${CLICKHOUSE_NATIVE_PORT:-9000}
- CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false}
command:
[
"bash",
"scripts/wait_for_clickhouse_and_migrate.sh"
]
postgres-setup:
image: langchain/langsmith-backend:${_LANGSMITH_IMAGE_VERSION:-0.8.12}
depends_on:
langchain-db:
condition: service_healthy
environment:
- LANGCHAIN_ENV=local_docker
- LANGSMITH_LICENSE_KEY=${LANGSMITH_LICENSE_KEY}
- OPENAI_API_KEY=${OPENAI_API_KEY}
- LOG_LEVEL=${LOG_LEVEL:-warning}
- AUTH_TYPE=${AUTH_TYPE:-none}
- OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID}
- OAUTH_ISSUER_URL=${OAUTH_ISSUER_URL}
- API_KEY_SALT=${API_KEY_SALT}
- POSTGRES_DATABASE_URI=${POSTGRES_DATABASE_URI:-postgres:postgres@langchain-db:5432/postgres}
- REDIS_DATABASE_URI=${REDIS_DATABASE_URI:-redis://langchain-redis:6379}
- MAX_ASYNC_JOBS_PER_WORKER=${MAX_ASYNC_JOBS_PER_WORKER:-10}
- ASYNCPG_POOL_MAX_SIZE=${ASYNCPG_POOL_MAX_SIZE:-3}
- CLICKHOUSE_HOST=${CLICKHOUSE_HOST:-langchain-clickhouse}
- CLICKHOUSE_USER=${CLICKHOUSE_USER:-default}
- CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD:-password}
- CLICKHOUSE_DB=${CLICKHOUSE_DB:-default}
- CLICKHOUSE_PORT=${CLICKHOUSE_PORT:-8123}
- CLICKHOUSE_NATIVE_PORT=${CLICKHOUSE_NATIVE_PORT:-9000}
- CLICKHOUSE_TLS=${CLICKHOUSE_TLS:-false}
restart: "on-failure:10"
command:
[
"bash",
"-c",
"alembic upgrade head",
]
volumes:
langchain-db-data:
langchain-redis-data:
langchain-clickhouse-data:
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/wrappers/_openai.py | from __future__ import annotations
import functools
import logging
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Any,
Callable,
DefaultDict,
Dict,
List,
Mapping,
Optional,
Type,
TypeVar,
Union,
)
from typing_extensions import TypedDict
from langsmith import client as ls_client
from langsmith import run_helpers
from langsmith.schemas import InputTokenDetails, OutputTokenDetails, UsageMetadata
if TYPE_CHECKING:
from openai import AsyncOpenAI, OpenAI
from openai.types.chat.chat_completion_chunk import (
ChatCompletionChunk,
Choice,
ChoiceDeltaToolCall,
)
from openai.types.completion import Completion
# Any is used since it may work with Azure or other providers
C = TypeVar("C", bound=Union["OpenAI", "AsyncOpenAI", Any])
logger = logging.getLogger(__name__)
@functools.lru_cache
def _get_not_given() -> Optional[Type]:
try:
from openai._types import NotGiven
return NotGiven
except ImportError:
return None
def _strip_not_given(d: dict) -> dict:
    """Drop entries whose value is openai's ``NotGiven`` sentinel.

    Returns ``d`` unchanged when openai is unavailable or anything fails.
    """
    try:
        sentinel = _get_not_given()
        if sentinel is None:
            return d
        return {key: val for key, val in d.items() if not isinstance(val, sentinel)}
    except Exception as e:
        logger.error(f"Error stripping NotGiven: {e}")
        return d
def _infer_invocation_params(model_type: str, kwargs: dict):
    """Build the ``ls_``-prefixed invocation-parameter dict for tracing.

    A scalar string ``stop`` value is normalized into a one-element list.
    """
    cleaned = _strip_not_given(kwargs)
    stop = cleaned.get("stop")
    if isinstance(stop, str) and stop:
        stop = [stop]
    return {
        "ls_provider": "openai",
        "ls_model_type": model_type,
        "ls_model_name": cleaned.get("model"),
        "ls_temperature": cleaned.get("temperature"),
        "ls_max_tokens": cleaned.get("max_tokens"),
        "ls_stop": stop,
    }
def _reduce_choices(choices: List[Choice]) -> dict:
    """Merge streamed deltas for a single choice index into one message dict.

    ``choices`` is the list of chunk-level `Choice` deltas that share one
    index (grouped by `_reduce_chat`).  Content, function-call fragments and
    tool-call fragments are concatenated in stream order.
    """
    reversed_choices = list(reversed(choices))
    message: Dict[str, Any] = {
        "role": "assistant",
        "content": "",
    }
    # The role is emitted once per stream; take the last chunk that set it.
    for c in reversed_choices:
        if c.delta.role:
            message["role"] = c.delta.role
            break
    tool_calls: DefaultDict[int, List[ChoiceDeltaToolCall]] = defaultdict(list)
    for c in choices:
        if c.delta.content:
            message["content"] += c.delta.content
        # Legacy (deprecated) function_call deltas: concatenate name/arguments.
        if c.delta.function_call:
            if not message.get("function_call"):
                message["function_call"] = {"name": "", "arguments": ""}
            if c.delta.function_call.name:
                message["function_call"]["name"] += c.delta.function_call.name
            if c.delta.function_call.arguments:
                message["function_call"]["arguments"] += c.delta.function_call.arguments
        if c.delta.tool_calls:
            for tool_call in c.delta.tool_calls:
                # NOTE(review): chunks are keyed by the *choice* index, not
                # tool_call.index; since all choices here share one index this
                # appears to merge parallel tool calls into a single entry —
                # TODO confirm whether tool_call.index was intended.
                tool_calls[c.index].append(tool_call)
    if tool_calls:
        # Pre-size the list, then fill each slot by its index key.
        message["tool_calls"] = [None for _ in tool_calls.keys()]
        for index, tool_call_chunks in tool_calls.items():
            message["tool_calls"][index] = {
                "index": index,
                "id": next((c.id for c in tool_call_chunks if c.id), None),
                "type": next((c.type for c in tool_call_chunks if c.type), None),
            }
            for chunk in tool_call_chunks:
                if chunk.function:
                    if not message["tool_calls"][index].get("function"):
                        message["tool_calls"][index]["function"] = {
                            "name": "",
                            "arguments": "",
                        }
                    if chunk.function.name:
                        fn_ = message["tool_calls"][index]["function"]
                        fn_["name"] += chunk.function.name
                    if chunk.function.arguments:
                        fn_ = message["tool_calls"][index]["function"]
                        fn_["arguments"] += chunk.function.arguments
    return {
        "index": choices[0].index,
        # finish_reason arrives on the final chunk; search from the end.
        "finish_reason": next(
            (c.finish_reason for c in reversed_choices if c.finish_reason),
            None,
        ),
        "message": message,
    }
def _reduce_chat(all_chunks: List[ChatCompletionChunk]) -> dict:
    """Aggregate a streamed chat completion into a single response dict.

    Choices are grouped by index and merged via `_reduce_choices`; the last
    chunk's dump supplies the top-level fields (id, model, usage, ...).
    Returns a minimal empty-message payload when no chunks were received.
    """
    choices_by_index: DefaultDict[int, List[Choice]] = defaultdict(list)
    for chunk in all_chunks:
        for choice in chunk.choices:
            choices_by_index[choice.index].append(choice)
    if all_chunks:
        d = all_chunks[-1].model_dump()
        d["choices"] = [
            _reduce_choices(choices) for choices in choices_by_index.values()
        ]
    else:
        d = {"choices": [{"message": {"role": "assistant", "content": ""}}]}
    # streamed outputs don't go through `process_outputs`
    # so we need to flatten metadata here
    oai_token_usage = d.pop("usage", None)
    d["usage_metadata"] = (
        _create_usage_metadata(oai_token_usage) if oai_token_usage else None
    )
    return d
def _reduce_completions(all_chunks: List[Completion]) -> dict:
all_content = []
for chunk in all_chunks:
content = chunk.choices[0].text
if content is not None:
all_content.append(content)
content = "".join(all_content)
if all_chunks:
d = all_chunks[-1].model_dump()
d["choices"] = [{"text": content}]
else:
d = {"choices": [{"text": content}]}
return d
def _create_usage_metadata(oai_token_usage: dict) -> UsageMetadata:
    """Convert an OpenAI ``usage`` payload into LangSmith ``UsageMetadata``.

    Missing counts default to 0; total falls back to input + output.  Detail
    entries that are absent are omitted rather than recorded as None.
    """
    prompt_details = oai_token_usage.get("prompt_tokens_details") or {}
    completion_details = oai_token_usage.get("completion_tokens_details") or {}
    input_tokens = oai_token_usage.get("prompt_tokens") or 0
    output_tokens = oai_token_usage.get("completion_tokens") or 0
    total_tokens = oai_token_usage.get("total_tokens") or input_tokens + output_tokens
    input_details: dict = {
        "audio": prompt_details.get("audio_tokens"),
        "cache_read": prompt_details.get("cached_tokens"),
    }
    output_details: dict = {
        "audio": completion_details.get("audio_tokens"),
        "reasoning": completion_details.get("reasoning_tokens"),
    }
    return UsageMetadata(
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        total_tokens=total_tokens,
        input_token_details=InputTokenDetails(
            **{k: v for k, v in input_details.items() if v is not None}
        ),
        output_token_details=OutputTokenDetails(
            **{k: v for k, v in output_details.items() if v is not None}
        ),
    )
def _process_chat_completion(outputs: Any):
    """Flatten a ChatCompletion response and attach LangSmith usage metadata.

    Falls back to wrapping the raw object when dumping/conversion fails.
    """
    try:
        rdict = outputs.model_dump()
        oai_token_usage = rdict.pop("usage", None)
        if oai_token_usage:
            rdict["usage_metadata"] = _create_usage_metadata(oai_token_usage)
        else:
            rdict["usage_metadata"] = None
        return rdict
    except BaseException as e:
        logger.debug(f"Error processing chat completion: {e}")
        return {"output": outputs}
def _get_wrapper(
    original_create: Callable,
    name: str,
    reduce_fn: Callable,
    tracing_extra: Optional[TracingExtra] = None,
    invocation_params_fn: Optional[Callable] = None,
    process_outputs: Optional[Callable] = None,
) -> Callable:
    """Wrap an OpenAI ``create`` method in a LangSmith ``traceable`` decorator.

    Returns an async wrapper when ``original_create`` is async, otherwise a
    sync one.  ``reduce_fn`` is only applied for streaming calls, where the
    chunks must be aggregated into one run output.
    """
    textra = tracing_extra or {}
    @functools.wraps(original_create)
    def create(*args, stream: bool = False, **kwargs):
        decorator = run_helpers.traceable(
            name=name,
            run_type="llm",
            reduce_fn=reduce_fn if stream else None,
            process_inputs=_strip_not_given,
            _invocation_params_fn=invocation_params_fn,
            process_outputs=process_outputs,
            **textra,
        )
        return decorator(original_create)(*args, stream=stream, **kwargs)
    @functools.wraps(original_create)
    async def acreate(*args, stream: bool = False, **kwargs):
        # NOTE(review): only the async path strips NotGiven from kwargs before
        # calling the underlying client (the sync path passes them through and
        # strips only for trace inputs) — confirm the asymmetry is intended.
        kwargs = _strip_not_given(kwargs)
        decorator = run_helpers.traceable(
            name=name,
            run_type="llm",
            reduce_fn=reduce_fn if stream else None,
            process_inputs=_strip_not_given,
            _invocation_params_fn=invocation_params_fn,
            process_outputs=process_outputs,
            **textra,
        )
        return await decorator(original_create)(*args, stream=stream, **kwargs)
    return acreate if run_helpers.is_async(original_create) else create
class TracingExtra(TypedDict, total=False):
    """Optional tracing configuration forwarded to ``run_helpers.traceable``.

    All keys are optional (``total=False``).
    """

    metadata: Optional[Mapping[str, Any]]  # extra metadata attached to runs
    tags: Optional[List[str]]  # tags attached to runs
    client: Optional[ls_client.Client]  # LangSmith client used for tracing
def wrap_openai(
    client: C,
    *,
    tracing_extra: Optional[TracingExtra] = None,
    chat_name: str = "ChatOpenAI",
    completions_name: str = "OpenAI",
) -> C:
    """Patch the OpenAI client to make it traceable.

    Replaces ``client.chat.completions.create`` and
    ``client.completions.create`` in place with traced wrappers, then returns
    the same client object.

    Args:
        client (Union[OpenAI, AsyncOpenAI]): The client to patch.
        tracing_extra (Optional[TracingExtra], optional): Extra tracing information.
            Defaults to None.
        chat_name (str, optional): The run name for the chat completions endpoint.
            Defaults to "ChatOpenAI".
        completions_name (str, optional): The run name for the completions endpoint.
            Defaults to "OpenAI".
    Returns:
        Union[OpenAI, AsyncOpenAI]: The patched client.
    """
    # Chat endpoint: stream chunks reduced via _reduce_chat, non-stream
    # responses flattened via _process_chat_completion.
    client.chat.completions.create = _get_wrapper(  # type: ignore[method-assign]
        client.chat.completions.create,
        chat_name,
        _reduce_chat,
        tracing_extra=tracing_extra,
        invocation_params_fn=functools.partial(_infer_invocation_params, "chat"),
        process_outputs=_process_chat_completion,
    )
    # Legacy completions endpoint; no process_outputs here, so non-stream
    # responses are recorded as returned by the client.
    client.completions.create = _get_wrapper(  # type: ignore[method-assign]
        client.completions.create,
        completions_name,
        _reduce_completions,
        tracing_extra=tracing_extra,
        invocation_params_fn=functools.partial(_infer_invocation_params, "llm"),
    )
    return client
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/wrappers/__init__.py | """This module provides convenient tracing wrappers for popular libraries."""
from langsmith.wrappers._openai import wrap_openai
__all__ = ["wrap_openai"]
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/env/_runtime_env.py | """Environment information."""
import functools
import logging
import os
import platform
import subprocess
from typing import Dict, List, Optional, Union
from langsmith.utils import get_docker_compose_command
from langsmith.env._git import exec_git
try:
# psutil is an optional dependency
import psutil
_PSUTIL_AVAILABLE = True
except ImportError:
_PSUTIL_AVAILABLE = False
logger = logging.getLogger(__name__)
def get_runtime_and_metrics() -> dict:
    """Get the runtime information as well as metrics."""
    combined = dict(get_runtime_environment())
    combined.update(get_system_metrics())
    return combined
def get_system_metrics() -> Dict[str, Union[float, dict]]:
    """Get CPU and other performance metrics.

    Returns an empty dict when psutil is unavailable.  On the first runtime
    failure psutil is permanently disabled for this process (the module-level
    ``_PSUTIL_AVAILABLE`` flag is cleared).
    """
    global _PSUTIL_AVAILABLE
    if not _PSUTIL_AVAILABLE:
        return {}
    try:
        process = psutil.Process(os.getpid())
        metrics: Dict[str, Union[float, dict]] = {}
        # oneshot() caches process info so the reads below hit it only once.
        with process.oneshot():
            mem_info = process.memory_info()
            metrics["thread_count"] = float(process.num_threads())
            metrics["mem"] = {
                "rss": float(mem_info.rss),
            }
            ctx_switches = process.num_ctx_switches()
            cpu_times = process.cpu_times()
            metrics["cpu"] = {
                "time": {
                    "sys": cpu_times.system,
                    "user": cpu_times.user,
                },
                "ctx_switches": {
                    "voluntary": float(ctx_switches.voluntary),
                    "involuntary": float(ctx_switches.involuntary),
                },
                "percent": process.cpu_percent(),
            }
        return metrics
    except Exception as e:
        # If psutil is installed but not compatible with the build,
        # we'll just cease further attempts to use it.
        _PSUTIL_AVAILABLE = False
        logger.debug("Failed to get system metrics: %s", e)
        return {}
@functools.lru_cache(maxsize=1)
def get_runtime_environment() -> dict:
    """Get information about the environment (cached after the first call)."""
    # Lazy import to avoid circular imports
    from langsmith import __version__

    shas = get_release_shas()
    info = {
        "sdk": "langsmith-py",
        "sdk_version": __version__,
        "library": "langsmith",
        "platform": platform.platform(),
        "runtime": "python",
        "py_implementation": platform.python_implementation(),
        "runtime_version": platform.python_version(),
        "langchain_version": get_langchain_environment(),
        "langchain_core_version": get_langchain_core_version(),
    }
    return {**info, **shas}
@functools.lru_cache(maxsize=1)
def get_langchain_environment() -> Optional[str]:
    """Return the installed ``langchain`` version, or None if unavailable.

    Cached after the first call.  Catches ``Exception`` (rather than the
    original bare ``except``) so KeyboardInterrupt/SystemExit still propagate;
    import errors and a missing ``__version__`` attribute both yield None.
    """
    try:
        import langchain  # type: ignore

        return langchain.__version__
    except Exception:  # noqa
        return None
@functools.lru_cache(maxsize=1)
def get_langchain_core_version() -> Optional[str]:
    """Return the installed ``langchain_core`` version, or None if not installed."""
    try:
        import langchain_core  # type: ignore
    except ImportError:
        return None
    return langchain_core.__version__
@functools.lru_cache(maxsize=1)
def get_docker_version() -> Optional[str]:
    """Return the output of ``docker --version`` (cached).

    Returns the string "unknown" when the docker binary is not on PATH, and
    None on any other failure.  The redundant function-local
    ``import subprocess`` was removed — the module already imports it.
    """
    try:
        docker_version = (
            subprocess.check_output(["docker", "--version"]).decode("utf-8").strip()
        )
    except FileNotFoundError:
        docker_version = "unknown"
    except Exception:  # noqa  # narrowed from bare except
        return None
    return docker_version
@functools.lru_cache(maxsize=1)
def get_docker_compose_version() -> Optional[str]:
    """Return ``docker-compose --version`` output, "unknown", or None (cached)."""
    try:
        output = subprocess.check_output(["docker-compose", "--version"])
        return output.decode("utf-8").strip()
    except FileNotFoundError:
        return "unknown"
    except:  # noqa
        return None
@functools.lru_cache(maxsize=1)
def _get_compose_command() -> Optional[List[str]]:
    """Resolve the docker compose CLI invocation (cached); None on failure."""
    try:
        return get_docker_compose_command()
    except ValueError as e:
        return [f"NOT INSTALLED: {e}"]
    except:  # noqa
        return None
@functools.lru_cache(maxsize=1)
def get_docker_environment() -> dict:
    """Get information about the environment (docker and compose versions)."""
    compose_command = _get_compose_command()
    if compose_command is not None:
        command_str = " ".join(compose_command)
    else:
        command_str = None
    return {
        "docker_version": get_docker_version(),
        "docker_compose_command": command_str,
        "docker_compose_version": get_docker_compose_version(),
    }
def get_langchain_env_vars() -> dict:
    """Retrieve the langchain environment variables, redacting secrets.

    Any ``LANGCHAIN_*`` variable whose name contains "key" is masked so that
    at most the first and last two characters remain visible.  Values of four
    characters or fewer are fully masked — the original formula
    ``v[:2] + "*" * (len(v) - 4) + v[-2:]`` leaked or duplicated characters
    for such short values.
    """
    env_vars = {k: v for k, v in os.environ.items() if k.startswith("LANGCHAIN_")}
    for key in list(env_vars):
        if "key" in key.lower():
            v = env_vars[key]
            if len(v) > 4:
                env_vars[key] = v[:2] + "*" * (len(v) - 4) + v[-2:]
            else:
                env_vars[key] = "*" * len(v)
    return env_vars
@functools.lru_cache(maxsize=1)
def get_langchain_env_var_metadata() -> dict:
    """Retrieve the langchain environment variables (cached after first call).

    Collects LANGCHAIN_*/LANGSMITH_* variables, excluding a fixed denylist
    plus anything whose name suggests a secret (key/secret/token).  A
    revision id is attached from LANGCHAIN_REVISION_ID when set, otherwise
    from ``git describe`` if available.
    """
    # Variables never exposed as metadata, even though they match the prefix.
    excluded = {
        "LANGCHAIN_API_KEY",
        "LANGCHAIN_ENDPOINT",
        "LANGCHAIN_TRACING_V2",
        "LANGCHAIN_PROJECT",
        "LANGCHAIN_SESSION",
        "LANGSMITH_RUNS_ENDPOINTS",
    }
    langchain_metadata = {
        k: v
        for k, v in os.environ.items()
        if (k.startswith("LANGCHAIN_") or k.startswith("LANGSMITH_"))
        and k not in excluded
        and "key" not in k.lower()
        and "secret" not in k.lower()
        and "token" not in k.lower()
    }
    # Prefer an explicitly-provided revision id; fall back to git describe.
    env_revision_id = langchain_metadata.pop("LANGCHAIN_REVISION_ID", None)
    if env_revision_id:
        langchain_metadata["revision_id"] = env_revision_id
    elif default_revision_id := _get_default_revision_id():
        langchain_metadata["revision_id"] = default_revision_id
    return langchain_metadata
@functools.lru_cache(maxsize=1)
def _get_default_revision_id() -> Optional[str]:
    """Get the default revision ID based on `git describe`."""
    try:
        described = exec_git(["describe", "--tags", "--always", "--dirty"])
    except BaseException:
        return None
    return described
@functools.lru_cache(maxsize=1)
def get_release_shas() -> Dict[str, str]:
    """Collect commit-SHA env vars set by common CI/hosting providers (cached)."""
    common_release_envs = [
        "VERCEL_GIT_COMMIT_SHA",
        "NEXT_PUBLIC_VERCEL_GIT_COMMIT_SHA",
        "COMMIT_REF",
        "RENDER_GIT_COMMIT",
        "CI_COMMIT_SHA",
        "CIRCLE_SHA1",
        "CF_PAGES_COMMIT_SHA",
        "REACT_APP_GIT_SHA",
        "SOURCE_VERSION",
        "GITHUB_SHA",
        "TRAVIS_COMMIT",
        "GIT_COMMIT",
        "BUILD_VCS_NUMBER",
        "bamboo_planRepository_revision",
        "Build.SourceVersion",
        "BITBUCKET_COMMIT",
        "DRONE_COMMIT_SHA",
        "SEMAPHORE_GIT_SHA",
        "BUILDKITE_COMMIT",
    ]
    return {
        env: os.environ[env]
        for env in common_release_envs
        if os.environ.get(env) is not None
    }
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/env/_git.py | """Fetch information about any current git repo."""
import functools
import logging
import subprocess
from typing import List, Optional, TypeVar
from typing_extensions import TypedDict
logger = logging.getLogger(__name__)
T = TypeVar("T")
def exec_git(command: List[str]) -> Optional[str]:
    """Run ``git <command>`` and return stripped stdout, or None on any failure.

    stderr is discarded; a missing git binary, a non-zero exit, or any other
    error all yield None.
    """
    full_command = ["git", *command]
    try:
        output = subprocess.check_output(
            full_command, encoding="utf-8", stderr=subprocess.DEVNULL
        )
    except BaseException:
        return None
    return output.strip()
class GitInfo(TypedDict, total=False):
    """Snapshot of the current git repository state; all keys optional."""

    repo_name: Optional[str]  # basename of `git rev-parse --show-toplevel`
    remote_url: Optional[str]  # `git remote get-url <remote>`
    commit: Optional[str]  # `git rev-parse HEAD`
    branch: Optional[str]  # `git rev-parse --abbrev-ref HEAD`
    author_name: Optional[str]  # `git log -1 --format=%an`
    author_email: Optional[str]  # `git log -1 --format=%ae`
    commit_time: Optional[str]  # unix timestamp string, `git log -1 --format=%ct`
    dirty: Optional[bool]  # True when `git status --porcelain` is non-empty
    tags: Optional[str]  # `git describe --tags ...` output
@functools.lru_cache(maxsize=1)
def get_git_info(remote: str = "origin") -> GitInfo:
    """Get information about the git repository.

    Cached after the first call; the ``remote`` argument is part of the cache
    key.  Returns an all-None GitInfo when not inside a git work tree.
    """
    if not exec_git(["rev-parse", "--is-inside-work-tree"]):
        return GitInfo(
            remote_url=None,
            commit=None,
            branch=None,
            author_name=None,
            author_email=None,
            commit_time=None,
            dirty=None,
            tags=None,
            repo_name=None,
        )
    return {
        "remote_url": exec_git(["remote", "get-url", remote]),
        "commit": exec_git(["rev-parse", "HEAD"]),
        "commit_time": exec_git(["log", "-1", "--format=%ct"]),
        "branch": exec_git(["rev-parse", "--abbrev-ref", "HEAD"]),
        "tags": exec_git(
            ["describe", "--tags", "--exact-match", "--always", "--dirty"]
        ),
        # NOTE(review): exec_git returns None on failure, and None != "" makes
        # this report dirty=True when `git status` itself fails — confirm.
        "dirty": exec_git(["status", "--porcelain"]) != "",
        "author_name": exec_git(["log", "-1", "--format=%an"]),
        "author_email": exec_git(["log", "-1", "--format=%ae"]),
        "repo_name": (exec_git(["rev-parse", "--show-toplevel"]) or "").split("/")[-1],
    }
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/env/__init__.py | """Utilities to get information about the runtime environment."""
from langsmith.env._git import get_git_info
from langsmith.env._runtime_env import (
get_docker_compose_command,
get_docker_compose_version,
get_docker_environment,
get_docker_version,
get_langchain_env_var_metadata,
get_langchain_env_vars,
get_langchain_environment,
get_release_shas,
get_runtime_and_metrics,
get_runtime_environment,
get_system_metrics,
)
__all__ = [
"get_docker_compose_command",
"get_docker_compose_version",
"get_docker_environment",
"get_docker_version",
"get_langchain_env_var_metadata",
"get_langchain_env_vars",
"get_langchain_environment",
"get_release_shas",
"get_runtime_and_metrics",
"get_runtime_environment",
"get_system_metrics",
"get_git_info",
]
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/evaluation/string_evaluator.py | """This module contains the StringEvaluator class."""
from typing import Callable, Dict, Optional
from pydantic import BaseModel
from langsmith.evaluation.evaluator import EvaluationResult, RunEvaluator
from langsmith.schemas import Example, Run
class StringEvaluator(RunEvaluator, BaseModel):
    """Grades the run's string input, output, and optional answer."""
    evaluation_name: Optional[str] = None
    """The name of the evaluation, such as 'Accuracy' or 'Salience'."""
    input_key: str = "input"
    """The key in the run inputs to extract the input string."""
    prediction_key: str = "output"
    """The key in the run outputs to extract the prediction string."""
    answer_key: Optional[str] = "output"
    """The key in the example outputs containing the answer string."""
    grading_function: Callable[[str, str, Optional[str]], Dict]
    """Function that grades the run output against the example output."""
    def evaluate_run(
        self, run: Run, example: Optional[Example] = None
    ) -> EvaluationResult:
        """Evaluate a single run.

        Args:
            run: The run to grade; ``run.outputs`` must not be None.
            example: Optional reference example supplying the expected answer.

        Returns:
            EvaluationResult built from the grading function's result dict,
            keyed by ``evaluation_name``.

        Raises:
            ValueError: If ``run.outputs`` is None.
            KeyError: If ``input_key``/``prediction_key`` are missing from the
                run's inputs/outputs.
        """
        if run.outputs is None:
            raise ValueError("Run outputs cannot be None.")
        # No reference answer when there is no example, the example has no
        # outputs, or no answer_key is configured.
        if not example or example.outputs is None or self.answer_key is None:
            answer = None
        else:
            answer = example.outputs.get(self.answer_key)
        run_input = run.inputs[self.input_key]
        run_output = run.outputs[self.prediction_key]
        grading_results = self.grading_function(run_input, run_output, answer)
        return EvaluationResult(**{"key": self.evaluation_name, **grading_results})
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/evaluation/_runner.py | """V2 Evaluation Interface."""
from __future__ import annotations
import ast
import collections
import concurrent.futures as cf
import datetime
import functools
import inspect
import itertools
import logging
import pathlib
import queue
import random
import textwrap
import threading
import uuid
from contextvars import copy_context
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
DefaultDict,
Dict,
Generator,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
from typing_extensions import TypedDict, overload
import langsmith
from langsmith import env as ls_env
from langsmith import run_helpers as rh
from langsmith import run_trees as rt
from langsmith import schemas
from langsmith import utils as ls_utils
from langsmith._internal._beta_decorator import _warn_once
from langsmith.evaluation.evaluator import (
SUMMARY_EVALUATOR_T,
ComparisonEvaluationResult,
DynamicComparisonRunEvaluator,
DynamicRunEvaluator,
EvaluationResult,
EvaluationResults,
RunEvaluator,
_normalize_summary_evaluator,
comparison_evaluator,
run_evaluator,
)
from langsmith.evaluation.integrations import LangChainStringEvaluator
if TYPE_CHECKING:
import pandas as pd
from langchain_core.runnables import Runnable
DataFrame = pd.DataFrame
else:
DataFrame = Any
logger = logging.getLogger(__name__)
# The system under test: a function mapping example inputs to outputs.
TARGET_T = Callable[[dict], dict]
# Data format: dataset-name, dataset_id, or examples
DATA_T = Union[str, uuid.UUID, Iterable[schemas.Example], schemas.Dataset]
# Summary evaluator runs over the whole dataset
# and reports aggregate metric(s)
# Row-level evaluator
EVALUATOR_T = Union[
    RunEvaluator,
    Callable[
        [schemas.Run, Optional[schemas.Example]],
        Union[EvaluationResult, EvaluationResults],
    ],
    Callable[..., Union[dict, EvaluationResults, EvaluationResult]],
]
# Async row-level evaluator.
AEVALUATOR_T = Union[
    Callable[
        [schemas.Run, Optional[schemas.Example]],
        Awaitable[Union[EvaluationResult, EvaluationResults]],
    ],
]
# An existing experiment, referenced by name, id, or session object.
EXPERIMENT_T = Union[str, uuid.UUID, schemas.TracerSession]
# Overload: evaluating a single target (function, Runnable, or one existing
# experiment) yields ExperimentResults.
@overload
def evaluate(
    target: Union[TARGET_T, Runnable, EXPERIMENT_T],
    /,
    data: Optional[DATA_T] = None,
    evaluators: Optional[Sequence[EVALUATOR_T]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    experiment_prefix: Optional[str] = None,
    description: Optional[str] = None,
    max_concurrency: Optional[int] = 0,
    num_repetitions: int = 1,
    client: Optional[langsmith.Client] = None,
    blocking: bool = True,
    experiment: Optional[EXPERIMENT_T] = None,
    upload_results: bool = True,
    **kwargs: Any,
) -> ExperimentResults: ...
# Overload: comparing a pair of existing experiments yields
# ComparativeExperimentResults.
@overload
def evaluate(
    target: Union[Tuple[EXPERIMENT_T, EXPERIMENT_T]],
    /,
    data: Optional[DATA_T] = None,
    evaluators: Optional[Sequence[COMPARATIVE_EVALUATOR_T]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    experiment_prefix: Optional[str] = None,
    description: Optional[str] = None,
    max_concurrency: Optional[int] = 0,
    num_repetitions: int = 1,
    client: Optional[langsmith.Client] = None,
    blocking: bool = True,
    experiment: Optional[EXPERIMENT_T] = None,
    upload_results: bool = True,
    **kwargs: Any,
) -> ComparativeExperimentResults: ...
def evaluate(
    target: Union[TARGET_T, Runnable, EXPERIMENT_T, Tuple[EXPERIMENT_T, EXPERIMENT_T]],
    /,
    data: Optional[DATA_T] = None,
    evaluators: Optional[
        Union[Sequence[EVALUATOR_T], Sequence[COMPARATIVE_EVALUATOR_T]]
    ] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    experiment_prefix: Optional[str] = None,
    description: Optional[str] = None,
    max_concurrency: Optional[int] = 0,
    num_repetitions: int = 1,
    client: Optional[langsmith.Client] = None,
    blocking: bool = True,
    experiment: Optional[EXPERIMENT_T] = None,
    upload_results: bool = True,
    **kwargs: Any,
) -> Union[ExperimentResults, ComparativeExperimentResults]:
    r"""Evaluate a target system on a given dataset.
    Args:
        target (TARGET_T | Runnable | EXPERIMENT_T | Tuple[EXPERIMENT_T, EXPERIMENT_T]):
            The target system or experiment(s) to evaluate. Can be a function
            that takes a dict and returns a dict, a langchain Runnable, an
            existing experiment ID, or a two-tuple of experiment IDs.
        data (DATA_T): The dataset to evaluate on. Can be a dataset name, a list of
            examples, or a generator of examples.
        evaluators (Sequence[EVALUATOR_T] | Sequence[COMPARATIVE_EVALUATOR_T] | None):
            A list of evaluators to run on each example. The evaluator signature
            depends on the target type. Default to None.
        summary_evaluators (Sequence[SUMMARY_EVALUATOR_T] | None): A list of summary
            evaluators to run on the entire dataset. Should not be specified if
            comparing two existing experiments. Defaults to None.
        metadata (dict | None): Metadata to attach to the experiment.
            Defaults to None.
        experiment_prefix (str | None): A prefix to provide for your experiment name.
            Defaults to None.
        description (str | None): A free-form text description for the experiment.
        max_concurrency (int | None): The maximum number of concurrent
            evaluations to run. If None then no limit is set. If 0 then no concurrency.
            Defaults to 0.
        num_repetitions (int): The number of times to run the evaluation.
            Each item in the dataset will be run and evaluated this many times.
            Defaults to 1.
        client (langsmith.Client | None): The LangSmith client to use.
            Defaults to None.
        blocking (bool): Whether to block until the evaluation is complete.
            Defaults to True.
        experiment (schemas.TracerSession | None): An existing experiment to
            extend. If provided, experiment_prefix is ignored. For advanced
            usage only. Should not be specified if target is an existing experiment or
            two-tuple of experiments.
        load_nested (bool): Whether to load all child runs for the experiment.
            Default is to only load the top-level root runs. Should only be specified
            when target is an existing experiment or two-tuple of experiments.
        randomize_order (bool): Whether to randomize the order of the outputs for each
            evaluation. Default is False. Should only be specified when target is a
            two-tuple of existing experiments.
    Returns:
        ExperimentResults: If target is a function, Runnable, or existing experiment.
        ComparativeExperimentResults: If target is a two-tuple of existing experiments.
    Examples:
        Prepare the dataset:
        >>> from typing import Sequence
        >>> from langsmith import Client
        >>> from langsmith.evaluation import evaluate
        >>> from langsmith.schemas import Example, Run
        >>> client = Client()
        >>> dataset = client.clone_public_dataset(
        ...     "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
        ... )
        >>> dataset_name = "Evaluate Examples"
        Basic usage:
        >>> def accuracy(run: Run, example: Example):
        ...     # Row-level evaluator for accuracy.
        ...     pred = run.outputs["output"]
        ...     expected = example.outputs["answer"]
        ...     return {"score": expected.lower() == pred.lower()}
        >>> def precision(runs: Sequence[Run], examples: Sequence[Example]):
        ...     # Experiment-level evaluator for precision.
        ...     # TP / (TP + FP)
        ...     predictions = [run.outputs["output"].lower() for run in runs]
        ...     expected = [example.outputs["answer"].lower() for example in examples]
        ...     # yes and no are the only possible answers
        ...     tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
        ...     fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
        ...     return {"score": tp / (tp + fp)}
        >>> def predict(inputs: dict) -> dict:
        ...     # This can be any function or just an API call to your app.
        ...     return {"output": "Yes"}
        >>> results = evaluate(
        ...     predict,
        ...     data=dataset_name,
        ...     evaluators=[accuracy],
        ...     summary_evaluators=[precision],
        ...     experiment_prefix="My Experiment",
        ...     description="Evaluating the accuracy of a simple prediction model.",
        ...     metadata={
        ...         "my-prompt-version": "abcd-1234",
        ...     },
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
        Evaluating over only a subset of the examples
        >>> experiment_name = results.experiment_name
        >>> examples = client.list_examples(dataset_name=dataset_name, limit=5)
        >>> results = evaluate(
        ...     predict,
        ...     data=examples,
        ...     evaluators=[accuracy],
        ...     summary_evaluators=[precision],
        ...     experiment_prefix="My Experiment",
        ...     description="Just testing a subset synchronously.",
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
        Streaming each prediction to more easily + eagerly debug.
        >>> results = evaluate(
        ...     predict,
        ...     data=dataset_name,
        ...     evaluators=[accuracy],
        ...     summary_evaluators=[precision],
        ...     description="I don't even have to block!",
        ...     blocking=False,
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
        >>> for i, result in enumerate(results):  # doctest: +ELLIPSIS
        ...     pass
        Using the `evaluate` API with an off-the-shelf LangChain evaluator:
        >>> from langsmith.evaluation import LangChainStringEvaluator
        >>> from langchain_openai import ChatOpenAI
        >>> def prepare_criteria_data(run: Run, example: Example):
        ...     return {
        ...         "prediction": run.outputs["output"],
        ...         "reference": example.outputs["answer"],
        ...         "input": str(example.inputs),
        ...     }
        >>> results = evaluate(
        ...     predict,
        ...     data=dataset_name,
        ...     evaluators=[
        ...         accuracy,
        ...         LangChainStringEvaluator("embedding_distance"),
        ...         LangChainStringEvaluator(
        ...             "labeled_criteria",
        ...             config={
        ...                 "criteria": {
        ...                     "usefulness": "The prediction is useful if it is correct"
        ...                     " and/or asks a useful followup question."
        ...                 },
        ...                 "llm": ChatOpenAI(model="gpt-4o"),
        ...             },
        ...             prepare_data=prepare_criteria_data,
        ...         ),
        ...     ],
        ...     description="Evaluating with off-the-shelf LangChain evaluators.",
        ...     summary_evaluators=[precision],
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
        Evaluating a LangChain object:
        >>> from langchain_core.runnables import chain as as_runnable
        >>> @as_runnable
        ... def nested_predict(inputs):
        ...     return {"output": "Yes"}
        >>> @as_runnable
        ... def lc_predict(inputs):
        ...     return nested_predict.invoke(inputs)
        >>> results = evaluate(
        ...     lc_predict.invoke,
        ...     data=dataset_name,
        ...     evaluators=[accuracy],
        ...     description="This time we're evaluating a LangChain object.",
        ...     summary_evaluators=[precision],
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    .. versionchanged:: 0.2.0
        'max_concurrency' default updated from None (no limit on concurrency)
        to 0 (no concurrency at all).
    """  # noqa: E501
    if isinstance(target, (str, uuid.UUID, schemas.TracerSession)):
        # Case 1: target references one existing experiment -> apply the
        # evaluators to its already-recorded runs.
        invalid_args = {
            "num_repetitions": num_repetitions > 1,
            "experiment": bool(experiment),
            "upload_results": not upload_results,
            "experiment_prefix": bool(experiment_prefix),
            "data": bool(data),
        }
        if any(invalid_args.values()):
            msg = (
                f"Received invalid arguments. "
                f"{tuple(k for k, v in invalid_args.items() if v)} should not be "
                f"specified when target is an existing experiment."
            )
            raise ValueError(msg)
        # NOTE(review): 'description' is silently ignored on this path —
        # confirm whether it should be rejected like the arguments above.
        target_id = target if isinstance(target, (str, uuid.UUID)) else target.id
        logger.debug(f"Running evaluation over existing experiment {target_id}...")
        return evaluate_existing(
            target,
            evaluators=cast(Optional[Sequence[EVALUATOR_T]], evaluators),
            summary_evaluators=summary_evaluators,
            metadata=metadata,
            max_concurrency=max_concurrency,
            client=client,
            blocking=blocking,
            **kwargs,
        )
    elif isinstance(target, tuple):
        # Case 2: target is a pair of existing experiments -> pairwise
        # (comparative) evaluation.
        invalid_args = {
            "num_repetitions": num_repetitions > 1,
            "experiment": bool(experiment),
            "upload_results": not upload_results,
            "summary_evaluators": bool(summary_evaluators),
            "data": bool(data),
        }
        if len(target) != 2 or not all(
            isinstance(t, (str, uuid.UUID, schemas.TracerSession)) for t in target
        ):
            msg = (
                "Received invalid target. If a tuple is specified it must have length "
                "2 and each element should be the ID or schemas.TracerSession of an "
                f"existing experiment. Received {target=}"
            )
            raise ValueError(msg)
        elif any(invalid_args.values()):
            msg = (
                f"Received invalid arguments. "
                f"{tuple(k for k, v in invalid_args.items() if v)} should not be "
                f"specified when target is two existing experiments."
            )
            raise ValueError(msg)
        # evaluate_comparative's max_concurrency has a different default (5),
        # so only forward it when explicitly meaningful.
        if max_concurrency is not None:
            kwargs["max_concurrency"] = max_concurrency
        target_ids = [t if isinstance(t, (str, uuid.UUID)) else t.id for t in target]
        logger.debug(
            f"Running pairwise evaluation over existing experiments {target_ids}..."
        )
        return evaluate_comparative(
            target,
            evaluators=cast(Sequence[COMPARATIVE_EVALUATOR_T], evaluators or ()),
            experiment_prefix=experiment_prefix,
            description=description,
            client=client,
            metadata=metadata,
            **kwargs,
        )
    # Case 3: target is a callable / Runnable -> run predictions then evaluate.
    elif kwargs:
        msg = (
            f"Received unsupported arguments {kwargs}. These arguments are not "
            f"supported when creating a new experiment."
        )
        raise ValueError(msg)
    elif not data:
        msg = "Must specify 'data' when running evaluations over a target function."
        raise ValueError(msg)
    elif callable(target) and rh.is_async(target):
        msg = (
            "Async functions are not supported by `evaluate`. "
            "Please use `aevaluate` instead:\n\n"
            "from langsmith import aevaluate\n\n"
            "await aevaluate(\n"
            "    async_target_function,\n"
            "    data=data,\n"
            "    evaluators=evaluators,\n"
            "    # ... other parameters\n"
            ")"
        )
        raise ValueError(msg)
    elif experiment and experiment_prefix:
        msg = (
            "Expected at most one of 'experiment' or 'experiment_prefix',"
            " but both were provided. "
            f"Got: experiment={experiment}, experiment_prefix={experiment_prefix}"
        )
        raise ValueError(msg)
    else:
        if not upload_results:
            _warn_once("'upload_results' parameter is in beta.")
        logger.debug(f"Running evaluation over target system {target}...")
        return _evaluate(
            target,
            data=data,
            evaluators=cast(Optional[Sequence[EVALUATOR_T]], evaluators),
            summary_evaluators=summary_evaluators,
            metadata=metadata,
            experiment_prefix=experiment_prefix,
            description=description,
            max_concurrency=max_concurrency,
            num_repetitions=num_repetitions,
            client=client,
            blocking=blocking,
            experiment=experiment,
            upload_results=upload_results,
        )
def evaluate_existing(
    experiment: Union[str, uuid.UUID, schemas.TracerSession],
    /,
    evaluators: Optional[Sequence[EVALUATOR_T]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    max_concurrency: Optional[int] = 0,
    client: Optional[langsmith.Client] = None,
    load_nested: bool = False,
    blocking: bool = True,
) -> ExperimentResults:
    r"""Evaluate existing experiment runs.
    Args:
        experiment (Union[str, uuid.UUID, schemas.TracerSession]): The identifier of
            the experiment to evaluate.
        evaluators (Optional[Sequence[EVALUATOR_T]]): Optional sequence of evaluators to use for individual run evaluation.
        summary_evaluators (Optional[Sequence[SUMMARY_EVALUATOR_T]]): Optional sequence of evaluators
            to apply over the entire dataset.
        metadata (Optional[dict]): Optional metadata to include in the evaluation results.
        max_concurrency (int | None): The maximum number of concurrent
            evaluations to run. If None then no limit is set. If 0 then no concurrency.
            Defaults to 0.
        client (Optional[langsmith.Client]): Optional Langsmith client to use for evaluation.
        load_nested: Whether to load all child runs for the experiment.
            Default is to only load the top-level root runs.
        blocking (bool): Whether to block until evaluation is complete.
    Returns:
        ExperimentResults: The evaluation results.
    Environment:
        - LANGSMITH_TEST_CACHE: If set, API calls will be cached to disk to save time and
            cost during testing. Recommended to commit the cache files to your repository
            for faster CI/CD runs.
            Requires the 'langsmith[vcr]' package to be installed.
    Examples:
        >>> from langsmith.evaluation import evaluate, evaluate_existing
        >>> dataset_name = "Evaluate Examples"
        >>> def predict(inputs: dict) -> dict:
        ...     # This can be any function or just an API call to your app.
        ...     return {"output": "Yes"}
        >>> # First run inference on the dataset
        ... results = evaluate(
        ...     predict,
        ...     data=dataset_name,
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
        >>> # Then apply evaluators to the experiment
        ... def accuracy(run: Run, example: Example):
        ...     # Row-level evaluator for accuracy.
        ...     pred = run.outputs["output"]
        ...     expected = example.outputs["answer"]
        ...     return {"score": expected.lower() == pred.lower()}
        >>> def precision(runs: Sequence[Run], examples: Sequence[Example]):
        ...     # Experiment-level evaluator for precision.
        ...     # TP / (TP + FP)
        ...     predictions = [run.outputs["output"].lower() for run in runs]
        ...     expected = [example.outputs["answer"].lower() for example in examples]
        ...     # yes and no are the only possible answers
        ...     tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
        ...     fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
        ...     return {"score": tp / (tp + fp)}
        >>> experiment_name = (
        ...     results.experiment_name
        ... )  # Can use the returned experiment name
        >>> experiment_name = "My Experiment:64e6e91"  # Or manually specify
        >>> results = evaluate_existing(
        ...     experiment_name,
        ...     summary_evaluators=[precision],
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    """  # noqa: E501
    # Generous timeouts: reading back a whole experiment can be slow.
    client = client or rt.get_cached_client(timeout_ms=(20_000, 90_001))
    project = _load_experiment(experiment, client)
    runs = _load_traces(experiment, client, load_nested=load_nested)
    # Re-associate each run with the dataset example that produced it.
    data_map = _load_examples_map(client, project)
    data = [data_map[cast(uuid.UUID, run.reference_example_id)] for run in runs]
    return _evaluate(
        runs,
        data=data,
        evaluators=evaluators,
        summary_evaluators=summary_evaluators,
        metadata=metadata,
        max_concurrency=max_concurrency,
        client=client,
        blocking=blocking,
        experiment=project,
    )
class ExperimentResultRow(TypedDict):
    """One row of experiment output: a run, its example, and its feedback."""
    # The traced execution of the target for this example.
    run: schemas.Run
    # The dataset example whose inputs produced the run.
    example: schemas.Example
    # All evaluator feedback recorded for this run.
    evaluation_results: EvaluationResults
class ExperimentResults:
    """Represents the results of an evaluate() call.
    This class provides an iterator interface to iterate over the experiment results
    as they become available. It also provides methods to access the experiment name,
    the number of results, and to wait for the results to be processed.
    Methods:
        experiment_name() -> str: Returns the name of the experiment.
        wait() -> None: Waits for the experiment data to be processed.
    """
    def __init__(self, experiment_manager: _ExperimentManager, blocking: bool = True):
        self._manager = experiment_manager
        # Rows in completion order; readable while the producer is still
        # appending (list.append is used as the publication point).
        self._results: List[ExperimentResultRow] = []
        # Used purely as a wake-up signal for __iter__; the dequeued items are
        # discarded there (rows are always read from _results).
        self._queue: queue.Queue[ExperimentResultRow] = queue.Queue()
        self._processing_complete = threading.Event()
        if not blocking:
            # Consume results on a background thread so callers can iterate
            # eagerly without waiting for the whole experiment to finish.
            self._thread: Optional[threading.Thread] = threading.Thread(
                target=self._process_data
            )
            self._thread.start()
        else:
            self._thread = None
            self._process_data()
    @property
    def experiment_name(self) -> str:
        # Delegates to the manager, which owns experiment naming.
        return self._manager.experiment_name
    def __iter__(self) -> Iterator[ExperimentResultRow]:
        # Yield rows as they land in _results, polling until the producer has
        # signaled completion AND both buffers are drained.
        ix = 0
        while (
            not self._processing_complete.is_set()
            or not self._queue.empty()
            or ix < len(self._results)
        ):
            try:
                if ix < len(self._results):
                    yield self._results[ix]
                    ix += 1
                else:
                    # Short blocking poll for the "next item ready" signal.
                    # NOTE(review): the item itself is intentionally dropped —
                    # _process_data also appends it to _results, which is the
                    # source of truth for this iterator.
                    self._queue.get(block=True, timeout=0.1)
            except queue.Empty:
                continue
    def _process_data(self) -> None:
        # Drain the manager's result stream, publishing each row to both the
        # list (for indexed access) and the queue (to wake iterators).
        tqdm = _load_tqdm()
        results = self._manager.get_results()
        for item in tqdm(results):
            self._queue.put(item)
            self._results.append(item)
        # Summary scores are only available once all rows are processed.
        summary_scores = self._manager.get_summary_scores()
        self._summary_results = summary_scores
        self._processing_complete.set()
    def __len__(self) -> int:
        # Number of rows received so far (may still grow when non-blocking).
        return len(self._results)
    def to_pandas(
        self, start: Optional[int] = 0, end: Optional[int] = None
    ) -> DataFrame:
        # Convert the (sliced) rows to a DataFrame; requires pandas at runtime.
        return _to_pandas(self._results, start=start, end=end)
    def _repr_html_(self) -> str:
        # Rich notebook rendering when pandas is importable and we have rows.
        import importlib.util
        if self._results and importlib.util.find_spec("pandas"):
            df = self.to_pandas()
            return df._repr_html_()  # type: ignore[operator]
        else:
            return self.__repr__()
    def __repr__(self) -> str:
        return f"<ExperimentResults {self.experiment_name}>"
    def wait(self) -> None:
        """Wait for the evaluation runner to complete.
        This method blocks the current thread until the evaluation runner has
        finished its execution.
        """
        # No-op in blocking mode (no background thread was started).
        if self._thread:
            self._thread.join()
## Public API for Comparison Experiments
# Pairwise (comparative) evaluator: receives, for a single example, the runs
# from each of the compared experiments, and returns — or awaits — a
# ComparisonEvaluationResult (or an equivalent dict).
COMPARATIVE_EVALUATOR_T = Callable[
    [Sequence[schemas.Run], Optional[schemas.Example]],
    Union[
        Union[ComparisonEvaluationResult, dict],
        Awaitable[Union[ComparisonEvaluationResult, dict]],
    ],
]
def evaluate_comparative(
    experiments: Tuple[EXPERIMENT_T, EXPERIMENT_T],
    /,
    evaluators: Sequence[COMPARATIVE_EVALUATOR_T],
    experiment_prefix: Optional[str] = None,
    description: Optional[str] = None,
    max_concurrency: int = 5,
    client: Optional[langsmith.Client] = None,
    metadata: Optional[dict] = None,
    load_nested: bool = False,
    randomize_order: bool = False,
) -> ComparativeExperimentResults:
    r"""Evaluate existing experiment runs against each other.
    This lets you use pairwise preference scoring to generate more
    reliable feedback in your experiments.
    Args:
        experiments (Tuple[Union[str, uuid.UUID], Union[str, uuid.UUID]]):
            The identifiers of the experiments to compare.
        evaluators (Sequence[COMPARATIVE_EVALUATOR_T]):
            A list of evaluators to run on each example.
        experiment_prefix (Optional[str]): A prefix to provide for your experiment name.
            Defaults to None.
        description (Optional[str]): A free-form text description for the experiment.
        max_concurrency (int): The maximum number of concurrent evaluations to run.
            Defaults to 5.
        client (Optional[langsmith.Client]): The LangSmith client to use.
            Defaults to None.
        metadata (Optional[dict]): Metadata to attach to the experiment.
            Defaults to None.
        load_nested (bool): Whether to load all child runs for the experiment.
            Default is to only load the top-level root runs.
        randomize_order (bool): Whether to randomize the order of the outputs for each evaluation.
            Default is False.
    Returns:
        ComparativeExperimentResults: The results of the comparative evaluation.
    Examples:
        Suppose you want to compare two prompts to see which one is more effective.
        You would first prepare your dataset:
        >>> from typing import Sequence
        >>> from langsmith import Client
        >>> from langsmith.evaluation import evaluate
        >>> from langsmith.schemas import Example, Run
        >>> client = Client()
        >>> dataset = client.clone_public_dataset(
        ...     "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
        ... )
        >>> dataset_name = "Evaluate Examples"
        Then you would run your different prompts:
        >>> import functools
        >>> import openai
        >>> from langsmith.evaluation import evaluate
        >>> from langsmith.wrappers import wrap_openai
        >>> oai_client = openai.Client()
        >>> wrapped_client = wrap_openai(oai_client)
        >>> prompt_1 = "You are a helpful assistant."
        >>> prompt_2 = "You are an exceedingly helpful assistant."
        >>> def predict(inputs: dict, prompt: str) -> dict:
        ...     completion = wrapped_client.chat.completions.create(
        ...         model="gpt-3.5-turbo",
        ...         messages=[
        ...             {"role": "system", "content": prompt},
        ...             {
        ...                 "role": "user",
        ...                 "content": f"Context: {inputs['context']}"
        ...                 f"\n\n{inputs['question']}",
        ...             },
        ...         ],
        ...     )
        ...     return {"output": completion.choices[0].message.content}
        >>> results_1 = evaluate(
        ...     functools.partial(predict, prompt=prompt_1),
        ...     data=dataset_name,
        ...     description="Evaluating our basic system prompt.",
        ...     blocking=False,  # Run these experiments in parallel
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
        >>> results_2 = evaluate(
        ...     functools.partial(predict, prompt=prompt_2),
        ...     data=dataset_name,
        ...     description="Evaluating our advanced system prompt.",
        ...     blocking=False,
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
        >>> results_1.wait()
        >>> results_2.wait()
        >>> import time
        >>> time.sleep(10)  # Wait for the traces to be fully processed
        Finally, you would compare the two prompts directly:
        >>> import json
        >>> from langsmith.evaluation import evaluate_comparative
        >>> def score_preferences(runs: list, example: schemas.Example):
        ...     assert len(runs) == 2  # Comparing 2 systems
        ...     assert isinstance(example, schemas.Example)
        ...     assert all(run.reference_example_id == example.id for run in runs)
        ...     pred_a = runs[0].outputs["output"]
        ...     pred_b = runs[1].outputs["output"]
        ...     ground_truth = example.outputs["answer"]
        ...     tools = [
        ...         {
        ...             "type": "function",
        ...             "function": {
        ...                 "name": "rank_preferences",
        ...                 "description": "Saves the preferred response ('A' or 'B')",
        ...                 "parameters": {
        ...                     "type": "object",
        ...                     "properties": {
        ...                         "reasoning": {
        ...                             "type": "string",
        ...                             "description": "The reasoning behind the choice.",
        ...                         },
        ...                         "preferred_option": {
        ...                             "type": "string",
        ...                             "enum": ["A", "B"],
        ...                             "description": "The preferred option, either 'A' or 'B'",
        ...                         },
        ...                     },
        ...                     "required": ["preferred_option"],
        ...                 },
        ...             },
        ...         }
        ...     ]
        ...     completion = openai.Client().chat.completions.create(
        ...         model="gpt-3.5-turbo",
        ...         messages=[
        ...             {"role": "system", "content": "Select the better response."},
        ...             {
        ...                 "role": "user",
        ...                 "content": f"Option A: {pred_a}"
        ...                 f"\n\nOption B: {pred_b}"
        ...                 f"\n\nGround Truth: {ground_truth}",
        ...             },
        ...         ],
        ...         tools=tools,
        ...         tool_choice={
        ...             "type": "function",
        ...             "function": {"name": "rank_preferences"},
        ...         },
        ...     )
        ...     tool_args = completion.choices[0].message.tool_calls[0].function.arguments
        ...     loaded_args = json.loads(tool_args)
        ...     preference = loaded_args["preferred_option"]
        ...     comment = loaded_args["reasoning"]
        ...     if preference == "A":
        ...         return {
        ...             "key": "ranked_preference",
        ...             "scores": {runs[0].id: 1, runs[1].id: 0},
        ...             "comment": comment,
        ...         }
        ...     else:
        ...         return {
        ...             "key": "ranked_preference",
        ...             "scores": {runs[0].id: 0, runs[1].id: 1},
        ...             "comment": comment,
        ...         }
        >>> def score_length_difference(runs: list, example: schemas.Example):
        ...     # Just return whichever response is longer.
        ...     # Just an example, not actually useful in real life.
        ...     assert len(runs) == 2  # Comparing 2 systems
        ...     assert isinstance(example, schemas.Example)
        ...     assert all(run.reference_example_id == example.id for run in runs)
        ...     pred_a = runs[0].outputs["output"]
        ...     pred_b = runs[1].outputs["output"]
        ...     if len(pred_a) > len(pred_b):
        ...         return {
        ...             "key": "length_difference",
        ...             "scores": {runs[0].id: 1, runs[1].id: 0},
        ...         }
        ...     else:
        ...         return {
        ...             "key": "length_difference",
        ...             "scores": {runs[0].id: 0, runs[1].id: 1},
        ...         }
        >>> results = evaluate_comparative(
        ...     [results_1.experiment_name, results_2.experiment_name],
        ...     evaluators=[score_preferences, score_length_difference],
        ...     client=client,
        ... )  # doctest: +ELLIPSIS
        View the pairwise evaluation results at:...
        >>> eval_results = list(results)
        >>> assert len(eval_results) >= 10  # doctest: +SKIP
        >>> assert all(
        ...     "feedback.ranked_preference" in r["evaluation_results"]
        ...     for r in eval_results
        ... )  # doctest: +SKIP
        >>> assert all(
        ...     "feedback.length_difference" in r["evaluation_results"]
        ...     for r in eval_results
        ... )  # doctest: +SKIP
    """  # noqa: E501
    if len(experiments) < 2:
        raise ValueError("Comparative evaluation requires at least 2 experiments.")
    if not evaluators:
        raise ValueError(
            "At least one evaluator is required for comparative evaluation."
        )
    if max_concurrency < 0:
        # 0 is allowed — it means "run sequentially" (see executor setup below).
        raise ValueError("max_concurrency must be a non-negative integer.")
    client = client or rt.get_cached_client()
    # TODO: Add information about comparison experiments
    projects = [_load_experiment(experiment, client) for experiment in experiments]
    ref_datasets_ = [str(p.reference_dataset_id) for p in projects]
    if not len(set(ref_datasets_)) == 1:
        raise ValueError("All experiments must have the same reference dataset.")
    experiment_ids = [p.id for p in projects]
    if experiment_prefix is None:
        # Derive a readable name from the compared experiments' names.
        experiment_names = [p.name for p in projects if p.name is not None]
        experiment_name = (
            " vs. ".join(experiment_names) + "-" + str(uuid.uuid4().hex[:4])
        )
    else:
        experiment_name = experiment_prefix + "-" + str(uuid.uuid4().hex[:8])
    comparative_experiment_id = uuid.uuid4()
    comparative_experiment = client.create_comparative_experiment(
        experiment_name,
        experiments=experiment_ids,
        description=description,
        metadata=metadata,
        id=comparative_experiment_id,
    )
    _print_comparative_experiment_start(
        cast(
            Tuple[schemas.TracerSessionResult, schemas.TracerSessionResult],
            tuple(projects),
        ),
        comparative_experiment,
    )
    runs = [
        _load_traces(experiment, client, load_nested=load_nested)
        for experiment in experiments
    ]
    # Only evaluate examples that have a run in every compared experiment.
    examples_intersection = None
    for runs_list in runs:
        example_ids_set = {run.reference_example_id for run in runs_list}
        if examples_intersection is None:
            examples_intersection = example_ids_set
        else:
            examples_intersection &= example_ids_set
    example_ids_nullable = (
        list(examples_intersection) if examples_intersection is not None else []
    )
    example_ids = [eid for eid in example_ids_nullable if eid is not None]
    # TODO: Warn if different dataset versions, etc. are used in the different
    # experiments. We aren't providing any training wheels here.
    batch_size = 99
    data = {}
    # Fetch examples in batches to keep request sizes bounded.
    for i in range(0, len(example_ids), batch_size):
        example_ids_batch = example_ids[i : i + batch_size]
        for e in client.list_examples(
            dataset_id=projects[0].reference_dataset_id,
            as_of=projects[0].metadata.get("dataset_version"),
            example_ids=example_ids_batch,
        ):
            data[e.id] = e
    # Group each example's runs across the experiments.
    runs_dict: Dict[uuid.UUID, List[schemas.Run]] = collections.defaultdict(list)
    for runs_list in runs:
        for run in runs_list:
            if run.reference_example_id in data:
                runs_dict[cast(uuid.UUID, run.reference_example_id)].append(run)
    comparators = [comparison_evaluator(evaluator) for evaluator in evaluators or []]
    results: dict = {}
    def evaluate_and_submit_feedback(
        runs_list: list[schemas.Run],
        example: schemas.Example,
        comparator: DynamicComparisonRunEvaluator,
        executor: cf.Executor,
    ) -> ComparisonEvaluationResult:
        # One feedback_group_id ties together the feedback rows produced for
        # all runs in this single comparison.
        feedback_group_id = uuid.uuid4()
        if randomize_order:
            random.shuffle(runs_list)
        with rh.tracing_context(project_name="evaluators", client=client):
            result = comparator.compare_runs(runs_list, example)
            if client is None:
                raise ValueError("Client is required to submit feedback.")
        # A plain-string comment applies to every scored run.
        comments = (
            {str(rid): result.comment for rid in result.scores}
            if isinstance(result.comment, str)
            else (result.comment or {})
        )
        for run_id, score in result.scores.items():
            executor.submit(
                client.create_feedback,
                run_id=run_id,
                key=result.key,
                score=score,
                comment=comments.get(str(run_id)),
                comparative_experiment_id=comparative_experiment.id,
                source_run_id=result.source_run_id,
                feedback_group_id=feedback_group_id,
            )
        return result
    tqdm = _load_tqdm()
    with ls_utils.ContextThreadPoolExecutor(
        max_workers=max_concurrency or 1
    ) as executor:
        # Track (example_id, future) pairs so each deferred result is recorded
        # under the example it belongs to. (Previously all futures' results
        # were keyed to whatever example_id the loop variable last held.)
        futures: List[Tuple[uuid.UUID, cf.Future]] = []
        for example_id, runs_list in tqdm(runs_dict.items()):
            results[example_id] = {"runs": runs_list}
            for comparator in comparators:
                if max_concurrency > 1:
                    future = executor.submit(
                        evaluate_and_submit_feedback,
                        runs_list,
                        data[example_id],
                        comparator,
                        executor,
                    )
                    futures.append((example_id, future))
                else:
                    result = evaluate_and_submit_feedback(
                        runs_list, data[example_id], comparator, executor
                    )
                    results[example_id][f"feedback.{result.key}"] = result
        if futures:
            cf.wait([f for _, f in futures])
            for eid, future in futures:
                result = future.result()
                results[eid][f"feedback.{result.key}"] = result
    return ComparativeExperimentResults(results, data)
class ComparativeExperimentResults:
    """Represents the results of an evaluate_comparative() call.
    Maps example IDs to the runs being compared and the pairwise feedback
    recorded for them. Supports keyed lookup (``results[example_id]``) and
    iteration, which yields one dict per example of the form
    ``{"example": <Example or None>, "evaluation_results": <per-example results>}``.
    (Unlike ``ExperimentResults``, all results are already materialized, so
    there is no ``experiment_name``/``wait`` API here.)
    """
    def __init__(
        self,
        results: dict,
        examples: Optional[Dict[uuid.UUID, schemas.Example]] = None,
    ):
        # example_id -> {"runs": [...], "feedback.<key>": ComparisonEvaluationResult, ...}
        self._results = results
        # Optional example_id -> Example lookup used to enrich iteration output.
        self._examples = examples
    def __getitem__(self, key):
        """Return the result associated with the given key."""
        return self._results[key]
    def __iter__(self):
        """Yield one dict per example with its (optional) Example and results."""
        for key, value in self._results.items():
            yield {
                "example": self._examples[key] if self._examples else None,
                "evaluation_results": value,
            }
## Private API
def _print_comparative_experiment_start(
experiments: Tuple[schemas.TracerSession, schemas.TracerSession],
comparative_experiment: schemas.ComparativeExperiment,
) -> None:
url = experiments[0].url or experiments[1].url
if url:
project_url = url.split("?")[0]
dataset_id = comparative_experiment.reference_dataset_id
base_url = project_url.split("/projects/p/")[0]
comparison_url = (
f"{base_url}/datasets/{dataset_id}/compare?"
f"selectedSessions={'%2C'.join([str(e.id) for e in experiments])}"
f"&comparativeExperiment={comparative_experiment.id}"
)
print( # noqa: T201
f"View the pairwise evaluation results at:\n{comparison_url}\n\n"
)
def _is_callable(target: Union[TARGET_T, Iterable[schemas.Run], Runnable]) -> bool:
return callable(target) or _is_langchain_runnable(target)
def _evaluate(
    target: Union[TARGET_T, Iterable[schemas.Run], Runnable],
    /,
    data: DATA_T,
    evaluators: Optional[Sequence[EVALUATOR_T]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    experiment_prefix: Optional[str] = None,
    description: Optional[str] = None,
    max_concurrency: Optional[int] = None,
    num_repetitions: int = 1,
    client: Optional[langsmith.Client] = None,
    blocking: bool = True,
    experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None,
    upload_results: bool = True,
) -> ExperimentResults:
    """Shared implementation behind evaluate()/evaluate_existing().
    Builds an _ExperimentManager pipeline (predictions -> row evaluators ->
    summary evaluators) and wraps it in an ExperimentResults for consumption.
    ``target`` is either a callable/Runnable to execute, or the already-recorded
    runs of an existing experiment.
    """
    # Initialize the experiment manager.
    client = client or rt.get_cached_client()
    # When target is not invocable, it is the pre-existing runs to re-score.
    runs = None if _is_callable(target) else cast(Iterable[schemas.Run], target)
    # Resolve (or create) the experiment session these runs belong to.
    experiment_, runs = _resolve_experiment(
        experiment,
        runs,
        client,
    )
    manager = _ExperimentManager(
        data,
        client=client,
        metadata=metadata,
        experiment=experiment_ or experiment_prefix,
        description=description,
        num_repetitions=num_repetitions,
        # Pre-existing runs, if re-scoring an existing experiment; otherwise
        # predictions are generated below via with_predictions().
        runs=runs,
        upload_results=upload_results,
    ).start()
    # Optional on-disk API cache (see LANGSMITH_TEST_CACHE); None disables it.
    cache_dir = ls_utils.get_cache_dir(None)
    cache_path = (
        pathlib.Path(cache_dir) / f"{manager.dataset_id}.yaml" if cache_dir else None
    )
    with ls_utils.with_optional_cache(cache_path, ignore_hosts=[client.api_url]):
        if _is_callable(target):
            # Add predictions to the experiment.
            manager = manager.with_predictions(
                cast(TARGET_T, target), max_concurrency=max_concurrency
            )
        if evaluators:
            # Apply evaluators to the predictions.
            manager = manager.with_evaluators(
                evaluators, max_concurrency=max_concurrency
            )
        if summary_evaluators:
            # Apply the experiment-level summary evaluators.
            manager = manager.with_summary_evaluators(summary_evaluators)
        # Start consuming the results.
        results = ExperimentResults(manager, blocking=blocking)
        return results
def _is_uuid(value: str) -> bool:
try:
uuid.UUID(value)
return True
except ValueError:
return False
def _load_experiment(
    project: EXPERIMENT_T, client: langsmith.Client
) -> schemas.TracerSession:
    """Resolve an experiment reference (session, UUID, or name) to a TracerSession."""
    # Already a full session object; nothing to fetch.
    if isinstance(project, schemas.TracerSession):
        return project
    # UUID instance or UUID-shaped string -> look up by project id.
    if isinstance(project, uuid.UUID) or _is_uuid(project):
        return client.read_project(project_id=project)
    # Anything else is treated as a project name.
    return client.read_project(project_name=project)
def _load_traces(
    project: Union[str, uuid.UUID, schemas.TracerSession],
    client: langsmith.Client,
    load_nested: bool = False,
) -> List[schemas.Run]:
    """Load nested traces for a given project.

    Args:
        project: The project to read runs from — a session object, a
            project ID (UUID object or UUID string), or a project name.
        client: The LangSmith client used to list runs.
        load_nested: If True, fetch every run and attach child runs to their
            parents; otherwise only root runs are returned.

    Returns:
        The root runs; when ``load_nested`` is True their ``child_runs``
        attributes are populated, sorted by ``dotted_order``.
    """
    # Only filter to root runs when we are not loading the full tree.
    is_root = None if load_nested else True
    if isinstance(project, schemas.TracerSession):
        runs = client.list_runs(project_id=project.id, is_root=is_root)
    elif isinstance(project, uuid.UUID) or _is_uuid(project):
        runs = client.list_runs(project_id=project, is_root=is_root)
    else:
        runs = client.list_runs(project_name=project, is_root=is_root)
    if not load_nested:
        return list(runs)
    # Index children by parent id, collect the roots, and keep every run by
    # id so children can be attached to their parents in the second pass.
    treemap: DefaultDict[uuid.UUID, List[schemas.Run]] = collections.defaultdict(list)
    results = []
    all_runs = {}
    for run in runs:
        if run.parent_run_id is not None:
            treemap[run.parent_run_id].append(run)
        else:
            results.append(run)
        all_runs[run.id] = run
    for run_id, child_runs in treemap.items():
        # dotted_order sorts siblings into execution order.
        all_runs[run_id].child_runs = sorted(child_runs, key=lambda r: r.dotted_order)
    return results
def _load_examples_map(
    client: langsmith.Client, project: schemas.TracerSession
) -> Dict[uuid.UUID, schemas.Example]:
    """Map example ID -> example for the project's reference dataset,
    pinned to the dataset version recorded in the project metadata."""
    examples = client.list_examples(
        dataset_id=project.reference_dataset_id,
        as_of=project.metadata.get("dataset_version"),
    )
    return {example.id: example for example in examples}
IT = TypeVar("IT")
def _load_tqdm() -> Callable[[IT], IT]:
try:
from tqdm.auto import tqdm
except ImportError:
return lambda x: x
return tqdm # type: ignore[return-value]
ET = TypeVar("ET", bound="_ExperimentManagerMixin")
class _ExperimentManagerMixin:
def __init__(
self,
/,
experiment: Optional[Union[schemas.TracerSession, str]],
metadata: Optional[dict] = None,
client: Optional[langsmith.Client] = None,
description: Optional[str] = None,
):
self.client = client or rt.get_cached_client()
self._experiment: Optional[schemas.TracerSession] = None
if experiment is None:
self._experiment_name = _get_random_name()
elif isinstance(experiment, str):
self._experiment_name = experiment + "-" + str(uuid.uuid4().hex[:8])
else:
self._experiment_name = cast(str, experiment.name)
self._experiment = experiment
metadata = metadata or {}
if not metadata.get("revision_id"):
metadata = {
"revision_id": ls_env.get_langchain_env_var_metadata().get(
"revision_id"
),
**metadata,
}
self._metadata = metadata or {}
self._description = description
@property
def experiment_name(self) -> str:
if self._experiment_name is not None:
return self._experiment_name
raise ValueError(
"Experiment name not provided, and experiment not yet started."
)
def _get_experiment(self) -> schemas.TracerSession:
if self._experiment is None:
raise ValueError("Experiment not started yet.")
return self._experiment
def _get_experiment_metadata(self):
project_metadata = self._metadata or {}
git_info = ls_env.get_git_info()
if git_info:
project_metadata = {
**project_metadata,
"git": git_info,
}
if self._experiment:
project_metadata = {
**self._experiment.metadata,
**project_metadata,
}
return project_metadata
def _create_experiment(
self, dataset_id: uuid.UUID, metadata: dict
) -> schemas.TracerSession:
# There is a chance of name collision, so we'll retry
starting_name = self._experiment_name
num_attempts = 10
for _ in range(num_attempts):
try:
return self.client.create_project(
self._experiment_name,
description=self._description,
reference_dataset_id=dataset_id,
metadata=metadata,
)
except ls_utils.LangSmithConflictError:
self._experiment_name = f"{starting_name}-{str(uuid.uuid4().hex[:6])}"
raise ValueError(
f"Could not find a unique experiment name in {num_attempts} attempts."
" Please try again with a different experiment name."
)
def _get_project(self, first_example: schemas.Example) -> schemas.TracerSession:
if self._experiment is None:
project_metadata = self._get_experiment_metadata()
project = self._create_experiment(
first_example.dataset_id, project_metadata
)
else:
project = self._experiment
return project
def _print_experiment_start(
self, project: Optional[schemas.TracerSession], first_example: schemas.Example
) -> None:
if project and project.url:
# TODO: Make this a public API
project_url = project.url.split("?")[0]
dataset_id = first_example.dataset_id
base_url = project_url.split("/projects/p/")[0]
comparison_url = (
f"{base_url}/datasets/{dataset_id}/compare?"
f"selectedSessions={project.id}"
)
print( # noqa: T201
f"View the evaluation results for experiment: '{self.experiment_name}'"
f" at:\n{comparison_url}\n\n"
)
else:
# HACKHACK
print( # noqa: T201
"Starting evaluation of experiment: %s", self.experiment_name
)
class _ExperimentManager(_ExperimentManagerMixin):
    """Manage the execution of experiments.
    Supports lazily running predictions and evaluations in parallel to facilitate
    result streaming and early debugging.
    Args:
        data (DATA_T): The data used for the experiment. Can be a dataset name or ID OR
            a generator of examples.
        num_repetitions (int): The number of times to run over the data.
        runs (Optional[Iterable[schemas.Run]]): The runs associated with the experiment
            predictions.
        experiment (Optional[schemas.TracerSession]): The tracer session
            associated with the experiment.
        experiment_prefix (Optional[str]): The prefix for the experiment name.
        metadata (Optional[dict]): Additional metadata for the experiment.
        client (Optional[langsmith.Client]): The Langsmith client used for
            the experiment.
        evaluation_results (Optional[Iterable[EvaluationResults]]): The evaluation
            results for the experiment.
        summary_results (Optional[Iterable[EvaluationResults]]): The aggregate results
            for the experiment.
    """
    def __init__(
        self,
        data: DATA_T,
        /,
        experiment: Optional[Union[schemas.TracerSession, str]],
        metadata: Optional[dict] = None,
        client: Optional[langsmith.Client] = None,
        runs: Optional[Iterable[schemas.Run]] = None,
        evaluation_results: Optional[Iterable[EvaluationResults]] = None,
        summary_results: Optional[Iterable[EvaluationResults]] = None,
        description: Optional[str] = None,
        num_repetitions: int = 1,
        upload_results: bool = True,
    ):
        # See the class docstring for parameter semantics. Each "with_*"
        # step constructs a new manager carrying forward these fields.
        super().__init__(
            experiment=experiment,
            metadata=metadata,
            client=client,
            description=description,
        )
        # Raw data source; resolved lazily by the `examples` property.
        self._data = data
        self._examples: Optional[Iterable[schemas.Example]] = None
        self._runs = runs
        self._evaluation_results = evaluation_results
        self._summary_results = summary_results
        self._num_repetitions = num_repetitions
        # When False, tracing runs in "local" mode and nothing is uploaded.
        self._upload_results = upload_results
    @property
    def examples(self) -> Iterable[schemas.Example]:
        # Resolve the data source once, then hand out independent iterators
        # via itertools.tee so the cached stream is never exhausted.
        if self._examples is None:
            self._examples = _resolve_data(self._data, client=self.client)
            if self._num_repetitions > 1:
                # Repeat the example stream num_repetitions times.
                self._examples = itertools.chain.from_iterable(
                    itertools.tee(self._examples, self._num_repetitions)
                )
        self._examples, examples_iter = itertools.tee(self._examples)
        return examples_iter
    @property
    def dataset_id(self) -> str:
        # Prefer the experiment's reference dataset; otherwise peek at the
        # first example to discover the dataset id.
        if self._experiment is None or not getattr(
            self._experiment, "reference_dataset_id", None
        ):
            example = next(iter(self.examples))
            return str(example.dataset_id)
        return str(
            cast(schemas.TracerSessionResult, self._experiment).reference_dataset_id
        )
    @property
    def evaluation_results(self) -> Iterable[EvaluationResults]:
        # Before evaluators run, emit an empty result set per example so that
        # zip() in get_results() stays aligned.
        if self._evaluation_results is None:
            return ({"results": []} for _ in self.examples)
        return self._evaluation_results
    @property
    def runs(self) -> Iterable[schemas.Run]:
        if self._runs is None:
            raise ValueError(
                "Runs not provided in this experiment." " Please predict first."
            )
        # tee so repeated access does not exhaust the underlying iterator.
        self._runs, runs_iter = itertools.tee(self._runs)
        return runs_iter
    def start(self) -> _ExperimentManager:
        """Create/resolve the experiment project and return a new manager."""
        # Peek at one example (without consuming self.examples) to find the
        # dataset the experiment should be attached to.
        first_example = next(itertools.islice(self.examples, 1))
        project = self._get_project(first_example) if self._upload_results else None
        self._print_experiment_start(project, first_example)
        self._metadata["num_repetitions"] = self._num_repetitions
        # self.examples has already applied num_repetitions above, so the new
        # manager can use the default repetition count of 1.
        return self.__class__(
            self.examples,
            experiment=project,
            metadata=self._metadata,
            client=self.client,
            runs=self._runs,
            evaluation_results=self._evaluation_results,
            upload_results=self._upload_results,
        )
    def with_predictions(
        self,
        target: TARGET_T,
        /,
        max_concurrency: Optional[int] = None,
    ) -> _ExperimentManager:
        """Lazily apply the target function to the experiment."""
        # copy_context() propagates the current tracing context into the
        # (lazy) prediction generator.
        context = copy_context()
        _experiment_results = context.run(
            self._predict, target, max_concurrency=max_concurrency
        )
        # Split the stream so examples and runs can be consumed independently.
        r1, r2 = itertools.tee(_experiment_results, 2)
        return _ExperimentManager(
            (pred["example"] for pred in r1),
            experiment=self._experiment,
            metadata=self._metadata,
            client=self.client,
            runs=(pred["run"] for pred in r2),
            upload_results=self._upload_results,
            # TODO: Can't do multiple prediction rounds rn.
        )
    def with_evaluators(
        self,
        evaluators: Sequence[
            Union[
                EVALUATOR_T,
                RunEvaluator,
            ]
        ],
        *,
        max_concurrency: Optional[int] = None,
    ) -> _ExperimentManager:
        """Lazily apply the provided evaluators to the experiment."""
        evaluators = _resolve_evaluators(evaluators)
        context = copy_context()
        experiment_results = context.run(
            self._score, evaluators, max_concurrency=max_concurrency
        )
        # Split the generator into three so the manager
        # can consume each value individually.
        r1, r2, r3 = itertools.tee(experiment_results, 3)
        return _ExperimentManager(
            (result["example"] for result in r1),
            experiment=self._experiment,
            metadata=self._metadata,
            client=self.client,
            runs=(result["run"] for result in r2),
            evaluation_results=(result["evaluation_results"] for result in r3),
            summary_results=self._summary_results,
            upload_results=self._upload_results,
        )
    def with_summary_evaluators(
        self,
        summary_evaluators: Sequence[SUMMARY_EVALUATOR_T],
    ) -> _ExperimentManager:
        """Lazily apply the provided summary evaluators to the experiment."""
        wrapped_evaluators = _wrap_summary_evaluators(summary_evaluators)
        context = copy_context()
        aggregate_feedback_gen = context.run(
            self._apply_summary_evaluators, wrapped_evaluators
        )
        return _ExperimentManager(
            self.examples,
            experiment=self._experiment,
            metadata=self._metadata,
            client=self.client,
            runs=self.runs,
            evaluation_results=self._evaluation_results,
            summary_results=aggregate_feedback_gen,
            upload_results=self._upload_results,
        )
    def get_results(self) -> Iterable[ExperimentResultRow]:
        """Return the traces, evaluation results, and associated examples."""
        # The three streams are produced in lockstep, so zip keeps each row's
        # run, example, and evaluation results aligned.
        for run, example, evaluation_results in zip(
            self.runs, self.examples, self.evaluation_results
        ):
            yield ExperimentResultRow(
                run=run,
                example=example,
                evaluation_results=evaluation_results,
            )
    def get_summary_scores(self) -> Dict[str, List[dict]]:
        """If summary_evaluators were applied, consume and return the results."""
        if self._summary_results is None:
            return {"results": []}
        # Consume the generator
        return {
            "results": [
                res  # type: ignore[misc]
                for results in self._summary_results
                for res in results["results"]
            ]
        }
    # Private methods
    def _predict(
        self, target: TARGET_T, /, max_concurrency: Optional[int] = None
    ) -> Generator[_ForwardResults, None, None]:
        """Run the target function on the examples.

        ``max_concurrency == 0`` runs serially in the caller's thread;
        otherwise a thread pool is used and results are yielded as they
        complete (not necessarily in input order).
        """
        fn = _ensure_traceable(target)
        if max_concurrency == 0:
            for example in self.examples:
                yield _forward(
                    fn,
                    example,
                    self.experiment_name,
                    self._metadata,
                    self.client,
                    self._upload_results,
                )
        else:
            with ls_utils.ContextThreadPoolExecutor(max_concurrency) as executor:
                futures = [
                    executor.submit(
                        _forward,
                        fn,
                        example,
                        self.experiment_name,
                        self._metadata,
                        self.client,
                        self._upload_results,
                    )
                    for example in self.examples
                ]
                for future in cf.as_completed(futures):
                    yield future.result()
        # Close out the project.
        self._end()
    def _run_evaluators(
        self,
        evaluators: Sequence[RunEvaluator],
        current_results: ExperimentResultRow,
        executor: cf.ThreadPoolExecutor,
    ) -> ExperimentResultRow:
        """Apply every evaluator to one result row, accumulating feedback.

        Evaluator exceptions are caught: error feedback is recorded under the
        evaluator's (statically extracted) feedback keys and the error is
        logged, so one failing evaluator does not abort the experiment.
        """
        current_context = rh.get_tracing_context()
        metadata = {
            **(current_context["metadata"] or {}),
            **{
                "experiment": self.experiment_name,
                "reference_example_id": current_results["example"].id,
                "reference_run_id": current_results["run"].id,
            },
        }
        # Evaluator traces go to a dedicated "evaluators" project.
        with rh.tracing_context(
            **{
                **current_context,
                "project_name": "evaluators",
                "metadata": metadata,
                "enabled": "local" if not self._upload_results else True,
                "client": self.client,
            }
        ):
            run = current_results["run"]
            example = current_results["example"]
            eval_results = current_results["evaluation_results"]
            for evaluator in evaluators:
                try:
                    evaluator_response = evaluator.evaluate_run(
                        run=run,
                        example=example,
                    )
                    eval_results["results"].extend(
                        self.client._select_eval_results(evaluator_response)
                    )
                    if self._upload_results:
                        # TODO: This is a hack
                        self.client._log_evaluation_feedback(
                            evaluator_response, run=run, _executor=executor
                        )
                except Exception as e:
                    try:
                        # Record the failure as error feedback under the keys
                        # this evaluator would have produced.
                        feedback_keys = _extract_feedback_keys(evaluator)
                        error_response = EvaluationResults(
                            results=[
                                EvaluationResult(
                                    key=key,
                                    source_run_id=run.id,
                                    comment=repr(e),
                                    extra={"error": True},
                                )
                                for key in feedback_keys
                            ]
                        )
                        eval_results["results"].extend(
                            self.client._select_eval_results(error_response)
                        )
                        if self._upload_results:
                            # TODO: This is a hack
                            self.client._log_evaluation_feedback(
                                error_response, run=run, _executor=executor
                            )
                    except Exception as e2:
                        logger.debug(f"Error parsing feedback keys: {e2}")
                        pass
                    logger.error(
                        f"Error running evaluator {repr(evaluator)} on"
                        f" run {run.id if run else ''}: {repr(e)}",
                        exc_info=True,
                    )
            return ExperimentResultRow(
                run=run,
                example=example,
                evaluation_results=eval_results,
            )
    def _score(
        self,
        evaluators: Sequence[RunEvaluator],
        max_concurrency: Optional[int] = None,
    ) -> Iterable[ExperimentResultRow]:
        """Run the evaluators on the prediction stream.
        Expects runs to be available in the manager.
        (e.g. from a previous prediction step)
        """
        # The executor is created even on the serial path because
        # _run_evaluators uses it to log feedback asynchronously.
        with ls_utils.ContextThreadPoolExecutor(
            max_workers=max_concurrency or 1
        ) as executor:
            if max_concurrency == 0:
                context = copy_context()
                for current_results in self.get_results():
                    yield context.run(
                        self._run_evaluators,
                        evaluators,
                        current_results,
                        executor,
                    )
            else:
                futures = set()
                for current_results in self.get_results():
                    futures.add(
                        executor.submit(
                            self._run_evaluators,
                            evaluators,
                            current_results,
                            executor,
                        )
                    )
                    try:
                        # Since prediction may be slow, yield (with a timeout) to
                        # allow for early results to be emitted.
                        for future in cf.as_completed(futures, timeout=0.001):
                            yield future.result()
                            futures.remove(future)
                    except (cf.TimeoutError, TimeoutError):
                        pass
                for future in cf.as_completed(futures):
                    result = future.result()
                    yield result
    def _apply_summary_evaluators(
        self, summary_evaluators: Sequence[SUMMARY_EVALUATOR_T]
    ) -> Generator[EvaluationResults, None, None]:
        """Run summary evaluators over all (run, example) pairs and yield
        one aggregate EvaluationResults; evaluator errors are logged and
        skipped."""
        # Materialize both streams; summary evaluators need the full lists.
        runs, examples = [], []
        for run, example in zip(self.runs, self.examples):
            runs.append(run)
            examples.append(example)
        aggregate_feedback = []
        with ls_utils.ContextThreadPoolExecutor() as executor:
            project_id = self._get_experiment().id if self._upload_results else None
            current_context = rh.get_tracing_context()
            metadata = {
                **(current_context["metadata"] or {}),
                **{
                    "experiment": self.experiment_name,
                    "experiment_id": project_id,
                },
            }
            with rh.tracing_context(
                **{
                    **current_context,
                    "project_name": "evaluators",
                    "metadata": metadata,
                    "client": self.client,
                    "enabled": "local" if not self._upload_results else True,
                }
            ):
                for evaluator in summary_evaluators:
                    try:
                        summary_eval_result = evaluator(runs, examples)
                        # TODO: Expose public API for this.
                        flattened_results = self.client._select_eval_results(
                            summary_eval_result,
                            fn_name=evaluator.__name__,
                        )
                        aggregate_feedback.extend(flattened_results)
                        if self._upload_results:
                            # Summary feedback attaches to the project, not a run.
                            for result in flattened_results:
                                feedback = result.dict(exclude={"target_run_id"})
                                evaluator_info = feedback.pop("evaluator_info", None)
                                executor.submit(
                                    self.client.create_feedback,
                                    **feedback,
                                    run_id=None,
                                    project_id=project_id,
                                    source_info=evaluator_info,
                                )
                    except Exception as e:
                        logger.error(
                            f"Error running summary evaluator {repr(evaluator)}: {e}",
                            exc_info=True,
                        )
        yield {"results": aggregate_feedback}
    def _get_dataset_version(self) -> Optional[str]:
        """Return the latest example modification time as an ISO string."""
        examples = list(self.examples)
        modified_at = [ex.modified_at for ex in examples if ex.modified_at]
        # Should always be defined in practice when fetched,
        # but the typing permits None
        max_modified_at = max(modified_at) if modified_at else None
        return max_modified_at.isoformat() if max_modified_at else None
    def _get_dataset_splits(self) -> Optional[list[str]]:
        """Collect the dataset split names covered by the examples.

        Examples without a list-valued "dataset_split" metadata entry count
        toward the "base" split.
        """
        examples = list(self.examples)
        splits = set()
        for example in examples:
            if (
                example.metadata
                and example.metadata.get("dataset_split")
                and isinstance(example.metadata["dataset_split"], list)
            ):
                for split in example.metadata["dataset_split"]:
                    if isinstance(split, str):
                        splits.add(split)
            else:
                splits.add("base")
        return list(splits)
    def _end(self) -> None:
        """Mark the experiment finished: stamp the end time and record the
        dataset version/splits in the project metadata. No-op when results
        are not being uploaded."""
        if not self._upload_results:
            return
        experiment = self._experiment
        if experiment is None:
            raise ValueError("Experiment not started yet.")
        project_metadata = self._get_experiment_metadata()
        project_metadata["dataset_version"] = self._get_dataset_version()
        project_metadata["dataset_splits"] = self._get_dataset_splits()
        self.client.update_project(
            experiment.id,
            end_time=experiment.end_time
            or datetime.datetime.now(datetime.timezone.utc),
            metadata={
                **experiment.metadata,
                **project_metadata,
            },
        )
def _resolve_evaluators(
    evaluators: Sequence[Union[EVALUATOR_T, RunEvaluator, AEVALUATOR_T]],
) -> Sequence[RunEvaluator]:
    """Coerce each provided evaluator into a RunEvaluator instance."""
    def _coerce(evaluator):
        # Already a RunEvaluator: pass through unchanged.
        if isinstance(evaluator, RunEvaluator):
            return evaluator
        # LangChain string evaluators expose an adapter.
        if isinstance(evaluator, LangChainStringEvaluator):
            return evaluator.as_run_evaluator()
        # Plain callables are wrapped.
        return run_evaluator(evaluator)
    return [_coerce(evaluator) for evaluator in evaluators]
def _wrap_summary_evaluators(
    evaluators: Sequence[SUMMARY_EVALUATOR_T],
) -> List[SUMMARY_EVALUATOR_T]:
    """Wrap each summary evaluator so its invocation is traced.

    The traceable inner function receives short placeholder strings
    ("Runs[] (Length=N)") as its inputs while the real runs/examples are
    taken from the closure, so traces show compact summaries instead of the
    full payloads.
    """
    def _wrap(evaluator: SUMMARY_EVALUATOR_T) -> SUMMARY_EVALUATOR_T:
        # Capture the display name before normalization may replace the object.
        eval_name = getattr(evaluator, "__name__", "BatchEvaluator")
        evaluator = _normalize_summary_evaluator(evaluator)
        @functools.wraps(evaluator)
        def _wrapper_inner(
            runs: Sequence[schemas.Run], examples: Sequence[schemas.Example]
        ) -> Union[EvaluationResult, EvaluationResults]:
            @rh.traceable(name=eval_name)
            def _wrapper_super_inner(
                runs_: str, examples_: str
            ) -> Union[EvaluationResult, EvaluationResults]:
                # The actual data comes from the enclosing scope; the string
                # parameters exist only for trace display.
                return evaluator(list(runs), list(examples))
            return _wrapper_super_inner(
                f"Runs[] (Length={len(runs)})", f"Examples[] (Length={len(examples)})"
            )
        return _wrapper_inner
    results = []
    for evaluator in evaluators:
        results.append(_wrap(evaluator))
    return results
class _ForwardResults(TypedDict):
    """Pairing of a traced run with the dataset example that produced it."""
    # The run captured by the on_end tracing callback in _forward.
    run: schemas.Run
    # The dataset example the target was invoked on.
    example: schemas.Example
def _forward(
    fn: rh.SupportsLangsmithExtra,
    example: schemas.Example,
    experiment_name: str,
    metadata: dict,
    client: langsmith.Client,
    upload_results: bool,
) -> _ForwardResults:
    """Invoke the target on one example, traced into the experiment project.

    Exceptions from the target are logged (not re-raised) so that a single
    failing example does not abort the whole experiment; the captured run
    still reflects the failure.
    """
    run: Optional[schemas.RunBase] = None
    def _get_run(r: rt.RunTree) -> None:
        # on_end callback: capture the finished run tree for the return value.
        nonlocal run
        run = r
    # When uploads are disabled, trace locally only.
    with rh.tracing_context(enabled="local" if not upload_results else True):
        # Record which version of the example was evaluated.
        example_version = (
            example.modified_at.isoformat()
            if example.modified_at
            else example.created_at.isoformat()
        )
        langsmith_extra = rh.LangSmithExtra(
            reference_example_id=example.id,
            on_end=_get_run,
            project_name=experiment_name,
            metadata={**metadata, "example_version": example_version},
            client=client,
        )
        try:
            fn(example.inputs, langsmith_extra=langsmith_extra)
        except Exception as e:
            logger.error(
                f"Error running target function: {e}", exc_info=True, stacklevel=1
            )
    return _ForwardResults(run=cast(schemas.Run, run), example=example)
def _is_valid_uuid(value: str) -> bool:
try:
uuid.UUID(value)
return True
except ValueError:
return False
def _resolve_data(
    data: DATA_T, *, client: langsmith.Client
) -> Iterable[schemas.Example]:
    """Return the examples for the given dataset."""
    # UUID object -> dataset id lookup.
    if isinstance(data, uuid.UUID):
        return client.list_examples(dataset_id=data)
    # String -> either a UUID string or a dataset name.
    if isinstance(data, str):
        if _is_valid_uuid(data):
            return client.list_examples(dataset_id=uuid.UUID(data))
        return client.list_examples(dataset_name=data)
    # Dataset object -> use its id.
    if isinstance(data, schemas.Dataset):
        return client.list_examples(dataset_id=data.id)
    # Anything else is assumed to already be an iterable of examples.
    return data
def _ensure_traceable(
    target: TARGET_T | rh.SupportsLangsmithExtra[[dict], dict] | Runnable,
) -> rh.SupportsLangsmithExtra[[dict], dict]:
    """Coerce the evaluation target into a traceable function.

    Raises:
        ValueError: If ``target`` is not callable.
    """
    if not _is_callable(target):
        raise ValueError(
            "Target must be a callable function or a langchain/langgraph object. For "
            "example:\n\n"
            "def predict(inputs: dict) -> dict:\n"
            "    # do work, like chain.invoke(inputs)\n"
            "    return {...}\n\n"
            "evaluate(\n"
            "    predict,\n"
            "    ...\n"
            ")"
        )
    # Already wrapped by @traceable: use as-is.
    if rh.is_traceable_function(target):
        fn: rh.SupportsLangsmithExtra[[dict], dict] = target
        return fn
    # Runnables are invoked via .invoke; plain callables are wrapped directly.
    if _is_langchain_runnable(target):
        target = target.invoke  # type: ignore[union-attr]
    return rh.traceable(name="Target")(cast(Callable, target))
def _resolve_experiment(
    experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]],
    runs: Optional[Iterable[schemas.Run]],
    client: langsmith.Client,
) -> Tuple[
    Optional[Union[schemas.TracerSession, str]], Optional[Iterable[schemas.Run]]
]:
    """Resolve a pre-existing experiment and/or its runs, validating both.

    Returns a ``(experiment, runs)`` tuple; both elements are None when
    neither an experiment nor runs were provided.

    Raises:
        ValueError: If a resolved experiment lacks a name or a
            reference_dataset_id.
    """
    # TODO: Remove this, handle outside the manager
    if experiment is not None:
        if isinstance(experiment, schemas.TracerSession):
            experiment_ = experiment
        else:
            experiment_ = _load_experiment(experiment, client)
        if not experiment_.name:
            raise ValueError("Experiment name must be defined if provided.")
        if not experiment_.reference_dataset_id:
            raise ValueError(
                "Experiment must have an associated reference_dataset_id, "
                "but none was provided."
            )
        return experiment_, runs
    # If we have runs, that means the experiment was already started.
    if runs is not None:
        # Peek at the first run via tee (so `runs` itself is not consumed)
        # to discover which experiment the runs belong to.
        runs_, runs = itertools.tee(runs)
        first_run = next(runs_)
        experiment_ = client.read_project(project_id=first_run.session_id)
        if not experiment_.name:
            raise ValueError("Experiment name not found for provided runs.")
        return experiment_, runs
    return None, None
def _get_random_name() -> str:
    """Generate a random, human-readable experiment name."""
    # Imported lazily: the name table is only needed when no name was given.
    from langsmith.evaluation._name_generation import random_name  # noqa: F401
    return random_name()
def _extract_feedback_keys(evaluator: RunEvaluator):
    """Best-effort discovery of the feedback keys an evaluator will emit.

    Used to attach error feedback under the right keys when an evaluator
    raises. Returns an empty list when the keys cannot be determined.
    """
    if isinstance(evaluator, DynamicRunEvaluator):
        # Wrapped sync (func) or async (afunc) code evaluator: statically
        # inspect its source for the feedback keys it returns.
        if getattr(evaluator, "func", None):
            return _extract_code_evaluator_feedback_keys(evaluator.func)
        elif getattr(evaluator, "afunc", None):
            return _extract_code_evaluator_feedback_keys(evaluator.afunc)
    # TODO: Support for DynamicComparisonRunEvaluator
    if hasattr(evaluator, "evaluator"):
        # LangChainStringEvaluator
        if getattr(getattr(evaluator, "evaluator"), "evaluation_name", None):
            return [evaluator.evaluator.evaluation_name]
    return []
def _extract_code_evaluator_feedback_keys(func: Callable) -> list[str]:
python_code = inspect.getsource(func)
def extract_dict_keys(node):
if isinstance(node, ast.Dict):
keys = []
key_value = None
for key, value in zip(node.keys, node.values):
if isinstance(key, (ast.Str, ast.Constant)):
key_str = key.s if isinstance(key, ast.Str) else key.value
if key_str == "key" and isinstance(value, (ast.Str, ast.Constant)):
key_value = (
value.s if isinstance(value, ast.Str) else value.value
)
return [key_value] if key_value else keys
elif (
isinstance(node, ast.Call)
and isinstance(node.func, ast.Name)
and node.func.id == "dict"
):
for keyword in node.keywords:
if keyword.arg == "key" and isinstance(
keyword.value, (ast.Str, ast.Constant)
):
return [
(
keyword.value.s
if isinstance(keyword.value, ast.Str)
else keyword.value.value
)
]
return []
def extract_evaluation_result_key(node):
if (
isinstance(node, ast.Call)
and isinstance(node.func, ast.Name)
and node.func.id == "EvaluationResult"
):
for keyword in node.keywords:
if keyword.arg == "key" and isinstance(
keyword.value, (ast.Str, ast.Constant)
):
return [
(
keyword.value.s
if isinstance(keyword.value, ast.Str)
else keyword.value.value
)
]
return []
def extract_evaluation_results_keys(node, variables):
if (
isinstance(node, ast.Call)
and isinstance(node.func, ast.Name)
and node.func.id == "EvaluationResults"
):
for keyword in node.keywords:
if keyword.arg == "results":
if isinstance(keyword.value, ast.Name):
return variables.get(keyword.value.id, [])
elif isinstance(keyword.value, ast.List):
keys = []
for elt in keyword.value.elts:
keys.extend(extract_evaluation_result_key(elt))
return keys
elif isinstance(node, ast.Dict):
for key, value in zip(node.keys, node.values):
if isinstance(key, (ast.Str, ast.Constant)) and key.s == "results":
if isinstance(value, ast.List):
keys = []
for elt in value.elts:
if isinstance(elt, ast.Dict):
for elt_key, elt_value in zip(elt.keys, elt.values):
if (
isinstance(elt_key, (ast.Str, ast.Constant))
and elt_key.s == "key"
):
if isinstance(
elt_value, (ast.Str, ast.Constant)
):
keys.append(elt_value.s)
elif (
isinstance(elt, ast.Call)
and isinstance(elt.func, ast.Name)
and elt.func.id in ("EvaluationResult", "dict")
):
for keyword in elt.keywords:
if keyword.arg == "key" and isinstance(
keyword.value, (ast.Str, ast.Constant)
):
keys.append(
keyword.value.s
if isinstance(keyword.value, ast.Str)
else keyword.value.value
)
return keys
return []
python_code = textwrap.dedent(python_code)
try:
tree = ast.parse(python_code)
function_def = tree.body[0]
if not isinstance(function_def, (ast.FunctionDef, ast.AsyncFunctionDef)):
return []
variables = {}
keys = []
for node in ast.walk(function_def):
if isinstance(node, ast.Assign):
if isinstance(node.value, ast.List):
list_keys = []
for elt in node.value.elts:
list_keys.extend(extract_evaluation_result_key(elt))
if isinstance(node.targets[0], ast.Name):
variables[node.targets[0].id] = list_keys
elif isinstance(node, ast.Return) and node.value is not None:
dict_keys = extract_dict_keys(node.value)
eval_result_key = extract_evaluation_result_key(node.value)
eval_results_keys = extract_evaluation_results_keys(
node.value, variables
)
keys.extend(dict_keys)
keys.extend(eval_result_key)
keys.extend(eval_results_keys)
# If no keys found, return the function name
return keys if keys else [function_def.name]
except SyntaxError:
return []
def _to_pandas(
    results: list[ExperimentResultRow],
    start: Optional[int] = 0,
    end: Optional[int] = None,
):
    """Convert experiment result rows to a pandas DataFrame.

    Raises:
        ImportError: If pandas is not installed.
    """
    try:
        import pandas as pd
    except ImportError as e:
        raise ImportError(
            "The 'pandas' library is required to use the 'to_pandas' function. "
            "Please install it using 'pip install pandas' or "
            "'conda install pandas' before calling this method."
        ) from e
    flattened = _flatten_experiment_results(results, start=start, end=end)
    return pd.DataFrame(flattened)
def _flatten_experiment_results(
results: list[ExperimentResultRow],
start: Optional[int] = 0,
end: Optional[int] = None,
):
return [
{
**{f"inputs.{k}": v for k, v in x["example"].inputs.items()},
**{f"outputs.{k}": v for k, v in (x["run"].outputs or {}).items()},
"error": x["run"].error,
**(
{f"reference.{k}": v for k, v in x["example"].outputs.items()}
if x["example"].outputs is not None
else {}
),
**{
f"feedback.{r.key}": r.score if r.score is not None else r.value
for r in x["evaluation_results"]["results"]
},
"execution_time": (
(x["run"].end_time - x["run"].start_time).total_seconds()
if x["run"].end_time
else None
),
"example_id": x["run"].reference_example_id,
"id": x["run"].id,
}
for x in results[start:end]
]
@functools.lru_cache(maxsize=1)
def _import_langchain_runnable() -> Optional[type]:
try:
from langchain_core.runnables import Runnable
return Runnable
except ImportError:
return None
def _is_langchain_runnable(o: Any) -> bool:
    """Return True if ``o`` is a langchain Runnable (False when langchain
    is not installed)."""
    runnable_cls = _import_langchain_runnable()
    return runnable_cls is not None and isinstance(o, runnable_cls)
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/evaluation/_arunner.py | """V2 Evaluation Interface."""
from __future__ import annotations
import asyncio
import concurrent.futures as cf
import datetime
import logging
import pathlib
import uuid
from typing import (
TYPE_CHECKING,
Any,
AsyncIterable,
AsyncIterator,
Awaitable,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
TypeVar,
Union,
cast,
)
import langsmith
from langsmith import run_helpers as rh
from langsmith import run_trees, schemas
from langsmith import run_trees as rt
from langsmith import utils as ls_utils
from langsmith._internal import _aiter as aitertools
from langsmith._internal._beta_decorator import _warn_once
from langsmith.evaluation._runner import (
AEVALUATOR_T,
DATA_T,
EVALUATOR_T,
ExperimentResultRow,
_ExperimentManagerMixin,
_extract_feedback_keys,
_ForwardResults,
_is_langchain_runnable,
_load_examples_map,
_load_experiment,
_load_tqdm,
_load_traces,
_resolve_data,
_resolve_evaluators,
_resolve_experiment,
_to_pandas,
_wrap_summary_evaluators,
)
from langsmith.evaluation.evaluator import (
SUMMARY_EVALUATOR_T,
EvaluationResult,
EvaluationResults,
RunEvaluator,
)
if TYPE_CHECKING:
import pandas as pd
from langchain_core.runnables import Runnable
DataFrame = pd.DataFrame
else:
DataFrame = Any
logger = logging.getLogger(__name__)
ATARGET_T = Callable[[dict], Awaitable[dict]]
async def aevaluate(
    target: Union[
        ATARGET_T, AsyncIterable[dict], Runnable, str, uuid.UUID, schemas.TracerSession
    ],
    /,
    data: Union[
        DATA_T, AsyncIterable[schemas.Example], Iterable[schemas.Example], None
    ] = None,
    evaluators: Optional[Sequence[Union[EVALUATOR_T, AEVALUATOR_T]]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    experiment_prefix: Optional[str] = None,
    description: Optional[str] = None,
    max_concurrency: Optional[int] = 0,
    num_repetitions: int = 1,
    client: Optional[langsmith.Client] = None,
    blocking: bool = True,
    experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None,
    upload_results: bool = True,
    **kwargs: Any,
) -> AsyncExperimentResults:
    r"""Evaluate an async target system on a given dataset.
    Args:
        target (AsyncCallable[[dict], dict] | AsyncIterable[dict] | Runnable | EXPERIMENT_T | Tuple[EXPERIMENT_T, EXPERIMENT_T]):
            The target system or experiment(s) to evaluate. Can be an async function
            that takes a dict and returns a dict, a langchain Runnable, an
            existing experiment ID, or a two-tuple of experiment IDs.
        data (Union[DATA_T, AsyncIterable[schemas.Example]]): The dataset to evaluate on. Can be a dataset name, a list of
            examples, an async generator of examples, or an async iterable of examples.
        evaluators (Optional[Sequence[EVALUATOR_T]]): A list of evaluators to run
            on each example. Defaults to None.
        summary_evaluators (Optional[Sequence[SUMMARY_EVALUATOR_T]]): A list of summary
            evaluators to run on the entire dataset. Defaults to None.
        metadata (Optional[dict]): Metadata to attach to the experiment.
            Defaults to None.
        experiment_prefix (Optional[str]): A prefix to provide for your experiment name.
            Defaults to None.
        description (Optional[str]): A description of the experiment.
        max_concurrency (int | None): The maximum number of concurrent
            evaluations to run. If None then no limit is set. If 0 then no concurrency.
            Defaults to 0.
        num_repetitions (int): The number of times to run the evaluation.
            Each item in the dataset will be run and evaluated this many times.
            Defaults to 1.
        client (Optional[langsmith.Client]): The LangSmith client to use.
            Defaults to None.
        blocking (bool): Whether to block until the evaluation is complete.
            Defaults to True.
        experiment (Optional[schemas.TracerSession]): An existing experiment to
            extend. If provided, experiment_prefix is ignored. For advanced
            usage only.
        load_nested: Whether to load all child runs for the experiment.
            Default is to only load the top-level root runs. Should only be specified
            when evaluating an existing experiment.
    Returns:
        AsyncIterator[ExperimentResultRow]: An async iterator over the experiment results.
    Environment:
        - LANGSMITH_TEST_CACHE: If set, API calls will be cached to disk to save time and
            cost during testing. Recommended to commit the cache files to your repository
            for faster CI/CD runs.
            Requires the 'langsmith[vcr]' package to be installed.
    Examples:
        >>> from typing import Sequence
        >>> from langsmith import Client, aevaluate
        >>> from langsmith.schemas import Example, Run
        >>> client = Client()
        >>> dataset = client.clone_public_dataset(
        ...     "https://smith.langchain.com/public/419dcab2-1d66-4b94-8901-0357ead390df/d"
        ... )
        >>> dataset_name = "Evaluate Examples"
    Basic usage:
        >>> def accuracy(run: Run, example: Example):
        ...     # Row-level evaluator for accuracy.
        ...     pred = run.outputs["output"]
        ...     expected = example.outputs["answer"]
        ...     return {"score": expected.lower() == pred.lower()}
        >>> def precision(runs: Sequence[Run], examples: Sequence[Example]):
        ...     # Experiment-level evaluator for precision.
        ...     # TP / (TP + FP)
        ...     predictions = [run.outputs["output"].lower() for run in runs]
        ...     expected = [example.outputs["answer"].lower() for example in examples]
        ...     # yes and no are the only possible answers
        ...     tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
        ...     fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
        ...     return {"score": tp / (tp + fp)}
        >>> import asyncio
        >>> async def apredict(inputs: dict) -> dict:
        ...     # This can be any async function or just an API call to your app.
        ...     await asyncio.sleep(0.1)
        ...     return {"output": "Yes"}
        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...         evaluators=[accuracy],
        ...         summary_evaluators=[precision],
        ...         experiment_prefix="My Experiment",
        ...         description="Evaluate the accuracy of the model asynchronously.",
        ...         metadata={
        ...             "my-prompt-version": "abcd-1234",
        ...         },
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    Evaluating over only a subset of the examples using an async generator:
        >>> async def example_generator():
        ...     examples = client.list_examples(dataset_name=dataset_name, limit=5)
        ...     for example in examples:
        ...         yield example
        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=example_generator(),
        ...         evaluators=[accuracy],
        ...         summary_evaluators=[precision],
        ...         experiment_prefix="My Subset Experiment",
        ...         description="Evaluate a subset of examples asynchronously.",
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    Streaming each prediction to more easily + eagerly debug.
        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...         evaluators=[accuracy],
        ...         summary_evaluators=[precision],
        ...         experiment_prefix="My Streaming Experiment",
        ...         description="Streaming predictions for debugging.",
        ...         blocking=False,
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
        >>> async def aenumerate(iterable):
        ...     async for elem in iterable:
        ...         print(elem)
        >>> asyncio.run(aenumerate(results))
    Running without concurrency:
        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...         evaluators=[accuracy],
        ...         summary_evaluators=[precision],
        ...         experiment_prefix="My Experiment Without Concurrency",
        ...         description="This was run without concurrency.",
        ...         max_concurrency=0,
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    Using Async evaluators:
        >>> async def helpfulness(run: Run, example: Example):
        ...     # Row-level evaluator for helpfulness.
        ...     await asyncio.sleep(5)  # Replace with your LLM API call
        ...     return {"score": run.outputs["output"] == "Yes"}
        >>> results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...         evaluators=[helpfulness],
        ...         summary_evaluators=[precision],
        ...         experiment_prefix="My Helpful Experiment",
        ...         description="Applying async evaluators example.",
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    .. versionchanged:: 0.2.0
        'max_concurrency' default updated from None (no limit on concurrency)
        to 0 (no concurrency at all).
    """  # noqa: E501
    # Dispatch 1/3: target is an existing experiment (name/ID/session) —
    # re-evaluate its stored runs instead of invoking a target function.
    if isinstance(target, (str, uuid.UUID, schemas.TracerSession)):
        # These options only affect *new* experiment creation, so reject them
        # explicitly rather than silently ignoring caller intent.
        invalid_args = {
            "num_repetitions": num_repetitions > 1,
            "experiment": bool(experiment),
            "upload_results": not upload_results,
            "experiment_prefix": bool(experiment_prefix),
            "data": bool(data),
        }
        if any(invalid_args.values()):
            msg = (
                f"Received invalid arguments. "
                f"{tuple(k for k, v in invalid_args.items() if v)} should not be "
                f"specified when target is an existing experiment."
            )
            raise ValueError(msg)
        target_id = target if isinstance(target, (str, uuid.UUID)) else target.id
        logger.debug(f"Running evaluation over existing experiment {target_id}...")
        return await aevaluate_existing(
            target,
            evaluators=evaluators,
            summary_evaluators=summary_evaluators,
            metadata=metadata,
            max_concurrency=max_concurrency,
            client=client,
            blocking=blocking,
            **kwargs,
        )
    # Dispatch 2/3: a tuple of experiments means a comparative evaluation,
    # which only the sync `evaluate()` supports.
    elif isinstance(target, tuple):
        msg = (
            "Running a comparison of two existing experiments asynchronously is not "
            "currently supported. Please use the `evaluate()` method instead and make "
            "sure that your evaluators are defined as synchronous functions."
        )
        raise ValueError(msg)
    # Extra kwargs are only meaningful for the existing-experiment path above.
    elif kwargs:
        msg = (
            f"Received unsupported arguments {kwargs}. These arguments are not "
            f"supported when creating a new experiment."
        )
        raise ValueError(msg)
    elif not data:
        msg = "Must specify 'data' when running evaluations over a target function."
        raise ValueError(msg)
    elif experiment and experiment_prefix:
        msg = (
            "Expected at most one of 'experiment' or 'experiment_prefix',"
            " but both were provided. "
            f"Got: experiment={experiment}, experiment_prefix={experiment_prefix}"
        )
        raise ValueError(msg)
    # Dispatch 3/3: run a fresh experiment over the target function/runnable.
    else:
        if not upload_results:
            _warn_once("'upload_results' parameter is in beta.")
        logger.debug(f"Running evaluation over target system {target}...")
        return await _aevaluate(
            target,
            data=data,
            evaluators=evaluators,
            summary_evaluators=summary_evaluators,
            metadata=metadata,
            experiment_prefix=experiment_prefix,
            description=description,
            max_concurrency=max_concurrency,
            num_repetitions=num_repetitions,
            client=client,
            blocking=blocking,
            experiment=experiment,
            upload_results=upload_results,
        )
async def aevaluate_existing(
    experiment: Union[str, uuid.UUID, schemas.TracerSession],
    /,
    evaluators: Optional[Sequence[Union[EVALUATOR_T, AEVALUATOR_T]]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    max_concurrency: Optional[int] = 0,
    client: Optional[langsmith.Client] = None,
    load_nested: bool = False,
    blocking: bool = True,
) -> AsyncExperimentResults:
    r"""Evaluate existing experiment runs asynchronously.
    Args:
        experiment (Union[str, uuid.UUID]): The identifier of the experiment to evaluate.
        evaluators (Optional[Sequence[EVALUATOR_T]]): Optional sequence of evaluators to use for individual run evaluation.
        summary_evaluators (Optional[Sequence[SUMMARY_EVALUATOR_T]]): Optional sequence of evaluators
            to apply over the entire dataset.
        metadata (Optional[dict]): Optional metadata to include in the evaluation results.
        max_concurrency (int | None): The maximum number of concurrent
            evaluations to run. If None then no limit is set. If 0 then no concurrency.
            Defaults to 0.
        client (Optional[langsmith.Client]): Optional Langsmith client to use for evaluation.
        load_nested: Whether to load all child runs for the experiment.
            Default is to only load the top-level root runs.
        blocking (bool): Whether to block until evaluation is complete.
    Returns:
        AsyncIterator[ExperimentResultRow]: An async iterator over the experiment results.
    Examples:
        Define your evaluators
        >>> from typing import Sequence
        >>> from langsmith.schemas import Example, Run
        >>> def accuracy(run: Run, example: Example):
        ...     # Row-level evaluator for accuracy.
        ...     pred = run.outputs["output"]
        ...     expected = example.outputs["answer"]
        ...     return {"score": expected.lower() == pred.lower()}
        >>> def precision(runs: Sequence[Run], examples: Sequence[Example]):
        ...     # Experiment-level evaluator for precision.
        ...     # TP / (TP + FP)
        ...     predictions = [run.outputs["output"].lower() for run in runs]
        ...     expected = [example.outputs["answer"].lower() for example in examples]
        ...     # yes and no are the only possible answers
        ...     tp = sum([p == e for p, e in zip(predictions, expected) if p == "yes"])
        ...     fp = sum([p == "yes" and e == "no" for p, e in zip(predictions, expected)])
        ...     return {"score": tp / (tp + fp)}
    Load the experiment and run the evaluation.
        >>> from langsmith import aevaluate, aevaluate_existing
        >>> dataset_name = "Evaluate Examples"
        >>> async def apredict(inputs: dict) -> dict:
        ...     # This can be any async function or just an API call to your app.
        ...     await asyncio.sleep(0.1)
        ...     return {"output": "Yes"}
        >>> # First run inference on the dataset
        ... results = asyncio.run(
        ...     aevaluate(
        ...         apredict,
        ...         data=dataset_name,
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    Then evaluate the results
        >>> experiment_name = "My Experiment:64e6e91"  # Or manually specify
        >>> results = asyncio.run(
        ...     aevaluate_existing(
        ...         experiment_name,
        ...         evaluators=[accuracy],
        ...         summary_evaluators=[precision],
        ...     )
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    """  # noqa: E501
    client = client or run_trees.get_cached_client()
    # Resolve the experiment (tracer project). Blocking SDK calls are pushed to
    # worker threads via aio_to_thread so the event loop stays responsive.
    project = (
        experiment
        if isinstance(experiment, schemas.TracerSession)
        else (await aitertools.aio_to_thread(_load_experiment, experiment, client))
    )
    # Fetch the experiment's stored runs and the dataset examples they
    # reference, then pair each run with its example for re-evaluation.
    runs = await aitertools.aio_to_thread(
        _load_traces, experiment, client, load_nested=load_nested
    )
    data_map = await aitertools.aio_to_thread(_load_examples_map, client, project)
    # Order examples to match runs so downstream zipping lines them up.
    data = [data_map[run.reference_example_id] for run in runs]
    return await _aevaluate(
        runs,
        data=data,
        evaluators=evaluators,
        summary_evaluators=summary_evaluators,
        metadata=metadata,
        max_concurrency=max_concurrency,
        client=client,
        blocking=blocking,
        experiment=project,
    )
async def _aevaluate(
    target: Union[ATARGET_T, AsyncIterable[dict], Iterable[schemas.Run], Runnable],
    /,
    data: Union[DATA_T, AsyncIterable[schemas.Example]],
    evaluators: Optional[Sequence[Union[EVALUATOR_T, AEVALUATOR_T]]] = None,
    summary_evaluators: Optional[Sequence[SUMMARY_EVALUATOR_T]] = None,
    metadata: Optional[dict] = None,
    experiment_prefix: Optional[str] = None,
    description: Optional[str] = None,
    max_concurrency: Optional[int] = None,
    num_repetitions: int = 1,
    client: Optional[langsmith.Client] = None,
    blocking: bool = True,
    experiment: Optional[Union[schemas.TracerSession, str, uuid.UUID]] = None,
    upload_results: bool = True,
) -> AsyncExperimentResults:
    # Shared pipeline behind aevaluate/aevaluate_existing: builds an
    # _AsyncExperimentManager, chains predictions + evaluators onto it, and
    # wraps the result in AsyncExperimentResults.
    #
    # `target` is "async" if it is a coroutine function, an async iterable
    # (note: `target.__aiter__()` is invoked here purely to probe whether it
    # yields a coroutine), or a langchain Runnable. Otherwise `target` is
    # assumed to be a collection of existing runs.
    is_async_target = (
        asyncio.iscoroutinefunction(target)
        or (hasattr(target, "__aiter__") and asyncio.iscoroutine(target.__aiter__()))
        or _is_langchain_runnable(target)
    )
    client = client or rt.get_cached_client()
    runs = None if is_async_target else cast(Iterable[schemas.Run], target)
    # Resolve the experiment/runs pairing off the event loop (blocking call).
    experiment_, runs = await aitertools.aio_to_thread(
        _resolve_experiment,
        experiment,
        runs,
        client,
    )
    manager = await _AsyncExperimentManager(
        data,
        client=client,
        metadata=metadata,
        experiment=experiment_ or experiment_prefix,
        description=description,
        num_repetitions=num_repetitions,
        runs=runs,
        upload_results=upload_results,
    ).astart()
    # Optional on-disk API-call cache (LANGSMITH_TEST_CACHE), keyed by dataset.
    cache_dir = ls_utils.get_cache_dir(None)
    if cache_dir is not None:
        dsid = await manager.get_dataset_id()
        cache_path = pathlib.Path(cache_dir) / f"{dsid}.yaml"
    else:
        cache_path = None
    # The cache context must wrap the whole pipeline so that prediction and
    # evaluation traffic is captured/replayed together.
    with ls_utils.with_optional_cache(cache_path, ignore_hosts=[client.api_url]):
        if is_async_target:
            manager = await manager.awith_predictions(
                cast(ATARGET_T, target), max_concurrency=max_concurrency
            )
        if evaluators:
            manager = await manager.awith_evaluators(
                evaluators, max_concurrency=max_concurrency
            )
        if summary_evaluators:
            manager = await manager.awith_summary_evaluators(summary_evaluators)
        results = AsyncExperimentResults(manager)
        if blocking:
            # Drain the background task so the experiment is fully processed
            # before returning.
            await results.wait()
        return results
class _AsyncExperimentManager(_ExperimentManagerMixin):
    """Manage the execution of experiments asynchronously.

    Supports lazily running predictions and evaluations in parallel to facilitate
    result streaming and early debugging.

    Args:
        data (DATA_T): The data used for the experiment. Can be a dataset name or ID OR
            a generator of examples.
        runs (Optional[Iterable[schemas.Run]]): The runs associated with the experiment
            predictions.
        experiment (Optional[schemas.TracerSession]): The tracer session
            associated with the experiment.
        experiment_prefix (Optional[str]): The prefix for the experiment name.
        description (Optional[str]): The description for the experiment.
        metadata (Optional[dict]): Additional metadata for the experiment.
        client (Optional[langsmith.Client]): The Langsmith client used for
            the experiment.
        evaluation_results (Optional[Iterable[EvaluationResults]]): The evaluation
            results for the experiment.
        summary_results (Optional[Iterable[EvaluationResults]]): The aggregate results
            for the experiment.
    """

    def __init__(
        self,
        data: Union[DATA_T, AsyncIterable[schemas.Example]],
        /,
        experiment: Optional[Union[schemas.TracerSession, str]] = None,
        metadata: Optional[dict] = None,
        runs: Optional[Union[Iterable[schemas.Run], AsyncIterable[schemas.Run]]] = None,
        client: Optional[langsmith.Client] = None,
        evaluation_results: Optional[AsyncIterable[EvaluationResults]] = None,
        summary_results: Optional[AsyncIterable[EvaluationResults]] = None,
        description: Optional[str] = None,
        num_repetitions: int = 1,
        upload_results: bool = True,
    ):
        super().__init__(
            experiment=experiment,
            metadata=metadata,
            client=client,
            description=description,
        )
        self._data = data
        # Lazily resolved async stream of examples; populated on first access.
        self._examples: Optional[AsyncIterable[schemas.Example]] = None
        self._runs = (
            aitertools.ensure_async_iterator(runs) if runs is not None else None
        )
        self._evaluation_results = evaluation_results
        self._summary_results = summary_results
        self._num_repetitions = num_repetitions
        self._upload_results = upload_results

    async def aget_examples(self) -> AsyncIterator[schemas.Example]:
        """Return an independent async iterator over the experiment examples.

        The underlying stream is tee'd on each call: the manager keeps one
        branch for later re-reads while handing the other to the caller.
        """
        if self._examples is None:
            self._examples = _aresolve_data(self._data, client=self.client)
            if self._num_repetitions > 1:
                # Replay the full example stream once per repetition.
                self._examples = async_chain_from_iterable(
                    aitertools.atee(self._examples, self._num_repetitions)
                )
        self._examples, examples_iter = aitertools.atee(
            aitertools.ensure_async_iterator(self._examples), 2, lock=asyncio.Lock()
        )
        return examples_iter

    async def get_dataset_id(self) -> str:
        """Return the dataset ID, preferring the experiment's reference dataset."""
        if self._experiment is None or not getattr(
            self._experiment, "reference_dataset_id", None
        ):
            # Fall back to the dataset of the first example.
            example = await aitertools.py_anext(await self.aget_examples())
            if example is None:
                raise ValueError("No examples found in the dataset.")
            return str(example.dataset_id)
        return str(self._experiment.reference_dataset_id)

    async def aget_runs(self) -> AsyncIterator[schemas.Run]:
        """Yield the experiment's runs; raises if predictions haven't started."""
        if self._runs is None:
            raise ValueError("Runs not loaded yet.")
        self._runs, runs = aitertools.atee(
            aitertools.ensure_async_iterator(self._runs), 2, lock=asyncio.Lock()
        )
        async for run in runs:
            yield run

    async def aget_evaluation_results(self) -> AsyncIterator[EvaluationResults]:
        """Yield per-example evaluation results (empty placeholders if none)."""
        if self._evaluation_results is None:
            # No evaluators attached yet: emit one empty result per example so
            # downstream zips stay aligned.
            async for _ in await self.aget_examples():
                yield {"results": []}
        else:
            self._evaluation_results, evaluation_results = aitertools.atee(
                aitertools.ensure_async_iterator(self._evaluation_results),
                2,
                lock=asyncio.Lock(),
            )
            async for result in evaluation_results:
                yield result

    async def astart(self) -> _AsyncExperimentManager:
        """Validate the data and (optionally) create the experiment project.

        Returns a new manager bound to the created project.
        """
        try:
            first_example = await aitertools.py_anext(await self.aget_examples())
        except StopAsyncIteration:
            raise ValueError(
                "No examples found in the dataset. "
                "Please ensure the data provided to aevaluate is not empty."
            )
        if not first_example:
            # Fix: the two string literals were previously concatenated
            # without a separating space ("dataset.Please").
            raise ValueError(
                "No examples found in the dataset. "
                "Please ensure the data provided to aevaluate is not empty."
            )
        project = self._get_project(first_example) if self._upload_results else None
        self._print_experiment_start(project, first_example)
        self._metadata["num_repetitions"] = self._num_repetitions
        return self.__class__(
            await self.aget_examples(),
            experiment=project,
            metadata=self._metadata,
            client=self.client,
            runs=self._runs,
            evaluation_results=self._evaluation_results,
            upload_results=self._upload_results,
        )

    async def awith_predictions(
        self,
        target: ATARGET_T,
        /,
        max_concurrency: Optional[int] = None,
    ) -> _AsyncExperimentManager:
        """Return a new manager that lazily runs ``target`` over each example."""
        _experiment_results = self._apredict(target, max_concurrency=max_concurrency)
        # Tee so examples and runs can be consumed as independent streams.
        r1, r2 = aitertools.atee(_experiment_results, 2, lock=asyncio.Lock())
        return _AsyncExperimentManager(
            (pred["example"] async for pred in r1),
            experiment=self._experiment,
            metadata=self._metadata,
            client=self.client,
            runs=(pred["run"] async for pred in r2),
            upload_results=self._upload_results,
        )

    async def awith_evaluators(
        self,
        evaluators: Sequence[Union[EVALUATOR_T, AEVALUATOR_T]],
        *,
        max_concurrency: Optional[int] = None,
    ) -> _AsyncExperimentManager:
        """Return a new manager that lazily applies row-level evaluators."""
        evaluators = _resolve_evaluators(evaluators)
        experiment_results = self._ascore(evaluators, max_concurrency=max_concurrency)
        # Three consumers: examples, runs, and evaluation results.
        r1, r2, r3 = aitertools.atee(experiment_results, 3, lock=asyncio.Lock())
        return _AsyncExperimentManager(
            (result["example"] async for result in r1),
            experiment=self._experiment,
            metadata=self._metadata,
            client=self.client,
            runs=(result["run"] async for result in r2),
            evaluation_results=(result["evaluation_results"] async for result in r3),
            summary_results=self._summary_results,
            upload_results=self._upload_results,
        )

    async def awith_summary_evaluators(
        self,
        summary_evaluators: Sequence[SUMMARY_EVALUATOR_T],
    ) -> _AsyncExperimentManager:
        """Return a new manager that lazily applies experiment-level evaluators."""
        wrapped_evaluators = _wrap_summary_evaluators(summary_evaluators)
        aggregate_feedback_gen = self._aapply_summary_evaluators(wrapped_evaluators)
        return _AsyncExperimentManager(
            await self.aget_examples(),
            experiment=self._experiment,
            metadata=self._metadata,
            client=self.client,
            runs=self.aget_runs(),
            evaluation_results=self._evaluation_results,
            summary_results=aggregate_feedback_gen,
            upload_results=self._upload_results,
        )

    async def aget_results(self) -> AsyncIterator[ExperimentResultRow]:
        """Zip runs, examples, and evaluation results into result rows."""
        async for run, example, evaluation_results in aitertools.async_zip(
            self.aget_runs(), await self.aget_examples(), self.aget_evaluation_results()
        ):
            yield ExperimentResultRow(
                run=run,
                example=example,
                evaluation_results=evaluation_results,
            )

    async def aget_summary_scores(self) -> Dict[str, List[dict]]:
        """Collect all summary-evaluator results into a single dict."""
        if self._summary_results is None:
            return {"results": []}
        return {
            "results": [
                res  # type: ignore[misc]
                async for results in self._summary_results
                for res in results["results"]
            ]
        }

    ## Private methods

    async def _apredict(
        self, target: ATARGET_T, /, max_concurrency: Optional[int] = None
    ) -> AsyncIterator[_ForwardResults]:
        """Run ``target`` on every example with bounded concurrency."""
        fn = _ensure_async_traceable(target)

        async def predict_all():
            async for example in await self.aget_examples():
                # Yield the coroutine to be awaited later
                yield _aforward(
                    fn, example, self.experiment_name, self._metadata, self.client
                )

        async for result in aitertools.aiter_with_concurrency(
            max_concurrency, predict_all(), _eager_consumption_timeout=0.001
        ):
            yield result
        # Finalize the project (end time, dataset metadata) once all
        # predictions have streamed through.
        await self._aend()

    async def _ascore(
        self,
        evaluators: Sequence[RunEvaluator],
        max_concurrency: Optional[int] = None,
    ) -> AsyncIterator[ExperimentResultRow]:
        """Apply evaluators to every result row with bounded concurrency."""
        # The executor offloads blocking feedback uploads from the event loop.
        with cf.ThreadPoolExecutor(max_workers=4) as executor:

            async def score_all():
                async for current_results in self.aget_results():
                    # Yield the coroutine to be awaited later in aiter_with_concurrency
                    yield self._arun_evaluators(
                        evaluators, current_results, executor=executor
                    )

            async for result in aitertools.aiter_with_concurrency(
                max_concurrency, score_all(), _eager_consumption_timeout=0.001
            ):
                yield result

    async def _arun_evaluators(
        self,
        evaluators: Sequence[RunEvaluator],
        current_results: ExperimentResultRow,
        executor: cf.ThreadPoolExecutor,
    ) -> ExperimentResultRow:
        """Apply each evaluator to one (run, example) pair.

        Evaluator exceptions are logged (and, when feedback keys can be
        inferred, recorded as error feedback) rather than raised, so one
        failing evaluator does not abort the experiment.
        """
        current_context = rh.get_tracing_context()
        metadata = {
            **(current_context["metadata"] or {}),
            **{"experiment": self.experiment_name},
        }
        with rh.tracing_context(
            **{
                **current_context,
                "project_name": "evaluators",
                "metadata": metadata,
                # Trace locally when results are not uploaded.
                "enabled": "local" if not self._upload_results else True,
                "client": self.client,
            }
        ):
            run = current_results["run"]
            example = current_results["example"]
            eval_results = current_results["evaluation_results"]
            for evaluator in evaluators:
                try:
                    evaluator_response = await evaluator.aevaluate_run(
                        run=run,
                        example=example,
                    )
                    eval_results["results"].extend(
                        self.client._select_eval_results(evaluator_response)
                    )
                    if self._upload_results:
                        self.client._log_evaluation_feedback(
                            evaluator_response, run=run, _executor=executor
                        )
                except Exception as e:
                    # Best effort: surface the failure as error feedback under
                    # the evaluator's feedback keys.
                    try:
                        feedback_keys = _extract_feedback_keys(evaluator)
                        error_response = EvaluationResults(
                            results=[
                                EvaluationResult(
                                    key=key,
                                    source_run_id=run.id,
                                    comment=repr(e),
                                    extra={"error": True},
                                )
                                for key in feedback_keys
                            ]
                        )
                        eval_results["results"].extend(
                            self.client._select_eval_results(error_response)
                        )
                        if self._upload_results:
                            self.client._log_evaluation_feedback(
                                error_response, run=run, _executor=executor
                            )
                    except Exception as e2:
                        logger.debug(f"Error parsing feedback keys: {e2}")
                    # Fix: this message was previously logged twice per failure
                    # (two identical logger.error calls back to back).
                    logger.error(
                        f"Error running evaluator {repr(evaluator)} on"
                        f" run {run.id}: {repr(e)}",
                        exc_info=True,
                    )
            return ExperimentResultRow(
                run=run,
                example=example,
                evaluation_results=eval_results,
            )

    async def _aapply_summary_evaluators(
        self, summary_evaluators: Sequence[SUMMARY_EVALUATOR_T]
    ) -> AsyncIterator[EvaluationResults]:
        """Run experiment-level evaluators over all (run, example) pairs."""
        # Summary evaluators need the full materialized lists.
        runs, examples = [], []
        async_examples = aitertools.ensure_async_iterator(await self.aget_examples())
        async for run, example in aitertools.async_zip(
            self.aget_runs(), async_examples
        ):
            runs.append(run)
            examples.append(example)
        aggregate_feedback = []
        project_id = self._get_experiment().id if self._upload_results else None
        current_context = rh.get_tracing_context()
        metadata = {
            **(current_context["metadata"] or {}),
            **{
                "experiment": self.experiment_name,
                "experiment_id": project_id,
            },
        }
        with rh.tracing_context(
            **{
                **current_context,
                "project_name": "evaluators",
                "metadata": metadata,
                "enabled": "local" if not self._upload_results else True,
                "client": self.client,
            }
        ):
            for evaluator in summary_evaluators:
                try:
                    summary_eval_result = evaluator(runs, examples)
                    flattened_results = self.client._select_eval_results(
                        summary_eval_result,
                        fn_name=evaluator.__name__,
                    )
                    aggregate_feedback.extend(flattened_results)
                    if self._upload_results:
                        # Summary feedback attaches to the project, not a run.
                        for result in flattened_results:
                            feedback = result.dict(exclude={"target_run_id"})
                            evaluator_info = feedback.pop("evaluator_info", None)
                            await aitertools.aio_to_thread(
                                self.client.create_feedback,
                                **feedback,
                                run_id=None,
                                project_id=project_id,
                                source_info=evaluator_info,
                            )
                except Exception as e:
                    logger.error(
                        f"Error running summary evaluator {repr(evaluator)}: {e}",
                        exc_info=True,
                    )
        yield {"results": aggregate_feedback}

    async def _get_dataset_version(self) -> Optional[str]:
        """Return the newest example modification time as an ISO string."""
        modified_at = []
        async for example in await self.aget_examples():
            if example.modified_at:
                # Should always be defined in practice when fetched,
                # but the typing permits None
                modified_at.append(example.modified_at)
        max_modified_at = max(modified_at) if modified_at else None
        return max_modified_at.isoformat() if max_modified_at else None

    async def _get_dataset_splits(self) -> Optional[list[str]]:
        """Return the set of dataset splits covered by the examples."""
        splits = set()
        async for example in await self.aget_examples():
            if (
                example.metadata
                and example.metadata.get("dataset_split")
                and isinstance(example.metadata["dataset_split"], list)
            ):
                for split in example.metadata["dataset_split"]:
                    if isinstance(split, str):
                        splits.add(split)
            else:
                # Examples without split metadata count as the "base" split.
                splits.add("base")
        return list(splits)

    async def _aend(self) -> None:
        """Finalize the experiment project with end time and dataset metadata."""
        if not self._upload_results:
            return
        experiment = self._experiment
        if experiment is None:
            raise ValueError("Experiment not started yet.")
        project_metadata = self._get_experiment_metadata()
        project_metadata["dataset_version"] = await self._get_dataset_version()
        project_metadata["dataset_splits"] = await self._get_dataset_splits()
        self.client.update_project(
            experiment.id,
            end_time=experiment.end_time
            or datetime.datetime.now(datetime.timezone.utc),
            metadata={
                **experiment.metadata,
                **project_metadata,
            },
        )
class AsyncExperimentResults:
    """Stream experiment result rows as they are produced.

    Wraps an ``_AsyncExperimentManager`` and consumes its results eagerly on a
    background task, so ``async for`` iteration can begin before the whole
    experiment has finished.
    """

    def __init__(
        self,
        experiment_manager: _AsyncExperimentManager,
    ):
        self._manager = experiment_manager
        self._results: List[ExperimentResultRow] = []
        # Fix: previously this attribute was only created inside
        # _process_data, so it did not exist until the background task
        # finished; initialize it so the instance always has it.
        self._summary_results: Optional[Dict[str, List[dict]]] = None
        self._lock = asyncio.Lock()
        # Requires a running event loop; the constructor is only called from
        # async code (_aevaluate).
        self._task = asyncio.create_task(self._process_data(self._manager))
        self._processed_count = 0

    @property
    def experiment_name(self) -> str:
        return self._manager.experiment_name

    def __aiter__(self) -> AsyncIterator[ExperimentResultRow]:
        return self

    async def __anext__(self) -> ExperimentResultRow:
        """Return the next result row, waiting for the producer if needed."""

        async def _wait_until_index(index: int) -> None:
            # Poll until at least `index` rows have been handed out.
            # NOTE(review): this polls _processed_count, not len(_results);
            # combined with the shield below it backs off until progress is
            # made elsewhere — confirm before restructuring.
            while self._processed_count < index:
                await asyncio.sleep(0.05)

        while True:
            async with self._lock:
                if self._processed_count < len(self._results):
                    result = self._results[self._processed_count]
                    self._processed_count += 1
                    return result
                elif self._task.done():
                    # Producer finished and everything has been consumed.
                    raise StopAsyncIteration
            await asyncio.shield(
                asyncio.wait_for(_wait_until_index(len(self._results)), timeout=None)
            )

    async def _process_data(self, manager: _AsyncExperimentManager) -> None:
        """Background task: drain all rows, then collect summary scores."""
        tqdm = _load_tqdm()
        async for item in tqdm(manager.aget_results()):
            async with self._lock:
                self._results.append(item)
        summary_scores = await manager.aget_summary_scores()
        async with self._lock:
            self._summary_results = summary_scores

    def to_pandas(
        self, start: Optional[int] = 0, end: Optional[int] = None
    ) -> DataFrame:
        """Return the collected rows as a pandas DataFrame slice."""
        return _to_pandas(self._results, start=start, end=end)

    def _repr_html_(self) -> str:
        """Render a small DataFrame preview in notebooks when pandas exists."""
        import importlib.util

        if self._results and importlib.util.find_spec("pandas"):
            df = self.to_pandas(0, 5)
            return df._repr_html_()  # type: ignore[operator]
        else:
            return self.__repr__()

    def __len__(self) -> int:
        return len(self._results)

    def __repr__(self) -> str:
        return f"<AsyncExperimentResults {self.experiment_name}>"

    async def wait(self) -> None:
        """Block until the background processing task completes."""
        await self._task
async def _aforward(
    fn: rh.SupportsLangsmithExtra[[dict], Awaitable],
    example: schemas.Example,
    experiment_name: str,
    metadata: dict,
    client: langsmith.Client,
) -> _ForwardResults:
    # Invoke the traceable target on one example, capturing the resulting
    # run via the tracer's on_end callback. Target exceptions are logged,
    # not raised, so one failed example does not abort the experiment.
    run: Optional[schemas.RunBase] = None

    def _get_run(r: run_trees.RunTree) -> None:
        # on_end callback: stash the finished run tree for the return value.
        nonlocal run
        run = r

    with rh.tracing_context(enabled=True):
        try:
            await fn(
                example.inputs,
                langsmith_extra=rh.LangSmithExtra(
                    reference_example_id=example.id,
                    on_end=_get_run,
                    project_name=experiment_name,
                    metadata={
                        **metadata,
                        # Record which version of the example was evaluated.
                        "example_version": (
                            example.modified_at.isoformat()
                            if example.modified_at
                            else example.created_at.isoformat()
                        ),
                    },
                    client=client,
                ),
            )
        except Exception as e:
            logger.error(
                f"Error running target function: {e}", exc_info=True, stacklevel=1
            )
    # NOTE(review): the cast assumes on_end always fired and set `run`;
    # if tracing never completed, `run` is still None here — confirm callers
    # tolerate that.
    return _ForwardResults(
        run=cast(schemas.Run, run),
        example=example,
    )
def _ensure_async_traceable(
    target: ATARGET_T,
) -> rh.SupportsLangsmithExtra[[dict], Awaitable]:
    """Validate ``target`` and wrap it as a traceable async callable.

    Raises:
        ValueError: If ``target`` is a sync callable or not callable at all.
    """
    is_acceptable = asyncio.iscoroutinefunction(target) or _is_langchain_runnable(
        target
    )
    if not is_acceptable:
        if not callable(target):
            raise ValueError(
                "Target must be a callable async function. "
                "Received a non-callable object. Example usage:\n\n"
                "async def predict(inputs: dict) -> dict:\n"
                "    # do work, like chain.invoke(inputs)\n"
                "    return {...}\n"
                "await aevaluate(predict, ...)"
            )
        raise ValueError(
            "Target must be an async function. For sync functions, use evaluate."
            " Example usage:\n\n"
            "async def predict(inputs: dict) -> dict:\n"
            "    # do work, like chain.invoke(inputs)\n"
            "    return {...}\n"
            "await aevaluate(predict, ...)"
        )
    # Already decorated with @traceable: use as-is.
    if rh.is_traceable_function(target):
        return target  # type: ignore
    # Runnables are traced through their async entry point.
    if _is_langchain_runnable(target):
        target = target.ainvoke  # type: ignore[attr-defined]
    return rh.traceable(name="AsyncTarget")(target)
def _aresolve_data(
    data: Union[DATA_T, AsyncIterable[schemas.Example]], *, client: langsmith.Client
) -> AsyncIterator[schemas.Example]:
    """Return the examples for the given dataset."""
    # Async sources are used directly; anything else is resolved synchronously
    # (dataset name/ID, iterable of examples) before being wrapped.
    source = (
        data if isinstance(data, AsyncIterable) else _resolve_data(data, client=client)
    )
    return aitertools.ensure_async_iterator(source)
T = TypeVar("T")
async def async_chain_from_iterable(
    iterable: Iterable[AsyncIterable[T]],
) -> AsyncIterator[T]:
    """Chain multiple async iterables into one, preserving order.

    Async analogue of ``itertools.chain.from_iterable``: each source is
    exhausted completely before the next one is started.
    """
    for source in iterable:
        async for element in source:
            yield element
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/evaluation/_name_generation.py | import random
# Adjective word pool consumed by random_name() below.
adjectives = [
    "abandoned",
    "aching",
    "advanced",
    "ample",
    "artistic",
    "back",
    "best",
    "bold",
    "brief",
    "clear",
    "cold",
    "complicated",
    "cooked",
    "crazy",
    "crushing",
    "damp",
    "dear",
    "definite",
    "dependable",
    "diligent",
    "drab",
    "earnest",
    "elderly",
    "enchanted",
    "essential",
    "excellent",
    "extraneous",
    "fixed",
    "flowery",
    "formal",
    "fresh",
    "frosty",
    "giving",
    "glossy",
    "healthy",
    "helpful",
    "impressionable",
    "kind",
    "large",
    "left",
    "long",
    "loyal",
    "mealy",
    "memorable",
    "monthly",
    "new",
    "notable",
    "only",
    "ordinary",
    "passionate",
    "perfect",
    "pertinent",
    "proper",
    "puzzled",
    "reflecting",
    "respectful",
    "roasted",
    "scholarly",
    "shiny",
    "slight",
    "sparkling",
    "spotless",
    "stupendous",
    "sunny",
    "tart",
    "terrific",
    "timely",
    "unique",
    "upbeat",
    "vacant",
    "virtual",
    "warm",
    "weary",
    "whispered",
    "worthwhile",
    "yellow",
]
# Noun word pool consumed by random_name() below.
nouns = [
    "account",
    "acknowledgment",
    "address",
    "advertising",
    "airplane",
    "animal",
    "appointment",
    "arrival",
    "artist",
    "attachment",
    "attitude",
    "availability",
    "backpack",
    "bag",
    "balance",
    "bass",
    "bean",
    "beauty",
    "bibliography",
    "bill",
    "bite",
    "blossom",
    "boat",
    "book",
    "box",
    "boy",
    "bread",
    "bridge",
    "broccoli",
    "building",
    "butter",
    "button",
    "cabbage",
    "cake",
    "camera",
    "camp",
    "candle",
    "candy",
    "canvas",
    "car",
    "card",
    "carrot",
    "cart",
    "case",
    "cat",
    "chain",
    "chair",
    "chalk",
    "chance",
    "change",
    "channel",
    "character",
    "charge",
    "charm",
    "chart",
    "check",
    "cheek",
    "cheese",
    "chef",
    "cherry",
    "chicken",
    "child",
    "church",
    "circle",
    "class",
    "clay",
    "click",
    "clock",
    "cloth",
    "cloud",
    "clove",
    "club",
    "coach",
    "coal",
    "coast",
    "coat",
    "cod",
    "coffee",
    "collar",
    "color",
    "comb",
    "comfort",
    "comic",
    "committee",
    "community",
    "company",
    "comparison",
    "competition",
    "condition",
    "connection",
    "control",
    "cook",
    "copper",
    "copy",
    "corn",
    "cough",
    "country",
    "cover",
    "crate",
    "crayon",
    "cream",
    "creator",
    "crew",
    "crown",
    "current",
    "curtain",
    "curve",
    "cushion",
    "dad",
    "daughter",
    "day",
    "death",
    "debt",
    "decision",
    "deer",
    "degree",
    "design",
    "desire",
    "desk",
    "detail",
    "development",
    "digestion",
    "dime",
    "dinner",
    "direction",
    "dirt",
    "discovery",
    "discussion",
    "disease",
    "disgust",
    "distance",
    "distribution",
    "division",
    "doctor",
    "dog",
    "door",
    "drain",
    "drawer",
    "dress",
    "drink",
    "driving",
    "dust",
    "ear",
    "earth",
    "edge",
    "education",
    "effect",
    "egg",
    "end",
    "energy",
    "engine",
    "error",
    "event",
    "example",
    "exchange",
    "existence",
    "expansion",
    "experience",
    "expert",
    "eye",
    "face",
    "fact",
    "fall",
    "family",
    "farm",
    "father",
    "fear",
    "feeling",
    "field",
    "finger",
    "fire",
    "fish",
    "flag",
    "flight",
    "floor",
    "flower",
    "fold",
    "food",
    "football",
    "force",
    "form",
    "frame",
    "friend",
    "frog",
    "fruit",
    "fuel",
    "furniture",
    "game",
    "garden",
    "gate",
    "girl",
    "glass",
    "glove",
    "goat",
    "gold",
    "government",
    "grade",
    "grain",
    "grass",
    "green",
    "grip",
    "group",
    "growth",
    "guide",
    "guitar",
    "hair",
    "hall",
    "hand",
    "harbor",
    "harmony",
    "hat",
    "head",
    "health",
    "heart",
    "heat",
    "hill",
    "history",
    "hobbies",
    "hole",
    "hope",
    "horn",
    "horse",
    "hospital",
    "hour",
    "house",
    "humor",
    "idea",
    "impulse",
    "income",
    "increase",
    "industry",
    "ink",
    "insect",
    "instrument",
    "insurance",
    "interest",
    "invention",
    "iron",
    "island",
    "jelly",
    "jet",
    "jewel",
    "join",
    "judge",
    "juice",
    "jump",
    "kettle",
    "key",
    "kick",
    "kiss",
    "kitten",
    "knee",
    "knife",
    "knowledge",
    "land",
    "language",
    "laugh",
    "law",
    "lead",
    "learning",
    "leather",
    "leg",
    "lettuce",
    "level",
    "library",
    "lift",
    "light",
    "limit",
    "line",
    "linen",
    "lip",
    "liquid",
    "list",
    "look",
    "loss",
    "love",
    "lunch",
    "machine",
    "man",
    "manager",
    "map",
    "marble",
    "mark",
    "market",
    "mass",
    "match",
    "meal",
    "measure",
    "meat",
    "meeting",
    "memory",
    "metal",
    "middle",
    "milk",
    "mind",
    "mine",
    "minute",
    "mist",
    "mitten",
    "mom",
    "money",
    "monkey",
    "month",
    "moon",
    "morning",
    "mother",
    "motion",
    "mountain",
    "mouth",
    "muscle",
    "music",
    "nail",
    "name",
    "nation",
    "neck",
    "need",
    "news",
    "night",
    "noise",
    "note",
    "number",
    "nut",
    "observation",
    "offer",
    "oil",
    "operation",
    "opinion",
    "orange",
    "order",
    "organization",
    "ornament",
    "oven",
    "page",
    "pail",
    "pain",
    "paint",
    "pan",
    "pancake",
    "paper",
    "parcel",
    "parent",
    "part",
    "passenger",
    "paste",
    "payment",
    "peace",
    "pear",
    "pen",
    "pencil",
    "person",
    "pest",
    "pet",
    "picture",
    "pie",
    "pin",
    "pipe",
    "pizza",
    "place",
    "plane",
    "plant",
    "plastic",
    "plate",
    "play",
    "pleasure",
    "plot",
    "plough",
    "pocket",
    "point",
    "poison",
    "police",
    "pollution",
    "popcorn",
    "porter",
    "position",
    "pot",
    "potato",
    "powder",
    "power",
    "price",
    "print",
    "process",
    "produce",
    "product",
    "profit",
    "property",
    "prose",
    "protest",
    "pull",
    "pump",
    "punishment",
    "purpose",
    "push",
    "quarter",
    "question",
    "quiet",
    "quill",
    "quilt",
    "quince",
    "rabbit",
    "rail",
    "rain",
    "range",
    "rat",
    "rate",
    "ray",
    "reaction",
    "reading",
    "reason",
    "record",
    "regret",
    "relation",
    "religion",
    "representative",
    "request",
    "respect",
    "rest",
    "reward",
    "rhythm",
    "rice",
    "river",
    "road",
    "roll",
    "room",
    "root",
    "rose",
    "route",
    "rub",
    "rule",
    "run",
    "sack",
    "sail",
    "salt",
    "sand",
    "scale",
    "scarecrow",
    "scarf",
    "scene",
    "scent",
    "school",
    "science",
    "scissors",
    "screw",
    "sea",
    "seat",
    "secretary",
    "seed",
    "selection",
    "self",
    "sense",
    "servant",
    "shade",
    "shake",
    "shame",
    "shape",
    "sheep",
    "sheet",
    "shelf",
    "ship",
    "shirt",
    "shock",
    "shoe",
    "shop",
    "show",
    "side",
    "sign",
    "silk",
    "sink",
    "sister",
    "size",
    "sky",
    "sleep",
    "smash",
    "smell",
    "smile",
    "smoke",
    "snail",
    "snake",
    "sneeze",
    "snow",
    "soap",
    "society",
    "sock",
    "soda",
    "sofa",
    "son",
    "song",
    "sort",
    "sound",
    "soup",
    "space",
    "spark",
    "speed",
    "sponge",
    "spoon",
    "spray",
    "spring",
    "spy",
    "square",
    "stamp",
    "star",
    "start",
    "statement",
    "station",
    "steam",
    "steel",
    "stem",
    "step",
    "stew",
    "stick",
    "stitch",
    "stocking",
    "stomach",
    "stone",
    "stop",
    "store",
    "story",
    "stove",
    "stranger",
    "straw",
    "stream",
    "street",
    "stretch",
    "string",
    "structure",
    "substance",
    "sugar",
    "suggestion",
    "suit",
    "summer",
    "sun",
    "support",
    "surprise",
    "sweater",
    "swim",
    "system",
    "table",
    "tail",
    "talk",
    "tank",
    "taste",
    "tax",
    "tea",
    "teaching",
    "team",
    "tendency",
    "test",
    "texture",
    "theory",
    "thing",
    "thought",
    "thread",
    "throat",
    "thumb",
    "thunder",
    "ticket",
    "time",
    "tin",
    "title",
    "toad",
    "toe",
    "tooth",
    "toothpaste",
    "touch",
    "town",
    "toy",
    "trade",
    "train",
    "transport",
    "tray",
    "treatment",
    "tree",
    "trick",
    "trip",
    "trouble",
    "trousers",
    "truck",
    "tub",
    "turkey",
    "turn",
    "twist",
    "umbrella",
    "uncle",
    "underwear",
    "unit",
    "use",
    "vacation",
    "value",
    "van",
    "vase",
    "vegetable",
    "veil",
    "vein",
    "verse",
    "vessel",
    "view",
    "visitor",
    "voice",
    "volcano",
    "walk",
    "wall",
    "war",
    "wash",
    "waste",
    "watch",
    "water",
    "wave",
    "wax",
    "way",
    "wealth",
    "weather",
    "week",
    "weight",
    "wheel",
    "whip",
    "whistle",
    "window",
    "wine",
    "wing",
    "winter",
    "wire",
    "wish",
    "woman",
    "wood",
    "wool",
    "word",
    "work",
    "worm",
    "wound",
    "wrist",
    "writer",
    "yard",
    "yoke",
    "zebra",
    "zinc",
    "zipper",
    "zone",
]
def random_name() -> str:
    """Build a random ``adjective-noun-N`` identifier, with N in [1, 100]."""
    return f"{random.choice(adjectives)}-{random.choice(nouns)}-{random.randint(1, 100)}"
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/evaluation/evaluator.py | """This module contains the evaluator classes for evaluating runs."""
from __future__ import annotations
import asyncio
import inspect
import uuid
from abc import abstractmethod
from typing import (
Any,
Awaitable,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Union,
cast,
)
from typing_extensions import TypedDict
from langsmith import schemas
try:
from pydantic.v1 import ( # type: ignore[import]
BaseModel,
Field,
ValidationError,
validator,
)
except ImportError:
from pydantic import ( # type: ignore[assignment]
BaseModel,
Field,
ValidationError,
validator,
)
import logging
from functools import wraps
from langsmith.schemas import SCORE_TYPE, VALUE_TYPE, Example, Run
logger = logging.getLogger(__name__)
class Category(TypedDict):
    """A category for categorical feedback.

    Used as an entry of ``FeedbackConfig["categories"]``.
    """

    value: Optional[Union[float, int]]
    """The numeric score/ordinal corresponding to this category."""
    label: str
    """The label for this category."""
class FeedbackConfig(TypedDict, total=False):
    """Configuration to define a type of feedback.

    Applied on the first creation of a feedback_key.
    """

    type: Literal["continuous", "categorical", "freeform"]
    """The type of feedback."""
    min: Optional[Union[float, int]]
    """The minimum permitted value (if continuous type)."""
    max: Optional[Union[float, int]]
    """The maximum value permitted value (if continuous type)."""
    # The allowed categories (if categorical type).
    categories: Optional[List[Union[Category, dict]]]
class EvaluationResult(BaseModel):
    """Evaluation result."""

    key: str
    """The aspect, metric name, or label for this evaluation."""
    score: SCORE_TYPE = None
    """The numeric score for this evaluation."""
    value: VALUE_TYPE = None
    """The value for this evaluation, if not numeric."""
    comment: Optional[str] = None
    """An explanation regarding the evaluation."""
    correction: Optional[Dict] = None
    """What the correct value should be, if applicable."""
    evaluator_info: Dict = Field(default_factory=dict)
    """Additional information about the evaluator."""
    feedback_config: Optional[Union[FeedbackConfig, dict]] = None
    """The configuration used to generate this feedback."""
    source_run_id: Optional[Union[uuid.UUID, str]] = None
    """The ID of the trace of the evaluator itself."""
    target_run_id: Optional[Union[uuid.UUID, str]] = None
    """The ID of the trace this evaluation is applied to.
    If none provided, the evaluation feedback is applied to the
    root trace being."""
    extra: Optional[Dict] = None
    """Metadata for the evaluator run."""

    class Config:
        """Pydantic model configuration."""

        # NOTE(review): pydantic v1's recognized option is ``extra`` (e.g.
        # ``extra = "forbid"``); ``allow_extra`` appears to be ignored by
        # pydantic — confirm whether forbidding extras was intended.
        allow_extra = False

    @validator("value", pre=True)
    def check_value_non_numeric(cls, v, values):
        """Check that the value is not numeric."""
        # If a score isn't provided and the value is numeric
        # it's more likely the user intended to use the score field
        if "score" not in values or values["score"] is None:
            if isinstance(v, (int, float)):
                logger.warning(
                    "Numeric values should be provided in"
                    " the 'score' field, not 'value'."
                    f" Got: {v}"
                )
        # The value is passed through unchanged either way; the check only warns.
        return v
class EvaluationResults(TypedDict, total=False):
    """Batch evaluation results.

    This makes it easy for your evaluator to return multiple
    metrics at once.
    """

    # total=False: the "results" key is optional on instances.
    results: List[EvaluationResult]
    """The evaluation results."""
class RunEvaluator:
    """Evaluator interface class."""

    @abstractmethod
    def evaluate_run(
        self, run: Run, example: Optional[Example] = None
    ) -> Union[EvaluationResult, EvaluationResults]:
        """Evaluate an example."""

    async def aevaluate_run(
        self, run: Run, example: Optional[Example] = None
    ) -> Union[EvaluationResult, EvaluationResults]:
        """Evaluate an example asynchronously.

        Default implementation: offload the sync ``evaluate_run`` to the
        running loop's default thread-pool executor.
        """
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self.evaluate_run, run, example)
_RUNNABLE_OUTPUT = Union[EvaluationResult, EvaluationResults, dict]
class ComparisonEvaluationResult(BaseModel):
    """Feedback scores for the results of comparative evaluations.

    These are generated by functions that compare two or more runs,
    returning a ranking or other feedback.
    """

    key: str
    """The aspect, metric name, or label for this evaluation."""
    # Keys are the IDs of the compared runs.
    scores: Dict[Union[uuid.UUID, str], SCORE_TYPE]
    """The scores for each run in the comparison."""
    source_run_id: Optional[Union[uuid.UUID, str]] = None
    """The ID of the trace of the evaluator itself."""
    comment: Optional[Union[str, Dict[Union[uuid.UUID, str], str]]] = None
    """Comment for the scores. If a string, it's shared across all target runs.
    If a dict, it maps run IDs to individual comments."""
_COMPARISON_OUTPUT = Union[ComparisonEvaluationResult, dict]
class DynamicRunEvaluator(RunEvaluator):
    """A dynamic evaluator that wraps a function and transforms it into a `RunEvaluator`.

    This class is designed to be used with the `@run_evaluator` decorator, allowing
    functions that take a `Run` and an optional `Example` as arguments, and return
    an `EvaluationResult` or `EvaluationResults`, to be used as instances of `RunEvaluator`.

    Attributes:
        func (Callable): The function that is wrapped by this evaluator.
    """  # noqa: E501

    def __init__(
        self,
        func: Callable[
            [Run, Optional[Example]],
            Union[_RUNNABLE_OUTPUT, Awaitable[_RUNNABLE_OUTPUT]],
        ],
        # Async function to be used for async evaluation. Optional
        afunc: Optional[
            Callable[
                [Run, Optional[Example]],
                Awaitable[_RUNNABLE_OUTPUT],
            ]
        ] = None,
    ):
        """Initialize the DynamicRunEvaluator with a given function.

        Args:
            func (Callable): A function that takes a `Run` and an optional `Example` as
            arguments, and returns a dict or `ComparisonEvaluationResult`.
        """
        # Rewrite user-friendly signatures (e.g. (inputs, outputs)) into the
        # canonical (run, example) form.
        func = _normalize_evaluator_func(func)
        if afunc:
            afunc = _normalize_evaluator_func(afunc)  # type: ignore[assignment]
        # Copy the wrapped function's metadata (__name__, __doc__, ...) onto self.
        wraps(func)(self)
        from langsmith import run_helpers  # type: ignore

        # Exactly one of self.func / self.afunc ends up set for a sync-only or
        # async-only evaluator; `is_async` and the evaluate methods key off
        # hasattr checks for these attributes.
        if afunc is not None:
            self.afunc = run_helpers.ensure_traceable(
                afunc, process_inputs=_serialize_inputs
            )
            self._name = getattr(afunc, "__name__", "DynamicRunEvaluator")
        if inspect.iscoroutinefunction(func):
            if afunc is not None:
                raise TypeError(
                    "Func was provided as a coroutine function, but afunc was "
                    "also provided. If providing both, func should be a regular "
                    "function to avoid ambiguity."
                )
            self.afunc = run_helpers.ensure_traceable(
                func, process_inputs=_serialize_inputs
            )
            self._name = getattr(func, "__name__", "DynamicRunEvaluator")
        else:
            self.func = run_helpers.ensure_traceable(
                cast(Callable[[Run, Optional[Example]], _RUNNABLE_OUTPUT], func),
                process_inputs=_serialize_inputs,
            )
            self._name = getattr(func, "__name__", "DynamicRunEvaluator")

    def _coerce_evaluation_result(
        self,
        result: Union[EvaluationResult, dict],
        source_run_id: uuid.UUID,
        allow_no_key: bool = False,
    ) -> EvaluationResult:
        """Validate a single result dict/object into an EvaluationResult.

        When ``allow_no_key`` is True a missing 'key' defaults to the
        evaluator's name.
        """
        if isinstance(result, EvaluationResult):
            if not result.source_run_id:
                result.source_run_id = source_run_id
            return result
        try:
            if not result:
                raise ValueError(
                    "Expected an EvaluationResult object, or dict with a metric"
                    f" 'key' and optional 'score'; got empty result: {result}"
                )
            if "key" not in result and allow_no_key:
                result["key"] = self._name
            if all(k not in result for k in ("score", "value", "comment")):
                raise ValueError(
                    "Expected an EvaluationResult object, or dict with a metric"
                    f" 'key' and optional 'score' or categorical 'value'; got {result}"
                )
            # Caller-provided source_run_id (if any) wins over the generated one.
            return EvaluationResult(**{"source_run_id": source_run_id, **result})
        except ValidationError as e:
            raise ValueError(
                "Expected an EvaluationResult object, or dict with a metric"
                f" 'key' and optional 'score'; got {result}"
            ) from e

    def _coerce_evaluation_results(
        self,
        results: Union[dict, EvaluationResults],
        source_run_id: uuid.UUID,
    ) -> Union[EvaluationResult, EvaluationResults]:
        """Coerce either a batch ({"results": [...]}) or a single result dict."""
        if "results" in results:
            cp = results.copy()
            cp["results"] = [
                self._coerce_evaluation_result(r, source_run_id=source_run_id)
                for r in results["results"]
            ]
            return EvaluationResults(**cp)

        return self._coerce_evaluation_result(
            cast(dict, results), source_run_id=source_run_id, allow_no_key=True
        )

    def _format_result(
        self,
        result: Union[
            EvaluationResult, EvaluationResults, dict, str, int, bool, float, list
        ],
        source_run_id: uuid.UUID,
    ) -> Union[EvaluationResult, EvaluationResults]:
        """Normalize any supported raw return type into EvaluationResult(s)."""
        if isinstance(result, EvaluationResult):
            if not result.source_run_id:
                result.source_run_id = source_run_id
            return result
        result = _format_evaluator_result(result)
        return self._coerce_evaluation_results(result, source_run_id)

    @property
    def is_async(self) -> bool:
        """Check if the evaluator function is asynchronous.

        Returns:
            bool: True if the evaluator function is asynchronous, False otherwise.
        """
        return hasattr(self, "afunc")

    def evaluate_run(
        self, run: Run, example: Optional[Example] = None
    ) -> Union[EvaluationResult, EvaluationResults]:
        """Evaluate a run using the wrapped function.

        This method directly invokes the wrapped function with the provided arguments.

        Args:
            run (Run): The run to be evaluated.
            example (Optional[Example]): An optional example to be used in the evaluation.

        Returns:
            Union[EvaluationResult, EvaluationResults]: The result of the evaluation.
        """  # noqa: E501
        if not hasattr(self, "func"):
            # Async-only evaluator: run the coroutine on the current loop if we
            # can do so without deadlocking a running loop.
            running_loop = asyncio.get_event_loop()
            if running_loop.is_running():
                raise RuntimeError(
                    "Cannot call `evaluate_run` on an async run evaluator from"
                    " within an running event loop. Use `aevaluate_run` instead."
                )
            else:
                return running_loop.run_until_complete(self.aevaluate_run(run, example))
        source_run_id = uuid.uuid4()
        metadata: Dict[str, Any] = {"target_run_id": run.id}
        if getattr(run, "session_id", None):
            metadata["experiment"] = str(run.session_id)
        result = self.func(
            run,
            example,
            langsmith_extra={"run_id": source_run_id, "metadata": metadata},
        )
        return self._format_result(result, source_run_id)

    async def aevaluate_run(self, run: Run, example: Optional[Example] = None):
        """Evaluate a run asynchronously using the wrapped async function.

        This method directly invokes the wrapped async function with the
        provided arguments.

        Args:
            run (Run): The run to be evaluated.
            example (Optional[Example]): An optional example to be used
            in the evaluation.

        Returns:
            Union[EvaluationResult, EvaluationResults]: The result of the evaluation.
        """
        if not hasattr(self, "afunc"):
            # Sync-only evaluator: base class runs evaluate_run in an executor.
            return await super().aevaluate_run(run, example)
        source_run_id = uuid.uuid4()
        metadata: Dict[str, Any] = {"target_run_id": run.id}
        if getattr(run, "session_id", None):
            metadata["experiment"] = str(run.session_id)
        result = await self.afunc(
            run,
            example,
            langsmith_extra={"run_id": source_run_id, "metadata": metadata},
        )
        return self._format_result(result, source_run_id)

    def __call__(
        self, run: Run, example: Optional[Example] = None
    ) -> Union[EvaluationResult, EvaluationResults]:
        """Make the evaluator callable, allowing it to be used like a function.

        This method enables the evaluator instance to be called directly, forwarding the
        call to `evaluate_run`.

        Args:
            run (Run): The run to be evaluated.
            example (Optional[Example]): An optional example to be used in the evaluation.

        Returns:
            Union[EvaluationResult, EvaluationResults]: The result of the evaluation.
        """  # noqa: E501
        return self.evaluate_run(run, example)

    def __repr__(self) -> str:
        """Represent the DynamicRunEvaluator object."""
        return f"<DynamicRunEvaluator {self._name}>"
def run_evaluator(
    func: Callable[
        [Run, Optional[Example]], Union[_RUNNABLE_OUTPUT, Awaitable[_RUNNABLE_OUTPUT]]
    ],
):
    """Create a run evaluator from a function.

    Decorator that transforms a function into a `RunEvaluator`.
    The function may be sync or async; `DynamicRunEvaluator` handles both.
    """
    return DynamicRunEvaluator(func)
_MAXSIZE = 10_000
def _maxsize_repr(obj: Any):
s = repr(obj)
if len(s) > _MAXSIZE:
s = s[: _MAXSIZE - 4] + "...)"
return s
def _serialize_inputs(inputs: dict) -> dict:
run_truncated = _maxsize_repr(inputs.get("run"))
example_truncated = _maxsize_repr(inputs.get("example"))
return {"run": run_truncated, "example": example_truncated}
class DynamicComparisonRunEvaluator:
    """Compare predictions (as traces) from 2 or more runs."""

    def __init__(
        self,
        func: Callable[
            [Sequence[Run], Optional[Example]],
            Union[_COMPARISON_OUTPUT, Awaitable[_COMPARISON_OUTPUT]],
        ],
        # Async function to be used for async evaluation. Optional
        afunc: Optional[
            Callable[
                [Sequence[Run], Optional[Example]],
                Awaitable[_COMPARISON_OUTPUT],
            ]
        ] = None,
    ):
        """Initialize the DynamicRunEvaluator with a given function.

        Args:
            func (Callable): A function that takes a `Run` and an optional `Example` as
            arguments, and returns an `EvaluationResult` or `EvaluationResults`.
        """
        # Rewrite user-friendly signatures (e.g. (runs, outputs)) into the
        # canonical (runs, example) form.
        func = _normalize_comparison_evaluator_func(func)
        if afunc:
            afunc = _normalize_comparison_evaluator_func(afunc)  # type: ignore[assignment]
        # Copy the wrapped function's metadata (__name__, __doc__, ...) onto self.
        wraps(func)(self)
        from langsmith import run_helpers  # type: ignore

        # Exactly one of self.func / self.afunc ends up set for a sync-only or
        # async-only evaluator; `is_async` and compare methods key off hasattr.
        if afunc is not None:
            self.afunc = run_helpers.ensure_traceable(
                afunc, process_inputs=_serialize_inputs
            )
            self._name = getattr(afunc, "__name__", "DynamicRunEvaluator")
        if inspect.iscoroutinefunction(func):
            if afunc is not None:
                raise TypeError(
                    "Func was provided as a coroutine function, but afunc was "
                    "also provided. If providing both, func should be a regular "
                    "function to avoid ambiguity."
                )
            self.afunc = run_helpers.ensure_traceable(
                func, process_inputs=_serialize_inputs
            )
            self._name = getattr(func, "__name__", "DynamicRunEvaluator")
        else:
            self.func = run_helpers.ensure_traceable(
                cast(
                    Callable[
                        [Sequence[Run], Optional[Example]],
                        _COMPARISON_OUTPUT,
                    ],
                    func,
                ),
                process_inputs=_serialize_inputs,
            )
            self._name = getattr(func, "__name__", "DynamicRunEvaluator")

    @property
    def is_async(self) -> bool:
        """Check if the evaluator function is asynchronous.

        Returns:
            bool: True if the evaluator function is asynchronous, False otherwise.
        """
        return hasattr(self, "afunc")

    def compare_runs(
        self, runs: Sequence[Run], example: Optional[Example] = None
    ) -> ComparisonEvaluationResult:
        """Compare runs to score preferences.

        Args:
            runs: A list of runs to compare.
            example: An optional example to be used in the evaluation.

        """  # noqa: E501
        if not hasattr(self, "func"):
            # Async-only evaluator: run the coroutine on the current loop if we
            # can do so without deadlocking a running loop.
            running_loop = asyncio.get_event_loop()
            if running_loop.is_running():
                raise RuntimeError(
                    "Cannot call `evaluate_run` on an async run evaluator from"
                    " within an running event loop. Use `aevaluate_run` instead."
                )
            else:
                return running_loop.run_until_complete(
                    self.acompare_runs(runs, example)
                )
        source_run_id = uuid.uuid4()
        tags = self._get_tags(runs)
        # TODO: Add metadata for the "comparison experiment" here
        result = self.func(
            runs,
            example,
            langsmith_extra={"run_id": source_run_id, "tags": tags},
        )
        return self._format_results(result, source_run_id, runs)

    async def acompare_runs(
        self, runs: Sequence[Run], example: Optional[Example] = None
    ) -> ComparisonEvaluationResult:
        """Evaluate a run asynchronously using the wrapped async function.

        This method directly invokes the wrapped async function with the
        provided arguments.

        Args:
            runs (Run): The runs to be evaluated.
            example (Optional[Example]): An optional example to be used
            in the evaluation.

        Returns:
            ComparisonEvaluationResult: The result of the evaluation.
        """
        if not hasattr(self, "afunc"):
            # Sync-only evaluator: just call the sync path.
            return self.compare_runs(runs, example)
        source_run_id = uuid.uuid4()
        tags = self._get_tags(runs)
        # TODO: Add metadata for the "comparison experiment" here
        result = await self.afunc(
            runs,
            example,
            langsmith_extra={"run_id": source_run_id, "tags": tags},
        )
        return self._format_results(result, source_run_id, runs)

    def __call__(
        self, runs: Sequence[Run], example: Optional[Example] = None
    ) -> ComparisonEvaluationResult:
        """Make the evaluator callable, allowing it to be used like a function.

        This method enables the evaluator instance to be called directly, forwarding the
        call to `evaluate_run`.

        Args:
            run (Run): The run to be evaluated.
            example (Optional[Example]): An optional example to be used in the evaluation.

        Returns:
            ComparisonEvaluationResult: The result of the evaluation.
        """  # noqa: E501
        return self.compare_runs(runs, example)

    def __repr__(self) -> str:
        """Represent the DynamicRunEvaluator object."""
        return f"<DynamicComparisonRunEvaluator {self._name}>"

    @staticmethod
    def _get_tags(runs: Sequence[Run]) -> List[str]:
        """Extract tags from runs."""
        # Add tags to support filtering
        tags = []
        for run in runs:
            tags.append("run:" + str(run.id))
            if getattr(run, "session_id", None):
                tags.append("experiment:" + str(run.session_id))
        return tags

    def _format_results(
        self,
        result: Union[dict, list, ComparisonEvaluationResult],
        source_run_id: uuid.UUID,
        runs: Sequence[Run],
    ) -> ComparisonEvaluationResult:
        """Validate a raw comparison result into a ComparisonEvaluationResult."""
        if isinstance(result, ComparisonEvaluationResult):
            if not result.source_run_id:
                result.source_run_id = source_run_id
            return result
        elif isinstance(result, list):
            # A bare list of scores is paired positionally with the runs.
            result = {
                "scores": {run.id: score for run, score in zip(runs, result)},
                "key": self._name,
                "source_run_id": source_run_id,
            }
        elif isinstance(result, dict):
            if "key" not in result:
                result["key"] = self._name
        else:
            msg = (
                "Expected 'dict', 'list' or 'ComparisonEvaluationResult' result "
                f"object. Received: {result=}"
            )
            raise ValueError(msg)
        try:
            # Caller-provided source_run_id (if any) wins over the generated one.
            return ComparisonEvaluationResult(
                **{"source_run_id": source_run_id, **result}
            )
        except ValidationError as e:
            raise ValueError(
                f"Expected a dictionary with a 'key' and dictionary of scores mapping"
                "run IDs to numeric scores, or ComparisonEvaluationResult object,"
                f" got {result}"
            ) from e
def comparison_evaluator(
    func: Callable[
        [Sequence[Run], Optional[Example]],
        Union[_COMPARISON_OUTPUT, Awaitable[_COMPARISON_OUTPUT]],
    ],
) -> DynamicComparisonRunEvaluator:
    """Create a comparison evaluator from a function."""
    return DynamicComparisonRunEvaluator(func)
def _normalize_evaluator_func(
    func: Callable,
) -> Union[
    Callable[[Run, Optional[Example]], _RUNNABLE_OUTPUT],
    Callable[[Run, Optional[Example]], Awaitable[_RUNNABLE_OUTPUT]],
]:
    """Adapt evaluators with named args (inputs/outputs/...) to (run, example).

    Arguments are dispatched by positional parameter *name*; unrecognized
    two-argument signatures pass through unchanged for backwards compatibility.
    """
    supported_args = ("run", "example", "inputs", "outputs", "reference_outputs")
    sig = inspect.signature(func)
    positional_args = [
        pname
        for pname, p in sig.parameters.items()
        if p.kind in (p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY)
    ]
    if not positional_args or (
        not all(pname in supported_args for pname in positional_args)
        and len(positional_args) != 2
    ):
        msg = (
            f"Invalid evaluator function. Must have at least one positional "
            f"argument. Supported positional arguments are {supported_args}. Please "
            f"see https://docs.smith.langchain.com/evaluation/how_to_guides/evaluation/evaluate_llm_application#use-custom-evaluators"
            # noqa: E501
        )
        raise ValueError(msg)
    elif not all(
        pname in supported_args for pname in positional_args
    ) or positional_args == ["run", "example"]:
        # For backwards compatibility we assume custom arg names are Run and Example
        # types, respectively.
        return func
    else:
        if inspect.iscoroutinefunction(func):

            async def awrapper(
                run: Run, example: Optional[Example]
            ) -> _RUNNABLE_OUTPUT:
                # Map each supported name to its value, then pass them in the
                # order declared by the user's signature.
                arg_map = {
                    "run": run,
                    "example": example,
                    "inputs": example.inputs if example else {},
                    "outputs": run.outputs or {},
                    "reference_outputs": example.outputs or {} if example else {},
                }
                args = (arg_map[arg] for arg in positional_args)
                return await func(*args)

            awrapper.__name__ = (
                getattr(func, "__name__")
                if hasattr(func, "__name__")
                else awrapper.__name__
            )
            return awrapper  # type: ignore[return-value]

        else:

            def wrapper(run: Run, example: Example) -> _RUNNABLE_OUTPUT:
                # Same dispatch as the async wrapper above, sync flavor.
                arg_map = {
                    "run": run,
                    "example": example,
                    "inputs": example.inputs if example else {},
                    "outputs": run.outputs or {},
                    "reference_outputs": example.outputs or {} if example else {},
                }
                args = (arg_map[arg] for arg in positional_args)
                return func(*args)

            wrapper.__name__ = (
                getattr(func, "__name__")
                if hasattr(func, "__name__")
                else wrapper.__name__
            )
            return wrapper  # type: ignore[return-value]
def _normalize_comparison_evaluator_func(
    func: Callable,
) -> Union[
    Callable[[Sequence[Run], Optional[Example]], _COMPARISON_OUTPUT],
    Callable[[Sequence[Run], Optional[Example]], Awaitable[_COMPARISON_OUTPUT]],
]:
    """Adapt comparison evaluators with named args to (runs, example).

    Mirrors ``_normalize_evaluator_func`` but operates on a sequence of runs,
    so ``outputs`` maps to a list (one entry per run).
    """
    supported_args = ("runs", "example", "inputs", "outputs", "reference_outputs")
    sig = inspect.signature(func)
    positional_args = [
        pname
        for pname, p in sig.parameters.items()
        if p.kind in (p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY)
    ]
    if not positional_args or (
        not all(pname in supported_args for pname in positional_args)
        and len(positional_args) != 2
    ):
        msg = (
            f"Invalid evaluator function. Must have at least one positional "
            f"argument. Supported positional arguments are {supported_args}. Please "
            f"see https://docs.smith.langchain.com/evaluation/how_to_guides/evaluation/evaluate_llm_application#use-custom-evaluators"
            # noqa: E501
        )
        raise ValueError(msg)
    # For backwards compatibility we assume custom arg names are List[Run] and
    # List[Example] types, respectively.
    elif not all(
        pname in supported_args for pname in positional_args
    ) or positional_args == ["runs", "example"]:
        return func
    else:
        if inspect.iscoroutinefunction(func):

            async def awrapper(
                runs: Sequence[Run], example: Optional[Example]
            ) -> _COMPARISON_OUTPUT:
                # Map each supported name to its value, then pass them in the
                # order declared by the user's signature.
                arg_map = {
                    "runs": runs,
                    "example": example,
                    "inputs": example.inputs if example else {},
                    "outputs": [run.outputs or {} for run in runs],
                    "reference_outputs": example.outputs or {} if example else {},
                }
                args = (arg_map[arg] for arg in positional_args)
                return await func(*args)

            awrapper.__name__ = (
                getattr(func, "__name__")
                if hasattr(func, "__name__")
                else awrapper.__name__
            )
            return awrapper  # type: ignore[return-value]

        else:

            def wrapper(runs: Sequence[Run], example: Example) -> _COMPARISON_OUTPUT:
                # Same dispatch as the async wrapper above, sync flavor.
                arg_map = {
                    "runs": runs,
                    "example": example,
                    "inputs": example.inputs if example else {},
                    "outputs": [run.outputs or {} for run in runs],
                    "reference_outputs": example.outputs or {} if example else {},
                }
                args = (arg_map[arg] for arg in positional_args)
                return func(*args)

            wrapper.__name__ = (
                getattr(func, "__name__")
                if hasattr(func, "__name__")
                else wrapper.__name__
            )
            return wrapper  # type: ignore[return-value]
def _format_evaluator_result(
result: Union[EvaluationResults, dict, str, int, bool, float, list],
) -> Union[EvaluationResults, dict]:
if isinstance(result, (bool, float, int)):
result = {"score": result}
elif not result:
raise ValueError(
f"Expected a non-empty dict, str, bool, int, float, list, "
f"EvaluationResult, or EvaluationResults. Got {result}"
)
elif isinstance(result, list):
if not all(isinstance(x, dict) for x in result):
raise ValueError(
f"Expected a list of dicts or EvaluationResults. Received {result}."
)
result = {"results": result} # type: ignore[misc]
elif isinstance(result, str):
result = {"value": result}
elif isinstance(result, dict):
pass
else:
raise ValueError(
f"Expected a dict, str, bool, int, float, list, EvaluationResult, or "
f"EvaluationResults. Got {result}"
)
return result
# Signature of an experiment-level ("summary") evaluator: it receives all runs
# and all examples at once. Sequence and List forms are both accepted.
SUMMARY_EVALUATOR_T = Union[
    Callable[
        [Sequence[schemas.Run], Sequence[schemas.Example]],
        Union[EvaluationResult, EvaluationResults],
    ],
    Callable[
        [List[schemas.Run], List[schemas.Example]],
        Union[EvaluationResult, EvaluationResults],
    ],
]
def _normalize_summary_evaluator(func: Callable) -> SUMMARY_EVALUATOR_T:
    """Adapt summary evaluators with named args to the (runs, examples) form.

    Dispatches by positional parameter name; unrecognized two-argument
    signatures pass through unchanged for backwards compatibility.
    """
    supported_args = ("runs", "examples", "inputs", "outputs", "reference_outputs")
    sig = inspect.signature(func)
    positional_args = [
        pname
        for pname, p in sig.parameters.items()
        if p.kind in (p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY)
    ]
    if not positional_args or (
        not all(pname in supported_args for pname in positional_args)
        and len(positional_args) != 2
    ):
        msg = (
            f"Invalid evaluator function. Must have at least one positional "
            f"argument. Supported positional arguments are {supported_args}."
        )
        if positional_args:
            msg += f" Received positional arguments {positional_args}."
        raise ValueError(msg)
    # For backwards compatibility we assume custom arg names are Sequence[Run] and
    # Sequence[Example] types, respectively.
    elif not all(
        pname in supported_args for pname in positional_args
    ) or positional_args == ["runs", "examples"]:
        return func
    else:

        def wrapper(
            runs: Sequence[schemas.Run], examples: Sequence[schemas.Example]
        ) -> Union[EvaluationResult, EvaluationResults]:
            # Map each supported name to its (list-valued) payload, then pass
            # them in the order declared by the user's signature.
            arg_map = {
                "runs": runs,
                "examples": examples,
                "inputs": [example.inputs for example in examples],
                "outputs": [run.outputs or {} for run in runs],
                "reference_outputs": [example.outputs or {} for example in examples],
            }
            args = (arg_map[arg] for arg in positional_args)
            result = func(*args)
            if isinstance(result, EvaluationResult):
                return result
            return _format_evaluator_result(result)  # type: ignore[return-value]

        wrapper.__name__ = (
            getattr(func, "__name__") if hasattr(func, "__name__") else wrapper.__name__
        )
        return wrapper  # type: ignore[return-value]
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/evaluation/__init__.py | """Evaluation Helpers."""
from typing import TYPE_CHECKING, Any, List
if TYPE_CHECKING:
from typing import List
from langsmith.evaluation._arunner import (
aevaluate,
aevaluate_existing,
)
from langsmith.evaluation._runner import (
evaluate,
evaluate_comparative,
evaluate_existing,
)
from langsmith.evaluation.evaluator import (
EvaluationResult,
EvaluationResults,
RunEvaluator,
run_evaluator,
)
from langsmith.evaluation.integrations._langchain import LangChainStringEvaluator
def __getattr__(name: str) -> Any:
if name == "evaluate":
from langsmith.evaluation._runner import evaluate
return evaluate
elif name == "evaluate_existing":
from langsmith.evaluation._runner import evaluate_existing
return evaluate_existing
elif name == "aevaluate":
from langsmith.evaluation._arunner import aevaluate
return aevaluate
elif name == "aevaluate_existing":
from langsmith.evaluation._arunner import aevaluate_existing
return aevaluate_existing
elif name == "evaluate_comparative":
from langsmith.evaluation._runner import evaluate_comparative
return evaluate_comparative
elif name == "EvaluationResult":
from langsmith.evaluation.evaluator import EvaluationResult
return EvaluationResult
elif name == "EvaluationResults":
from langsmith.evaluation.evaluator import EvaluationResults
return EvaluationResults
elif name == "RunEvaluator":
from langsmith.evaluation.evaluator import RunEvaluator
return RunEvaluator
elif name == "run_evaluator":
from langsmith.evaluation.evaluator import run_evaluator
return run_evaluator
elif name == "StringEvaluator":
from langsmith.evaluation.string_evaluator import StringEvaluator
return StringEvaluator
elif name == "LangChainStringEvaluator":
from langsmith.evaluation.integrations._langchain import (
LangChainStringEvaluator,
)
return LangChainStringEvaluator
raise AttributeError(f"module {__name__} has no attribute {name}")
# Public API surface of langsmith.evaluation. Each name here is resolved
# lazily via the module-level __getattr__ above; keep the two in sync.
__all__ = [
    "run_evaluator",
    "EvaluationResult",
    "EvaluationResults",
    "RunEvaluator",
    "StringEvaluator",
    "aevaluate",
    "aevaluate_existing",
    "evaluate",
    "evaluate_existing",
    "evaluate_comparative",
    "LangChainStringEvaluator",
]
def __dir__() -> List[str]:
    """Advertise the lazily exported names to dir() and tab-completion."""
    return __all__
|
0 | lc_public_repos/langsmith-sdk/python/langsmith | lc_public_repos/langsmith-sdk/python/langsmith/evaluation/llm_evaluator.py | """Contains the LLMEvaluator class for building LLM-as-a-judge evaluators."""
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
from pydantic import BaseModel
from langsmith._internal._beta_decorator import warn_beta
from langsmith.evaluation import EvaluationResult, EvaluationResults, RunEvaluator
from langsmith.schemas import Example, Run
class CategoricalScoreConfig(BaseModel):
    """Configuration for a categorical (string-valued) score."""

    # Feedback key under which the score is recorded.
    key: str
    # Allowed category labels the judge model must choose from.
    choices: List[str]
    # Human-readable description of what is being judged.
    description: str
    # Whether the model should also produce a free-text explanation.
    include_explanation: bool = False
    # Optional custom description for the explanation field; only used
    # when include_explanation is True.
    explanation_description: Optional[str] = None
class ContinuousScoreConfig(BaseModel):
    """Configuration for a continuous (numeric) score."""

    # Feedback key under which the score is recorded.
    key: str
    # Inclusive lower bound of the score range.
    min: float = 0
    # Inclusive upper bound of the score range.
    max: float = 1
    # Human-readable description of what is being judged.
    description: str
    # Whether the model should also produce a free-text explanation.
    include_explanation: bool = False
    # Optional custom description for the explanation field; only used
    # when include_explanation is True.
    explanation_description: Optional[str] = None
def _create_score_json_schema(
    score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig],
) -> dict:
    """Build the JSON schema used for the judge model's structured output.

    Args:
        score_config: Either a categorical or a continuous score configuration.

    Returns:
        A JSON-schema dict whose "score" property is a string enum for
        categorical configs or a bounded number for continuous configs,
        plus an optional "explanation" string property.

    Raises:
        ValueError: If the config is neither categorical nor continuous.
    """
    if isinstance(score_config, CategoricalScoreConfig):
        score_property: Dict[str, Any] = {
            "type": "string",
            "enum": score_config.choices,
            "description": f"The score for the evaluation, one of "
            f"{', '.join(score_config.choices)}.",
        }
    elif isinstance(score_config, ContinuousScoreConfig):
        score_property = {
            "type": "number",
            "minimum": score_config.min,
            "maximum": score_config.max,
            "description": f"The score for the evaluation, between "
            f"{score_config.min} and {score_config.max}, inclusive.",
        }
    else:
        raise ValueError("Invalid score type. Must be 'categorical' or 'continuous'")

    properties: Dict[str, Any] = {"score": score_property}
    required = ["score"]
    if score_config.include_explanation:
        explanation_text = (
            "The explanation for the score."
            if score_config.explanation_description is None
            else score_config.explanation_description
        )
        properties["explanation"] = {
            "type": "string",
            "description": explanation_text,
        }
        required.append("explanation")

    return {
        "title": score_config.key,
        "description": score_config.description,
        "type": "object",
        "properties": properties,
        "required": required,
    }
class LLMEvaluator(RunEvaluator):
    """A class for building LLM-as-a-judge evaluators."""

    def __init__(
        self,
        *,
        prompt_template: Union[str, List[Tuple[str, str]]],
        score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig],
        map_variables: Optional[Callable[[Run, Optional[Example]], dict]] = None,
        model_name: str = "gpt-4o",
        model_provider: str = "openai",
        **kwargs,
    ):
        """Initialize the LLMEvaluator.

        Args:
            prompt_template (Union[str, List[Tuple[str, str]]]): The prompt
                template to use for the evaluation. If a string is provided, it is
                assumed to be a human / user message.
            score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]):
                The configuration for the score, either categorical or continuous.
            map_variables (Optional[Callable[[Run, Optional[Example]], dict]], optional):
                A function that maps the run and example to the variables in the
                prompt. Defaults to None. If None, it is assumed that the prompt
                only requires 'input', 'output', and 'expected'.
            model_name (str, optional): The model to use for the evaluation.
                Defaults to "gpt-4o".
            model_provider (str, optional): The model provider to use
                for the evaluation. Defaults to "openai".
            **kwargs: Additional keyword arguments forwarded to
                ``init_chat_model`` (e.g. temperature).

        Raises:
            ImportError: If langchain is not installed.
        """
        try:
            from langchain.chat_models import init_chat_model
        except ImportError as e:
            raise ImportError(
                "LLMEvaluator requires langchain to be installed. "
                "Please install langchain by running `pip install langchain`."
            ) from e
        chat_model = init_chat_model(
            model=model_name, model_provider=model_provider, **kwargs
        )
        self._initialize(prompt_template, score_config, map_variables, chat_model)

    @classmethod
    def from_model(
        cls,
        model: Any,
        *,
        prompt_template: Union[str, List[Tuple[str, str]]],
        score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig],
        map_variables: Optional[Callable[[Run, Optional[Example]], dict]] = None,
    ):
        """Create an LLMEvaluator instance from a BaseChatModel instance.

        Args:
            model (BaseChatModel): The chat model instance to use for the evaluation.
            prompt_template (Union[str, List[Tuple[str, str]]]): The prompt
                template to use for the evaluation. If a string is provided, it is
                assumed to be a system message.
            score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]):
                The configuration for the score, either categorical or continuous.
            map_variables (Optional[Callable[[Run, Optional[Example]], dict]], optional):
                A function that maps the run and example to the variables in the
                prompt. Defaults to None. If None, it is assumed that the prompt
                only requires 'input', 'output', and 'expected'.

        Returns:
            LLMEvaluator: An instance of LLMEvaluator.
        """
        # Bypass __init__ (which would construct a model) and share the common
        # setup path with the caller-supplied model instead.
        instance = cls.__new__(cls)
        instance._initialize(prompt_template, score_config, map_variables, model)
        return instance

    def _initialize(
        self,
        prompt_template: Union[str, List[Tuple[str, str]]],
        score_config: Union[CategoricalScoreConfig, ContinuousScoreConfig],
        map_variables: Optional[Callable[[Run, Optional[Example]], dict]],
        chat_model: Any,
    ):
        """Shared initialization code for __init__ and from_model.

        Args:
            prompt_template (Union[str, List[Tuple[str, str]]]): The prompt template.
            score_config (Union[CategoricalScoreConfig, ContinuousScoreConfig]):
                The score configuration.
            map_variables (Optional[Callable[[Run, Optional[Example]], dict]]):
                Function to map variables.
            chat_model (BaseChatModel): The chat model instance.

        Raises:
            ImportError: If langchain-core is not installed.
            ValueError: If the model does not support structured output, or the
                prompt uses custom variables without a ``map_variables``.
        """
        try:
            from langchain_core.language_models.chat_models import BaseChatModel
            from langchain_core.prompts import ChatPromptTemplate
        except ImportError as e:
            raise ImportError(
                "LLMEvaluator requires langchain-core to be installed. "
                "Please install langchain-core by running `pip install langchain-core`."
            ) from e
        if not (
            isinstance(chat_model, BaseChatModel)
            and hasattr(chat_model, "with_structured_output")
        ):
            raise ValueError(
                "chat_model must be an instance of "
                "BaseLanguageModel and support structured output."
            )
        if isinstance(prompt_template, str):
            # A bare string is treated as a single human message.
            self.prompt = ChatPromptTemplate.from_messages([("human", prompt_template)])
        else:
            self.prompt = ChatPromptTemplate.from_messages(prompt_template)
        if set(self.prompt.input_variables) - {"input", "output", "expected"}:
            if not map_variables:
                # Message fixed: it previously referenced a non-existent
                # "map_inputs" argument.
                raise ValueError(
                    "map_variables must be provided if the prompt template contains "
                    "variables other than 'input', 'output', and 'expected'"
                )
        self.map_variables = map_variables
        self.score_config = score_config
        self.score_schema = _create_score_json_schema(self.score_config)
        # Bind the JSON schema so the model returns structured output.
        chat_model = chat_model.with_structured_output(self.score_schema)
        self.runnable = self.prompt | chat_model

    @warn_beta
    def evaluate_run(
        self, run: Run, example: Optional[Example] = None
    ) -> Union[EvaluationResult, EvaluationResults]:
        """Evaluate a run."""
        variables = self._prepare_variables(run, example)
        output: dict = cast(dict, self.runnable.invoke(variables))
        return self._parse_output(output)

    @warn_beta
    async def aevaluate_run(
        self, run: Run, example: Optional[Example] = None
    ) -> Union[EvaluationResult, EvaluationResults]:
        """Asynchronously evaluate a run."""
        variables = self._prepare_variables(run, example)
        output: dict = cast(dict, await self.runnable.ainvoke(variables))
        return self._parse_output(output)

    def _prepare_variables(self, run: Run, example: Optional[Example]) -> dict:
        """Prepare the prompt variables for model invocation.

        Uses ``map_variables`` when provided. Otherwise each of the default
        variables ('input', 'output', 'expected') requires exactly one key in
        the corresponding mapping so the value can be picked unambiguously.

        Raises:
            ValueError: If a required variable is missing or ambiguous
                (multiple keys present without a ``map_variables``).
        """
        if self.map_variables:
            return self.map_variables(run, example)
        variables = {}
        if "input" in self.prompt.input_variables:
            if len(run.inputs) == 0:
                raise ValueError(
                    "No input keys are present in run.inputs but the prompt "
                    "requires 'input'."
                )
            if len(run.inputs) != 1:
                raise ValueError(
                    "Multiple input keys are present in run.inputs. Please provide "
                    "a map_variables function."
                )
            variables["input"] = list(run.inputs.values())[0]
        if "output" in self.prompt.input_variables:
            # `not run.outputs` covers both None and an empty dict, so the
            # previously duplicated zero-length check was unreachable and
            # has been removed.
            if not run.outputs:
                raise ValueError(
                    "No output keys are present in run.outputs but the prompt "
                    "requires 'output'."
                )
            if len(run.outputs) != 1:
                raise ValueError(
                    "Multiple output keys are present in run.outputs. Please "
                    "provide a map_variables function."
                )
            variables["output"] = list(run.outputs.values())[0]
        if "expected" in self.prompt.input_variables:
            # `not example.outputs` already rejects empty dicts, so no separate
            # zero-length check is needed.
            if not example or not example.outputs:
                raise ValueError(
                    "No example or example outputs is provided but the prompt "
                    "requires 'expected'."
                )
            if len(example.outputs) != 1:
                raise ValueError(
                    "Multiple output keys are present in example.outputs. Please "
                    "provide a map_variables function."
                )
            variables["expected"] = list(example.outputs.values())[0]
        return variables

    def _parse_output(self, output: dict) -> Union[EvaluationResult, EvaluationResults]:
        """Parse the structured model output into an evaluation result."""
        explanation = output.get("explanation", None)
        if isinstance(self.score_config, CategoricalScoreConfig):
            # Categorical judgements are recorded as string values.
            return EvaluationResult(
                key=self.score_config.key, value=output["score"], comment=explanation
            )
        elif isinstance(self.score_config, ContinuousScoreConfig):
            # Continuous judgements are recorded as numeric scores.
            return EvaluationResult(
                key=self.score_config.key, score=output["score"], comment=explanation
            )
        # Previously this fell through and implicitly returned None; fail loudly
        # instead — the config type is validated at construction time, so this
        # indicates a programming error.
        raise ValueError(
            "Invalid score config. Must be CategoricalScoreConfig or "
            "ContinuousScoreConfig."
        )
|
0 | lc_public_repos/langsmith-sdk/python/langsmith/evaluation | lc_public_repos/langsmith-sdk/python/langsmith/evaluation/integrations/_langchain.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Optional, TypedDict, Union
from langsmith.evaluation.evaluator import DynamicRunEvaluator
from langsmith.run_helpers import traceable
from langsmith.schemas import Example, Run
if TYPE_CHECKING:
from langchain.evaluation.schema import StringEvaluator
from langsmith.evaluation.evaluator import RunEvaluator
class SingleEvaluatorInput(TypedDict):
    """The input to a `StringEvaluator`."""

    prediction: str
    """The prediction string (taken from the run's outputs)."""
    reference: Optional[Any]
    """The reference value, if the evaluator requires one."""
    input: Optional[str]
    """The input string, if the evaluator requires one."""
class LangChainStringEvaluator:
    r"""A class for wrapping a LangChain StringEvaluator.

    Requires the `langchain` package to be installed.

    Attributes:
        evaluator (StringEvaluator): The underlying StringEvaluator OR the name
            of the evaluator to load.

    Methods:
        as_run_evaluator() -> RunEvaluator:
            Convert the LangChainStringEvaluator to a RunEvaluator.

    Examples:
        Creating a simple LangChainStringEvaluator:

        >>> evaluator = LangChainStringEvaluator("exact_match")

        Converting a LangChainStringEvaluator to a RunEvaluator:

        >>> from langsmith.evaluation import LangChainStringEvaluator
        >>> from langchain_openai import ChatOpenAI
        >>> evaluator = LangChainStringEvaluator(
        ...     "criteria",
        ...     config={
        ...         "criteria": {
        ...             "usefulness": "The prediction is useful if"
        ...             " it is correct and/or asks a useful followup question."
        ...         },
        ...         "llm": ChatOpenAI(model="gpt-4o"),
        ...     },
        ... )
        >>> run_evaluator = evaluator.as_run_evaluator()
        >>> run_evaluator  # doctest: +ELLIPSIS
        <DynamicRunEvaluator ...>

        Customizing the LLM model used by the evaluator:

        >>> from langsmith.evaluation import LangChainStringEvaluator
        >>> from langchain_anthropic import ChatAnthropic
        >>> evaluator = LangChainStringEvaluator(
        ...     "criteria",
        ...     config={
        ...         "criteria": {
        ...             "usefulness": "The prediction is useful if"
        ...             " it is correct and/or asks a useful followup question."
        ...         },
        ...         "llm": ChatAnthropic(model="claude-3-opus-20240229"),
        ...     },
        ... )
        >>> run_evaluator = evaluator.as_run_evaluator()
        >>> run_evaluator  # doctest: +ELLIPSIS
        <DynamicRunEvaluator ...>

        Using the `evaluate` API with different evaluators:

        >>> def prepare_data(run: Run, example: Example):
        ...     # Convert the evaluation data into the format expected by the evaluator
        ...     # Only required for datasets with multiple inputs/output keys
        ...     return {
        ...         "prediction": run.outputs["prediction"],
        ...         "reference": example.outputs["answer"],
        ...         "input": str(example.inputs),
        ...     }
        >>> import re
        >>> from langchain_anthropic import ChatAnthropic
        >>> import langsmith
        >>> from langsmith.evaluation import LangChainStringEvaluator, evaluate
        >>> criteria_evaluator = LangChainStringEvaluator(
        ...     "criteria",
        ...     config={
        ...         "criteria": {
        ...             "usefulness": "The prediction is useful if it is correct"
        ...             " and/or asks a useful followup question."
        ...         },
        ...         "llm": ChatAnthropic(model="claude-3-opus-20240229"),
        ...     },
        ...     prepare_data=prepare_data,
        ... )
        >>> embedding_evaluator = LangChainStringEvaluator("embedding_distance")
        >>> exact_match_evaluator = LangChainStringEvaluator("exact_match")
        >>> regex_match_evaluator = LangChainStringEvaluator(
        ...     "regex_match", config={"flags": re.IGNORECASE}, prepare_data=prepare_data
        ... )
        >>> scoring_evaluator = LangChainStringEvaluator(
        ...     "labeled_score_string",
        ...     config={
        ...         "criteria": {
        ...             "accuracy": "Score 1: Completely inaccurate\nScore 5: Somewhat accurate\nScore 10: Completely accurate"
        ...         },
        ...         "normalize_by": 10,
        ...         "llm": ChatAnthropic(model="claude-3-opus-20240229"),
        ...     },
        ...     prepare_data=prepare_data,
        ... )
        >>> string_distance_evaluator = LangChainStringEvaluator(
        ...     "string_distance",
        ...     config={"distance_metric": "levenshtein"},
        ...     prepare_data=prepare_data,
        ... )
        >>> from langsmith import Client
        >>> client = Client()
        >>> results = evaluate(
        ...     lambda inputs: {"prediction": "foo"},
        ...     data=client.list_examples(dataset_name="Evaluate Examples", limit=1),
        ...     evaluators=[
        ...         embedding_evaluator,
        ...         criteria_evaluator,
        ...         exact_match_evaluator,
        ...         regex_match_evaluator,
        ...         scoring_evaluator,
        ...         string_distance_evaluator,
        ...     ],
        ... )  # doctest: +ELLIPSIS
        View the evaluation results for experiment:...
    """  # noqa: E501

    def __init__(
        self,
        evaluator: Union[StringEvaluator, str],
        *,
        config: Optional[dict] = None,
        prepare_data: Optional[
            Callable[[Run, Optional[Example]], SingleEvaluatorInput]
        ] = None,
    ):
        """Initialize a LangChainStringEvaluator.

        See: https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.schema.StringEvaluator.html#langchain-evaluation-schema-stringevaluator

        Args:
            evaluator (Union[StringEvaluator, str]): The underlying
                StringEvaluator, or the name of an evaluator to load via
                ``langchain.evaluation.load_evaluator``.
            config (Optional[dict]): Keyword arguments forwarded to
                ``load_evaluator`` when ``evaluator`` is given by name.
            prepare_data (Optional[Callable[[Run, Optional[Example]], SingleEvaluatorInput]]):
                Function converting a run (and optional example) into the
                evaluator's input dict. Only required for datasets with
                multiple input/output keys.

        Raises:
            NotImplementedError: If ``evaluator`` is neither a StringEvaluator
                instance nor a string name.
        """
        from langchain.evaluation.schema import StringEvaluator  # noqa: F811

        if isinstance(evaluator, StringEvaluator):
            self.evaluator = evaluator
        elif isinstance(evaluator, str):
            from langchain.evaluation import load_evaluator  # noqa: F811

            self.evaluator = load_evaluator(evaluator, **(config or {}))  # type: ignore[assignment, arg-type]
        else:
            raise NotImplementedError(f"Unsupported evaluator type: {type(evaluator)}")
        self._prepare_data = prepare_data

    def as_run_evaluator(
        self,
    ) -> RunEvaluator:
        """Convert the LangChainStringEvaluator to a RunEvaluator.

        This is the object used in the LangSmith `evaluate` API.

        Returns:
            RunEvaluator: The converted RunEvaluator.
        """
        # Snippets interpolated into the error messages below so users see a
        # copy-pasteable prepare_data skeleton matching this evaluator's needs.
        input_str = (
            "\n \"input\": example.inputs['input'],"
            if self.evaluator.requires_input
            else ""
        )
        reference_str = (
            "\n \"reference\": example.outputs['expected']"
            if self.evaluator.requires_reference
            else ""
        )
        customization_error_str = f"""
def prepare_data(run, example):
    return {{
        "prediction": run.outputs['my_output'],{reference_str}{input_str}
    }}
evaluator = LangChainStringEvaluator(..., prepare_data=prepare_data)
"""

        @traceable
        def prepare_evaluator_inputs(
            run: Run, example: Optional[Example] = None
        ) -> SingleEvaluatorInput:
            # Default mapping: requires a single key in each of the run outputs,
            # example outputs, and example inputs that the evaluator needs.
            if run.outputs and len(run.outputs) > 1:
                raise ValueError(
                    f"Evaluator {self.evaluator} only supports a single prediction "
                    "key. Please ensure that the run has a single output."
                    " Or initialize with a prepare_data:\n"
                    f"{customization_error_str}"
                )
            if (
                self.evaluator.requires_reference
                and example
                and example.outputs
                and len(example.outputs) > 1
            ):
                # Fixed typo in the user-facing message: "nly" -> "only".
                raise ValueError(
                    f"Evaluator {self.evaluator} only supports a single reference key. "
                    "Please ensure that the example has a single output."
                    " Or create a custom evaluator yourself:\n"
                    f"{customization_error_str}"
                )
            if (
                self.evaluator.requires_input
                and example
                and example.inputs
                and len(example.inputs) > 1
            ):
                raise ValueError(
                    f"Evaluator {self.evaluator} only supports a single input key. "
                    "Please ensure that the example has a single input."
                    " Or initialize with a prepare_data:\n"
                    f"{customization_error_str}"
                )
            return SingleEvaluatorInput(
                prediction=next(iter(run.outputs.values())),  # type: ignore[union-attr]
                reference=(
                    next(iter(example.outputs.values()))
                    if (
                        self.evaluator.requires_reference
                        and example
                        and example.outputs
                    )
                    else None
                ),
                input=(
                    next(iter(example.inputs.values()))
                    if (self.evaluator.requires_input and example and example.inputs)
                    else None
                ),
            )

        @traceable(name=self.evaluator.evaluation_name)
        def evaluate(run: Run, example: Optional[Example] = None) -> dict:
            # Synchronous evaluation path.
            eval_inputs = (
                prepare_evaluator_inputs(run, example)
                if self._prepare_data is None
                else self._prepare_data(run, example)
            )
            results = self.evaluator.evaluate_strings(**eval_inputs)
            return {"key": self.evaluator.evaluation_name, **results}

        @traceable(name=self.evaluator.evaluation_name)
        async def aevaluate(run: Run, example: Optional[Example] = None) -> dict:
            # Asynchronous evaluation path.
            eval_inputs = (
                prepare_evaluator_inputs(run, example)
                if self._prepare_data is None
                else self._prepare_data(run, example)
            )
            results = await self.evaluator.aevaluate_strings(**eval_inputs)
            return {"key": self.evaluator.evaluation_name, **results}

        return DynamicRunEvaluator(evaluate, aevaluate)
|
0 | lc_public_repos/langsmith-sdk/python/langsmith/evaluation | lc_public_repos/langsmith-sdk/python/langsmith/evaluation/integrations/__init__.py | """This module provides integration wrappers for popular open source eval frameworks.
to be used with LangSmith.
"""
from langsmith.evaluation.integrations._langchain import LangChainStringEvaluator
__all__ = ["LangChainStringEvaluator"]
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/docs/Makefile | # Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
# Overridable build settings (command line or environment).
SPHINXOPTS ?= -j auto
SPHINXBUILD ?= sphinx-build
SPHINXAUTOBUILD ?= sphinx-autobuild
SOURCEDIR = .
BUILDDIR = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Generate API reference RST files
generate-api-rst:
	python ./create_api_rst.py

# Combined target to generate API RST and build HTML
# NOTE(review): `build-html` has no explicit rule, so it falls through to the
# catch-all `%:` rule below and is passed to sphinx-build as a make-mode
# target — confirm sphinx accepts "build-html" (vs. plain "html").
api-docs: generate-api-rst build-html

.PHONY: generate-api-rst build-html api-docs

# Remove generated API rst sources in addition to the normal clean
# (`clean` itself is handled by the catch-all rule below).
clobber: clean
	rm -rf langsmith

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@echo "SOURCEDIR: $(SOURCEDIR)"
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/docs/conf.py | """Configuration file for the Sphinx documentation builder."""
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import os
import sys
from pathlib import Path
import toml
from docutils import nodes
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils.statemachine import StringList
from sphinx.util.docutils import SphinxDirective
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
_DIR = Path(__file__).parent.absolute()
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../python"))
with (_DIR.parent / "pyproject.toml").open("r") as f:
data = toml.load(f)
class ExampleLinksDirective(SphinxDirective):
    """Directive to generate a list of links to examples.

    We have a script that extracts links to API reference docs
    from our notebook examples. This directive uses that information
    to backlink to the examples from the API reference docs.
    """

    # The directive takes no body content, only one required argument:
    # the class or function name to link examples for.
    has_content = False
    required_arguments = 1

    def run(self):
        """Run the directive.

        Called any time :example_links:`ClassName` is used
        in the template *.rst files.
        """
        class_or_func_name = self.arguments[0]
        # NOTE(review): `links` is hard-coded empty, so this directive currently
        # always renders an empty bullet list and never the rubric — presumably
        # it is meant to be populated from the extracted-links script output;
        # confirm against the docs build pipeline.
        links = {}
        list_node = nodes.bullet_list()
        for doc_name, link in sorted(links.items()):
            item_node = nodes.list_item()
            para_node = nodes.paragraph()
            link_node = nodes.reference()
            link_node["refuri"] = link
            link_node.append(nodes.Text(doc_name))
            para_node.append(link_node)
            item_node.append(para_node)
            list_node.append(item_node)
        # Only emit the "Examples using ..." heading when at least one link exists.
        if list_node.children:
            title_node = nodes.rubric()
            title_node.append(nodes.Text(f"Examples using {class_or_func_name}"))
            return [title_node, list_node]
        return [list_node]
class Beta(BaseAdmonition):
    """Admonition directive rendering a standard "beta feature" warning box."""

    # Title argument is optional; defaults to "Beta" below.
    required_arguments = 0
    node_class = nodes.admonition

    def run(self):
        # Supply default body text when the directive is written without content.
        self.content = self.content or StringList(
            [
                (
                    "This feature is in beta. It is actively being worked on, so the "
                    "API may change."
                )
            ]
        )
        # Supply the default admonition title when none is given.
        self.arguments = self.arguments or ["Beta"]
        return super().run()
def setup(app):
    """Register the project's custom directives with the Sphinx application."""
    for directive_name, directive_cls in (
        ("example_links", ExampleLinksDirective),
        ("beta", Beta),
    ):
        app.add_directive(directive_name, directive_cls)
# -- Project information -----------------------------------------------------
project = "🦜️🛠️ LangSmith"
copyright = "2024, LangChain Inc"
author = "LangChain, Inc"
html_favicon = "_static/img/brand/favicon.png"
html_last_updated_fmt = "%b %d, %Y"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autodoc.typehints",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinxcontrib.autodoc_pydantic",
# "IPython.sphinxext.ipython_console_highlighting",
"myst_parser",
"_extensions.gallery_directive",
"sphinx_design",
"sphinx_copybutton",
]
source_suffix = [".rst", ".md"]
# some autodoc pydantic options are repeated in the actual template.
# potentially user error, but there may be bugs in the sphinx extension
# with options not being passed through correctly (from either the location in the code)
autodoc_pydantic_model_show_json = False
autodoc_pydantic_field_list_validators = False
autodoc_pydantic_config_members = False
autodoc_pydantic_model_show_config_summary = False
autodoc_pydantic_model_show_validator_members = False
autodoc_pydantic_model_show_validator_summary = False
autodoc_pydantic_model_signature_prefix = "class"
autodoc_pydantic_field_signature_prefix = "param"
autodoc_member_order = "groupwise"
autoclass_content = "both"
autodoc_typehints_format = "short"
autodoc_typehints = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# The theme to use for HTML and HTML Help pages.
html_theme = "pydata_sphinx_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# # -- General configuration ------------------------------------------------
"sidebar_includehidden": True,
"use_edit_page_button": False,
# # "analytics": {
# # "plausible_analytics_domain": "scikit-learn.org",
# # "plausible_analytics_url": "https://views.scientific-python.org/js/script.js",
# # },
# # If "prev-next" is included in article_footer_items, then setting show_prev_next
# # to True would repeat prev and next links. See
# # https://github.com/pydata/pydata-sphinx-theme/blob/b731dc230bc26a3d1d1bb039c56c977a9b3d25d8/src/pydata_sphinx_theme/theme/pydata_sphinx_theme/layout.html#L118-L129
"show_prev_next": False,
"search_bar_text": "Search",
"navigation_with_keys": True,
"collapse_navigation": True,
"navigation_depth": 3,
"show_nav_level": 1,
"show_toc_level": 3,
"navbar_align": "left",
"header_links_before_dropdown": 5,
"header_dropdown_text": "Modules",
"logo": {
"image_light": "_static/wordmark-api.svg",
"image_dark": "_static/wordmark-api-dark.svg",
},
"surface_warnings": True,
# # -- Template placement in theme layouts ----------------------------------
"navbar_start": ["navbar-logo"],
# # Note that the alignment of navbar_center is controlled by navbar_align
"navbar_center": ["navbar-nav"],
"navbar_end": ["langsmith_docs", "theme-switcher", "navbar-icon-links"],
# # navbar_persistent is persistent right (even when on mobiles)
"navbar_persistent": ["search-field"],
"article_header_start": ["breadcrumbs"],
"article_header_end": [],
"article_footer_items": [],
"content_footer_items": [],
# # Use html_sidebars that map page patterns to list of sidebar templates
# "primary_sidebar_end": [],
"footer_start": ["copyright"],
"footer_center": [],
"footer_end": [],
# # When specified as a dictionary, the keys should follow glob-style patterns, as in
# # https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-exclude_patterns
# # In particular, "**" specifies the default for all pages
# # Use :html_theme.sidebar_secondary.remove: for file-wide removal
# "secondary_sidebar_items": {"**": ["page-toc", "sourcelink"]},
# "show_version_warning_banner": True,
# "announcement": None,
"icon_links": [
{
# Label for this link
"name": "GitHub",
# URL where the link will redirect
"url": "https://github.com/langchain-ai/langsmith-sdk", # required
# Icon class (if "type": "fontawesome"), or path to local image (if "type": "local")
"icon": "fa-brands fa-square-github",
# The type of image to be used (see below for details)
"type": "fontawesome",
},
{
"name": "X / Twitter",
"url": "https://twitter.com/langchainai",
"icon": "fab fa-twitter-square",
},
],
"icon_links_label": "Quick Links",
"external_links": [],
"sidebar_primary_title": "Your Custom Title Here",
}
html_context = {
"display_github": True, # Integrate GitHub
"github_user": "langchain-ai", # Username
"github_repo": "langsmith-sdk", # Repo name
"github_version": "master", # Version
"conf_py_path": "/docs/api_reference", # Path in the checkout to the docs root
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# These paths are either relative to html_static_path
# or fully qualified paths (e.g. https://...)
html_css_files = ["css/custom.css"]
html_use_index = False
myst_enable_extensions = ["colon_fence"]
# generate autosummary even if no references
autosummary_generate = True
html_copy_source = False
html_show_sourcelink = False
# Set canonical URL from the Read the Docs Domain
html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "")
# Tell Jinja2 templates the build is running on Read the Docs
if os.environ.get("READTHEDOCS", "") == "True":
html_context["READTHEDOCS"] = True
master_doc = "index"
templates_path = ["templates"]
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/docs/requirements.txt | autodoc_pydantic>=1,<2
sphinx<=7
myst-parser>=3
sphinx-autobuild>=2024
pydata-sphinx-theme>=0.15
toml>=0.10.2
myst-nb>=1.1.1
pyyaml
sphinx-design
sphinx-copybutton
beautifulsoup4
openai
-e python
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/docs/.python-version | 3.11
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/docs/create_api_rst.py | """Script for auto-generating api_reference.rst."""
import importlib
import inspect
import logging
import os
import sys
from enum import Enum
from pathlib import Path
from typing import Dict, List, Literal, Sequence, TypedDict, Union
import toml
from pydantic import BaseModel
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
ROOT_DIR = Path(__file__).parents[1].absolute()
HERE = Path(__file__).parent
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
PACKAGE_DIR = ROOT_DIR / "langsmith"
ClassKind = Literal["TypedDict", "Regular", "Pydantic", "enum"]
class ClassInfo(TypedDict):
    """Metadata about a class discovered in a module."""

    # Simple class name as it appears in the module.
    name: str
    # Fully qualified dotted name used in the generated RST.
    qualified_name: str
    # Classification used to pick the autodoc directive.
    kind: ClassKind
    # False when the name starts with an underscore.
    is_public: bool
    # True when the docstring carries a ".. deprecated::" marker.
    is_deprecated: bool
class FunctionInfo(TypedDict):
    """Metadata about a function discovered in a module."""

    # Simple function name as it appears in the module.
    name: str
    # Fully qualified dotted name used in the generated RST.
    qualified_name: str
    # False when the name starts with an underscore.
    is_public: bool
    # True when the docstring carries a ".. deprecated::" marker.
    is_deprecated: bool
class ModuleMembers(TypedDict):
    """All documentable members (classes and functions) found in one module."""

    # Note: trailing underscore avoids shadowing the `classes` builtin-ish name
    # used elsewhere in templates.
    classes_: Sequence[ClassInfo]
    functions: Sequence[FunctionInfo]
_EXCLUDED_NAMES = {
"close_session",
"convert_prompt_to_anthropic_format",
"convert_prompt_to_openai_format",
"BaseMessageLike",
"TracingQueueItem",
"filter_logs",
"StringEvaluator",
"LLMEvaluator",
"ensure_traceable",
"RunLikeDict",
"RunTypeEnum",
"is_traceable_function",
"is_async",
"get_run_tree_context",
"as_runnable",
"SupportsLangsmithExtra",
"get_tracing_context",
}
_EXCLUDED_MODULES = {"cli"}
_INCLUDED_UTILS = {
"ContextThreadPoolExecutor",
"LangSmithAPIError",
"LangSmithAuthError",
"LangSmithConflictError",
"LangSmithConnectionError",
"LangSmithError",
"LangSmithMissingAPIKeyWarning",
"LangSmithNotFoundError",
"LangSmithRateLimitError",
"LangSmithRetry",
"LangSmithUserError",
"LangSmithWarning",
}
def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
    """Collect the documentable classes and functions defined in a module.

    Args:
        module_path: Importable dotted path of the module to inspect.
        namespace: Prefix used to build the qualified names shown in the docs.

    Returns:
        ModuleMembers listing the classes and functions declared directly in
        the module. Members re-exported from other modules, explicitly
        excluded names, and non-allow-listed ``utils`` members are skipped.
    """
    classes_: List[ClassInfo] = []
    functions: List[FunctionInfo] = []
    module = importlib.import_module(module_path)
    # Removed a leftover debug print that dumped every member of the
    # evaluation modules to stdout.
    for name, type_ in inspect.getmembers(module):
        # Skip members not defined in this module, excluded names, and utils
        # members that are not on the explicit allow-list.
        if (
            not hasattr(type_, "__module__")
            or type_.__module__ != module_path
            or name in _EXCLUDED_NAMES
            or (module_path.endswith("utils") and name not in _INCLUDED_UTILS)
        ):
            logger.info(f"Excluding {module_path}.{name}")
            continue
        if inspect.isclass(type_):
            # Classify so the RST templates can pick the right autodoc
            # directive. TypedDicts are detected via their metaclass name
            # (the original code compared against a tuple containing the
            # same literal twice).
            kind: ClassKind = (
                "TypedDict"
                if type(type_).__name__ == "_TypedDictMeta"
                else (
                    "enum"
                    if issubclass(type_, Enum)
                    else "Pydantic" if issubclass(type_, BaseModel) else "Regular"
                )
            )
            classes_.append(
                ClassInfo(
                    name=name,
                    qualified_name=f"{namespace}.{name}",
                    kind=kind,
                    is_public=not name.startswith("_"),
                    is_deprecated=".. deprecated::" in (type_.__doc__ or ""),
                )
            )
        elif inspect.isfunction(type_):
            functions.append(
                FunctionInfo(
                    name=name,
                    qualified_name=f"{namespace}.{name}",
                    is_public=not name.startswith("_"),
                    is_deprecated=".. deprecated::" in (type_.__doc__ or ""),
                )
            )
    return ModuleMembers(classes_=classes_, functions=functions)
def _load_package_modules(
    package_directory: Union[str, Path],
) -> Dict[str, ModuleMembers]:
    """Walk a package directory and collect members per top-level module.

    Private modules (any path component starting with ``_``) are skipped,
    except for a small allow-list of private files that hold public
    documentation targets. Modules that fail to import are reported and
    skipped rather than aborting the build.

    Args:
        package_directory: Filesystem path of the package to scan.

    Returns:
        Mapping from top-level module name to its merged ``ModuleMembers``.
    """
    package_path = Path(package_directory)
    modules_by_namespace: Dict[str, ModuleMembers] = {}
    package_name = package_path.name
    for file_path in package_path.rglob("*.py"):
        if file_path.name.startswith("_") or any(
            part.startswith("_") for part in file_path.relative_to(package_path).parts
        ):
            # These private files contain public, documented entry points.
            if file_path.name not in {
                "_runner.py",
                "_arunner.py",
                "_testing.py",
                "_expect.py",
                "_openai.py",
            }:
                continue
        # Build the dotted namespace from path parts so it works with any OS
        # path separator (the original replaced only "/", breaking Windows)
        # and so ".py" is only stripped as a suffix, never mid-name.
        namespace = ".".join(file_path.relative_to(package_path).with_suffix("").parts)
        top_namespace = namespace.split(".")[0]
        if top_namespace in _EXCLUDED_MODULES:
            logger.info(f"Excluding module {top_namespace}")
            continue
        try:
            module_members = _load_module_members(
                f"{package_name}.{namespace}", namespace
            )
        except ImportError as e:
            print(f"Error: Unable to import module '{namespace}' with error: {e}")
            continue
        if top_namespace in modules_by_namespace:
            # Merge submodules into their top-level namespace entry.
            existing = modules_by_namespace[top_namespace]
            modules_by_namespace[top_namespace] = ModuleMembers(
                classes_=existing["classes_"] + module_members["classes_"],
                functions=existing["functions"] + module_members["functions"],
            )
        else:
            modules_by_namespace[top_namespace] = module_members
    return modules_by_namespace
# Preferred ordering of module pages in the generated docs index.
# Modules not listed here are appended after these (see _construct_doc).
module_order = [
    "client",
    "async_client",
    "evaluation",
    "run_helpers",
    "run_trees",
    "schemas",
    "utils",
    "anonymizer",
    "wrappers",
]
def _template_for(kind: ClassKind) -> str:
    """Map a class kind to the autosummary template used to render it."""
    if kind == "TypedDict":
        return "typeddict.rst"
    if kind == "enum":
        return "enum.rst"
    if kind == "Pydantic":
        return "pydantic.rst"
    return "class.rst"


def _construct_doc(
    package_namespace: str,
    members_by_namespace: Dict[str, ModuleMembers],
    package_version: str,
) -> List[tuple[str, str]]:
    """Render one RST document per top-level module.

    Args:
        package_namespace: Top-level package name (e.g. ``langsmith``).
        members_by_namespace: Members discovered per top-level module.
        package_version: Version string shown in the index title.

    Returns:
        List of ``(filename, rst_content)`` pairs, one per module.
    """
    docs = []
    index_doc = f"""\
:html_theme.sidebar_secondary.remove:

.. currentmodule:: {package_namespace}

.. _{package_namespace}:

{package_namespace.replace('_', '-')}: {package_version}
{'=' * (len(package_namespace) + len(package_version) + 2)}

.. automodule:: {package_namespace}
    :no-members:
    :no-inherited-members:

.. toctree::
    :maxdepth: 2

"""

    def _priority(mod: str):
        # Curated modules keep their hand-picked position; anything new sorts
        # alphabetically after them. Tuple keys keep the fallback
        # deterministic — the original used hash(mod), which varies between
        # interpreter runs under hash randomization.
        if mod in module_order:
            return (0, module_order.index(mod))
        print(mod, "not in ", module_order)
        return (1, mod)

    for module in sorted(members_by_namespace, key=lambda x: _priority(x)):
        index_doc += f"    {module}\n"
        module_doc = f"""\
.. currentmodule:: {package_namespace}

.. _{package_namespace}_{module}:

:mod:`{module}`
{'=' * (len(module) + 7)}

.. automodule:: {package_namespace}.{module}
    :no-members:
    :no-inherited-members:

"""
        _members = members_by_namespace[module]
        # Split members into the four rendered sections.
        classes = [
            el
            for el in _members["classes_"]
            if el["is_public"] and not el["is_deprecated"]
        ]
        functions = [
            el
            for el in _members["functions"]
            if el["is_public"] and not el["is_deprecated"]
        ]
        deprecated_classes = [
            el for el in _members["classes_"] if el["is_public"] and el["is_deprecated"]
        ]
        deprecated_functions = [
            el
            for el in _members["functions"]
            if el["is_public"] and el["is_deprecated"]
        ]
        if classes:
            module_doc += f"""\
**Classes**

.. currentmodule:: {package_namespace}

.. autosummary::
    :toctree: {module}
"""
            for class_ in sorted(classes, key=lambda c: c["qualified_name"]):
                template = _template_for(class_["kind"])
                module_doc += f"""\
    :template: {template}

    {class_["qualified_name"]}

"""
        if functions:
            qualnames = "\n    ".join(sorted(f["qualified_name"] for f in functions))
            module_doc += f"""**Functions**

.. currentmodule:: {package_namespace}

.. autosummary::
    :toctree: {module}
    :template: function.rst

    {qualnames}

"""
        if deprecated_classes:
            module_doc += f"""**Deprecated classes**

.. currentmodule:: {package_namespace}

.. autosummary::
    :toctree: {module}
"""
            for class_ in sorted(deprecated_classes, key=lambda c: c["qualified_name"]):
                template = _template_for(class_["kind"])
                module_doc += f"""    :template: {template}

    {class_["qualified_name"]}

"""
        if deprecated_functions:
            qualnames = "\n    ".join(
                sorted(f["qualified_name"] for f in deprecated_functions)
            )
            module_doc += f"""**Deprecated functions**

.. currentmodule:: {package_namespace}

.. autosummary::
    :toctree: {module}
    :template: function.rst

    {qualnames}

"""
        docs.append((f"{module}.rst", module_doc))
    # The index page is written by _build_index instead.
    # docs.append(("index.rst", index_doc))
    return docs
def _get_package_version(package_dir: Path) -> str:
    """Return the poetry version string from the sibling pyproject.toml.

    Aborts the process when pyproject.toml cannot be found, because the
    docs build is meaningless without a version string.
    """
    pyproject_path = package_dir.parent / "pyproject.toml"
    try:
        with open(pyproject_path) as f:
            pyproject = toml.load(f)
    except FileNotFoundError:
        print(f"pyproject.toml not found in {package_dir.parent}. Aborting the build.")
        sys.exit(1)
    return pyproject["tool"]["poetry"]["version"]
def _build_index(package_version: str) -> None:
    """Write the top-level reference.md and index.md MyST pages.

    Side effects: creates/overwrites ``reference.md`` and ``index.md``
    in the docs directory (``HERE``).
    """
    reference_md = f"""# LangSmith Python SDK

**Version: `{package_version}`**

Welcome to the API reference for the LangSmith Python SDK.

For user guides see [https://docs.smith.langchain.com](https://docs.smith.langchain.com).

Here are quick links to some of the key classes and functions:

| Class/function | Description |
| :- | :- |
| [Client](client/langsmith.client.Client) | Synchronous client for interacting with the LangSmith API. |
| [AsyncClient](async_client/langsmith.async_client.AsyncClient) | Asynchronous client for interacting with the LangSmith API. |
| [traceable](run_helpers/langsmith.run_helpers.traceable) | Wrapper/decorator for tracing any function. |
| [wrap_openai](wrappers/langsmith.wrappers._openai.wrap_openai) | Wrapper for OpenAI client, adds LangSmith tracing to all OpenAI calls. |
| [evaluate](evaluation/langsmith.evaluation._runner.evaluate) | Evaluate an application on a dataset. |
| [aevaluate](evaluation/langsmith.evaluation._arunner.aevaluate) | Asynchronously evaluate an application on a dataset. |
| [unit](_testing/langsmith._testing.unit) | Create a LangSmith unit test. |

```{{toctree}}
:maxdepth: 2
:hidden:

client<client>
async_client<async_client>
evaluation<evaluation>
run_helpers<run_helpers>
wrappers<wrappers>
_testing<_testing>
```
"""
    (HERE / "reference.md").write_text(reference_md)

    dummy_index = """\
# API reference

```{toctree}
:maxdepth: 3
:hidden:

Reference<reference>
```
"""
    (HERE / "index.md").write_text(dummy_index)
def main() -> None:
    """Build every API reference file for the langsmith package."""
    print("Starting to build API reference files.")
    members = _load_package_modules(PACKAGE_DIR)
    version = _get_package_version(PACKAGE_DIR)
    # Write one RST file per top-level module, then the MyST index pages.
    for filename, content in _construct_doc("langsmith", members, version):
        (HERE / filename).write_text(content)
    _build_index(version)
    print("API reference files built.")


if __name__ == "__main__":
    main()
|
0 | lc_public_repos/langsmith-sdk/python | lc_public_repos/langsmith-sdk/python/docs/make.bat | @ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd
|
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/_extensions/gallery_directive.py | """A directive to generate a gallery of images from structured data.
Generating a gallery of images that are all the same size is a common
pattern in documentation, and this can be cumbersome if the gallery is
generated programmatically. This directive wraps this particular use-case
in a helper-directive to generate it with a single YAML configuration file.
It currently exists for maintainers of the pydata-sphinx-theme,
but might be abstracted into a standalone package if it proves useful.
"""
from pathlib import Path
from typing import Any, ClassVar, Dict, List
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.application import Sphinx
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from yaml import safe_load
logger = logging.getLogger(__name__)
# Outer sphinx-design grid container; {columns}/{options}/{content} are
# filled in by GalleryGridDirective.run(). Five backticks are used so the
# four-backtick card fences below can nest inside it.
TEMPLATE_GRID = """
`````{{grid}} {columns}
{options}
{content}
`````
"""
# One sphinx-design grid-item-card entry, rendered per YAML gallery item.
GRID_CARD = """
````{{grid-item-card}} {title}
{options}
{content}
````
"""
class GalleryGridDirective(SphinxDirective):
    """A directive to show a gallery of images and links in a Bootstrap grid.

    The grid can be generated from a YAML file that contains a list of items, or
    from the content of the directive (also formatted in YAML). Use the parameter
    "class-card" to add an additional CSS class to all cards. When specifying the grid
    items, you can use all parameters from "grid-item-card" directive to customize
    individual cards + ["image", "header", "content", "title"].

    Danger:
        This directive can only be used in the context of a Myst documentation page as
        the templates use Markdown flavored formatting.
    """

    name = "gallery-grid"
    has_content = True
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec: ClassVar[dict[str, Any]] = {
        # A class to be added to the resulting container
        "grid-columns": directives.unchanged,
        "class-container": directives.unchanged,
        "class-card": directives.unchanged,
    }

    def run(self) -> List[nodes.Node]:
        """Create the gallery grid.

        Returns:
            A single-element list holding the rendered grid container, or a
            paragraph describing the problem when the YAML data is missing.
        """
        if self.arguments:
            # If an argument is given, assume it's a path to a YAML file
            # relative to the current document; parse it as the grid data.
            path_data_rel = Path(self.arguments[0])
            path_doc, _ = self.get_source_info()
            path_doc = Path(path_doc).parent
            path_data = (path_doc / path_data_rel).resolve()
            if not path_data.exists():
                logger.info(f"Could not find grid data at {path_data}.")
                # FIX: the original built a text node, discarded it, and
                # returned None (not a valid directive result), and the
                # message was missing its f-string prefix. Return a visible
                # paragraph instead so the problem shows up in the output.
                return [nodes.paragraph(text=f"No grid data found at {path_data}.")]
            yaml_string = path_data.read_text()
        else:
            yaml_string = "\n".join(self.content)

        # Generate a card item for each entry in the YAML list.
        grid_items = []
        for item in safe_load(yaml_string):
            # Pull out the keys rendered specially; everything left in `item`
            # becomes a ":key: value" option on the card.
            title = item.pop("title", "")
            header = f"{item.pop('header')} \n^^^ \n" if "header" in item else ""
            # NOTE(review): this line was garbled in the source; reconstructed
            # as the standard markdown image form — confirm against upstream
            # pydata-sphinx-theme.
            image = f"![image]({item.pop('image')}) \n" if "image" in item else ""
            content = f"{item.pop('content')} \n" if "content" in item else ""
            # Optional parameter that influences all cards.
            if "class-card" in self.options:
                item["class-card"] = self.options["class-card"]
            loc_options_str = "\n".join(f":{k}: {v}" for k, v in item.items()) + " \n"
            card = GRID_CARD.format(
                options=loc_options_str, content=header + image + content, title=title
            )
            grid_items.append(card)

        # Prep the options for the template grid.
        class_ = "gallery-directive" + f' {self.options.get("class-container", "")}'
        options = {"gutter": 2, "class-container": class_}
        options_str = "\n".join(f":{k}: {v}" for k, v in options.items())

        # Create the directive string for the grid and let Sphinx Design
        # parse it into real doctree nodes.
        grid_directive = TEMPLATE_GRID.format(
            columns=self.options.get("grid-columns", "1 2 3 4"),
            options=options_str,
            content="\n".join(grid_items),
        )
        container = nodes.container()
        self.state.nested_parse([grid_directive], 0, container)
        # Sphinx Design outputs a container too, so just use that.
        return [container.children[0]]
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the ``gallery-grid`` directive with the Sphinx application.

    Args:
        app: the Sphinx application
    Returns:
        Extension metadata declaring both parallel read and parallel write
        as safe (both flags set to ``True``).
    """
    app.add_directive("gallery-grid", GalleryGridDirective)
    metadata: Dict[str, Any] = {
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
    return metadata
|
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/_static/wordmark-api.svg | <svg width="72" height="19" viewBox="0 0 72 19" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_4088_901)">
<g clip-path="url(#clip1_4088_901)">
<path d="M21.5279 14.299C21.4257 14.469 21.1186 14.4801 20.8569 14.3228C20.7228 14.2423 20.6191 14.1313 20.5655 14.0105C20.5163 13.9009 20.5155 13.7973 20.5625 13.719C20.6162 13.6296 20.7272 13.5841 20.8584 13.5841C20.9762 13.5841 21.1104 13.6206 21.2342 13.6952C21.4958 13.8524 21.6308 14.129 21.5286 14.299H21.5279ZM36.67 9.31846C36.67 14.4563 32.4894 18.6369 27.3515 18.6369H9.31846C4.18063 18.6369 0 14.457 0 9.31846C0 4.17989 4.18063 0 9.31846 0H27.3515C32.4901 0 36.67 4.18063 36.67 9.31846ZM17.9585 13.9919C18.1054 13.8137 17.427 13.312 17.2884 13.1278C17.0066 12.8222 17.0051 12.3824 16.815 12.0253C16.3498 10.9473 15.8153 9.87757 15.0676 8.9651C14.2774 7.96691 13.3023 7.14092 12.4457 6.20386C11.8098 5.55007 11.6399 4.61897 11.0785 3.91599C10.3047 2.77317 7.85807 2.46156 7.4995 4.07552C7.50099 4.12621 7.48533 4.15827 7.44135 4.19032C7.24305 4.3342 7.06637 4.49895 6.91802 4.69799C6.55498 5.20343 6.49907 6.06073 6.95232 6.51472C6.96723 6.27542 6.97543 6.04954 7.16478 5.87808C7.51515 6.17926 8.04444 6.28511 8.45072 6.06073C9.34828 7.3422 9.12464 9.11494 9.83731 10.4956C10.0341 10.8221 10.2324 11.1553 10.4851 11.4416C10.6901 11.7606 11.3983 12.1371 11.4401 12.4323C11.4475 12.9392 11.3879 13.4931 11.7204 13.9173C11.8769 14.2349 11.4923 14.5539 11.1822 14.5144C10.7796 14.5696 10.2883 14.2438 9.93571 14.4444C9.81122 14.5793 9.56745 14.4302 9.4601 14.6173C9.42283 14.7142 9.22155 14.8506 9.34157 14.9438C9.47501 14.8424 9.59876 14.7366 9.77842 14.797C9.75158 14.9431 9.86713 14.964 9.95882 15.0064C9.95584 15.1056 9.89769 15.207 9.97373 15.2912C10.0624 15.2018 10.1154 15.075 10.2563 15.0378C10.7244 15.6617 11.2008 14.4063 12.2139 14.9714C12.0081 14.9617 11.8255 14.9871 11.6868 15.1563C11.6525 15.1943 11.6235 15.239 11.6839 15.2882C12.2303 14.9356 12.2273 15.409 12.5822 15.2636C12.855 15.1212 13.1264 14.9431 13.4506 14.9938C13.1353 15.0847 13.1226 15.3382 12.9377 15.5521C12.9064 15.5849 12.8915 15.6222 12.9281 15.6766C13.5826 15.6215 13.6363 15.4038 14.1648 15.1369C14.5592 
14.8961 14.952 15.4798 15.2935 15.1473C15.3687 15.075 15.4716 15.0996 15.5648 15.0899C15.4455 14.454 14.1342 15.2062 14.1551 14.3534C14.5771 14.0664 14.4801 13.517 14.5085 13.0734C14.9938 13.3425 15.5335 13.4991 16.0091 13.7563C16.2492 14.1439 16.6256 14.6561 17.1273 14.6225C17.1407 14.5838 17.1527 14.5495 17.1668 14.51C17.3189 14.5361 17.5142 14.6367 17.5977 14.4444C17.8251 14.6822 18.1591 14.6702 18.4565 14.6091C18.6764 14.4302 18.0428 14.1752 17.9578 13.9911L17.9585 13.9919ZM24.761 10.9868L23.7636 9.64051C22.8928 10.6365 22.3114 11.1218 22.3009 11.1307C22.2957 11.1359 21.7403 11.6764 21.2342 12.1401C20.7377 12.5948 20.3455 12.9549 20.1458 13.3485C20.0906 13.4573 19.9706 13.8584 20.1391 14.2595C20.2688 14.5696 20.5334 14.7903 20.9263 14.9155C21.0441 14.9528 21.1566 14.9692 21.264 14.9692C21.9714 14.9692 22.4359 14.2595 22.4388 14.2535C22.4448 14.2453 23.0464 13.3858 23.7792 12.3004C24.023 11.9396 24.3018 11.5579 24.761 10.9861V10.9868ZM29.4508 14.0873C29.4508 13.889 29.3785 13.6974 29.2473 13.549L29.1235 13.4089C28.3758 12.5613 26.4562 10.3852 25.7093 9.53987C24.7714 8.47831 23.7226 7.12527 23.6353 7.01195L23.5094 6.75104V6.29182C23.5094 6.12335 23.4758 5.9586 23.4109 5.80354L23.1441 5.16988C23.1403 5.16094 23.1388 5.1505 23.1403 5.14081L23.1508 5.05284C23.1523 5.03868 23.159 5.026 23.1702 5.01631C23.3781 4.83293 24.152 4.23804 25.3499 4.28798C25.5065 4.29469 25.5341 4.20896 25.5385 4.17243C25.5602 3.9965 25.1583 3.78926 24.7841 3.71247C24.2697 3.60736 22.9025 3.32855 21.8089 4.04645L21.8007 4.05241C21.094 4.64283 20.526 5.09384 20.52 5.09832L20.5073 5.11099C20.4991 5.12068 20.2993 5.35998 20.3478 5.66488C20.3791 5.86317 20.2762 5.93399 20.2703 5.93772C20.2643 5.94145 20.1241 6.02942 19.9795 5.93027C19.8043 5.79906 19.5002 6.0242 19.4383 6.0734L18.9791 6.4685L18.9701 6.47745C18.9619 6.48714 18.7651 6.71749 19.0283 7.08725C19.2557 7.40706 19.3362 7.51366 19.5337 7.75967C19.7343 8.00866 20.0958 8.32399 20.1152 8.34039C20.1241 8.34785 20.3448 8.51483 20.649 
8.28225C20.8987 8.09066 21.0992 7.91771 21.0992 7.91771C21.1156 7.90429 21.2603 7.78501 21.267 7.60908C21.2692 7.55839 21.267 7.51441 21.267 7.47415C21.2632 7.3504 21.2625 7.31387 21.3557 7.25423C21.4004 7.25423 21.5376 7.30418 21.6554 7.36382C21.668 7.37127 21.9625 7.53677 22.2301 7.52484C22.3986 7.54721 22.585 7.73879 22.6491 7.81707C22.655 7.82303 23.2246 8.4209 24.0267 9.46979C24.1795 9.66883 24.7409 10.4165 24.8944 10.6253C25.1509 10.9742 25.5393 11.502 25.9568 12.0708C26.6799 13.0555 27.4909 14.1596 27.8592 14.6561C27.9815 14.8208 28.1596 14.9326 28.3609 14.9714L28.4973 14.9975C28.5495 15.0072 28.6017 15.0124 28.6539 15.0124C28.8954 15.0124 29.1243 14.9066 29.2756 14.7187L29.2838 14.7083C29.3927 14.5703 29.4523 14.3937 29.4523 14.2118V14.0873H29.4508ZM30.0927 5.24517L29.9339 5.08639C29.8877 5.04017 29.8228 5.01259 29.7572 5.0178C29.6916 5.02079 29.6297 5.0506 29.5872 5.1013L28.5883 6.28288C28.56 6.31642 28.5197 6.3373 28.4765 6.34177L28.1209 6.37755C28.0754 6.38203 28.0299 6.36786 27.9949 6.33879L27.4268 5.85796C27.3903 5.82665 27.3687 5.78192 27.3672 5.73421L27.359 5.44943C27.3575 5.40843 27.3717 5.36818 27.3978 5.33687L28.3691 4.16647C28.4422 4.0785 28.4414 3.95103 28.3676 3.86306L28.27 3.74751C28.2059 3.67222 28.1 3.64464 28.0076 3.67967C27.778 3.76689 27.1995 3.99277 26.7813 4.2194C26.1901 4.53846 25.7793 5.04464 25.7085 5.54038C25.6563 5.90343 25.6779 6.50205 25.6958 6.82633C25.7025 6.95381 25.6742 7.08128 25.6123 7.19534C25.5363 7.33698 25.4021 7.56361 25.1979 7.83645C25.0935 7.98033 25.0279 8.03326 24.9377 8.13986L26.0365 9.43177C26.3012 9.12314 26.533 8.88906 26.735 8.66318C27.104 8.25243 27.2188 8.2487 27.526 8.23826C27.7153 8.23155 27.9748 8.22335 28.3855 8.12048C29.5067 7.84018 29.8616 6.62654 29.8765 6.57287L30.1553 5.47031C30.1754 5.39054 30.1516 5.30407 30.0934 5.24592L30.0927 5.24517ZM12.8513 12.9415C12.7305 13.4126 12.691 14.2155 12.0782 14.2386C12.0275 14.5107 12.2668 14.6128 12.4838 14.5256C12.6992 14.4265 12.8013 14.6039 12.8736 
14.7806C13.2061 14.829 13.6981 14.6695 13.7168 14.2759C13.2203 13.9896 13.0667 13.4454 12.8505 12.9415H12.8513Z" fill="#1C3C3C"/>
</g>
<path d="M44.4294 15.2485L48.0614 3.72853H50.8854L54.5174 15.2485H52.5334L49.2374 4.91253H49.6694L46.4134 15.2485H44.4294ZM46.4534 12.7525V10.9525H52.5014V12.7525H46.4534ZM55.9638 15.2485V3.72853H60.7238C60.8358 3.72853 60.9798 3.73387 61.1558 3.74453C61.3318 3.74987 61.4945 3.76587 61.6438 3.79253C62.3105 3.89387 62.8598 4.1152 63.2918 4.45653C63.7291 4.79787 64.0518 5.22987 64.2598 5.75253C64.4731 6.26987 64.5798 6.84587 64.5798 7.48053C64.5798 8.10987 64.4731 8.68587 64.2598 9.20853C64.0465 9.72587 63.7211 10.1552 63.2838 10.4965C62.8518 10.8379 62.3051 11.0592 61.6438 11.1605C61.4945 11.1819 61.3291 11.1979 61.1478 11.2085C60.9718 11.2192 60.8305 11.2245 60.7238 11.2245H57.8918V15.2485H55.9638ZM57.8918 9.42453H60.6438C60.7505 9.42453 60.8705 9.4192 61.0038 9.40854C61.1371 9.39787 61.2598 9.37653 61.3718 9.34453C61.6918 9.26453 61.9425 9.1232 62.1238 8.92054C62.3105 8.71787 62.4411 8.48853 62.5158 8.23253C62.5958 7.97653 62.6358 7.72587 62.6358 7.48053C62.6358 7.2352 62.5958 6.98453 62.5158 6.72853C62.4411 6.4672 62.3105 6.2352 62.1238 6.03253C61.9425 5.82987 61.6918 5.68853 61.3718 5.60853C61.2598 5.57653 61.1371 5.55787 61.0038 5.55254C60.8705 5.54187 60.7505 5.53653 60.6438 5.53653H57.8918V9.42453ZM66.3426 15.2485V3.72853H68.2706V15.2485H66.3426Z" fill="#1C3C3C"/>
</g>
<defs>
<clipPath id="clip0_4088_901">
<rect width="71.9864" height="18.6369" fill="white"/>
</clipPath>
<clipPath id="clip1_4088_901">
<rect width="36.67" height="18.6369" fill="white"/>
</clipPath>
</defs>
</svg>
|
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/_static/wordmark-api-dark.svg | <svg width="72" height="19" viewBox="0 0 72 19" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_4088_916)">
<g clip-path="url(#clip1_4088_916)">
<path d="M21.5279 14.662C21.4257 14.832 21.1186 14.8432 20.8569 14.6859C20.7228 14.6054 20.6191 14.4943 20.5655 14.3735C20.5163 14.2639 20.5155 14.1603 20.5625 14.082C20.6162 13.9926 20.7272 13.9471 20.8584 13.9471C20.9762 13.9471 21.1104 13.9836 21.2342 14.0582C21.4958 14.2155 21.6308 14.4921 21.5286 14.662H21.5279ZM36.67 9.6815C36.67 14.8193 32.4894 19 27.3515 19H9.31846C4.18063 19 0 14.8201 0 9.6815C0 4.54292 4.18063 0.363037 9.31846 0.363037H27.3515C32.4901 0.363037 36.67 4.54367 36.67 9.6815ZM17.9585 14.3549C18.1054 14.1767 17.427 13.675 17.2884 13.4909C17.0066 13.1852 17.0051 12.7454 16.815 12.3883C16.3498 11.3104 15.8153 10.2406 15.0676 9.32814C14.2774 8.32995 13.3023 7.50396 12.4457 6.56689C11.8098 5.91311 11.6399 4.98201 11.0785 4.27903C10.3047 3.13621 7.85807 2.8246 7.4995 4.43856C7.50099 4.48925 7.48533 4.52131 7.44135 4.55336C7.24305 4.69724 7.06637 4.86199 6.91802 5.06103C6.55498 5.56646 6.49907 6.42376 6.95232 6.87776C6.96723 6.63846 6.97543 6.41258 7.16478 6.24112C7.51515 6.54229 8.04444 6.64815 8.45072 6.42376C9.34828 7.70524 9.12464 9.47798 9.83731 10.8586C10.0341 11.1851 10.2324 11.5184 10.4851 11.8046C10.6901 12.1237 11.3983 12.5001 11.4401 12.7954C11.4475 13.3023 11.3879 13.8562 11.7204 14.2803C11.8769 14.5979 11.4923 14.917 11.1822 14.8775C10.7796 14.9326 10.2883 14.6069 9.93571 14.8074C9.81122 14.9423 9.56745 14.7932 9.4601 14.9803C9.42283 15.0773 9.22155 15.2137 9.34157 15.3069C9.47501 15.2055 9.59876 15.0996 9.77842 15.16C9.75158 15.3061 9.86713 15.327 9.95882 15.3695C9.95584 15.4686 9.89769 15.57 9.97373 15.6543C10.0624 15.5648 10.1154 15.4381 10.2563 15.4008C10.7244 16.0248 11.2008 14.7694 12.2139 15.3344C12.0081 15.3248 11.8255 15.3501 11.6868 15.5193C11.6525 15.5573 11.6235 15.6021 11.6839 15.6513C12.2303 15.2987 12.2273 15.772 12.5822 15.6267C12.855 15.4843 13.1264 15.3061 13.4506 15.3568C13.1353 15.4478 13.1226 15.7012 12.9377 15.9152C12.9064 15.948 12.8915 15.9852 12.9281 16.0397C13.5826 15.9845 13.6363 15.7668 14.1648 15.4999C14.5592 
15.2592 14.952 15.8429 15.2935 15.5104C15.3687 15.4381 15.4716 15.4627 15.5648 15.453C15.4455 14.8171 14.1342 15.5693 14.1551 14.7164C14.5771 14.4294 14.4801 13.88 14.5085 13.4365C14.9938 13.7056 15.5335 13.8621 16.0091 14.1193C16.2492 14.507 16.6256 15.0191 17.1273 14.9856C17.1407 14.9468 17.1527 14.9125 17.1668 14.873C17.3189 14.8991 17.5142 14.9997 17.5977 14.8074C17.8251 15.0452 18.1591 15.0333 18.4565 14.9721C18.6764 14.7932 18.0428 14.5383 17.9578 14.3541L17.9585 14.3549ZM24.761 11.3499L23.7636 10.0035C22.8928 10.9995 22.3114 11.4848 22.3009 11.4937C22.2957 11.499 21.7403 12.0394 21.2342 12.5031C20.7377 12.9579 20.3455 13.3179 20.1458 13.7115C20.0906 13.8204 19.9706 14.2214 20.1391 14.6225C20.2688 14.9326 20.5334 15.1533 20.9263 15.2785C21.0441 15.3158 21.1566 15.3322 21.264 15.3322C21.9714 15.3322 22.4359 14.6225 22.4388 14.6166C22.4448 14.6084 23.0464 13.7488 23.7792 12.6634C24.023 12.3026 24.3018 11.9209 24.761 11.3491V11.3499ZM29.4508 14.4503C29.4508 14.252 29.3785 14.0604 29.2473 13.9121L29.1235 13.7719C28.3758 12.9243 26.4562 10.7483 25.7093 9.9029C24.7714 8.84134 23.7226 7.4883 23.6353 7.37499L23.5094 7.11407V6.65486C23.5094 6.48638 23.4758 6.32163 23.4109 6.16657L23.1441 5.53292C23.1403 5.52397 23.1388 5.51354 23.1403 5.50384L23.1508 5.41588C23.1523 5.40171 23.159 5.38904 23.1702 5.37935C23.3781 5.19596 24.152 4.60107 25.3499 4.65102C25.5065 4.65773 25.5341 4.572 25.5385 4.53547C25.5602 4.35954 25.1583 4.1523 24.7841 4.07551C24.2697 3.9704 22.9025 3.69159 21.8089 4.40948L21.8007 4.41545C21.094 5.00587 20.526 5.45688 20.52 5.46135L20.5073 5.47403C20.4991 5.48372 20.2993 5.72301 20.3478 6.02791C20.3791 6.22621 20.2762 6.29703 20.2703 6.30076C20.2643 6.30449 20.1241 6.39245 19.9795 6.2933C19.8043 6.1621 19.5002 6.38723 19.4383 6.43644L18.9791 6.83154L18.9701 6.84048C18.9619 6.85018 18.7651 7.08053 19.0283 7.45028C19.2557 7.77009 19.3362 7.8767 19.5337 8.1227C19.7343 8.37169 20.0958 8.68703 20.1152 8.70343C20.1241 8.71089 20.3448 8.87787 20.649 
8.64528C20.8987 8.4537 21.0992 8.28075 21.0992 8.28075C21.1156 8.26733 21.2603 8.14805 21.267 7.97212C21.2692 7.92143 21.267 7.87744 21.267 7.83719C21.2632 7.71344 21.2625 7.67691 21.3557 7.61727C21.4004 7.61727 21.5376 7.66722 21.6554 7.72686C21.668 7.73431 21.9625 7.89981 22.2301 7.88788C22.3986 7.91024 22.585 8.10183 22.6491 8.18011C22.655 8.18607 23.2246 8.78394 24.0267 9.83283C24.1795 10.0319 24.7409 10.7796 24.8944 10.9883C25.1509 11.3372 25.5393 11.865 25.9568 12.4338C26.6799 13.4186 27.4909 14.5226 27.8592 15.0191C27.9815 15.1839 28.1596 15.2957 28.3609 15.3344L28.4973 15.3605C28.5495 15.3702 28.6017 15.3754 28.6539 15.3754C28.8954 15.3754 29.1243 15.2696 29.2756 15.0817L29.2838 15.0713C29.3927 14.9334 29.4523 14.7567 29.4523 14.5748V14.4503H29.4508ZM30.0927 5.60821L29.9339 5.44942C29.8877 5.40321 29.8228 5.37562 29.7572 5.38084C29.6916 5.38382 29.6297 5.41364 29.5872 5.46433L28.5883 6.64591C28.56 6.67946 28.5197 6.70033 28.4765 6.70481L28.1209 6.74059C28.0754 6.74506 28.0299 6.7309 27.9949 6.70183L27.4268 6.22099C27.3903 6.18968 27.3687 6.14495 27.3672 6.09724L27.359 5.81247C27.3575 5.77147 27.3717 5.73121 27.3978 5.6999L28.3691 4.52951C28.4422 4.44154 28.4414 4.31406 28.3676 4.2261L28.27 4.11055C28.2059 4.03526 28.1 4.00767 28.0076 4.04271C27.778 4.12993 27.1995 4.35581 26.7813 4.58244C26.1901 4.9015 25.7793 5.40768 25.7085 5.90342C25.6563 6.26647 25.6779 6.86508 25.6958 7.18937C25.7025 7.31684 25.6742 7.44432 25.6123 7.55838C25.5363 7.70002 25.4021 7.92664 25.1979 8.19949C25.0935 8.34337 25.0279 8.39629 24.9377 8.5029L26.0365 9.79481C26.3012 9.48618 26.533 9.2521 26.735 9.02622C27.104 8.61546 27.2188 8.61174 27.526 8.6013C27.7153 8.59459 27.9748 8.58639 28.3855 8.48351C29.5067 8.20322 29.8616 6.98958 29.8765 6.93591L30.1553 5.83334C30.1754 5.75358 30.1516 5.6671 30.0934 5.60896L30.0927 5.60821ZM12.8513 13.3045C12.7305 13.7757 12.691 14.5785 12.0782 14.6016C12.0275 14.8737 12.2668 14.9759 12.4838 14.8887C12.6992 14.7895 12.8013 14.9669 12.8736 
15.1436C13.2061 15.1921 13.6981 15.0325 13.7168 14.6389C13.2203 14.3527 13.0667 13.8085 12.8505 13.3045H12.8513Z" fill="#F4F3FF"/>
</g>
<path d="M44.4294 15.6116L48.0614 4.09157H50.8854L54.5174 15.6116H52.5334L49.2374 5.27557H49.6694L46.4134 15.6116H44.4294ZM46.4534 13.1156V11.3156H52.5014V13.1156H46.4534ZM55.9638 15.6116V4.09157H60.7238C60.8358 4.09157 60.9798 4.09691 61.1558 4.10757C61.3318 4.11291 61.4945 4.12891 61.6438 4.15557C62.3105 4.2569 62.8598 4.47824 63.2918 4.81957C63.7291 5.1609 64.0518 5.59291 64.2598 6.11557C64.4731 6.63291 64.5798 7.20891 64.5798 7.84357C64.5798 8.47291 64.4731 9.04891 64.2598 9.57157C64.0465 10.0889 63.7211 10.5182 63.2838 10.8596C62.8518 11.2009 62.3051 11.4222 61.6438 11.5236C61.4945 11.5449 61.3291 11.5609 61.1478 11.5716C60.9718 11.5822 60.8305 11.5876 60.7238 11.5876H57.8918V15.6116H55.9638ZM57.8918 9.78757H60.6438C60.7505 9.78757 60.8705 9.78224 61.0038 9.77157C61.1371 9.76091 61.2598 9.73957 61.3718 9.70757C61.6918 9.62757 61.9425 9.48624 62.1238 9.28357C62.3105 9.08091 62.4411 8.85157 62.5158 8.59557C62.5958 8.33957 62.6358 8.08891 62.6358 7.84357C62.6358 7.59824 62.5958 7.34757 62.5158 7.09157C62.4411 6.83024 62.3105 6.59824 62.1238 6.39557C61.9425 6.19291 61.6918 6.05157 61.3718 5.97157C61.2598 5.93957 61.1371 5.92091 61.0038 5.91557C60.8705 5.90491 60.7505 5.89957 60.6438 5.89957H57.8918V9.78757ZM66.3426 15.6116V4.09157H68.2706V15.6116H66.3426Z" fill="#F4F3FF"/>
</g>
<defs>
<clipPath id="clip0_4088_916">
<rect width="71.9864" height="18.6369" fill="white" transform="translate(0 0.363037)"/>
</clipPath>
<clipPath id="clip1_4088_916">
<rect width="36.67" height="18.6369" fill="white" transform="translate(0 0.363037)"/>
</clipPath>
</defs>
</svg>
|
0 | lc_public_repos/langsmith-sdk/python/docs/_static | lc_public_repos/langsmith-sdk/python/docs/_static/css/custom.css | @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;700&display=swap');
/*******************************************************************************
* master color map. Only the colors that actually differ between light and dark
* themes are specified separately.
*
* To see the full list of colors see https://www.figma.com/file/rUrrHGhUBBIAAjQ82x6pz9/PyData-Design-system---proposal-for-implementation-(2)?node-id=1234%3A765&t=ifcFT1JtnrSshGfi-1
*/
/**
* Function to get items from nested maps
*/
/* Assign base colors for the PyData theme */
:root {
--pst-teal-50: #f4fbfc;
--pst-teal-100: #e9f6f8;
--pst-teal-200: #d0ecf1;
--pst-teal-300: #abdde6;
--pst-teal-400: #3fb1c5;
--pst-teal-500: #0a7d91;
--pst-teal-600: #085d6c;
--pst-teal-700: #064752;
--pst-teal-800: #042c33;
--pst-teal-900: #021b1f;
--pst-violet-50: #f4eefb;
--pst-violet-100: #e0c7ff;
--pst-violet-200: #d5b4fd;
--pst-violet-300: #b780ff;
--pst-violet-400: #9c5ffd;
--pst-violet-500: #8045e5;
--pst-violet-600: #6432bd;
--pst-violet-700: #4b258f;
--pst-violet-800: #341a61;
--pst-violet-900: #1e0e39;
--pst-gray-50: #f9f9fa;
--pst-gray-100: #f3f4f5;
--pst-gray-200: #e5e7ea;
--pst-gray-300: #d1d5da;
--pst-gray-400: #9ca4af;
--pst-gray-500: #677384;
--pst-gray-600: #48566b;
--pst-gray-700: #29313d;
--pst-gray-800: #222832;
--pst-gray-900: #14181e;
--pst-pink-50: #fcf8fd;
--pst-pink-100: #fcf0fa;
--pst-pink-200: #f8dff5;
--pst-pink-300: #f3c7ee;
--pst-pink-400: #e47fd7;
--pst-pink-500: #c132af;
--pst-pink-600: #912583;
--pst-pink-700: #6e1c64;
--pst-pink-800: #46123f;
--pst-pink-900: #2b0b27;
--pst-foundation-white: #ffffff;
--pst-foundation-black: #14181e;
--pst-green-10: #f1fdfd;
--pst-green-50: #E0F7F6;
--pst-green-100: #B3E8E6;
--pst-green-200: #80D6D3;
--pst-green-300: #4DC4C0;
--pst-green-400: #4FB2AD;
--pst-green-500: #287977;
--pst-green-600: #246161;
--pst-green-700: #204F4F;
--pst-green-800: #1C3C3C;
--pst-green-900: #0D2427;
--pst-lilac-50: #f4eefb;
--pst-lilac-100: #DAD6FE;
--pst-lilac-200: #BCB2FD;
--pst-lilac-300: #9F8BFA;
--pst-lilac-400: #7F5CF6;
--pst-lilac-500: #6F3AED;
--pst-lilac-600: #6028D9;
--pst-lilac-700: #5021B6;
--pst-lilac-800: #431D95;
--pst-lilac-900: #1e0e39;
--pst-header-height: 2.5rem;
}
html {
--pst-font-family-base: 'Inter';
--pst-font-family-heading: 'Inter Tight', sans-serif;
}
/*******************************************************************************
* write the color rules for each theme (light/dark)
*/
/* NOTE:
* Mixins enable us to reuse the same definitions for the different modes
* https://sass-lang.com/documentation/at-rules/mixin
 * Interpolation (#{} syntax) inserts a variable into a CSS selector or property name
* https://sass-lang.com/documentation/interpolation
*/
/* Defaults to light mode if data-theme is not set */
html:not([data-theme]) {
--pst-color-primary: #287977;
--pst-color-primary-bg: #80D6D3;
--pst-color-secondary: #6F3AED;
--pst-color-secondary-bg: #DAD6FE;
--pst-color-accent: #c132af;
--pst-color-accent-bg: #f8dff5;
--pst-color-info: #276be9;
--pst-color-info-bg: #dce7fc;
--pst-color-warning: #f66a0a;
--pst-color-warning-bg: #f8e3d0;
--pst-color-success: #00843f;
--pst-color-success-bg: #d6ece1;
--pst-color-attention: var(--pst-color-warning);
--pst-color-attention-bg: var(--pst-color-warning-bg);
--pst-color-danger: #d72d47;
--pst-color-danger-bg: #f9e1e4;
--pst-color-text-base: #222832;
--pst-color-text-muted: #48566b;
--pst-color-heading-color: #ffffff;
--pst-color-shadow: rgba(0, 0, 0, 0.1);
--pst-color-border: #d1d5da;
--pst-color-border-muted: rgba(23, 23, 26, 0.2);
--pst-color-inline-code: #912583;
--pst-color-inline-code-links: #246161;
--pst-color-target: #f3cf95;
--pst-color-background: #ffffff;
--pst-color-on-background: #F4F9F8;
--pst-color-surface: #F4F9F8;
--pst-color-on-surface: #222832;
}
html:not([data-theme]) {
--pst-color-link: var(--pst-color-primary);
--pst-color-link-hover: var(--pst-color-secondary);
}
html:not([data-theme]) .only-dark,
html:not([data-theme]) .only-dark ~ figcaption {
display: none !important;
}
/* NOTE: @each {...} is like a for-loop
* https://sass-lang.com/documentation/at-rules/control/each
*/
html[data-theme=light] {
--pst-color-primary: #287977;
--pst-color-primary-bg: #80D6D3;
--pst-color-secondary: #6F3AED;
--pst-color-secondary-bg: #DAD6FE;
--pst-color-accent: #c132af;
--pst-color-accent-bg: #f8dff5;
--pst-color-info: #276be9;
--pst-color-info-bg: #dce7fc;
--pst-color-warning: #f66a0a;
--pst-color-warning-bg: #f8e3d0;
--pst-color-success: #00843f;
--pst-color-success-bg: #d6ece1;
--pst-color-attention: var(--pst-color-warning);
--pst-color-attention-bg: var(--pst-color-warning-bg);
--pst-color-danger: #d72d47;
--pst-color-danger-bg: #f9e1e4;
--pst-color-text-base: #222832;
--pst-color-text-muted: #48566b;
--pst-color-heading-color: #ffffff;
--pst-color-shadow: rgba(0, 0, 0, 0.1);
--pst-color-border: #d1d5da;
--pst-color-border-muted: rgba(23, 23, 26, 0.2);
--pst-color-inline-code: #912583;
--pst-color-inline-code-links: #246161;
--pst-color-target: #f3cf95;
--pst-color-background: #ffffff;
--pst-color-on-background: #F4F9F8;
--pst-color-surface: #F4F9F8;
--pst-color-on-surface: #222832;
color-scheme: light;
}
html[data-theme=light] {
--pst-color-link: var(--pst-color-primary);
--pst-color-link-hover: var(--pst-color-secondary);
}
html[data-theme=light] .only-dark,
html[data-theme=light] .only-dark ~ figcaption {
display: none !important;
}
html[data-theme=dark] {
--pst-color-primary: #4FB2AD;
--pst-color-primary-bg: #1C3C3C;
--pst-color-secondary: #7F5CF6;
--pst-color-secondary-bg: #431D95;
--pst-color-accent: #e47fd7;
--pst-color-accent-bg: #46123f;
--pst-color-info: #79a3f2;
--pst-color-info-bg: #06245d;
--pst-color-warning: #ff9245;
--pst-color-warning-bg: #652a02;
--pst-color-success: #5fb488;
--pst-color-success-bg: #002f17;
--pst-color-attention: var(--pst-color-warning);
--pst-color-attention-bg: var(--pst-color-warning-bg);
--pst-color-danger: #e78894;
--pst-color-danger-bg: #4e111b;
--pst-color-text-base: #ced6dd;
--pst-color-text-muted: #9ca4af;
--pst-color-heading-color: #14181e;
--pst-color-shadow: rgba(0, 0, 0, 0.2);
--pst-color-border: #48566b;
--pst-color-border-muted: #29313d;
--pst-color-inline-code: #f3c7ee;
--pst-color-inline-code-links: #4FB2AD;
--pst-color-target: #675c04;
--pst-color-background: #14181e;
--pst-color-on-background: #222832;
--pst-color-surface: #29313d;
--pst-color-on-surface: #f3f4f5;
/* Adjust images in dark mode (unless they have class .only-dark or
* .dark-light, in which case assume they're already optimized for dark
* mode).
*/
/* Give images a light background in dark mode in case they have
* transparency and black text (unless they have class .only-dark or .dark-light, in
* which case assume they're already optimized for dark mode).
*/
color-scheme: dark;
}
html[data-theme=dark] {
--pst-color-link: var(--pst-color-primary);
--pst-color-link-hover: var(--pst-color-secondary);
}
html[data-theme=dark] .only-light,
html[data-theme=dark] .only-light ~ figcaption {
display: none !important;
}
html[data-theme=dark] img:not(.only-dark):not(.dark-light) {
filter: brightness(0.8) contrast(1.2);
}
html[data-theme=dark] .bd-content img:not(.only-dark):not(.dark-light) {
background: rgb(255, 255, 255);
border-radius: 0.25rem;
}
html[data-theme=dark] .MathJax_SVG * {
fill: var(--pst-color-text-base);
}
.pst-color-primary {
color: var(--pst-color-primary);
}
.pst-color-secondary {
color: var(--pst-color-secondary);
}
.pst-color-accent {
color: var(--pst-color-accent);
}
.pst-color-info {
color: var(--pst-color-info);
}
.pst-color-warning {
color: var(--pst-color-warning);
}
.pst-color-success {
color: var(--pst-color-success);
}
.pst-color-attention {
color: var(--pst-color-attention);
}
.pst-color-danger {
color: var(--pst-color-danger);
}
.pst-color-text-base {
color: var(--pst-color-text-base);
}
.pst-color-text-muted {
color: var(--pst-color-text-muted);
}
.pst-color-heading-color {
color: var(--pst-color-heading-color);
}
.pst-color-shadow {
color: var(--pst-color-shadow);
}
.pst-color-border {
color: var(--pst-color-border);
}
.pst-color-border-muted {
color: var(--pst-color-border-muted);
}
.pst-color-inline-code {
color: var(--pst-color-inline-code);
}
.pst-color-inline-code-links {
color: var(--pst-color-inline-code-links);
}
.pst-color-target {
color: var(--pst-color-target);
}
.pst-color-background {
color: var(--pst-color-background);
}
.pst-color-on-background {
color: var(--pst-color-on-background);
}
.pst-color-surface {
color: var(--pst-color-surface);
}
.pst-color-on-surface {
color: var(--pst-color-on-surface);
}
/* Adjust the height of the navbar */
.bd-header .bd-header__inner{
height: 52px; /* Adjust this value as needed */
}
.navbar-nav > li > a {
line-height: 52px; /* Vertically center the navbar links */
}
/* Make sure the navbar items align properly */
.navbar-nav {
display: flex;
}
.bd-header .navbar-header-items__start{
margin-left: 0rem
}
.bd-header button.primary-toggle {
margin-right: 0rem;
}
.bd-header ul.navbar-nav .dropdown .dropdown-menu {
overflow-y: auto; /* Enable vertical scrolling */
max-height: 80vh
}
.bd-sidebar-primary {
width: 22%; /* Adjust this value to your preference */
line-height: 1.4;
}
.bd-sidebar-secondary {
line-height: 1.4;
}
.toc-entry a.nav-link, .toc-entry a>code {
background-color: transparent;
border-color: transparent;
}
.bd-sidebar-primary code{
background-color: transparent;
border-color: transparent;
}
.toctree-wrapper li[class^=toctree-l1]>a {
font-size: 1.3em
}
.toctree-wrapper li[class^=toctree-l1] {
margin-bottom: 2em;
}
.toctree-wrapper li[class^=toctree-l]>ul {
margin-top: 0.5em;
font-size: 0.9em;
}
*, :after, :before {
font-style: normal;
}
div.deprecated {
margin-top: 0.5em;
margin-bottom: 2em;
}
.admonition-beta.admonition, div.admonition-beta.admonition {
border-color: var(--pst-color-warning);
margin-top:0.5em;
margin-bottom: 2em;
}
.admonition-beta>.admonition-title, div.admonition-beta>.admonition-title {
background-color: var(--pst-color-warning-bg);
}
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd {
margin-left: 1rem;
}
p {
font-size: 0.9rem;
margin-bottom: 0.5rem;
} |
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/templates/enum.rst | {{ objname }}
{{ underline }}==============
.. currentmodule:: {{ module }}
.. autoclass:: {{ objname }}
{% block attributes %}
{% for item in attributes %}
.. autoattribute:: {{ item }}
{% endfor %}
{% endblock %}
.. example_links:: {{ objname }}
|
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/templates/typeddict.rst | {{ objname }}
{{ underline }}==============
.. currentmodule:: {{ module }}
.. autoclass:: {{ objname }}
{% block attributes %}
{% for item in attributes %}
.. autoattribute:: {{ item }}
{% endfor %}
{% endblock %}
.. example_links:: {{ objname }}
|
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/templates/langsmith_docs.html | <!-- This will display a link to :LangSmith docs -->
<head>
<style>
.text-link {
text-decoration: none; /* Remove underline */
color: inherit; /* Inherit color from parent element */
}
</style>
</head>
<body>
<a href="https://docs.smith.langchain.com//" class='text-link'>Docs</a>
</body>
|
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/templates/redirects.html | {% set redirect = pathto(redirects[pagename]) %}
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="Refresh" content="0; url={{ redirect }}" />
<meta name="robots" content="follow, index">
<meta name="Description" content="Python SDK reference for LangSmith.">
<link rel="canonical" href="{{ redirect }}" />
<title>LangSmith Python SDK Reference Documentation.</title>
</head>
<body>
<p>You will be automatically redirected to the <a href="{{ redirect }}">new location of this page</a>.</p>
</body>
</html>
|
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/templates/pydantic.rst | {{ objname }}
{{ underline }}==============
.. currentmodule:: {{ module }}
.. autopydantic_model:: {{ objname }}
:model-show-json: False
:model-show-config-summary: False
:model-show-validator-members: False
:model-show-field-summary: False
:field-signature-prefix: param
:members:
:undoc-members:
:inherited-members:
:member-order: groupwise
:show-inheritance: True
:special-members: __call__
:exclude-members: construct, copy, dict, from_orm, parse_file, parse_obj, parse_raw, schema, schema_json, update_forward_refs, validate, json, is_lc_serializable, to_json, to_json_not_implemented, lc_secrets, lc_attributes, lc_id, get_lc_namespace, model_construct, model_copy, model_dump, model_dump_json, model_parametrized_name, model_post_init, model_rebuild, model_validate, model_validate_json, model_validate_strings, model_extra, model_fields_set, model_json_schema
{% block attributes %}
{% endblock %}
.. example_links:: {{ objname }}
|
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/templates/function.rst | {{ objname }}
{{ underline }}==============
.. currentmodule:: {{ module }}
.. autofunction:: {{ objname }}
.. example_links:: {{ objname }} |
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/templates/COPYRIGHT.txt | Copyright (c) 2007-2023 The scikit-learn developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/templates/sidebar-nav-bs.html | {# Displays the TOC-subtree for pages nested under the currently active top-level TOCtree element. #}
<nav class="bd-docs-nav bd-links"
aria-label="{{ _('Modules') }}">
<p class="bd-links__title" role="heading" aria-level="1">{{ _("Modules") }}</p>
<div class="bd-toc-item navbar-nav">
{{- generate_toctree_html(
"sidebar",
show_nav_level=theme_show_nav_level | int,
maxdepth=theme_navigation_depth | int,
collapse=theme_collapse_navigation | tobool,
includehidden=theme_sidebar_includehidden | tobool,
titles_only=True
)
-}}
</div>
</nav> |
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/templates/class.rst | {{ objname }}
{{ underline }}==============
.. currentmodule:: {{ module }}
.. autoclass:: {{ objname }}
{% block attributes %}
{% if attributes %}
.. rubric:: {{ _('Attributes') }}
.. autosummary::
{% for item in attributes %}
~{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}
{% block methods %}
{% if methods %}
.. rubric:: {{ _('Methods') }}
.. autosummary::
{% for item in methods %}
~{{ item }}
{%- endfor %}
{% for item in methods %}
.. automethod:: {{ item }}
{%- endfor %}
{% endif %}
{% endblock %}
.. example_links:: {{ objname }}
|
0 | lc_public_repos/langsmith-sdk/python/docs | lc_public_repos/langsmith-sdk/python/docs/scripts/custom_formatter.py | import sys
from glob import glob
from pathlib import Path
from bs4 import BeautifulSoup
# Docs root directory (one level above scripts/).
# NOTE(review): CUR_DIR is unused in the visible code of this script — confirm before removing.
CUR_DIR = Path(__file__).parents[1]
def process_toc_h3_elements(html_content: str) -> str:
    """Shorten ``Class.method()`` TOC headers to just ``method()``.

    Parses the rendered HTML, finds every ``<li class="toc-h3">`` entry,
    and rewrites its visible text to keep only the part after the final
    dot, dropping the class qualifier.

    Args:
        html_content: A full HTML document as a string.

    Returns:
        The modified HTML document as a string.
    """
    soup = BeautifulSoup(html_content, "html.parser")

    for element in soup.find_all("li", class_="toc-h3"):
        # Sphinx typically renders qualified names as
        # <a><code><span>Class.method</span></code></a>; drill down to the
        # innermost <span> when the whole chain is present so only its text
        # is rewritten. Guard each link: entries without an <a>, <code>, or
        # <span> previously raised AttributeError here.
        if element.a and element.a.code and element.a.code.span:
            element = element.a.code.span
        # Keep only the text after the last "." (no-op if there is no dot).
        element.string = element.get_text().split(".")[-1]

    return str(soup)
if __name__ == "__main__":
    # Usage: custom_formatter.py <build_dir>
    # Rewrites every generated HTML file under <build_dir> in place.
    build_dir = sys.argv[1]  # renamed from ``dir`` to avoid shadowing the builtin
    # ``str()`` around the f-string was redundant; an f-string is already a str.
    for fn in glob(f"{build_dir.rstrip('/')}/**/*.html", recursive=True):
        with open(fn) as f:
            html = f.read()
        processed_html = process_toc_h3_elements(html)
        with open(fn, "w") as f:
            f.write(processed_html)
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/cassettes/6127babf-9e14-49e2-934a-e966feb37220.yaml | interactions:
- request:
body: '{"input": [[9642], [9642]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SZyRKqzJaF5/UUJ/4pFSEikJs7oxPpzESwwRqBIgJ2NJlA1stX6LlRFTUxQkka
Ye+1vrX57//48+efd14Xl+Gff/3551H1wz//+f3tmg3ZP//681//8efPnz///fv8fyuLZ15cr9Wr
/C3/baxe12L6519/xP/95f8W/evPP0oPCr3k97QbBzlxYSnXOts24aqbBvncQOQpPvMf7dTNl5Lb
cN4uW2ZfzUcyGlIpa8w7GHjyLms0WlwXYM+YhmczxLV4Dm8ygjmeCV7EPJjU22xqUYxtYhy2t2Aq
l5kASXq4MWdtapy+DvqonYgzMGd1fyZcV9RU/Qjpm5hS6gXjUayP8Nj4N6o81GM9VceLqB6uiwUV
vPMT9fvkUyFbPT7wbE41Hxv90cBCxyEeA2GH5qOj+GDHVo+lVON8Wu+9GAWLa8/W5bazuMTUPciP
Z07IfGgttstiQR3ZsmR6/+ytPi3UErnibOHV3baTcTDKCmYF90S/KS8+jhHpkWCAT65k4MF4csQY
hOVDo8rxKFlU6IwL+G9tS9VDZXKxeNYNeDZ7UVhvTM5eY/KGcXi+qCZvtt10Ll4Y0LnQmIEfn/zd
rd0Lip0cYy0b1gn31PqJCipZVNmgszWnh9aGK416EqYODUYcjIXsbUygaFYf9Wg2OEKP8zkl1q5s
Lep2YghGxVJmaO8yGadwqpB6fA946vL6ez/mPfAbjihfn/b5uBHOAmwf7ocFZHFNOompR2AqQ8Ss
rkkwmU1IYXhlI9mu7WPN7+eXqm2NS0anzemUj4swDVElQYgPx+MlnwLvXqDHxr2Ra7t78PF1CGR4
XBdPOg7BPZmK/c4FeZvcmblYoYCag0Ph+zzwaldafOXIRxfAN/fEeaVRPe7qRgBzu/OJpyz7bvJV
2sNnGYfMXhhmJ639OUWdaHlka+J7MGaeQcENDE67o91xengMJZQfc8a7fGXWg3DXelWKeIGV+X3v
eLl0bOgKR6dKUuo1X57ARloHD2JVs5WPnjrrqHDFG1nzqcvfa0MR/p4PB92ARsfXKhAC50mwtZeT
uVkdbGQqUYIFV5y6QRZkF143SyYhUDdYJZ9AhlPIUyxXLOK83NxMtB7cJcFXuQvmt0l92GnRxNyu
Fet5s/RKKLbZQMxL0AejOSQNetxpTLZd9smnWFiJ8MCmycySZcmYqlEPURW8SRgqVjA9Wu4vdBkb
jJS3qub661OhWZtVEmLTqZe7uk6RcLcD4lBjyKdVkFO01KOaednQ1ZRtPF3Rm+uR6X6kBN0zvWYo
3XsrRuaDH0yLUc7UOImOeO8v3WB6iO4FXsYBM3d5t/JxkHMXsjz28GxYgTUK2qEA+LgH/DjMOJia
1bGFOSEuwVqTJb0hlSpI69QlacUiRNteixGf5ZKkVHbq8duvsOkAY3jeWzTslhCh8zJ6MDKcFzlj
Z6eB8DQ/ifG8pcl4y3RbO7Odjpfhqs1Zo59cUMzzhnmzFSY8k9o9+tYXzZPZrlfCfW5U4e4ETBe1
ez1v2ztFLnlfmX/dW8FS0HIVHlftSbxz87a4rsyZZgsyZ9vTLUnGDoc6ks5Ixb9+b+U+Ef7q61Yy
b8GbjFkI+ZqXxLmZVTCWwkeCZFemBJ/Eupvf5tMHUitPFmiHqhvAN0Lo9orLCN4J+fytd3U71gt6
QHXQcXhkFayM/ZLs171dzwcjmGEnugeszFafzDU9V2hHdMac+jkmrYhVB2WCkNJ8O0hJv+kNG85X
c4MVC3RL+tRTD+ftqmWB0Al8TMqtDspWlJlDn2bNbpv3DC20LdUehzzgyHYE9FsfhkptjdWt6yGD
zqJl/wwDDpc1hpaXGCO2EXLWUAdDbwwicwNNSDg740ZdePs3O6P6kY+SuevhbJ4VPGvGAzF7niuU
R4dkkJXyktDMFUVoeYWJf2Fb/vC2Xqru98f373g5/ezeEhqbnGHxVIUBa/LlETI181mg1GYuCXfR
R/4UbRi+Jwc0mdL1iYzT3sS8G17WEH+iEZT73cWjtzl0f/utoarBtvejGXzryQFybwdC9seQj2fV
EWC7OlfM3IY6ooKWHdHdSCLciq2F+myoi189/rbz1d3WJW2im4pCErTWJO3vJrLqqSXOdf9JRvWj
q/C2vYIZ0drs+szzehiE7Yr4evFJ5t1y7tVPLUrktFk/OXvcqx4+zoISk7E+6S4fzVa7wtbJ2n/U
1jwPMkbLc7rA9+dNzimaFk+0iwqHEKVaWn3x7Br43g9yCKhTT6ez9YTyph7IhvUKYguN6KqXFvHP
r5L+mXYS3D/1QNblXCacBJUJUjQVzM3vBeq3fHbhiSvt9/z4fNK6UR03L4uQQRDr4T0WM8qmbUzF
rlXR3HhUR/l6KvFw2Ib5nE56pAlwK4kr+7q1cqNLgaKm0Oivfjgx9z26SKsZr3koWXNXnkf0vR68
grNgUfVYu6AJpCC21A7BMBjXEDF2fBBvc2r5KGjXAr3v+pME02RaHB1vItwb7YbnoN/Ug5EbPno/
niJW3cjruG+9MKxcnBJzSR41jUtPBdkpMixw75pQvljtwdn0b7LxqFXPTKR70F7piZhiO+bzHMcm
lPp8IF65tdEgtjsR2BN7JLw+NwHf0yn+6R8d004Khk01Y6RbzoUQvCvyQUURhmRWYqbflA1nEpuP
yOmyFcPm2AbzpOY9zJdMJ2Qa92g+rKTs188sfLU06XleXVAbOzKxk7HjfaMPDSi9oLBwhTH68QHo
YSiTjfsorbGQoQD7HnoEm8GeD5+aR+jwfmLyvd8Be9zbHm3SbU3cN0/zaby5EuxE/8DcxaPtZu7Z
FIhZ9eTHl5xLM0YdRjXWnG1mdZfP0lYrX7NYseSGxQ9JYkOz33HmG+apo5JihOpPj1VOUzRPBwW0
22B9iHe6netR/bgybI0iozIZ191cBGYMz9Mno0oTary/lgKGSyS3WIBc4rR52BjkOnGIrYddx7Zr
eMPYnBmmT07Rz39VL3j5bL1etwF/vVIbamHfkkDtH3w+iu8QBHtrEVOgq3r8uGUKMmkk4opAA34t
2YwkTziSkF1oPayNMYUqW9vMgUubzNK11tXLZZ9jkcohH4t93KMu2qtkK/cm53qxD6EwBGD6irzr
MZPk+fedLngx1IwPvazWF7Sn4qsz+Gi6kwS7wh0x+izuqC/PwlF9vfYzc92KIy73WQZaWYoMP4le
T4NfVdqshD3me7TnfAuTCffCPZFwxe1g2dmaD/vbeP7ygdmtHP9lo7dz4SR0omc+DomSwV3ufRLI
5xPnkhlEsDxnC7I9qkLQc28jgpJmHTHv8pXPnc2fGnWqiapLPAc0VS0JhMlgRA8Pr5qvnoOJdoU/
YtU6rOsR2dcU7NjoyWaa3smkj4rwq2e2/vp1v977InTWdcZau3ugqRSOJXoZJ0y8m8QCOjqLGK2j
HaXP6SJ/+/0QIgvsGq/Gt825Z8kCJL0y4BqmktNsKI9IZ2iDH9HHr+mvX1dlIf3tD16elTcKbQth
fnFvfPjx0M9fnFc61ryM+iNqr9WHLtCyDvgqwCLKT1hiBqE94rVeyoCk3YeZ7jJIZlj1gtrr8u7L
43ousm5QEZO8gemfVLW++0uAl5+G/fiWizyO0EK4bPHk4ATNZD+qf/mi8LSlxbJY/7d+W52749P1
xI4/3qAqXJqAc0nFylrf+1T8pBxNm0oNUfPWC2KeItda8aGRtZE34Zc/zGDcnOUQos17x46Wklnf
PADw2C99LBuHN+rh8RZAVY+AkVKbyarJl3vo5ySgg4kNa7yc1BbIu4qIfoi6nJ6LoYUwQhZVVqMQ
TBe+rmBD1g7bFIsD4qv9xUXf/mFYoDYa5Tov4VsvxA7Ox4QvyE2AwCAd0dn1nc8r0xbBNdAJPxfJ
sx5iYSGBuN9lWPC0QzA1NHFg5YYpsWr05IN1umNUt68FRp+sr6eT9jhCKL5iElQTQaN/dCrQ3PhI
MPeuOUeTZKrN5CNmao99MH/98S8fWrvSD/jhUQuglZVI1nm8sQZPHQtN0dfzXx4cpgMqwTiYAx1f
3duaIAlHkEcfU27jCPFSyDP111/bUa26iet+D7UyX7Cc3wvOjgT5gKl0wZXomBYfLnIMydiemRm9
go4XBs/QrtFkOpsjoNYu6xB9vMRi68NW5n0er2MIosxljhsxxHns9GoYKRadBXqquVmvJPj2A7Fw
LAefJ9kLP15gtl3frae8/ETo+38pLB5NPvPFYg+5f6Bs0w06n8/W5gJpMa+JqyV1x6O6jNE+NhLm
WIpqzdUxiUG51y5dnKU1HzFxWoRUOSH+tx7nR6s8IdgLRzzdUfv1NxVDmIQp8apbx6cyUjB88y7z
5rfRiZ9Fbv7yA+W/fkXbq4hKXCyY/vPHgFr4l/eJq35uvP/lZ/1s63T88vrIukGGw1VbMKO2t/XY
xMIFfvpsvN454tL1foHb+5qzwF/p6Ju3XNBhJriThSEYyzpoAaSlzjZOOdXdQhlFYO14YrfrsuCT
N9UuzPZ6Q9xT9UBcxPJ3XhD25Ku33XJ7vwI0ZNaJw9gz+F0PWr2OZywOvmiN70BqYJs9pS+PVgn1
ps8TGXIPxDiVQ8BELJewslofi3rY1Uy1I6oVwvQiRjk/u8/B2B5/PE5yPdcsZu6ooPD29cTPWAm/
eV+pQHaHhGHpRHIOidfCl2+Z+ejWQZ+Uax2qQykR86rYSDIby//55c8/0Vz2tgNeeonxEtVdPS3G
MQViiwZxzaLPmRyJIVwfbkTn6sotrtRrG1T94zHHHKWOohulQFuik29+7ebLaZjhfVI9+prVjo/c
IyJqZedC1plX13PmgqQuT5sWS9FTyKd08iQl0z+IWHsk8vlJkky5XlZHZtn8GbBd34l/rz8oFqQb
9485g3O9IiS8ydSiU9i0igrv3XZU10XQO1cnhfOu+e6PI97JfZaqCTg+Xubxy+LbdFsqy9O6xbfh
fMv/8tllThrcHjsnnw/+Zg+pJjssGd+dNRweL+eXJ/DYOTEaBacS4eAVWyxc5cAassU8gp3jhrgD
syzu39YZbE6pzr58FXA9tI5wq5UtnuEydl/+NZFnFhQL4trNZzNbSmiK6ZaR0aqD2TsGmfrZrnbE
1cN1MojQUvVNHoTQRZxYUxODim4QOuQS7QfEhssYI08PzsTg066bfvn+L5/rI6/Zz99//R2+nNL6
8TcE0iFiDqe3gH7qiYLeHRW2XdtSR4fHMoMFO4bM+OaH/rK+z9Cj/EW2n3785oWgQbtzUxG7a1U+
OcazBdn21swvrk43L9pFr3bRUf3qfcPH4WlGYHRLEZfLO+Hcn44RKLcIMzMypfzTfwoVfedXVNvm
YfDtl6fq77MFC2b10c0d1lVoUvlKzMg8JmN3Oh9BlW2Lff0i4YfL8Q3nekmwHJs94vtLmWq/fprm
N7WGztZc5bc+eKlJMi9SKqFimw4kpLKeSxZ4Kazjl8UCp+wSxpxlD73iUmbiQ5P3umUXWmrIAgkX
OrHmX72/aHggTrw/WbOIVRuMzxNR5JRBPgvOFdAyvl/wyl9tginLd6b2ux+2XRvBcptmKaSa6rAw
Sw/B6C7Dt+Lp3vk778PB+6iNrVYvQ8z8RXxO2KVUKjXWJPebj0prVNF5RmyrEPLN66jzprcOiiIa
xDJ04a++qriNMLmZ5SPoH04sgAVOzZz3fZ8zU8oK1WuHgLl6+EimT40idDj5MjE20NfzllilcjJS
i+7Dg5Hw73wEYvrp/p0/NtXYAk/GGyHPUEyGo/iZ4fVpN1T0umvH9HFw0I8vouAMNSdBa6IrjXtC
zHId8CmsHTiuMxWfSH7IeSkkKfryM7Hxy02+PNbCt/6pKkGLmBtdLmiOBIKF+gFo7G2hB1MCAWt3
NcrF/rR5wrMi228e8TuuvtULKgg12fra0IQXgUvRV99YcnEXaHwVZwz3SXwRw+GfYPrOd5Cdhw1J
u/Ut/85XY4jrrcocsfUDNBixrUm4avDi/naCeZumImih2DBvVN98FNdKC6rHHxTyu1xTc6dGQJ1y
YmFv2MFy4dwqdBjcC1nX9ivpB+NdgRzob+KtbUAD5lREc+Z0xF/yV843588IX71i+JXaweiwU/x3
3vU9nrV8F1sZbYzsQpWXlQWTfkhmUISM4La5Y2tykkWJeq0HjHAc1Oz1Sp3fPJKisJn5XL17X/XN
xQfzakuDHpLIQVNI33RqpkXNz4diRJfisac/PhsX91UM86G6Yu1xQMH0LrYq0izq0tkMac1XgSOh
K9JOLEBLLxnLPgJo6iKi6OtX/WdXC5DOlwMWVuWMGumJCng2VcVItvBynrkoRtMpytnV17/z4mel
K96qSdh+YVT19OMHLux3xE2dfcLLzcmEV4d0vCy3QdBv215CCjgqs0J9Z4mH5DbKc4k4/fqBRY0h
uvzlJX/JNzkVtHivbj6twowdflojvt8q2N/mM9tmwzofHX9ZoYMUK8QhhdHRGJUOxOPaJMYRnfh4
uDwctSC9yRzmR4g/yFmEZVxfiOfgV94Wz/KiLfNOxmos36wp81xX/fIe0xsxQOO1FELI5XdIsnsC
1rzaX3uIC7qhstet8mEjRClcHZN89TupV5tNpMKw3OQsYJsimbcwP0EOzDdbL1QjUIF+Mvi+/yFB
OrzrUZ3DAu5StiT+w7Hyv/NaNRVMuvz679iVexUiplLmCaEZiOXmUYA39iNxzPCd02C1xqBVksdI
1w85bygO4XOLX3gpHs5omr3UBFxnHfOp8bKaNbub6Jsv2enxjuvZWNz3f+cxG7RkHWPdB8Nznd2J
rocTmmu9LdQ+8XziOksDSd4tpeozmlYEM7/JOVF2tnZ4N5iFnpNzziLL1NZRQsl3Pt193ydlcAqn
9C8P0nirmFD5C4t5xrpGbCqQAJE+U+ZIacVH75jGKszRTHQZspoPcthDSl5vLAm+ZVHniSX0PT+z
z0Swvtd3QRu/lInubZZdv2IrE/bwWuM6bBb56Dxz+m8/tMt3Pgup3CCBjku2O+vPZCZB94Rv3mX4
JFr1VNmBgLBKZroA7cbZpZwq2Mm0ZmvxcOZffX1CJz9OjKRDlc/9duWrYqzfmb9jqO43vWf/9b9T
qPuIgy7PEA2vM00uw8Gii0P0Bn9Tr4i3zefvPP2tImRdzyyUWr1bjd3tAhNdV8xu76t8ZCIPoW4f
C2KHh3vCJDOIwYguNwxetwvmT9ZnMIz1lgTxxwjG2JYb+OrXl6+Hbt7LrgvhMPrEj/cj7+TMK+Hy
lDHxU21bjy8T6fCd91DNmVn+5YM3wMc/MMNNqmS2PmMFoPU1M/fnjs9xaaia9OQmsY6foe6bizXK
0DQblj40E/Wr4BqpFf3EZHuYsq4T70aqlvz508Mm6L1bIqqWwjFG1fZe00JWjpAsw9ffvDNLiocB
vwQFv8n4qOddP2RwUbwNM/R7FUw//3LC6H8AAAD//+zcW4+iMBQH8Pf5FBNf2QkISsu8uaCOXAQZ
Rdlks/ECXhCqWAo2me++KZrNPu/j5rySEJIT0vb8zy9Vq57od8T8wkFG/zLz+6nKrEaJJxjf75Ed
fG/nX1LpZbhXVVpFVTO1yjyOYux+n5a+rJdxWTtktX7OH1HXsh77n1hvq1CsNzex3iBZSqaBp+CC
M67ePaPwatmXD9YMNWH4eTSUuEtYEKX1vDnH10+kj6YrZuFmjwjfXBM8iYjpL8X8ppKUkYnP+WzP
PPmczemU1xle7HbDCkllgvjYKcZ6m29kClbmdbg+xPhMHYuNmnS4qS9hKeEfA3vAhpOjbtViPzBE
/8b8Wd4gWkTOApFxwtt8mNMbYnWbX/iNp9XPvDrWzBPzqPNxYrG3eean/mevxxnpr7y2/6qknPvo
xgy1xsoyZxX6uPi8tsnFwTWRjfa8u2m2Vxvj7dSWmMmdlUs0Jpv6ojfQ2Dg6Xtz6rk0IEv1Z4E6p
umlUM54YxT4NqpWjFS5P2HGL2vPgUsWhpYn9rB/K+TgYy5qCyvCUHFHAolX7v5Y8G6QTvQiGi8rI
k6S84So74rZe9yApeH1YEgfbQ1oEH7PVYN4tfWfbb/vB4dpY8iaU7RyZqiQxlyect/NBpEbRjQ2L
sVc2on4YzZYHZl9Dz9Vce5HjqTQf+T3XSDZ17B2u+OqN5mJ+vkNUivUJnqeBWenHbXoS9a3w2cxD
9kHU7NSU9+yZHzoiL32832lVwNe3fxAFXRAFIApAFIAoAFEAogBEAYgCEAUgCkAUgCgAUQCiAEQB
iAIQBSAKQBSAKABRAKIARAGIAhAFIApAFIAoAFEAogBEwX8uCl5eX3+2dxbkZJecBQygSUPf/lCB
t/Vu/aYo6uNig+q23ied96dA6FxKkl/oL0qypLh13l/VBzXoUELX578ev4gPfb38BgAA//8DAIpg
UmUwQQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c06e916429-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=6t3x1PWP1isudSReizi41egXSRGS2KStJWhVmtVyJDk-1712872371-1.0.1.1-q9QdIvltEU8IPe70V1Q3wm.xXQ3KCRwrzhG0QP8IkFE0Ao13hyOIkIpLfhrMJvdft0.Y.B75sSa51amQt_mm9g;
path=/; expires=Thu, 11-Apr-24 22:22:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=6m97vMafHqngwFlTL4inl.QL0il9RySGGdjNivGJGN8-1712872371351-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-ada-002
openai-organization:
- langchain
openai-processing-ms:
- '26'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9998'
x-ratelimit-remaining-tokens:
- '9999998'
x-ratelimit-reset-requests:
- 7ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_f45dabf40222c4a01a3481700d0d9ba4
status:
code: 200
message: OK
- request:
body: '{"input": [[9642], [9642]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SZyRKqzJaF5/UUJ/4pFSEikJs7oxPpzESwwRqBIgJ2NJlA1stX6LlRFTUxQkkF
Ye+1vrXzv//jz59/3nldXIZ//vXnn0fVD//85/ezazZk//zrz3/9x58/f/789+/1/60snnlxvVav
8rf8d7B6XYvpn3/9Ef/3k/9b9K8//yg9KPSS39NuHOTEhaVc62zbhKtuGuRzA5Gn+Mx/tFM3X0pu
w3m7bJl9NR/JaEilrDHvYODJu6zRaHFdgD1jGp7NENfiObzJCOZ4JngR82BSb7OpRTG2iXHY3oKp
XGYCJOnhxpy1qXH6OuijdiLOwJzV/ZlwXVFT9SOkb2JKqReMR7E+wmPj36jyUI/1VB0vonq4LhZU
8M5P1O+TT4Vs9fjAsznVfGz0RwMLHYd4DIQdmo+O4oMdWz2WUo3zab33YhQsrj1bl9vO4hJT9yA/
njkh86G12C6LBXVky5Lp/bO3+rRQS+SKs4VXd9tOxsEoK5gV3BP9prz4OEakR4IBPrmSgQfjyRFj
EJYPjSrHo2RRoTMu4L+1LVUPlcnF4lk34NnsRWG9MTl7jckbxuH5opq82XbTuXhhQOdCYwZ+fPJ3
t3YvKHZyjLVsWCfcU+snKqhkUWWDztacHlobrjTqSZg6NBhxMBaytzGBoll91KPZ4Ag9zueUWLuy
tajbiSEYFUuZob3LZJzCqULq8T3gqcvr7/2Y98BvOKJ8fdrn40Y4C7B9uB8WkMU16SSmHoGpDBGz
uibBZDYhheGVjWS7to81v59fqrY1LhmdNqdTPi7CNESVBCE+HI+XfAq8e4EeG/dGru3uwcfXIZDh
cV086TgE92Qq9jsX5G1yZ+ZihQJqDg6F7/PAq11p8ZUjH10A39wT55VG9birGwHM7c4nnrLsu8lX
aQ+fZRwye2GYnbT25xR1ouWRrYnvwZh5BgU3MDjtjnbH6eExlFB+zBnv8pVZD8Jd61Up4gVW5ve9
4+XSsaErHJ0qSanXfHkCG2kdPIhVzVY+euqso8IVb2TNpy5/rw1F+Hs+HHQDGh1fq0AInCfB1l5O
5mZ1sJGpRAkWXHHqBlmQXXjdLJmEQN1glXwCGU4hT7FcsYjzcnMz0XpwlwRf5S6Y3yb1YadFE3O7
VqznzdIrodhmAzEvQR+M5pA06HGnMdl22SefYmElwgObJjNLliVjqkY9RFXwJmGoWMH0aLm/0GVs
MFLeqprrr0+FZm1WSYhNp17u6jpFwt0OiEONIZ9WQU7RUo9q5mVDV1O28XRFb65HpvuREnTP9Jqh
dO+tGJkPfjAtRjlT4yQ64r2/dIPpIboXeBkHzNzl3crHQc5dyPLYw7NhBdYoaIcC4OMe8OMw42Bq
VscW5oS4BGtNlvSGVKogrVOXpBWLEG17LUZ8lkuSUtmpx2+/wqYDjOF5b9GwW0KEzsvowchwXuSM
nZ0GwtP8JMbzlibjLdNt7cx2Ol6GqzZnjX5yQTHPG+bNVpjwTGr36FtfNE9mu14J97lRhbsTMF3U
7vW8be8UueR9Zf51bwVLQctVeFy1J/HOzdviujJnmi3InG1PtyQZOxzqSDojFf/6vZX7RPirr1vJ
vAVvMmYh5GteEudmVsFYCh8Jkl2ZEnwS625+m08fSK08WaAdqm4A3wih2ysuI3gn5PO33tXtWC/o
AdVBx+GRVbAy9kuyX/d2PR+MYIad6B6wMlt9Mtf0XKEd0Rlz6ueYtCJWHZQJQkrz7SAl/aY3bDhf
zQ1WLNAt6VNPPZy3q5YFQifwMSm3OihbUWYOfZo1u23eM7TQtlR7HPKAI9sR0G99GCq1NVa3rocM
OouW/TMMOFzWGFpeYozYRshZQx0MvTGIzA00IeHsjBt14e3f7IzqRz5K5q6Hs3lW8KwZD8Tsea5Q
Hh2SQVbKS0IzVxSh5RUm/oVt+cPbeqm63x/fv9/L6Wf3ltDY5AyLpyoMWJMvj5Cpmc8CpTZzSbiL
PvKnaMPwPTmgyZSuT2Sc9ibm3fCyhvgTjaDc7y4evc2h+9tvDVUNtr0fzeBbTw6QezsQsj+GfDyr
jgDb1bli5jbUERW07IjuRhLhVmwt1GdDXfzq8Xecr+62LmkT3VQUkqC1Jml/N5FVTy1xrvtPMqof
XYW37RXMiNZm12ee18MgbFfE14tPMu+Wc69+alEip836ydnjXvXwcRaUmIz1SXf5aLbaFbZO1v6j
tuZ5kDFantMFvj9vck7RtHiiXVQ4hCjV0uqLZ9fA936QQ0CdejqdrSeUN/VANqxXEFtoRFe9tIh/
fpX0z7ST4P6pB7Iu5zLhJKhMkKKpYG5+L1C/5bMLT1xpv+fH55PWjeq4eVmEDIJYD++xmFE2bWMq
dq2K5sajOsrXU4mHwzbM53TSI02AW0lc2detlRtdChQ1hUZ/9cOJue/RRVrNeM1DyZq78jyi7/Xg
FZwFi6rH2gVNIAWxpXYIhsG4hoix44N4m1PLR0G7Fuh9158kmCbT4uh4E+HeaDc8B/2mHozc8NH7
8RSx6kZex33rhWHl4pSYS/KoaVx6KshOkWGBe9eE8sVqD86mf5ONR616ZiLdg/ZKT8QU2zGf5zg2
odTnA/HKrY0Gsd2JwJ7YI+H1uQn4nk7xT//omHZSMGyqGSPdci6E4F2RDyqKMCSzEjP9pmw4k9h8
RE6XrRg2xzaYJzXvYb5kOiHTuEfzYSVlv35m4aulSc/z6oLa2JGJnYwd7xt9aEDpBYWFK4zRjw9A
D0OZbNxHaY2FDAXY99Aj2Az2fPjUPEKH9xOT7/0O2OPe9miTbmvivnmaT+PNlWAn+gfmLh5tN3PP
pkDMqic/vuRcmjHqMKqx5mwzq7t8lrZa+ZrFiiU3LH5IEhua/Y4z3zBPHZUUI1R/eqxymqJ5Oiig
3QbrQ7zT7VyP6seVYWsUGZXJuO7mIjBjeJ4+GVWaUOP9tRQwXCK5xQLkEqfNw8Yg14lDbD3sOrZd
wxvG5swwfXKKfv6resHLZ+v1ug3465XaUAv7lgRq/+DzUXyHINhbi5gCXdXjxy1TkEkjEVcEGvBr
yWYkecKRhOxC62FtjClU2dpmDlzaZJauta5eLvsci1QO+Vjs4x510V4lW7k3OdeLfQiFIQDTV+Rd
j5kkz7/3dMGLoWZ86GW1vqA9FV+dwUfTnSTYFe6I0WdxR315Fo7q67WfmetWHHG5zzLQylJk+En0
ehr8qtJmJewx36M951uYTLgX7omEK24Hy87WfNjfxvOXD8xu5fgvG72dCyehEz3zcUiUDO5y75NA
Pp84l8wgguU5W5DtURWCnnsbEZQ064h5l6987mz+1KhTTVRd4jmgqWpJIEwGI3p4eNV89RxMtCv8
EavWYV2PyL6mYMdGTzbT9E4mfVSEXz2z9dev+/XeF6GzrjPW2t0DTaVwLNHLOGHi3SQW0NFZxGgd
7Sh9Thf52++HEFlg13g1vm3OPUsWIOmVAdcwlZxmQ3lEOkMb/Ig+fk1//boqC+lvf/DyrLxRaFsI
84t748OPh37+4rzSseZl1B9Re60+dIGWdcBXARZRfsISMwjtEa/1UgYk7T7MdJdBMsOqF9Rel3df
HtdzkXWDipjkDUz/pKr1/b4EePlp2I9vucjjCC2EyxZPDk7QTPaj+pcvCk9bWiyL9X/rt9W5Oz5d
T+z44w2qwqUJOJdUrKz1vU/FT8rRtKnUEDVvvSDmKXKtFR8aWRt5E375wwzGzVkOIdq8d+xoKZn1
zQMAj/3Sx7JxeKMeHm8BVPUIGCm1mayafLmHfk4COpjYsMbLSW2BvKuI6Ieoy+m5GFoII2RRZTUK
wXTh6wo2ZO2wTbE4IL7aX1z07R+GBWqjUa7zEr71QuzgfEz4gtwECAzSEZ1d3/m8Mm0RXAOd8HOR
POshFhYSiPtdhgVPOwRTQxMHVm6YEqtGTz5YpztGdftaYPTJ+no6aY8jhOIrJkE1ETT6R6cCzY2P
BHPvmnM0SabaTD5ipvbYB/PXH//yobUr/YAfHrUAWlmJZJ3HG2vw1LHQFH09/+XBYTqgEoyDOdDx
1b2tCZJwBHn0MeU2jhAvhTxTf/21HdWqm7ju91Ar8wXL+b3g7EiQD5hKF1yJjmnx4SLHkIztmZnR
K+h4YfAM7RpNprM5Amrtsg7Rx0sstj5sZd7n8TqGIMpc5rgRQ5zHTq+GkWLRWaCnmpv1SoJvPxAL
x3LweZK98OMFZtv13XrKy0+Evv+XwuLR5DNfLPaQ+wfKNt2g8/lsbS6QFvOauFpSdzyqyxjtYyNh
jqWo1lwdkxiUe+3SxVla8xETp0VIlRPif+txfrTKE4K9cMTTHbVff1MxhEmYEq+6dXwqIwXDN+8y
b34bnfhZ5OYvP1D+61e0vYqoxMWC6T9/DKiFf3mfuOrnxvtfftbPtk7HL6+PrBtkOFy1BTNqe1uP
TSxc4KfPxuudIy5d7xe4va85C/yVjr55ywUdZoI7WRiCsayDFkBa6mzjlFPdLZRRBNaOJ3a7Lgs+
eVPtwmyvN8Q9VQ/ERSx/5wVhT7562y239ytAQ2adOIw9g9/1oNXreMbi4IvW+A6kBrbZU/ryaJVQ
b/o8kSH3QIxTOQRMxHIJK6v1saiHXc1UO6JaIUwvYpTzs/scjO3xx+Mk13PNYuaOCgpvX0/8jJXw
m/eVCmR3SBiWTiTnkHgtfPmWmY9uHfRJudahOpQSMa+KjSSzsfyfX/78E81lbzvgpZcYL1Hd1dNi
HFMgtmgQ1yz6nMmRGML14UZ0rq7c4kq9tkHVPx5zzFHqKLpRCrQlOvnm126+nIYZ3ifVo69Z7fjI
PSKiVnYuZJ15dT1nLkjq8rRpsRQ9hXxKJ09SMv2DiLVHIp+fJMmU62V1ZJbNnwHb9Z349/qDYkG6
cf+YMzjXK0LCm0wtOoVNq6jw3m1HdV0EvXN1Ujjvmu/3ccQ7uc9SNQHHx8s8fll8m25LZXlat/g2
nG/5Xz67zEmD22Pn5PPB3+wh1WSHJeO7s4bD4+X88gQeOydGo+BUIhy8YouFqxxYQ7aYR7Bz3BB3
YJbF/ds6g80p1dmXrwKuh9YRbrWyxTNcxu7LvybyzIJiQVy7+WxmSwlNMd0yMlp1MHvHIFM/29WO
uHq4TgYRWqq+yYMQuogTa2piUNENQodcov2A2HAZY+TpwZkYfNp10y/f/+VzfeQ1+/n7r7/Dl1Na
P/6GQDpEzOH0FtBPPVHQu6PCtmtb6ujwWGawYMeQGd/80F/W9xl6lL/I9tOP37wQNGh3bipid63K
J8d4tiDb3pr5xdXp5kW76NUuOqpfvW/4ODzNCIxuKeJyeSec+9MxAuUWYWZGppR/+k+hou/8imrb
PAy+/fJU/X22YMGsPrq5w7oKTSpfiRmZx2TsTucjqLJtsa9fJPxwOb7hXC8JlmOzR3x/KVPt10/T
/KbW0Nmaq/zWBy81SeZFSiVUbNOBhFTWc8kCL4V1/LJY4JRdwpiz7KFXXMpMfGjyXrfsQksNWSDh
QifW/Kv3Fw0PxIn3J2sWsWqD8XkiipwyyGfBuQJaxvcLXvmrTTBl+c7UfvfDtmsjWG7TLIVUUx0W
ZukhGN1l+FY83Tt/5304eB+1sdXqZYiZv4jPCbuUSqXGmuR+81FpjSo6z4htFUK+eR113vTWQVFE
g1iGLvzVVxW3ESY3s3wE/cOJBbDAqZnzvu9zZkpZoXrtEDBXDx/J9KlRhA4nXybGBvp63hKrVE5G
atF9eDAS/p2PQEw/3b/zx6YaW+DJeCPkGYrJcBQ/M7w+7YaKXnftmD4ODvrxRRScoeYkaE10pXFP
iFmuAz6FtQPHdabiE8kPOS+FJEVffiY2frnJl8da+NY/VSVoEXOjywXNkUCwUD8Ajb0t9GBKIGDt
rka52J82T3hWZPvNI37H1bd6QQWhJltfG5rwInAp+uobSy7uAo2v4ozhPokvYjj8E0zf+Q6y87Ah
abe+5d/5agxxvVWZI7Z+gAYjtjUJVw1e3N9OMG/TVAQtFBvmjeqbj+JaaUH1+INCfpdrau7UCKhT
TizsDTtYLpxbhQ6DeyHr2n4l/WC8K5AD/U28tQ1owJyKaM6cjvhL/sr55vwZ4atXDL9SOxgddor/
zru+v2ct38VWRhsju1DlZWXBpB+SGRQhI7ht7tianGRRol7rASMcBzV7vVLnN4+kKGxmPlfv3ld9
c/HBvNrSoIckctAU0jedmmlR8/OhGNGleOzpj8/GxX0Vw3yorlh7HFAwvYutijSLunQ2Q1rzVeBI
6Iq0EwvQ0kvGso8AmrqIKPr6Vf/Z1QKk8+WAhVU5o0Z6ogKeTVUxki28nGcuitF0inJ29fXvvPhZ
6Yq3ahK2XxhVPf34gQv7HXFTZ5/wcnMy4dUhHS/LbRD027aXkAKOyqxQ31niIbmN8lwiTr9+YFFj
iC5/eclf8k1OBS3eq5tPqzBjh5/WiO+3Cva3+cy22bDOR8dfVuggxQpxSGF0NEalA/G4NolxRCc+
Hi4PRy1IbzKH+RHiD3IWYRnXF+I5+JW3xbO8aMu8k7EayzdryjzXVb+8x/RGDNB4LYUQcvkdkuye
gDWv9tce4oJuqOx1q3zYCFEKV8ckX/1O6tVmE6kwLDc5C9imSOYtzE+QA/PN1gvVCFSgnwy++z8k
SId3PapzWMBdypbEfzhW/ndeq6aCSZdf/x27cq9CxFTKPCE0A7HcPArwxn4kjhm+cxqs1hi0SvIY
6foh5w3FIXxu8QsvxcMZTbOXmoDrrGM+NV5Ws2Z3E33zJTs93nE9G4v7/u88ZoOWrGOs+2B4rrM7
0fVwQnOtt4XaJ55PXGdpIMm7pVR9RtOKYOY3OSfKztYO7waz0HNyzllkmdo6Sij5zqe7735SBqdw
Sv/yII23igmVv7CYZ6xrxKYCCRDpM2WOlFZ89I5prMIczUSXIav5IIc9pOT1xpLgWxZ1nlhC3/Mz
+0wE63t9F7TxS5no3mbZ9Su2MmEPrzWuw2aRj84zp//2Q7t857OQyg0S6Lhku7P+TGYSdE/45l2G
T6JVT5UdCAirZKYL0G6cXcqpgp1Ma7YWD2f+1dcndPLjxEg6VPncb1e+Ksb6nfk7hup+03v2X/87
hbqPOOjyDNHwOtPkMhwsujhEb/A39Yp423z+ztPfKkLW9cxCqdW71djdLjDRdcXs9r7KRybyEOr2
sSB2eLgnTDKDGIzocsPgdbtg/mR9BsNYb0kQf4xgjG25ga9+ffl66Oa97LoQDqNP/Hg/8k7OvBIu
TxkTP9W29fgykQ7feQ/VnJnlXz54A3z8AzPcpEpm6zNWAFpfM3N/7vgcl4aqSU9uEuv4Geq+uVij
DE2zYelDM1G/Cq6RWtFPTLaHKes68W6kasmfPz1sgt67JaJqKRxjVG3vNS1k5QjJMnz9zTuzpHgY
8EtQ8JuMj3re9UMGF8XbMEO/V8H08y8njCQqf/POd//CR5ryibByk5g1iakLMM8Hjxi//S+hCxuQ
KV3RQTJvVvdMDykExrbDC7VLu9F/n7J/7z+ipWX99b+v3tLdV2/6r96ghVBsSSjCizMuzaH2CscF
XtytCE27XVxpYrp8M3K4jcn0SNsYqevtiVkwlejN87YA9/A28fG7f0MFcW3C4xmVLFw8mv8BAAD/
/+zazY6bMBAH8HfplUpkIcHmiAghYAiE5bO3QKABFkgI2MTSvnuFmyforZIfwJeRpZn5zy+cTpS0
MLpejRkIYwmoiXpTYflGu4GbkASXWwa/JrTHh6UycnIPRgH+0mwNG1at7MnaD9R1f8PeuVvA1Mco
AoNZUpYP0+kJMGH5hbe4Mnnn1ZmsN9id0LHBmZu/81Pvc7uleNilLtu/ZqGjHnhiVSJwk3R4Bse7
R4k93BEkg6iyeTdfiocNYXGyBaxTlDqDjEVdibaajM24vjvkJVsDWPcz3zlNUr5Iemap/e/Kn1Mk
9w4tcV0ANg8mEgz28trPdoHYmb4pyhswBk1ZAx/HKfuvI221ylJ634hmtSvL8QnntoasXi+/7Cm5
JQOCtjH1/vGcauHH6KFix/ZB46ImdAlEuwO6JAjYoSWl7D4IpDh+YqM33XFZ6wfBOblh+xG4juzY
UQdPQnjwto5a5iRzbw/4cA/hej+/gknIFAuGla/PSl1UzVrfGX7pXYCPg9Q2y/hq3/khWvPSv+9/
MBXw/fMfRMEHFwVcFHBRwEUBFwVcFHBRwEUBFwVcFHBRwEUBFwX/oSj4AwAA///s3LEKgCAYReHd
pxB3IRx9lwghaShT6g9aevcoI1rbz3qX+wIfB1GAKEAUIAoQBYgCRAGiAFGAKEAUIAoQBYgCRAGi
AFGAKPgjCpTW7d0sSLmP0wUDJO5iXypgQx9s07gaNtjWMETjH4FgypJTkU7yGOfVeO0qNTCSJUyf
WV1HhzoBAAD//wMAimBSZTBBAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c0890a967f-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=LOzleZdmcRHq_k8ivJmSpKH3A1nC2QEG.jr6QfrBEVE-1712872371-1.0.1.1-XMxrEqRrp6jU6HBLC37zKUYMf76POCBfVMhd7JHzJdWfjlsl0Nef8FqH6zmPKoWklvMvsMaxeV1UHCa4Ip2l5g;
path=/; expires=Thu, 11-Apr-24 22:22:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=JbDwNhs8Wa1lIHBiAsSY.Kcld.sWpwINV.ZrNaCgWz4-1712872371362-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-ada-002
openai-organization:
- langchain
openai-processing-ms:
- '18'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9994'
x-ratelimit-remaining-tokens:
- '9999247'
x-ratelimit-reset-requests:
- 33ms
x-ratelimit-reset-tokens:
- 4ms
x-request-id:
- req_b5115f65d90a7b52d62d54c99b2929df
status:
code: 200
message: OK
- request:
body: '{"input": [[9642], [2822]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SZyRKqzJaF5/UUJ/4pFSEikJs7oxPpzESwwRqBIgJ2NJlA1stX6LlRFTUxQkkF
Ye+1vrXzv//jz59/3nldXIZ//vXnn0fVD//85/ezazZk//zrz3/9x58/f/789+/1/60snnlxvVav
8rf8d7B6XYvpn3/9Ef/3k/9b9K8//yg9KPSS39NuHOTEhaVc62zbhKtuGuRzA5Gn+Mx/tFM3X0pu
w3m7bJl9NR/JaEilrDHvYODJu6zRaHFdgD1jGp7NENfiObzJCOZ4JngR82BSb7OpRTG2iXHY3oKp
XGYCJOnhxpy1qXH6OuijdiLOwJzV/ZlwXVFT9SOkb2JKqReMR7E+wmPj36jyUI/1VB0vonq4LhZU
8M5P1O+TT4Vs9fjAsznVfGz0RwMLHYd4DIQdmo+O4oMdWz2WUo3zab33YhQsrj1bl9vO4hJT9yA/
njkh86G12C6LBXVky5Lp/bO3+rRQS+SKs4VXd9tOxsEoK5gV3BP9prz4OEakR4IBPrmSgQfjyRFj
EJYPjSrHo2RRoTMu4L+1LVUPlcnF4lk34NnsRWG9MTl7jckbxuH5opq82XbTuXhhQOdCYwZ+fPJ3
t3YvKHZyjLVsWCfcU+snKqhkUWWDztacHlobrjTqSZg6NBhxMBaytzGBoll91KPZ4Ag9zueUWLuy
tajbiSEYFUuZob3LZJzCqULq8T3gqcvr7/2Y98BvOKJ8fdrn40Y4C7B9uB8WkMU16SSmHoGpDBGz
uibBZDYhheGVjWS7to81v59fqrY1LhmdNqdTPi7CNESVBCE+HI+XfAq8e4EeG/dGru3uwcfXIZDh
cV086TgE92Qq9jsX5G1yZ+ZihQJqDg6F7/PAq11p8ZUjH10A39wT55VG9birGwHM7c4nnrLsu8lX
aQ+fZRwye2GYnbT25xR1ouWRrYnvwZh5BgU3MDjtjnbH6eExlFB+zBnv8pVZD8Jd61Up4gVW5ve9
4+XSsaErHJ0qSanXfHkCG2kdPIhVzVY+euqso8IVb2TNpy5/rw1F+Hs+HHQDGh1fq0AInCfB1l5O
5mZ1sJGpRAkWXHHqBlmQXXjdLJmEQN1glXwCGU4hT7FcsYjzcnMz0XpwlwRf5S6Y3yb1YadFE3O7
VqznzdIrodhmAzEvQR+M5pA06HGnMdl22SefYmElwgObJjNLliVjqkY9RFXwJmGoWMH0aLm/0GVs
MFLeqprrr0+FZm1WSYhNp17u6jpFwt0OiEONIZ9WQU7RUo9q5mVDV1O28XRFb65HpvuREnTP9Jqh
dO+tGJkPfjAtRjlT4yQ64r2/dIPpIboXeBkHzNzl3crHQc5dyPLYw7NhBdYoaIcC4OMe8OMw42Bq
VscW5oS4BGtNlvSGVKogrVOXpBWLEG17LUZ8lkuSUtmpx2+/wqYDjOF5b9GwW0KEzsvowchwXuSM
nZ0GwtP8JMbzlibjLdNt7cx2Ol6GqzZnjX5yQTHPG+bNVpjwTGr36FtfNE9mu14J97lRhbsTMF3U
7vW8be8UueR9Zf51bwVLQctVeFy1J/HOzdviujJnmi3InG1PtyQZOxzqSDojFf/6vZX7RPirr1vJ
vAVvMmYh5GteEudmVsFYCh8Jkl2ZEnwS625+m08fSK08WaAdqm4A3wih2ysuI3gn5PO33tXtWC/o
AdVBx+GRVbAy9kuyX/d2PR+MYIad6B6wMlt9Mtf0XKEd0Rlz6ueYtCJWHZQJQkrz7SAl/aY3bDhf
zQ1WLNAt6VNPPZy3q5YFQifwMSm3OihbUWYOfZo1u23eM7TQtlR7HPKAI9sR0G99GCq1NVa3rocM
OouW/TMMOFzWGFpeYozYRshZQx0MvTGIzA00IeHsjBt14e3f7IzqRz5K5q6Hs3lW8KwZD8Tsea5Q
Hh2SQVbKS0IzVxSh5RUm/oVt+cPbeqm63x/fv9/L6Wf3ltDY5AyLpyoMWJMvj5Cpmc8CpTZzSbiL
PvKnaMPwPTmgyZSuT2Sc9ibm3fCyhvgTjaDc7y4evc2h+9tvDVUNtr0fzeBbTw6QezsQsj+GfDyr
jgDb1bli5jbUERW07IjuRhLhVmwt1GdDXfzq8Xecr+62LmkT3VQUkqC1Jml/N5FVTy1xrvtPMqof
XYW37RXMiNZm12ee18MgbFfE14tPMu+Wc69+alEip836ydnjXvXwcRaUmIz1SXf5aLbaFbZO1v6j
tuZ5kDFantMFvj9vck7RtHiiXVQ4hCjV0uqLZ9fA936QQ0CdejqdrSeUN/VANqxXEFtoRFe9tIh/
fpX0z7ST4P6pB7Iu5zLhJKhMkKKpYG5+L1C/5bMLT1xpv+fH55PWjeq4eVmEDIJYD++xmFE2bWMq
dq2K5sajOsrXU4mHwzbM53TSI02AW0lc2detlRtdChQ1hUZ/9cOJue/RRVrNeM1DyZq78jyi7/Xg
FZwFi6rH2gVNIAWxpXYIhsG4hoix44N4m1PLR0G7Fuh9158kmCbT4uh4E+HeaDc8B/2mHozc8NH7
8RSx6kZex33rhWHl4pSYS/KoaVx6KshOkWGBe9eE8sVqD86mf5ONR616ZiLdg/ZKT8QU2zGf5zg2
odTnA/HKrY0Gsd2JwJ7YI+H1uQn4nk7xT//omHZSMGyqGSPdci6E4F2RDyqKMCSzEjP9pmw4k9h8
RE6XrRg2xzaYJzXvYb5kOiHTuEfzYSVlv35m4aulSc/z6oLa2JGJnYwd7xt9aEDpBYWFK4zRjw9A
D0OZbNxHaY2FDAXY99Aj2Az2fPjUPEKH9xOT7/0O2OPe9miTbmvivnmaT+PNlWAn+gfmLh5tN3PP
pkDMqic/vuRcmjHqMKqx5mwzq7t8lrZa+ZrFiiU3LH5IEhua/Y4z3zBPHZUUI1R/eqxymqJ5Oiig
3QbrQ7zT7VyP6seVYWsUGZXJuO7mIjBjeJ4+GVWaUOP9tRQwXCK5xQLkEqfNw8Yg14lDbD3sOrZd
wxvG5swwfXKKfv6resHLZ+v1ug3465XaUAv7lgRq/+DzUXyHINhbi5gCXdXjxy1TkEkjEVcEGvBr
yWYkecKRhOxC62FtjClU2dpmDlzaZJauta5eLvsci1QO+Vjs4x510V4lW7k3OdeLfQiFIQDTV+Rd
j5kkz7/3dMGLoWZ86GW1vqA9FV+dwUfTnSTYFe6I0WdxR315Fo7q67WfmetWHHG5zzLQylJk+En0
ehr8qtJmJewx36M951uYTLgX7omEK24Hy87WfNjfxvOXD8xu5fgvG72dCyehEz3zcUiUDO5y75NA
Pp84l8wgguU5W5DtURWCnnsbEZQ064h5l6987mz+1KhTTVRd4jmgqWpJIEwGI3p4eNV89RxMtCv8
EavWYV2PyL6mYMdGTzbT9E4mfVSEXz2z9dev+/XeF6GzrjPW2t0DTaVwLNHLOGHi3SQW0NFZxGgd
7Sh9Thf52++HEFlg13g1vm3OPUsWIOmVAdcwlZxmQ3lEOkMb/Ig+fk1//boqC+lvf/DyrLxRaFsI
84t748OPh37+4rzSseZl1B9Re60+dIGWdcBXARZRfsISMwjtEa/1UgYk7T7MdJdBMsOqF9Rel3df
HtdzkXWDipjkDUz/pKr1/b4EePlp2I9vucjjCC2EyxZPDk7QTPaj+pcvCk9bWiyL9X/rt9W5Oz5d
T+z44w2qwqUJOJdUrKz1vU/FT8rRtKnUEDVvvSDmKXKtFR8aWRt5E375wwzGzVkOIdq8d+xoKZn1
zQMAj/3Sx7JxeKMeHm8BVPUIGCm1mayafLmHfk4COpjYsMbLSW2BvKuI6Ieoy+m5GFoII2RRZTUK
wXTh6wo2ZO2wTbE4IL7aX1z07R+GBWqjUa7zEr71QuzgfEz4gtwECAzSEZ1d3/m8Mm0RXAOd8HOR
POshFhYSiPtdhgVPOwRTQxMHVm6YEqtGTz5YpztGdftaYPTJ+no6aY8jhOIrJkE1ETT6R6cCzY2P
BHPvmnM0SabaTD5ipvbYB/PXH//yobUr/YAfHrUAWlmJZJ3HG2vw1LHQFH09/+XBYTqgEoyDOdDx
1b2tCZJwBHn0MeU2jhAvhTxTf/21HdWqm7ju91Ar8wXL+b3g7EiQD5hKF1yJjmnx4SLHkIztmZnR
K+h4YfAM7RpNprM5Amrtsg7Rx0sstj5sZd7n8TqGIMpc5rgRQ5zHTq+GkWLRWaCnmpv1SoJvPxAL
x3LweZK98OMFZtv13XrKy0+Evv+XwuLR5DNfLPaQ+wfKNt2g8/lsbS6QFvOauFpSdzyqyxjtYyNh
jqWo1lwdkxiUe+3SxVla8xETp0VIlRPif+txfrTKE4K9cMTTHbVff1MxhEmYEq+6dXwqIwXDN+8y
b34bnfhZ5OYvP1D+61e0vYqoxMWC6T9/DKiFf3mfuOrnxvtfftbPtk7HL6+PrBtkOFy1BTNqe1uP
TSxc4KfPxuudIy5d7xe4va85C/yVjr55ywUdZoI7WRiCsayDFkBa6mzjlFPdLZRRBNaOJ3a7Lgs+
eVPtwmyvN8Q9VQ/ERSx/5wVhT7562y239ytAQ2adOIw9g9/1oNXreMbi4IvW+A6kBrbZU/ryaJVQ
b/o8kSH3QIxTOQRMxHIJK6v1saiHXc1UO6JaIUwvYpTzs/scjO3xx+Mk13PNYuaOCgpvX0/8jJXw
m/eVCmR3SBiWTiTnkHgtfPmWmY9uHfRJudahOpQSMa+KjSSzsfyfX/78E81lbzvgpZcYL1Hd1dNi
HFMgtmgQ1yz6nMmRGML14UZ0rq7c4kq9tkHVPx5zzFHqKLpRCrQlOvnm126+nIYZ3ifVo69Z7fjI
PSKiVnYuZJ15dT1nLkjq8rRpsRQ9hXxKJ09SMv2DiLVHIp+fJMmU62V1ZJbNnwHb9Z349/qDYkG6
cf+YMzjXK0LCm0wtOoVNq6jw3m1HdV0EvXN1Ujjvmu/3ccQ7uc9SNQHHx8s8fll8m25LZXlat/g2
nG/5Xz67zEmD22Pn5PPB3+wh1WSHJeO7s4bD4+X88gQeOydGo+BUIhy8YouFqxxYQ7aYR7Bz3BB3
YJbF/ds6g80p1dmXrwKuh9YRbrWyxTNcxu7LvybyzIJiQVy7+WxmSwlNMd0yMlp1MHvHIFM/29WO
uHq4TgYRWqq+yYMQuogTa2piUNENQodcov2A2HAZY+TpwZkYfNp10y/f/+VzfeQ1+/n7r7/Dl1Na
P/6GQDpEzOH0FtBPPVHQu6PCtmtb6ujwWGawYMeQGd/80F/W9xl6lL/I9tOP37wQNGh3bipid63K
J8d4tiDb3pr5xdXp5kW76NUuOqpfvW/4ODzNCIxuKeJyeSec+9MxAuUWYWZGppR/+k+hou/8imrb
PAy+/fJU/X22YMGsPrq5w7oKTSpfiRmZx2TsTucjqLJtsa9fJPxwOb7hXC8JlmOzR3x/KVPt10/T
/KbW0Nmaq/zWBy81SeZFSiVUbNOBhFTWc8kCL4V1/LJY4JRdwpiz7KFXXMpMfGjyXrfsQksNWSDh
QifW/Kv3Fw0PxIn3J2sWsWqD8XkiipwyyGfBuQJaxvcLXvmrTTBl+c7UfvfDtmsjWG7TLIVUUx0W
ZukhGN1l+FY83Tt/5304eB+1sdXqZYiZv4jPCbuUSqXGmuR+81FpjSo6z4htFUK+eR113vTWQVFE
g1iGLvzVVxW3ESY3s3wE/cOJBbDAqZnzvu9zZkpZoXrtEDBXDx/J9KlRhA4nXybGBvp63hKrVE5G
atF9eDAS/p2PQEw/3b/zx6YaW+DJeCPkGYrJcBQ/M7w+7YaKXnftmD4ODvrxRRScoeYkaE10pXFP
iFmuAz6FtQPHdabiE8kPOS+FJEVffiY2frnJl8da+NY/VSVoEXOjywXNkUCwUD8Ajb0t9GBKIGDt
rka52J82T3hWZPvNI37H1bd6QQWhJltfG5rwInAp+uobSy7uAo2v4ozhPokvYjj8E0zf+Q6y87Ah
abe+5d/5agxxvVWZI7Z+gAYjtjUJVw1e3N9OMG/TVAQtFBvmjeqbj+JaaUH1+INCfpdrau7UCKhT
TizsDTtYLpxbhQ6DeyHr2n4l/WC8K5AD/U28tQ1owJyKaM6cjvhL/sr55vwZ4atXDL9SOxgddor/
zru+v2ct38VWRhsju1DlZWXBpB+SGRQhI7ht7tianGRRol7rASMcBzV7vVLnN4+kKGxmPlfv3ld9
c/HBvNrSoIckctAU0jedmmlR8/OhGNGleOzpj8/GxX0Vw3yorlh7HFAwvYutijSLunQ2Q1rzVeBI
6Iq0EwvQ0kvGso8AmrqIKPr6Vf/Z1QKk8+WAhVU5o0Z6ogKeTVUxki28nGcuitF0inJ29fXvvPhZ
6Yq3ahK2XxhVPf34gQv7HXFTZ5/wcnMy4dUhHS/LbRD027aXkAKOyqxQ31niIbmN8lwiTr9+YFFj
iC5/eclf8k1OBS3eq5tPqzBjh5/WiO+3Cva3+cy22bDOR8dfVuggxQpxSGF0NEalA/G4NolxRCc+
Hi4PRy1IbzKH+RHiD3IWYRnXF+I5+JW3xbO8aMu8k7EayzdryjzXVb+8x/RGDNB4LYUQcvkdkuye
gDWv9tce4oJuqOx1q3zYCFEKV8ckX/1O6tVmE6kwLDc5C9imSOYtzE+QA/PN1gvVCFSgnwy++z8k
SId3PapzWMBdypbEfzhW/ndeq6aCSZdf/x27cq9CxFTKPCE0A7HcPArwxn4kjhm+cxqs1hi0SvIY
6foh5w3FIXxu8QsvxcMZTbOXmoDrrGM+NV5Ws2Z3E33zJTs93nE9G4v7/u88ZoOWrGOs+2B4rrM7
0fVwQnOtt4XaJ55PXGdpIMm7pVR9RtOKYOY3OSfKztYO7waz0HNyzllkmdo6Sij5zqe7735SBqdw
Sv/yII23igmVv7CYZ6xrxKYCCRDpM2WOlFZ89I5prMIczUSXIav5IIc9pOT1xpLgWxZ1nlhC3/Mz
+0wE63t9F7TxS5no3mbZ9Su2MmEPrzWuw2aRj84zp//2Q7t857OQyg0S6Lhku7P+TGYSdE/45l2G
T6JVT5UdCAirZKYL0G6cXcqpgp1Ma7YWD2f+1dcndPLjxEg6VPncb1e+Ksb6nfk7hup+03v2X/87
hbqPOOjyDNHwOtPkMhwsujhEb/A39Yp423z+ztPfKkLW9cxCqdW71djdLjDRdcXs9r7KRybyEOr2
sSB2eLgnTDKDGIzocsPgdbtg/mR9BsNYb0kQf4xgjG25ga9+ffl66Oa97LoQDqNP/Hg/8k7OvBIu
TxkTP9W29fgykQ7feQ/VnJnlXz54A3z8AzPcpEpm6zNWAFpfM3N/7vgcl4aqSU9uEuv4Geq+uVij
DE2zYelDM1G/Cq6RWtFPTLaHKes68W6kasmfPz1sgt67JaJqKRxjVG3vNS1k5QjJMnz9zTuzpHgY
8EtQ8JuMj3re9UMGF8XbMEO/V8H08y8njCQqf/POd//CR5ryibByk5g1iakLMM8Hjxi//S+hCxuQ
KV3RQTJvVvdMDykExrbDC7VLu9F/n7J/7z+ipWX99b+v3tLdV2/6r96ghVBsSSjCizMuzaH2CscF
XtytCE27XVxpYrp8M3K4jcn0SNsYqevtiVkwlejN87YA9/A28fG7f0MFcW3C4xmVLFw8mv8BAAD/
/5xaS9OyMJf8L7NlqkBuCUvkfjNRbuJOFBEUkUsCpGr++5TP+y1nNUuKyqZP55zuzknmA1teML3f
LQK4sQLMCT6O+pdvvAQoJMvp+izgew5Maq8Pq1y+p5GDF93XqeU1qrn85oH2828UHbsVzJ8sSEHv
VOwvH2bzBOjyl1+gNZKW/+TVhWS0NJoDt6VFVP4nP0WxLDPaK+foz38RrmMITFQTFyjkHSXA/SK2
+P03gEvPa396t1xvgw/h7eBz1GDBOewlyhtqKusSdbLmGy6b5PXg589weJjFchWNwtM+9QOTcyB9
QlbR5gb+9GAuwpMp/eaZcuI7Bzu8JIDx1FYNwDQ7//F1ZC/94akfbKVE66pqnCB5NfAPrw1XH7Y8
8z6AvjV/sHs868luRMFN+fOD1lXL2Xri/Q4YIsfRkFWM/b0PAjHLJmp9nGhcf/hBcMyf1B9OUSiF
ftrBA5fYSA61qlyK6DnAIbKT3/v5HcxcoXoweWCDqM3t0f7wJfBtdCfq9uKrXcft9Z/8MPjlpf/O
/9ffVsD//Pf/Y6Ng939vFLRK8EFK4ScluQSKDtt5A9QGFIdbvZ9ucK99S2qOuA9XqAc1mE9ySfFL
aczlNQsLjN/vK9nuZ7ftI6WOoR2lFXYXwWtFbPsv0HJGhb2h5sBqxkMHZQ1B7FqXOOzY49VBXmUB
kaxLbK4pnDyY4G6k3tn/sjU04RW+JXePQ/XiASZ/Tp2mSuBGdg/VCBc+QFdVsVNC3X7BbIu+tgyT
9NbiyDzV5nqWvQlOwDex+T6MjE7XKIDJJb+SXRXb5oIFKVCvamHTcvqqyWa/rwIU0rTGx686JttO
CBBQAvSmxtoAk+rzEsH5sn2JDNOuZEHXG+B0WUwELzJNlpLMAZDBi8dIyxsw9XDZAJjjC0WiiMqh
9qwjMHJ9IaJrGuZO+/gFRKlICcEJDbcRxAUIX2tM3ePTHHd3ddfBl9Uk1KVRWK7ffvDA8/b6UJS+
bbCdxTKFO9A2SNprYjkMbinDd/EosXvKjJBwD/UKWHYQCGv5mW27vS/DKUpUxNe2ORIuVhdYcvZA
944mmssiDFfw+08oX5/MJbSfC+D1VaGuJwXh1ny/DVRCsfnVawtnbe/9siWQYPtRN2DZjDrWroax
Yq9iZrmQy4vTPvllhyPRBeM3a0dLjnx1Iq2ldmDtPdNRVyVQsXPVJTDHKbXgc/0peu49mGuQZz2c
l3pPzzix222T5wE8n9UDyRNU2+1++aQwsKc9ftx2fkl4RRnUH38wSt9v1ts9lWGfq4Bah5c/Stzr
JcBrK5/wvZKLkcXyUYXKTAyyBOWLkW07CJAVd0K4stRbctrqGL4c74gP+z0u101XZZhc4RlplaWG
fU8VDwDJ8Kg5jyb7suvmQbArQiRJx5f5afRdDAaFozg89SNYJPlewWvYQBxK2myulvKsYBuXRyQp
sT4yBEQPsuJBMErOLhC/DBbQsPwjtk6oNRfqmgTUC3/ByFircu3mQweRSDB1Ry9u1662IRyPxMX4
eplHJk4AAd/MSmxowzTS0hx0uDajQybDNJI6Yt4NGkp9QJ+neAs3dDIN/ok7jihFG7fL5eo28A04
GQH+47TSVLcQClN1ItKPD4IrzgToKP/gIOqbdr3vswry/HdPQ3kNwWZJ0IHXonkTa+f2YHJwJ0Dn
bk4IDIdqXA0+tKCSwBR7BzMFFH9TGYiGeUbP3dUPJylON1iJR4XqefNopw++OlB8GDyOUHA3mTft
exjYZI8fVzaBZddKoppJ9RlXkTuz5e4PPbwVDkSqrh7L1af5BA+Px0o0VhnhMhVkARHVHXweSFJu
U3NJ/90Ptb/jdpScrQb1kYrUud3Y+PG27w1c0vVNZmYZTHyd9RhyvnCh+966sM1V5EqNS2kkQAnH
cDs3TIVQpTORlSpM1khSVVgY1RUtxjomjGb1pDJ5PtFfPZO1U20Eg8ML08BpDuUAT/AF+7ofcLG7
buP3c5gITF6Tg44PITeJBHoEM37wqFOi3UjlWtnASdcz6nPWrlw5pKcwN1STjFJ7AFtr1gKwYyHB
WRot4YakkEBhup0oUp2+XYJxqOCYcHvCy8dzuK0hguAskxGxYeexlZHLC97t1KP4wqOE3qS+BpcJ
5DTi+I0tPhdO0FvcJzW7/ZDQ89NpoBv+novuqwO2/s11qli0ITVfl0OybL5nqWboBWR5xaRcACmu
qu/vMA4EXk9Yo2sxcFPjg1Gfq+aix2UHe22I6T4e+3J7lu0EL3JioBWXxPzHJ3CSf46nEtpp8XxZ
FXfLGePTUyxpgeajyp9PM3XeJAjXp1UY4LmPIFpc/msuEdMrkCleSlO6i8MluasVMHXDoJEKriad
06ZWqQAx9Y3caFfBO8rw8LivRObrz7hM4SeAEm7sH/5Zya64raA/OBs2n22cbOCcI1i1Oo+97DkB
2riZAxEn6gQcdcKoNjgV5K09o95OWNkcSZuqVc+LQRbjHJnUHuwjfFqVSJ5sGxmz/A6pv/tO+NNT
TMjuY7z+ziOpGa7jV1NrAZ7rl4ofZ+3EfvgWMH5/rji6pWNILAss0Bn2GdH2uwNb5YH0oLOnDw4v
8TuZfVoRYN0fGUbOXh6nlLvU0FPeE3ad3aEdLpqSw6chKgjcPD1ZvOF1VD/yKaN+fYjM9ZsZRANL
neLiEjols9t3BY72WtOoyd1kuX3NGormmBKl4n/1k8cYfN1ij++uGbbLVHQbyOalp3u8+znUXs2B
MAgpWZ7iNs41XKG2l/zPX39l0ydbVbAcLJ0GbSiD5dOsNRxAN/3q47ZMSZccfjd+R+q9NjGyRbYH
cu4h4v13ysGW91kN0Ji3GB9MLlnVKOQAllcJm7BX2v66Rzfw7NKcRrvkHk56AQpwzeKRmmfzztZP
GtWge+UHHDJFKLuszFTYxpcjEcelS+YKsAVGWVygJr4zxhK7eEFrcgA22zcaN8dWILQWjqGzKFrg
fagUEVyhYaDi3H7Nn746/vXrf/xbsnE5qt0g7PGtilE7/fXrpMtNwrjoztbSkSxwOT1HutcsYxRK
w+q1/XyRqXfb78tN3OlXzZVqi7ro4YZfGt85gFzJwf4w1wnl7RD+9S9qlDRI3urbNqCaljxRT2Kd
rNGaOPCcYYS0V4ATVk/nDkYHr0Ncyr9N0m8igsZ6vSEuDbt2flA1VY/B9/zjxxSyVxarUOZEBf/4
UK6lwztAn+ABH37zeftwfA8sIZppuSQJ+C5vrQCU5TEi2eVVLvd8iGEveC4NlTA0p8rydW0e9Aif
D2PfLtFU9ypdSUZE/eO039W5NeBlqDmNBr1mKzwYAXxS8Untp1ebW26kAugR8zGCnh1SweI80PdL
idbeUgCrlTOCTp18KE4eF7CU4LWBx0n2kPZMhWT53C4E+L6E//FlFko5/cMPe/G4AuY7HgSGGCfY
OnAeY7VTyfDd4ICsXKwnO+2zL6Ce3y7Uvlddu0294oF5HHz8p8+GwlcF4JgJRz7IdpnEVQcDunEx
UEQ7wWRHX9DVp3UTkWS+k2Rm1y0AQNI9pHzuXTgt3l6GpnbYiFK9CVhj91NByddP2LNCbZzwlVf/
9SPJ/lBGhjRpYKCLAaI/fSUW92v875s/mFy5Pm6xDAZO6PBx25pxRfLqaEN7A9hFSpewUgs8iN+t
hv/m2SaWvgcx3K846PaMTfe8iTUEXg7G8vFsTtkoxxAcjysSNf/MSGv2IhTW3Ec74bEzh1bz4D89
cVhF29xBZ8jhrY4IEgKCRuau7RHShyhg/b5dAMtV/FIH//HFtvDIwlUTfB1k2DdIL6JTyC79q4Hh
4nHkm5wAW9FDNoDKlUe0K449mIfWa+DodRla9j5JxmMLjpCOqMZWLuomu+bhEWScEZKPeHu3hD+c
Amifp4AmvH5L1gH6HfibDwspzJKVZ/8GzWhx6fkImp9+T1Igtw9E3XedjTR+KRWgju1Q49b6oeBz
IYHdyUlQl0VKwsZfojFxhUZd9r8AAAD//0ycS++CQLbt5/0pOj0lHfABVfSMl4CAVQiImNzcAD4A
ReRRBVRyvvsN/M89OVMHIqT2Xmv99pa67ZhBiQTE3jjQw6yLGbsbLySDi93QaAOsbKmfFkSbLEL7
m1uHZEuaGjjwJlNHMSdA7UBLgL4dOMThgWTj0dVTqL46Rjg51timfp0gfCpag3Ur1jKmfJMEBIfr
juLR4zK6+lXr6hfUNnio95qVxJDW8Q87Tl3rMy9ODVz9kqiWhtt621MJt23aYTNKUjCnXWLC2dVS
ilf/FvqSAH78IC355gOoddBmkH2/W6odP7CjtGwiGKroh7GNn6xf6g3m0UbDi9/IxpJ/CoAv1Jza
8/vajUselQ253xO+1L9u58eglcQrQkQSPqo7OrPmg3iiE+JvLgXksJ998BIt5++8sOfzyQG30T2s
fI0x64Mc+iC89ncawByBcTAPJbxKUUMtF/esr5SHCcn8+ZEd9bpsetI5lq82/pA7TY8hFaN9BBc/
g8RIFPUZvO8CPORvldoxbTtmng0PhppQ0wPH/7LJa4EP81svYu1+/VZkgEktbdukI/ugm9hcHp0H
VMjboc9kGrsu/bVEOrpPD3FL3pxg+INAHo2Oap66D98P46jBs/k6Ls+PB6wP3zHcWWlEXRnrbFY1
PoXurztix7SR3pwbuQf+UU+pU2/mbDp9bi1c6gmNbeRUZNDGXE5PtkhPv+TkjpfH2ILF/2L8GV4d
G4bqDap3MJLnygs+KJNgWBlHrE6xH46e2Pig0Tv1Ly8veXOGWbsZqQY12s1ubgbgGL45bIiuWs3m
7ZbCx2lZ2jwQBAQuxlsgnM2aCFq7YUNgffP1fJHRvhfhcEd3c73/tR+w2WksAp4fWyMcEilb/Qp8
fXFLtTOO9dFTFBuaNzYv/WcHBu3jCiBqsg7JNP1lTN4hAxr+4KMNuGAw7zTbhEv+Q5uktXVGMi2S
vNJX6bmHkz6KHrDB0l+wTsMX+xJQpKteYYcc+o6lo/aC+Zificg9oqqJdlMPJHCMseapSciQLQvA
Em2VRsMdZ7Mx5YF0NPgEH5+3U7bwhREKz7xBv8uw60jl2C1ErqXg4+9QheSbcBzM2t2InYrfhdPp
PCNANruKSE/AVd0p2gXwEqQyqRY+8nvmgSQXlZmTSaX7bGiIO0IvbDLswuyTjXmuS8DNuxcZ3ELv
psvdDQAr8Z0UQKv1QQ/aN+wOUYa6yPh2bPHbcCp/JhLGsA1HExMBBgN1ieTUtUvZnUfwy24P7J5R
3zFfzxHMw9MBL/klZAasX2B3kmx86LM8HLVa/Ms7BPDfupvmKIihkL4n7PG3WZ8CQiJ4ubQ7auWI
d98kPDiQlac72rNk67Lfg9Zg+1k2fqz26bJgmkzp4PQFVaPBADTozj68uvcnVRULhERQ3B5alsaR
tf9P4FkloMod7u+8/6BZRqv+UEfv54yKKHnBfWKV1Jkro2O/7osAOI4Q7cjcu+Pin4EEp/Avz1OL
a6U1X2Gl2M4VK14PB9B33//p+8RSyZaC3UNCANc4Y8Hj1gBpJ+ZkZtKdke2y0botA4kqbdR28/RB
Hlh4DkZ4IOH8AsYbEnohmPKSHzJ+6h9wyS/4udTn/HX5CLALFrD2OpVd03PfHLQ36pFN3HTV3EZZ
CcIvp1A76M5sTBEyYP3MLzSXrxdG+c5rYZ3WD6qV/LsbVV4KAEMDTw+c6zA2WK0Enrc5xmY3miG7
Gw2C1e7V/vHBXSZrDnzEakwNkX/pA+muCrRoElC85hVLPrxgxlktOce00CnUrTdMEg5hYxx+HX1v
RAiOhpxQXN7rbATffQ0/wyWg6r7Vs8k42Rysv7WF+lPMVYTVXgpr7TGQPW/a4eYuyW+45f0Sq81+
n/Vg+QcLaTYSgfpZcYWlfreYP3fUK8e6YkUYl1Ai7oC2avZxpyOMo1XfKPbDpKNO3ShwzYunudU7
dhTVQD7Wwuq3i6xd/XY5ViO1RqGpxv0zSmTp/SvR5vkqWRNN5z24ZfqWtAKvZCx0xxQueYfa6c1w
BR9xOdyQSiTSF5+7P15XfaThxEH7486sNlKZppxMlVFRqkEWVA2Cu7ZF8CLE1Rj6swDW6/Goy7JJ
HgpPXngImlSahKP4Smvgmm6PT0UUhaRNWQz7gFEibOmHtTHZRFDU+4ma1l0Nh8mMXrB4oDd2pkvN
BnK+zlBNXY7stElgRMu5BvDfyifj4g/n01hJcAOKkszb+NYR4wxSGBa5R5VCE9yh1X45uOu9S4Op
zPSWP90cKEU3Hh/CZMrms6iY6/NFnR1vst/Sz6B2YCfS76td1t/vmgmra2gR5o5FRxXBV6B7y2oi
qPneHXNaRfKij2R2ZT5jnHGYIec+YqoU26Aaz9pGg4kVn5GAkkZf/KojXkQnooafvrv5F15iMLki
wTaN7tmoiOkDcraX47v56zMWS1YNr+H3jJr44mbij6pQbuQmwGjgru6YItMALXj39BB5oz4n98AH
0wcoRKqzUJ9WfiiasPvLj2zlAdn7Aunaz6fD8NLgUq+Imxq/mq6P4wMs/gnb4XPM2BC1L/iIbjeq
ScK5GzVlLEFyNhTqnpFXjRobt+B8m3UitsMrmy66p0lqBQZs2XeHzTfj5oFGuVnYFh9dRixzdOCE
3YYe20EJd/49y8Gj0nh8mPVbxsKDX8OQoDMq75PJdp98v4f8t/BJsfcNfTp8FSSv+sByGgJGc6UE
7Qur2HAiGnZh4HvAaB8cYXX2q4jBhXsIwrIj4OrbrG+O4wOO+05AR9nfgrnh5e3K58huv1ezJW++
ASqEhMYm1ivBE5tAXM7j6m/Z3F/OPWigdsHufurYYNvPLRge7Ytw0usLfiEYOHG2xtdav9WGQ3b8
1y8E3bjok1AmNtRRapBNprWgCX5gL7nTDKi2jW8VE3bcAwaHy446u6nMxl2Qj+B9TB18qsvA/W7m
XADB0/og/v0sQd+7X3vVU6r64aGbmCoaoH4+Ltg4Sr5ec72kwNqK39j6SAKb1PM7hev5MURecbs+
7COoi/weW/u9Gm5+pRbJyX17IHMk4oodTQWCNrse6anhTmx3lqUSNnPpUMOOO0ZX/7HwNmoverhZ
8g5MtDzF3nuu9LnU2hZ+jrvN33l6RcaUgtAhAj3er2ed5ZwdSQ+na//0bvVnsJXRDzE9+FXd+v2L
3lJVASIYEJ/soaPlFnbNaA/mguoOvCT7iboL/51Px48GlXNd0lOnODrdKWkPj11PsUokoeo+mtiu
8yDsLH59+7roEkSJ+SOAnHfZJB3eNhSvHsKxNEB35Cucw+T50NY8nhHenGL5UigVuXsXLdsp3ySF
u92G/+uHf/OoJX8u84/YHcbnpYaASDFdeePkGfMbrjxsqfeQbfZCs/LnhY/uXOLmyAd+kVBq1qVd
bRPIbOAf2Iu8UdK4/d13HvAE7yk+Lv5s2t+pBnVR3mNr11YZHaQOiYZOvxR5xUln1wKVEDqBRvVC
nrrS4DJpJ5HjQAYnPIBRmaVmzTuEG4VAH8vb05Zc3O3IftuMYf/+RnDNm9TwlU84crElwINDCuz6
KXGX/lDChQ9jS5W3IT1UQw7UnfvF6uwgNsnfYwpXfyewg5dtBDT2a7+k7v7ad2PFHgm4RIq0PP9r
Nq28SDG9LVa36NuxK0EOXPnZqqfDNzJeUIbCSE2+kLpZqq7Nyp/J7TJcq6mo9ntoVkWBwOkduuPz
rDRwmQ8hgJKp+xZh/KcvVMOnW8WU11jDq3360EPi20yoCxbLl7J5LfXtMrryNEG7KxTzr8mlS94D
tRW9MXIOUzhNNXgD0GpP6kqscXsfcQ+48MhFryK2/H5bmq35RaaZG0LWh30MVn7pBVxWMUXwNdnB
yKNut8m6saGTIy/1jvYX0LuT5wYpWO9nuliGLqx+aeEFy0YoAn98Qp76huqKaYe1tpX3MNrcIhIE
etRNV64p4VltAyTE25e+8PsYmlVVoFHu+Ipy1w+CxvMQ4L/zsn9No+wYpkHOy/OmnThokMLtHSsf
s2dTCD4Q3kcRol0T+10/fEgKk9O7xOql0P9bTwdFAwv/+7qDXjsa8GLpslxfcTefEEVb5BEZMV6Q
2HCJuxwqxfZAeN6cGPHoh0CUGD+qX3cwHN2gSOUwryOqk0Pl1s289WD7OqmIRPxnnUcmUH39GEaA
HLq5Go+xtM6bl3kvmPlw5KA1zBds82YT0lPB7cHFFW2Mr/IEFn4bydpd4shc8u9Fr2UNPpxfi02d
fcGs/bY+BOrPpYaaJ+4Qjy35qyfDIagaXB0mq36jbQ2/gG3mSJBvXXgjQnG5r+c/keivrrD1Mbfh
knf2az7Gujuq3Wadv3RP84jV6/EVvlJkmkA7qhbZ0K9cDYU8OvCyK69oCs4/fX4WhxhaheIv/avQ
J+2jC9KiL3jVm2lWZmnt52Txb/rsbQ8vMMepSdHH5NnvI1oK7PvqTrgmTt0lbxowB+8bdsjTBWu9
gW3q2oSD5Tv7jf3Fg8u8lyJAPtWY564Ed0ft/DevJ1Y/bsHKLx1y8LpJ8nTu/28U/OOf//w/6zsL
6ub++CyLAcNjGv79P6sC/07v6b8FYfv3YgPSp6/Hv/7z3xsI//p1Tf0b/u/QvB/f/l//+ef2b9Xg
X0MzpJ//9fE/lgv91z/+HwAAAP//AwCM575JMEEAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c08895cec1-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=GjIa.U9Ux2eLgd2vhsSvnEXQtrIvsZMYw2Ft2JOAW24-1712872371-1.0.1.1-HD9XZEKU6nJdM9A90MSP_0qEd.I.paprWpLaxKEWXzmcz2ysvIjXS6lxdh2_JpljXqXfYQDo_kvTVSM06BoHhw;
path=/; expires=Thu, 11-Apr-24 22:22:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=JbDwNhs8Wa1lIHBiAsSY.Kcld.sWpwINV.ZrNaCgWz4-1712872371362-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-ada-002
openai-organization:
- langchain
openai-processing-ms:
- '16'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9993'
x-ratelimit-remaining-tokens:
- '9999837'
x-ratelimit-reset-requests:
- 36ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_91c5a94c9be7fa8fbe52366d437b7ccd
status:
code: 200
message: OK
- request:
body: '{"input": [[9642], [9642]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SZyRKqzJaF5/UUJ/4pFSEikJs7oxPpzESwwRqBIgJ2NJlA1stX6LlRFTUxQkka
Ye+1vrX57//48+efd14Xl+Gff/3551H1wz//+f3tmg3ZP//681//8efPnz///fv8fyuLZ15cr9Wr
/C3/baxe12L6519/xP/95f8W/evPP0oPCr3k97QbBzlxYSnXOts24aqbBvncQOQpPvMf7dTNl5Lb
cN4uW2ZfzUcyGlIpa8w7GHjyLms0WlwXYM+YhmczxLV4Dm8ygjmeCV7EPJjU22xqUYxtYhy2t2Aq
l5kASXq4MWdtapy+DvqonYgzMGd1fyZcV9RU/Qjpm5hS6gXjUayP8Nj4N6o81GM9VceLqB6uiwUV
vPMT9fvkUyFbPT7wbE41Hxv90cBCxyEeA2GH5qOj+GDHVo+lVON8Wu+9GAWLa8/W5bazuMTUPciP
Z07IfGgttstiQR3ZsmR6/+ytPi3UErnibOHV3baTcTDKCmYF90S/KS8+jhHpkWCAT65k4MF4csQY
hOVDo8rxKFlU6IwL+G9tS9VDZXKxeNYNeDZ7UVhvTM5eY/KGcXi+qCZvtt10Ll4Y0LnQmIEfn/zd
rd0Lip0cYy0b1gn31PqJCipZVNmgszWnh9aGK416EqYODUYcjIXsbUygaFYf9Wg2OEKP8zkl1q5s
Lep2YghGxVJmaO8yGadwqpB6fA946vL6ez/mPfAbjihfn/b5uBHOAmwf7ocFZHFNOompR2AqQ8Ss
rkkwmU1IYXhlI9mu7WPN7+eXqm2NS0anzemUj4swDVElQYgPx+MlnwLvXqDHxr2Ra7t78PF1CGR4
XBdPOg7BPZmK/c4FeZvcmblYoYCag0Ph+zzwaldafOXIRxfAN/fEeaVRPe7qRgBzu/OJpyz7bvJV
2sNnGYfMXhhmJ639OUWdaHlka+J7MGaeQcENDE67o91xengMJZQfc8a7fGXWg3DXelWKeIGV+X3v
eLl0bOgKR6dKUuo1X57ARloHD2JVs5WPnjrrqHDFG1nzqcvfa0MR/p4PB92ARsfXKhAC50mwtZeT
uVkdbGQqUYIFV5y6QRZkF143SyYhUDdYJZ9AhlPIUyxXLOK83NxMtB7cJcFXuQvmt0l92GnRxNyu
Fet5s/RKKLbZQMxL0AejOSQNetxpTLZd9smnWFiJ8MCmycySZcmYqlEPURW8SRgqVjA9Wu4vdBkb
jJS3qub661OhWZtVEmLTqZe7uk6RcLcD4lBjyKdVkFO01KOaednQ1ZRtPF3Rm+uR6X6kBN0zvWYo
3XsrRuaDH0yLUc7UOImOeO8v3WB6iO4FXsYBM3d5t/JxkHMXsjz28GxYgTUK2qEA+LgH/DjMOJia
1bGFOSEuwVqTJb0hlSpI69QlacUiRNteixGf5ZKkVHbq8duvsOkAY3jeWzTslhCh8zJ6MDKcFzlj
Z6eB8DQ/ifG8pcl4y3RbO7Odjpfhqs1Zo59cUMzzhnmzFSY8k9o9+tYXzZPZrlfCfW5U4e4ETBe1
ez1v2ztFLnlfmX/dW8FS0HIVHlftSbxz87a4rsyZZgsyZ9vTLUnGDoc6ks5Ixb9+b+U+Ef7q61Yy
b8GbjFkI+ZqXxLmZVTCWwkeCZFemBJ/Eupvf5tMHUitPFmiHqhvAN0Lo9orLCN4J+fytd3U71gt6
QHXQcXhkFayM/ZLs171dzwcjmGEnugeszFafzDU9V2hHdMac+jkmrYhVB2WCkNJ8O0hJv+kNG85X
c4MVC3RL+tRTD+ftqmWB0Al8TMqtDspWlJlDn2bNbpv3DC20LdUehzzgyHYE9FsfhkptjdWt6yGD
zqJl/wwDDpc1hpaXGCO2EXLWUAdDbwwicwNNSDg740ZdePs3O6P6kY+SuevhbJ4VPGvGAzF7niuU
R4dkkJXyktDMFUVoeYWJf2Fb/vC2Xqru98f373g5/ezeEhqbnGHxVIUBa/LlETI181mg1GYuCXfR
R/4UbRi+Jwc0mdL1iYzT3sS8G17WEH+iEZT73cWjtzl0f/utoarBtvejGXzryQFybwdC9seQj2fV
EWC7OlfM3IY6ooKWHdHdSCLciq2F+myoi189/rbz1d3WJW2im4pCErTWJO3vJrLqqSXOdf9JRvWj
q/C2vYIZ0drs+szzehiE7Yr4evFJ5t1y7tVPLUrktFk/OXvcqx4+zoISk7E+6S4fzVa7wtbJ2n/U
1jwPMkbLc7rA9+dNzimaFk+0iwqHEKVaWn3x7Br43g9yCKhTT6ez9YTyph7IhvUKYguN6KqXFvHP
r5L+mXYS3D/1QNblXCacBJUJUjQVzM3vBeq3fHbhiSvt9/z4fNK6UR03L4uQQRDr4T0WM8qmbUzF
rlXR3HhUR/l6KvFw2Ib5nE56pAlwK4kr+7q1cqNLgaKm0Oivfjgx9z26SKsZr3koWXNXnkf0vR68
grNgUfVYu6AJpCC21A7BMBjXEDF2fBBvc2r5KGjXAr3v+pME02RaHB1vItwb7YbnoN/Ug5EbPno/
niJW3cjruG+9MKxcnBJzSR41jUtPBdkpMixw75pQvljtwdn0b7LxqFXPTKR70F7piZhiO+bzHMcm
lPp8IF65tdEgtjsR2BN7JLw+NwHf0yn+6R8d004Khk01Y6RbzoUQvCvyQUURhmRWYqbflA1nEpuP
yOmyFcPm2AbzpOY9zJdMJ2Qa92g+rKTs188sfLU06XleXVAbOzKxk7HjfaMPDSi9oLBwhTH68QHo
YSiTjfsorbGQoQD7HnoEm8GeD5+aR+jwfmLyvd8Be9zbHm3SbU3cN0/zaby5EuxE/8DcxaPtZu7Z
FIhZ9eTHl5xLM0YdRjXWnG1mdZfP0lYrX7NYseSGxQ9JYkOz33HmG+apo5JihOpPj1VOUzRPBwW0
22B9iHe6netR/bgybI0iozIZ191cBGYMz9Mno0oTary/lgKGSyS3WIBc4rR52BjkOnGIrYddx7Zr
eMPYnBmmT07Rz39VL3j5bL1etwF/vVIbamHfkkDtH3w+iu8QBHtrEVOgq3r8uGUKMmkk4opAA34t
2YwkTziSkF1oPayNMYUqW9vMgUubzNK11tXLZZ9jkcohH4t93KMu2qtkK/cm53qxD6EwBGD6irzr
MZPk+fedLngx1IwPvazWF7Sn4qsz+Gi6kwS7wh0x+izuqC/PwlF9vfYzc92KIy73WQZaWYoMP4le
T4NfVdqshD3me7TnfAuTCffCPZFwxe1g2dmaD/vbeP7ygdmtHP9lo7dz4SR0omc+DomSwV3ufRLI
5xPnkhlEsDxnC7I9qkLQc28jgpJmHTHv8pXPnc2fGnWqiapLPAc0VS0JhMlgRA8Pr5qvnoOJdoU/
YtU6rOsR2dcU7NjoyWaa3smkj4rwq2e2/vp1v977InTWdcZau3ugqRSOJXoZJ0y8m8QCOjqLGK2j
HaXP6SJ/+/0QIgvsGq/Gt825Z8kCJL0y4BqmktNsKI9IZ2iDH9HHr+mvX1dlIf3tD16elTcKbQth
fnFvfPjx0M9fnFc61ryM+iNqr9WHLtCyDvgqwCLKT1hiBqE94rVeyoCk3YeZ7jJIZlj1gtrr8u7L
43ousm5QEZO8gemfVLW++0uAl5+G/fiWizyO0EK4bPHk4ATNZD+qf/mi8LSlxbJY/7d+W52749P1
xI4/3qAqXJqAc0nFylrf+1T8pBxNm0oNUfPWC2KeItda8aGRtZE34Zc/zGDcnOUQos17x46Wklnf
PADw2C99LBuHN+rh8RZAVY+AkVKbyarJl3vo5ySgg4kNa7yc1BbIu4qIfoi6nJ6LoYUwQhZVVqMQ
TBe+rmBD1g7bFIsD4qv9xUXf/mFYoDYa5Tov4VsvxA7Ox4QvyE2AwCAd0dn1nc8r0xbBNdAJPxfJ
sx5iYSGBuN9lWPC0QzA1NHFg5YYpsWr05IN1umNUt68FRp+sr6eT9jhCKL5iElQTQaN/dCrQ3PhI
MPeuOUeTZKrN5CNmao99MH/98S8fWrvSD/jhUQuglZVI1nm8sQZPHQtN0dfzXx4cpgMqwTiYAx1f
3duaIAlHkEcfU27jCPFSyDP111/bUa26iet+D7UyX7Cc3wvOjgT5gKl0wZXomBYfLnIMydiemRm9
go4XBs/QrtFkOpsjoNYu6xB9vMRi68NW5n0er2MIosxljhsxxHns9GoYKRadBXqquVmvJPj2A7Fw
LAefJ9kLP15gtl3frae8/ETo+38pLB5NPvPFYg+5f6Bs0w06n8/W5gJpMa+JqyV1x6O6jNE+NhLm
WIpqzdUxiUG51y5dnKU1HzFxWoRUOSH+tx7nR6s8IdgLRzzdUfv1NxVDmIQp8apbx6cyUjB88y7z
5rfRiZ9Fbv7yA+W/fkXbq4hKXCyY/vPHgFr4l/eJq35uvP/lZ/1s63T88vrIukGGw1VbMKO2t/XY
xMIFfvpsvN454tL1foHb+5qzwF/p6Ju3XNBhJriThSEYyzpoAaSlzjZOOdXdQhlFYO14YrfrsuCT
N9UuzPZ6Q9xT9UBcxPJ3XhD25Ku33XJ7vwI0ZNaJw9gz+F0PWr2OZywOvmiN70BqYJs9pS+PVgn1
ps8TGXIPxDiVQ8BELJewslofi3rY1Uy1I6oVwvQiRjk/u8/B2B5/PE5yPdcsZu6ooPD29cTPWAm/
eV+pQHaHhGHpRHIOidfCl2+Z+ejWQZ+Uax2qQykR86rYSDIby//55c8/0Vz2tgNeeonxEtVdPS3G
MQViiwZxzaLPmRyJIVwfbkTn6sotrtRrG1T94zHHHKWOohulQFuik29+7ebLaZjhfVI9+prVjo/c
IyJqZedC1plX13PmgqQuT5sWS9FTyKd08iQl0z+IWHsk8vlJkky5XlZHZtn8GbBd34l/rz8oFqQb
9485g3O9IiS8ydSiU9i0igrv3XZU10XQO1cnhfOu+e6PI97JfZaqCTg+Xubxy+LbdFsqy9O6xbfh
fMv/8tllThrcHjsnnw/+Zg+pJjssGd+dNRweL+eXJ/DYOTEaBacS4eAVWyxc5cAassU8gp3jhrgD
syzu39YZbE6pzr58FXA9tI5wq5UtnuEydl/+NZFnFhQL4trNZzNbSmiK6ZaR0aqD2TsGmfrZrnbE
1cN1MojQUvVNHoTQRZxYUxODim4QOuQS7QfEhssYI08PzsTg066bfvn+L5/rI6/Zz99//R2+nNL6
8TcE0iFiDqe3gH7qiYLeHRW2XdtSR4fHMoMFO4bM+OaH/rK+z9Cj/EW2n3785oWgQbtzUxG7a1U+
OcazBdn21swvrk43L9pFr3bRUf3qfcPH4WlGYHRLEZfLO+Hcn44RKLcIMzMypfzTfwoVfedXVNvm
YfDtl6fq77MFC2b10c0d1lVoUvlKzMg8JmN3Oh9BlW2Lff0i4YfL8Q3nekmwHJs94vtLmWq/fprm
N7WGztZc5bc+eKlJMi9SKqFimw4kpLKeSxZ4Kazjl8UCp+wSxpxlD73iUmbiQ5P3umUXWmrIAgkX
OrHmX72/aHggTrw/WbOIVRuMzxNR5JRBPgvOFdAyvl/wyl9tginLd6b2ux+2XRvBcptmKaSa6rAw
Sw/B6C7Dt+Lp3vk778PB+6iNrVYvQ8z8RXxO2KVUKjXWJPebj0prVNF5RmyrEPLN66jzprcOiiIa
xDJ04a++qriNMLmZ5SPoH04sgAVOzZz3fZ8zU8oK1WuHgLl6+EimT40idDj5MjE20NfzllilcjJS
i+7Dg5Hw73wEYvrp/p0/NtXYAk/GGyHPUEyGo/iZ4fVpN1T0umvH9HFw0I8vouAMNSdBa6IrjXtC
zHId8CmsHTiuMxWfSH7IeSkkKfryM7Hxy02+PNbCt/6pKkGLmBtdLmiOBIKF+gFo7G2hB1MCAWt3
NcrF/rR5wrMi228e8TuuvtULKgg12fra0IQXgUvRV99YcnEXaHwVZwz3SXwRw+GfYPrOd5Cdhw1J
u/Ut/85XY4jrrcocsfUDNBixrUm4avDi/naCeZumImih2DBvVN98FNdKC6rHHxTyu1xTc6dGQJ1y
YmFv2MFy4dwqdBjcC1nX9ivpB+NdgRzob+KtbUAD5lREc+Z0xF/yV843588IX71i+JXaweiwU/x3
3vU9nrV8F1sZbYzsQpWXlQWTfkhmUISM4La5Y2tykkWJeq0HjHAc1Oz1Sp3fPJKisJn5XL17X/XN
xQfzakuDHpLIQVNI33RqpkXNz4diRJfisac/PhsX91UM86G6Yu1xQMH0LrYq0izq0tkMac1XgSOh
K9JOLEBLLxnLPgJo6iKi6OtX/WdXC5DOlwMWVuWMGumJCng2VcVItvBynrkoRtMpytnV17/z4mel
K96qSdh+YVT19OMHLux3xE2dfcLLzcmEV4d0vCy3QdBv215CCjgqs0J9Z4mH5DbKc4k4/fqBRY0h
uvzlJX/JNzkVtHivbj6twowdflojvt8q2N/mM9tmwzofHX9ZoYMUK8QhhdHRGJUOxOPaJMYRnfh4
uDwctSC9yRzmR4g/yFmEZVxfiOfgV94Wz/KiLfNOxmos36wp81xX/fIe0xsxQOO1FELI5XdIsnsC
1rzaX3uIC7qhstet8mEjRClcHZN89TupV5tNpMKw3OQsYJsimbcwP0EOzDdbL1QjUIF+Mvi+/yFB
OrzrUZ3DAu5StiT+w7Hyv/NaNRVMuvz679iVexUiplLmCaEZiOXmUYA39iNxzPCd02C1xqBVksdI
1w85bygO4XOLX3gpHs5omr3UBFxnHfOp8bKaNbub6Jsv2enxjuvZWNz3f+cxG7RkHWPdB8Nznd2J
rocTmmu9LdQ+8XziOksDSd4tpeozmlYEM7/JOVF2tnZ4N5iFnpNzziLL1NZRQsl3Pt193ydlcAqn
9C8P0nirmFD5C4t5xrpGbCqQAJE+U+ZIacVH75jGKszRTHQZspoPcthDSl5vLAm+ZVHniSX0PT+z
z0Swvtd3QRu/lInubZZdv2IrE/bwWuM6bBb56Dxz+m8/tMt3Pgup3CCBjku2O+vPZCZB94Rv3mX4
JFr1VNmBgLBKZroA7cbZpZwq2Mm0ZmvxcOZffX1CJz9OjKRDlc/9duWrYqzfmb9jqO43vWf/9b9T
qPuIgy7PEA2vM00uw8Gii0P0Bn9Tr4i3zefvPP2tImRdzyyUWr1bjd3tAhNdV8xu76t8ZCIPoW4f
C2KHh3vCJDOIwYguNwxetwvmT9ZnMIz1lgTxxwjG2JYb+OrXl6+Hbt7LrgvhMPrEj/cj7+TMK+Hy
lDHxU21bjy8T6fCd91DNmVn+5YM3wMc/MMNNqmS2PmMFoPU1M/fnjs9xaaia9OQmsY6foe6bizXK
0DQblj40E/Wr4BqpFf3EZHuYsq4T70aqlvz508Mm6L1bIqqWwjFG1fZe00JWjpAsw9ffvDNLiocB
vwQFv8n4qOddP2RwUbwNM/R7FUw//3LC/wEAAP//7NrNkqIwEAfwd9krWwWCknB0AJHvj0GEvY0K
iggokgRTNe++FcYn2Kq95QG4dFHd6X//Yhkt2b7D7hcu0Fb3OFxVMjYmqbAhfL0yJ/qY71/C4Ddw
iZCCRlmvjKEtsgJ6H8EQiupQDMTt86/3/REsDONn/rF+ixLWb56s3wBRKIPIl2BHMZVfvtb5RAzF
ixGDKUk+a00qFj2Osoqk0614fAJ1E+TYgNMZ9PTwKKGd9Xq4Z/cbJEgbHd7a+Ix98dakY0BJA3en
k4mAMJSAWm5nqXO+0UhQSknydSngbXQNvJkq80DuySDAP2tnjU27Vg3C5oHG9jccxu0Exi5zd6C3
Sjrnw3R8Akzm/CKcfIW88+pC0a/YH93tFRf+4Z2fhp/LJcX9Kvfn/QsJLQ3BE2sygdK+xQhs7yEl
Tn93IelFbX7vHqbjw4HwGDgC1qmbe72CRV3dLdcKtrL67pGXYveA7WeRF4zyYZL1wta6cxWh3FU6
j5a4PoL5PbiXYWIobJ6tErG1IktUJDAk17IGEc7y+X8daLOubLWLzB3S2rIcnhA1NZzr9YrKjpLL
vnehY45dtI3zdboYQve4mvdB80vb0ykRnRbosiBgj5aUzvdBIGfZE5ud5Q8Tqx8E8f6CnUfie4rn
7FoYCOkmXHpaeSCFf3nAh79J2f38BEahUG2YVpGO1PpYXVl9EbzpbYK3vdxcp+HVvPNDl+WlP9//
mlXA9+9/EAULLgq4KOCigIsCLgq4KOCigIsCLgq4KOCigIsCLgq4KOCigIuC/ywK/gIAAP//7NxB
CoAgFIThvaeQtxfEpXeJEHy0KFPyBW26e5gR3aDNbGczF/j4IQogCiAKIAogCiAKIAogCiAKIAog
CiAKIAogCiAK/hYFSuvhbhakHHlpMED4EPNSARNiMNa6HjbYa5iY/CMQqGw5FRklz7xW8tp1akCS
JSyfWbWjU10AAAD//wMAimBSZTBBAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c08c68254e-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=2fVOE5YMZtczL5SufxrPA1d6GEdo1LWmgfzIipPl.1g-1712872371-1.0.1.1-KGXYZS0rOEcsMBKpoVwamhRa.2117ncdV9N2mUOwfAucYYbiB74xQWsxN97CMz.YFBjGmoL_CMFYB6Gm4_Qrgg;
path=/; expires=Thu, 11-Apr-24 22:22:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=rv8w90yMYXGI.ly_TuaNH01WH5YU_8oFbQ81fuwOfFk-1712872371365-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-ada-002
openai-organization:
- langchain
openai-processing-ms:
- '23'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9996'
x-ratelimit-remaining-tokens:
- '9999912'
x-ratelimit-reset-requests:
- 18ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_be77e3e33ef16e8dd0cac4a4bfc6f601
status:
code: 200
message: OK
- request:
body: '{"input": [[9642], [9642]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SZSxOqPLet+/tXrHq7nCoRgYSvx03kmiCI4mmBIgIickmA7D9/StdbZ9fuWKVG
wTjnHM8Y+e//+vPnny6vi9v0z3/+/POqxumf//N97Z5N2T//+fN//+vPnz9//vv3+L9WFm1e3O/V
u/wt/71Zve/F8s9//vD//5X/WfSfP/9II5TILX+mwzyJsQ23Yq3SoPF3wzKJ1waGjuRS99Uvw3or
mQmvwban5l1/xbMmlKJCnURDi3Pbg9lgKgdPlCpo1X1U81f/IQK4RitGm4h5i/xYdSWMkIm1JHh4
S7nNOBinyYNae11h5J2os3LB1kSt3bONmSrJqfzh0g7rQup485mvz/B1cB9EesnneqnON15O7psN
4ZxrC8ZT/KmAKZ9faNWXms2N+mrgRkU+mj3uCNazJbnQjIwRCanC2LI/ORHwNveR7stgMJhA5RMU
X22O8Zr0Bj1mESfPdFtSdWxHY0wLuQQ2vxpo9zTNeJ60soKrhEasPqQ3m+cQj4DToIvveGLefLH4
CHLbl0Kk81kwCDdoN+h2SkDkpNIZX7R1Ax2TvgncH3RG33PcwXlq30QRD8GwXIs3guBaKFRDr0/e
DXv7BiIrR0jJpn3MHLluQUEEg0gHcDXWNOlNeCfhiP3UIt6MvLkQnYMOCVjlVz3rDQrB63pNsXEs
e4PYA+9DraIp1ZSujOfFXyogn7sJLUNef/djPUH2QCFh+8spnw/clYPBy/5QD2/u8SBQ+QypTAHW
q3vsLXrjEzi9sxkHe/Ncs+f1LSuBdsvIcrhc8nnjpz6oBOij5Hy+5YvnPAvwOtgPfO+PLza/E0+E
r/umJfPkPeOlOB1tKAbxk+qbHfCIPlkEfv8PtDuWBttZ4tmG0NVP2HqnYT0f64aDenB0sSNtx2Fx
ZTLCzzbyqbnR9EHYu2sKBt5wcKCjpzdnjkag7WmMDGdzYCR5TSUsP/qKjvlOryfuqYyyELICSWv3
HFi5tUw4FJZKpLhUa7a9QBMoA3xho1qNfHbkVQWFzT/wni1D3u01ift7PeQNE5gtV6kg51ktRsZJ
jNdml5hAl8IYcTa/DJPIiTZ8PwwR+5DY3i7+eCK8+CxFYkVDxsrDQwf7yd5idBcHb+104sKjEi7U
Hnq+Xg9bp4RFkE1Yv3mjN+tT3IDXk0Q4GLJPvkTcjocvpOtUL2kWz6kcjjCsvA77vmR4y6tn7kYV
kUZx+ahqpr4/FViVVcY+0q16e6zrFHBP08MW0aZ82Xk5AVs1rKmTTUNN6MFRJbW5n6nqhpI3tOk9
A+nJ2VG8Jq63bGYxk6M4PKOTu7W95cXbN/jWEkTt7dPI50nMbZjlkYNWzfCMmVOSAsKPnaBXsiJv
aXbnHq4xtjFSmiweNaGUobBPbZxWNASkH5UIsFUscUpEq56//QoPA0QIts8eTMctDMF1G74onq6b
nNKr1UD/srZYax9pPD8y1VSu9Kiirb/rc9qoFxtK+vVAndXwY5YJ/Ql864vk8WrWO+65NjL3tDyq
8sqzXoP+SYCNuzt17yfD23JKLsPXXWmxc206g6nSmikmJzIaXB5xPA/IV4FwBTL69XsvjjH3d74G
gv7wOjxnPsz3rMTWQ6+8ueQ+AoyPZYrRha+HtdNbF+JaaqmnJNUwQVfz4XCSbIrRkcvXb73LwVxv
SAJqb2DwlVVwp522+LQfzXpNNG+FR95OkLQaY7zW5FqBI1Yptep2jnseyRbIOC4leTAJ8XgYNRNe
7/oBSQZUDeFTLyO8BrueetzAsTkuAxVKAS9Si7R6TR+HboU97HuivJLcY8C0OPBb7/tSbczVYxhh
BgeDlGPrewze9gj2rEQI0AOX04ZYCI7axFPbU7iY0Stq5I1z6ugV1K98FvTjCK/6VUKror0ANde1
AnmYxJMolbeYZDbPw55VCLs3GrCXEzipfDqdu9/35eRz7AQwNzlF/KXyPdrk2zPM5MylnlTrucA9
eRe4S3ig6BknYNGFewu0y0lHbJjexhR9whlKz6eNZueQDH/7rSGyRoPnWfe+9WRB/OwnjE9nn81X
2eJgsLtWVA98FRBOyc7gqcUh6vneAGM21cWvHn/vs93TVAVlIYeKwNjrjUU4PXVg1EuPrfvpE8/y
R5VhZzoF1cK9PoyZ44xw4oIddtXiE6/H7TrKn5oX8OWwbxl9PasRfqwNwTqlYzzcPoopD4Wp4r37
qo11nUQEttd0g57tQ8wJWDYtOIaFhbFUbY2xaIcGfvcDJx6x6uVyNVpYPuQEH+goAbpRsCo7aRH9
9Coe23QQ4PNTT3hfrmXMsFfpUAiXgtr5swBjwFYbtqhSfv8fWy/KMMvz4W1gPHF8PXVzsYJsCSLC
D70M1sYhKsj3S4mmJPDzNV3UUOHgo8S26KrGzg5vBQibQiG/+mFYP43gJuxWtGe+YKxDeZ3B937Q
Dl45g8jn2oYKhwtsCv3kTZN29wGl5xd2DpeezZxyL0D3VFvsLYtuMHB+8PDZKA+0euOhnrRcc0H3
ankk26EzMNd4I7izUYr1LX7VJCodGYpWkSGOOfeYsM3uBK3D2OGDQ4x6pTw5QeWdXrDO93O+rlGk
w1JdE+yUgQkmvj/ykLbIwf69PXjsRJboN//InA6CNx2qFQHVsG4Yo2ORTzIIEYxXKaLqQzowKtD1
DKwh21Gkz723LnI+wvWWqRgv8wmsyU7Ifv1M/XdP4pHl1Q30kSViM54HNjbq1EBp5CTq7xACPz6A
qu+L+GC/SmMuRFhA8+k7GOneiU2fmoUg6VqEv/vt0dezH8EhDWpsdyzNl/lhC/DIuwm1N69+WJlj
Eoj1asQ/vmRMWBEYEKiRYgWZMdw+W1OuXMWgxZZpBkvi2ITN6cioq+mXgQiS5su/eSwzkoJ1SSSo
PCbjg53L41rP8scWYaAVGRHxvB/WwtMj2F4+GZEaX2HjveQQvIVijziYC4w0LxNBsY4tbKr+MNBg
Dzs4N1eKSMsI+Omv7Hhvl+73+95j73dqwpo79diTxxdbz3znQ84MDKxzZFfPH7tMoYgbAds8JB67
l3QFgsOdsU9vpJ722pzCKtub1IK3Pl6Fe63Kt9spRzwRfTYXp2gEQ3iScSCOOmNqcfJhoXGQqjvc
1XMmiOvvOdmwYqopm0ZRrm/gRPj3oLFZtxcBHgt7RuCzeYKxvHJn+f0+rdS2KwaYOGYZVMqSp6jF
ar1MblUpq+SPiJ3AibEALjp8FvYF+ztmetvBVFx4eszXLx/ow85y3yborBvDvhW2+TzFUgaf4uhi
T7xeGBN0L4Tba7bBwVnmvJE5Bx5KaTZg/Sne2TqYrFWIVS1E3qLVI6lsCJBbNIpVP3nXbNdOOjgW
7oxkI9nXMzDvKTQjbcSHZeniRZ0l7lfPdP/V63F/cnk4GPcVKf3xBZaSO5fgrV0Qdh4C9chsbSKw
D4+EtMtN/PZ74gMDmjXazZ3JmGOIHIxHaUI1XEpGsqk8A5WCA3qFH7cmv37dlYXwtz9YeZU64JsG
QOxmP9j046GfvljvdK5ZGY5n0N+rD9mAbe2xnYd4kF+QQDVMRsBqtRQhEI4fqttbL17hbuTkURWP
Xx5Xc54Okwyo4ExU/aSy8f28ANH209Af3zKeRSHYcLcALRaKwYpPs/yXLwpH2Ro0i9R/57cx2Ee2
3C/0/OMNIsNb4zEmyEjaqyeX8J+UgeVQyT5oOrXA+iW0jR2bGlGZWeN/+UP35sNV9GF46I70bEiZ
8fUDEL5OWxeJWtKBEb46DsryGSIg1Xq8a/LtCY5r7JFJR5ox3y5yD3FXhVhNwiEn12LqoR8Cg0i7
mfOWG9tX8ID3Fj0UmwSw3elmg2//UMQRE8xinZfwWy/Y9K7nmG3wg4Oehges0nuXrzvd5KGtgQtq
N3FbTxG3ESB/OmaIc5TEWxoSW3Bn+yk2atCyybg8Eaj79waBTzbWy0V5naHPvyPsVQsGs3u2KqjY
0Rkj5txzBhZBl5vFBVRXXidv/erjXz40jqXrseRVc1ApKx7v8+hgTI48F4qk7te/PDgtCSihlugT
md9DZyww9mcozi4izEQhYCWXZ/Kvv4JZroaFqe4Ia2m9ITF/FoyeMXAhIsINVbylG2y6iRGM5/5K
9fDtDazQWAaOjSKSVZ8h6M2y9sHHiQ26TwKRjXm0j6AXZja17JACxiJrlP1QMsjKkUvN9HonwG8/
YANFovdp8Yn78QI1zfpptOL2E4Lv7yVw82rylW02J5i7CaGHYVLZejUON5gW6x7bSlwPLKzLCJwi
LaaWIcnGWp3jCErP2iabq7BnM8JWD4Asxtj91uP66qUWeifujJYn6L/6JiPox36KneoxsKUMJQS/
fpc6a6cN/GeT6z//QNivX0Fw50GJig1Vf/roEQP9/D625c+DjT//rF5NlcxfXp/pMIkwuSsbqtVm
UM9NxN3gbz5r7y4HTLg/b/DR3XPquTsVfP2WDVW4YjSI3OTNZe31EApblR6scqmHjTTzkPbzhT7u
24ItzlLbcDX3B2xfqhdgPBK/eYE/4u+8HbbB8w5hg1cVW5S23u9+wO59viJ+cnlj7jyhgUHWCl8e
rWLiLJ8WaOIIsXYpJ4/ySCzhzuhdxKv+UFPZDIlScMsba+XaDp9EC84/Hse5misG1Y+Ek1j/blEb
Sf7X70sVFO0ppki44JzB2Onhl2+p/hr23hiXexVWSSlg/S6ZQNAbw/3p5U8/wVqOpgWd9BahLaiH
etnMcwqxyWvY1osxp2LI+/D+skOyVndmMKnem1BWPw619FkYCHgQAkmPVfz1r8N6u0wr7C6yQ96r
PLCZOZgHvWjd8D5z6nrNbCjI28uhR0LYcvmSLo4gZeoHYOMEeLa2OM6k+213pobJWo8ex4H/e/9e
scHDfHqtGbzWO4z9h0gMsvhNL8mwOwazvC+80bpbKbwem+/nUcgGccxSOYaWi7Z59DZYkAaltL3s
e/SYro/8L5/d1rhB/Xmw8jVxDyeYKqJF47kbjCl5va2fn0DzYEVg5qyKh4lTBIi7i54xZZt1hmaO
GmxP1DCY+9hn8HBJVfrlK4+pvnGGj1oK0Apv8/DlXx04ekEQx+/tfNWzrQCWiAQUz0btrc7Zy+RP
sDtiW/X38cTDnsgdfmFMNlFsLE0EZfCAvoVv4WkCdLrNEXBU74o1thyH5efv//K5OrOa/vT919/+
2yqNH39DT0hCajHy8MinXghUh7NEg70pDGR6bTO4oWefal//MN72zxWOIH/j4DPOX7/gNeB4bSps
Dr3MFktreyiazp66xd0a1k2/GeUhPMvfed+weWr1EGrDlkfl9okZc5dzCKVHiKge6kL+GT+FDL75
FVGC3Pe+/dLK7inbUG+VX8M6IFWGTSresR7q53geLtczlEXToF+9iFlyO3fwWm8xEiN9BOx0K1Pl
10/L2hFjGkzFln7rvbccx+smJQIognTCPhHVXDCgk8J99DaoZ5VDTKm1HeEo2YTqKGnyUTXMQkk1
kcP+RsXG+qv3N/ETbEWni7HySDah9mkBAVbp5Stn3SHYRs8b2rm7g7dk+VFXfvthmrXmbYM0S2Gq
yBb1szTxZnvrd5KjOtdv3oe87qzMvVJvfUTdTXSN6a2UKjlSBPvrj0pjlsF1BTSQMP76dTA4S6dC
SeI1bGgq93e+yqgPEX7o5csbX1bEQQNaNbW65ymnupAVstNPHrVV/xUvnxqEILm4ItYOcKzXABul
dNFSg5z8RIvZNx+BEfkM//qPQzX3kMXzA+PW5+PpzH9W+P70B8I7w32g6jxZ4McXoXeFNcNer4M7
iUaM9XLvscWvLXjeZzK64DzJWcnFKfjyMzbR246/PNbDb/0TWYA9oHZ4u4E15DDi6hcE82hyI9QF
yCHlKYc5P14OLWwrHHz9iDswuZNvoMBEp/t7Q2JWeDYB3/lG45u9AfO7uCL4XPg31iz28ZZvvgPM
3G9wOuwf+TdfjWBUBzK1+N71wKRFpiKgqkGbZ2d5a5CmPFR8vqHOLHds5vdSD2WHvQjMn2JN9KMc
QmKVC/VHzfS2G+tRgWSyb3hfm+94nLSugqKndtjZmxBMiBEerJk1YHfL3jk7XD8z/M4rit6p6c0W
vUR/867v9xnbrghEcNCyG5HeRuYtahKvUOIyjPrmiYzFijclGJURIoAir6bvd2r98kgC/GZla9WN
ruzqmw9iVUC8EcahBRafdGRplk3Nrkkxg1vxOpEfn82b5y6Ca1LdkfJKgLd0RSADxSA2WXWf1Gzn
WQK4A+VCPbB14rkcQwibuggJ+OrV+DnWHEzXW4K4XbmCRmhBAdumqijONk7OMhtEYLmEOb276jcv
bitVcnZNTE8braqXHz8w7nTEdmqdYlYeLjp8D0BF2zLwvDHoRwFI0JKp4atHg0/ixyyuJWDkqwcG
0abw9peX3C075IRTopN8+PQS1Y6oNWb0fFTw9FivNMimfT5b7rYCiRBJ2MKFNpAIlBaM5r2OtTO4
sDm5vSy5wKNOLeqGgL3wlYfbqL5hx0LvvC/a8qZs80FEciQ+jCVzbFv+8h5VG94D873kfJiLnY+z
ZwyNdXe6jzAqyIGIzrDLpwMXpvBu6fg7v+N6dziEMpy2h5x69FDEawDXFoqe3tH9RtY8GZJPBr/n
P9hLp66e5dUv4FPItth9WUb+N6+VU04n26/+zkN5kmFIZUIdztc9vjy8CujM44wt3e9y4u32CCqV
4FA8jFPOGoJ8+HlEb7TlkytYVifVIaqzgbpEexvNnj518PWX9PLqonrVNs/T3zzmALZ0oHT4INju
sydWVX8Ba632hTzGjotta6sBwXmkRG7DZYcRdZucYeloKknXIOo7Vs4YDQ1d2Ycxwd98evieJ2Xw
4i/pXx4kUSDpsHI3BnW0fQ3oUgAOhupKqCWkFZudcxrJcA1XrIowq9kk+iNM8btDAucaBrFaJIDv
9al5xZzxvb8bOLiliFXnsB3GHd3p8ATfe1T7zSafrTYn/+qhWXb5yqViAzgyb+nxqrbxir2hhV+/
S9GFN+qlMj0OIBmvZAOVB6O3cqngUSQ13fPJlX3nawsH8XWhOJ2qfB2DnSvzkfqk7pGCejyMjvlX
/y6+6gIGVXGF4fS+kvg2JQbZJGEH3UO9w06Qr988vZMBMO5X6gu9Ouzm4XGDC9lX1Oyfu3ymPPNh
3b822PSTZ0wF3YugFt4eCDrD0Vs/2ZjBaa4D7EUfzZsjU2zgd359+Xoa1pNo29CfZhe70Wlmg5g5
Jby1IsJuqgT1/NaBCr95D1GsleZfPugg/LgJ1ey4ilfjM1cQKmNN9dN1YGtUarIitEzHxvkz1WNz
M2YRNs2Bpi9FB+POu4dyRT4RDpIlGwb+qaVyydrfPGy80XnEvGxIDCFQBc+aFKJ0hvHWf//1O6sg
OQiiNyehDs+vej2OUwZvknOgmvqsvOWnX5YfCkT8+p3v+YULFOkTIukhUGPhUxvCdU0crP3Ov7jB
b6BIyI5Mgv4whjZNUuhpwYA28pAOs9tdsn/PH8HWMP7q33fekuN33ozfeQM2XBFgn4dvRpmw+srb
nzdo8zRCsByPUaXw6bajOHnM8fJK+wjI++BCDbiUoGN5X0A76XR0/p7fEI7f6/DVhiX1N68mngI2
N/B0v5sEcEMBmOW+LfmXbzQ85OP5mD1T+Jpcg+6Xh5nPn+PAwavqqNS0K9mYv3qgfP0bRWG7gOmd
uCfQWQX75cNsGgGdf/kFWvzd/G9ene70mvqTe6hp6uf/5qcoEkVGO+ni//wX4VqGwEgVYYb8uaUE
HD6IzU73ceHcbZQf7+bLrXcgvAUOR3XmXrxuRze6fBLVHbWS6uPN687uwNefYS+YhHwR9NRW3uUD
k4u7e3usoNUN/HjwLMCjsfvqmXTctBa2NjseDMe6qACmyeVXrwNr1Ictv7F5IkpbFMMISVPB336t
uHiz+XnuXOiY0xsfwosabwfk3qSfHzQz5cyW48ZpgS5wHPVYwdjvfBAISTLS/wcAAP//cs1z9y2q
AIWfhXlgeEaZV2GQr4+xj1doroWfdoibv4mPZWpSeaRvRqFFoa9bCGj+PMW8RDvSzNMiJC3AudQs
MzktCxS+pRY5zrlBZR75RtlZFUWV2dDxQ2/QeClEvxJ4VUCtDhkrCgxHVxSMrigYXVEwuqJgdEXB
6IqC0RUFoysKRlcUjK4oGF1RMLqiYHRFweiKgtEVBRSsKAAAAAD//+zcQQqAIBSE4b2nkLcXxKV3
iRB8tChT8gVtunuYEV0hmO1s5gIfP0QBRAFEAUQBRAFEAUQBRAFEAUQBRAFEAUQBRAFEAUTBH0SB
0nq4mwUpR14aDBA+xLxUwIQYjLWuhw32GiYm/wgEKltORUbJM6+VvHadGpBkCctnVu3oVBcAAAD/
/wMAimBSZTBBAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c09c1415a4-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=.BDNmN1yTewmv2k5bcIIWnuaGOLSo_VQuMVdbkfADXQ-1712872371-1.0.1.1-u6aSK1zNI7dIiBslNgn6x3dcZ5NnrknbDjvI3zbb9o.HZbNSjpgDnOtuO6L0w2LHXNYz42N.frX6vMKRqRmpVA;
path=/; expires=Thu, 11-Apr-24 22:22:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=T4sDDTq3o1.X3WdyXU3mpzCYUwWgpTvhpokK_lU3iRk-1712872371367-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-ada-002
openai-organization:
- langchain
openai-processing-ms:
- '19'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9992'
x-ratelimit-remaining-tokens:
- '9999517'
x-ratelimit-reset-requests:
- 44ms
x-ratelimit-reset-tokens:
- 2ms
x-request-id:
- req_ce4e43fe6b01063fcfa60447897ba9ca
status:
code: 200
message: OK
- request:
body: '{"input": [[9642], [2822]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SZyRKqzJaF5/UUJ/4pFSEikJs7oxPpzESwwRqBIgJ2NJlA1stX6LlRFTUxQkka
Ye+1vrX57//48+efd14Xl+Gff/3551H1wz//+f3tmg3ZP//681//8efPnz///fv8fyuLZ15cr9Wr
/C3/baxe12L6519/xP/95f8W/evPP0oPCr3k97QbBzlxYSnXOts24aqbBvncQOQpPvMf7dTNl5Lb
cN4uW2ZfzUcyGlIpa8w7GHjyLms0WlwXYM+YhmczxLV4Dm8ygjmeCV7EPJjU22xqUYxtYhy2t2Aq
l5kASXq4MWdtapy+DvqonYgzMGd1fyZcV9RU/Qjpm5hS6gXjUayP8Nj4N6o81GM9VceLqB6uiwUV
vPMT9fvkUyFbPT7wbE41Hxv90cBCxyEeA2GH5qOj+GDHVo+lVON8Wu+9GAWLa8/W5bazuMTUPciP
Z07IfGgttstiQR3ZsmR6/+ytPi3UErnibOHV3baTcTDKCmYF90S/KS8+jhHpkWCAT65k4MF4csQY
hOVDo8rxKFlU6IwL+G9tS9VDZXKxeNYNeDZ7UVhvTM5eY/KGcXi+qCZvtt10Ll4Y0LnQmIEfn/zd
rd0Lip0cYy0b1gn31PqJCipZVNmgszWnh9aGK416EqYODUYcjIXsbUygaFYf9Wg2OEKP8zkl1q5s
Lep2YghGxVJmaO8yGadwqpB6fA946vL6ez/mPfAbjihfn/b5uBHOAmwf7ocFZHFNOompR2AqQ8Ss
rkkwmU1IYXhlI9mu7WPN7+eXqm2NS0anzemUj4swDVElQYgPx+MlnwLvXqDHxr2Ra7t78PF1CGR4
XBdPOg7BPZmK/c4FeZvcmblYoYCag0Ph+zzwaldafOXIRxfAN/fEeaVRPe7qRgBzu/OJpyz7bvJV
2sNnGYfMXhhmJ639OUWdaHlka+J7MGaeQcENDE67o91xengMJZQfc8a7fGXWg3DXelWKeIGV+X3v
eLl0bOgKR6dKUuo1X57ARloHD2JVs5WPnjrrqHDFG1nzqcvfa0MR/p4PB92ARsfXKhAC50mwtZeT
uVkdbGQqUYIFV5y6QRZkF143SyYhUDdYJZ9AhlPIUyxXLOK83NxMtB7cJcFXuQvmt0l92GnRxNyu
Fet5s/RKKLbZQMxL0AejOSQNetxpTLZd9smnWFiJ8MCmycySZcmYqlEPURW8SRgqVjA9Wu4vdBkb
jJS3qub661OhWZtVEmLTqZe7uk6RcLcD4lBjyKdVkFO01KOaednQ1ZRtPF3Rm+uR6X6kBN0zvWYo
3XsrRuaDH0yLUc7UOImOeO8v3WB6iO4FXsYBM3d5t/JxkHMXsjz28GxYgTUK2qEA+LgH/DjMOJia
1bGFOSEuwVqTJb0hlSpI69QlacUiRNteixGf5ZKkVHbq8duvsOkAY3jeWzTslhCh8zJ6MDKcFzlj
Z6eB8DQ/ifG8pcl4y3RbO7Odjpfhqs1Zo59cUMzzhnmzFSY8k9o9+tYXzZPZrlfCfW5U4e4ETBe1
ez1v2ztFLnlfmX/dW8FS0HIVHlftSbxz87a4rsyZZgsyZ9vTLUnGDoc6ks5Ixb9+b+U+Ef7q61Yy
b8GbjFkI+ZqXxLmZVTCWwkeCZFemBJ/Eupvf5tMHUitPFmiHqhvAN0Lo9orLCN4J+fytd3U71gt6
QHXQcXhkFayM/ZLs171dzwcjmGEnugeszFafzDU9V2hHdMac+jkmrYhVB2WCkNJ8O0hJv+kNG85X
c4MVC3RL+tRTD+ftqmWB0Al8TMqtDspWlJlDn2bNbpv3DC20LdUehzzgyHYE9FsfhkptjdWt6yGD
zqJl/wwDDpc1hpaXGCO2EXLWUAdDbwwicwNNSDg740ZdePs3O6P6kY+SuevhbJ4VPGvGAzF7niuU
R4dkkJXyktDMFUVoeYWJf2Fb/vC2Xqru98f373g5/ezeEhqbnGHxVIUBa/LlETI181mg1GYuCXfR
R/4UbRi+Jwc0mdL1iYzT3sS8G17WEH+iEZT73cWjtzl0f/utoarBtvejGXzryQFybwdC9seQj2fV
EWC7OlfM3IY6ooKWHdHdSCLciq2F+myoi189/rbz1d3WJW2im4pCErTWJO3vJrLqqSXOdf9JRvWj
q/C2vYIZ0drs+szzehiE7Yr4evFJ5t1y7tVPLUrktFk/OXvcqx4+zoISk7E+6S4fzVa7wtbJ2n/U
1jwPMkbLc7rA9+dNzimaFk+0iwqHEKVaWn3x7Br43g9yCKhTT6ez9YTyph7IhvUKYguN6KqXFvHP
r5L+mXYS3D/1QNblXCacBJUJUjQVzM3vBeq3fHbhiSvt9/z4fNK6UR03L4uQQRDr4T0WM8qmbUzF
rlXR3HhUR/l6KvFw2Ib5nE56pAlwK4kr+7q1cqNLgaKm0Oivfjgx9z26SKsZr3koWXNXnkf0vR68
grNgUfVYu6AJpCC21A7BMBjXEDF2fBBvc2r5KGjXAr3v+pME02RaHB1vItwb7YbnoN/Ug5EbPno/
niJW3cjruG+9MKxcnBJzSR41jUtPBdkpMixw75pQvljtwdn0b7LxqFXPTKR70F7piZhiO+bzHMcm
lPp8IF65tdEgtjsR2BN7JLw+NwHf0yn+6R8d004Khk01Y6RbzoUQvCvyQUURhmRWYqbflA1nEpuP
yOmyFcPm2AbzpOY9zJdMJ2Qa92g+rKTs188sfLU06XleXVAbOzKxk7HjfaMPDSi9oLBwhTH68QHo
YSiTjfsorbGQoQD7HnoEm8GeD5+aR+jwfmLyvd8Be9zbHm3SbU3cN0/zaby5EuxE/8DcxaPtZu7Z
FIhZ9eTHl5xLM0YdRjXWnG1mdZfP0lYrX7NYseSGxQ9JYkOz33HmG+apo5JihOpPj1VOUzRPBwW0
22B9iHe6netR/bgybI0iozIZ191cBGYMz9Mno0oTary/lgKGSyS3WIBc4rR52BjkOnGIrYddx7Zr
eMPYnBmmT07Rz39VL3j5bL1etwF/vVIbamHfkkDtH3w+iu8QBHtrEVOgq3r8uGUKMmkk4opAA34t
2YwkTziSkF1oPayNMYUqW9vMgUubzNK11tXLZZ9jkcohH4t93KMu2qtkK/cm53qxD6EwBGD6irzr
MZPk+fedLngx1IwPvazWF7Sn4qsz+Gi6kwS7wh0x+izuqC/PwlF9vfYzc92KIy73WQZaWYoMP4le
T4NfVdqshD3me7TnfAuTCffCPZFwxe1g2dmaD/vbeP7ygdmtHP9lo7dz4SR0omc+DomSwV3ufRLI
5xPnkhlEsDxnC7I9qkLQc28jgpJmHTHv8pXPnc2fGnWqiapLPAc0VS0JhMlgRA8Pr5qvnoOJdoU/
YtU6rOsR2dcU7NjoyWaa3smkj4rwq2e2/vp1v977InTWdcZau3ugqRSOJXoZJ0y8m8QCOjqLGK2j
HaXP6SJ/+/0QIgvsGq/Gt825Z8kCJL0y4BqmktNsKI9IZ2iDH9HHr+mvX1dlIf3tD16elTcKbQth
fnFvfPjx0M9fnFc61ryM+iNqr9WHLtCyDvgqwCLKT1hiBqE94rVeyoCk3YeZ7jJIZlj1gtrr8u7L
43ousm5QEZO8gemfVLW++0uAl5+G/fiWizyO0EK4bPHk4ATNZD+qf/mi8LSlxbJY/7d+W52749P1
xI4/3qAqXJqAc0nFylrf+1T8pBxNm0oNUfPWC2KeItda8aGRtZE34Zc/zGDcnOUQos17x46Wklnf
PADw2C99LBuHN+rh8RZAVY+AkVKbyarJl3vo5ySgg4kNa7yc1BbIu4qIfoi6nJ6LoYUwQhZVVqMQ
TBe+rmBD1g7bFIsD4qv9xUXf/mFYoDYa5Tov4VsvxA7Ox4QvyE2AwCAd0dn1nc8r0xbBNdAJPxfJ
sx5iYSGBuN9lWPC0QzA1NHFg5YYpsWr05IN1umNUt68FRp+sr6eT9jhCKL5iElQTQaN/dCrQ3PhI
MPeuOUeTZKrN5CNmao99MH/98S8fWrvSD/jhUQuglZVI1nm8sQZPHQtN0dfzXx4cpgMqwTiYAx1f
3duaIAlHkEcfU27jCPFSyDP111/bUa26iet+D7UyX7Cc3wvOjgT5gKl0wZXomBYfLnIMydiemRm9
go4XBs/QrtFkOpsjoNYu6xB9vMRi68NW5n0er2MIosxljhsxxHns9GoYKRadBXqquVmvJPj2A7Fw
LAefJ9kLP15gtl3frae8/ETo+38pLB5NPvPFYg+5f6Bs0w06n8/W5gJpMa+JqyV1x6O6jNE+NhLm
WIpqzdUxiUG51y5dnKU1HzFxWoRUOSH+tx7nR6s8IdgLRzzdUfv1NxVDmIQp8apbx6cyUjB88y7z
5rfRiZ9Fbv7yA+W/fkXbq4hKXCyY/vPHgFr4l/eJq35uvP/lZ/1s63T88vrIukGGw1VbMKO2t/XY
xMIFfvpsvN454tL1foHb+5qzwF/p6Ju3XNBhJriThSEYyzpoAaSlzjZOOdXdQhlFYO14YrfrsuCT
N9UuzPZ6Q9xT9UBcxPJ3XhD25Ku33XJ7vwI0ZNaJw9gz+F0PWr2OZywOvmiN70BqYJs9pS+PVgn1
ps8TGXIPxDiVQ8BELJewslofi3rY1Uy1I6oVwvQiRjk/u8/B2B5/PE5yPdcsZu6ooPD29cTPWAm/
eV+pQHaHhGHpRHIOidfCl2+Z+ejWQZ+Uax2qQykR86rYSDIby//55c8/0Vz2tgNeeonxEtVdPS3G
MQViiwZxzaLPmRyJIVwfbkTn6sotrtRrG1T94zHHHKWOohulQFuik29+7ebLaZjhfVI9+prVjo/c
IyJqZedC1plX13PmgqQuT5sWS9FTyKd08iQl0z+IWHsk8vlJkky5XlZHZtn8GbBd34l/rz8oFqQb
9485g3O9IiS8ydSiU9i0igrv3XZU10XQO1cnhfOu+e6PI97JfZaqCTg+Xubxy+LbdFsqy9O6xbfh
fMv/8tllThrcHjsnnw/+Zg+pJjssGd+dNRweL+eXJ/DYOTEaBacS4eAVWyxc5cAassU8gp3jhrgD
syzu39YZbE6pzr58FXA9tI5wq5UtnuEydl/+NZFnFhQL4trNZzNbSmiK6ZaR0aqD2TsGmfrZrnbE
1cN1MojQUvVNHoTQRZxYUxODim4QOuQS7QfEhssYI08PzsTg066bfvn+L5/rI6/Zz99//R2+nNL6
8TcE0iFiDqe3gH7qiYLeHRW2XdtSR4fHMoMFO4bM+OaH/rK+z9Cj/EW2n3785oWgQbtzUxG7a1U+
OcazBdn21swvrk43L9pFr3bRUf3qfcPH4WlGYHRLEZfLO+Hcn44RKLcIMzMypfzTfwoVfedXVNvm
YfDtl6fq77MFC2b10c0d1lVoUvlKzMg8JmN3Oh9BlW2Lff0i4YfL8Q3nekmwHJs94vtLmWq/fprm
N7WGztZc5bc+eKlJMi9SKqFimw4kpLKeSxZ4Kazjl8UCp+wSxpxlD73iUmbiQ5P3umUXWmrIAgkX
OrHmX72/aHggTrw/WbOIVRuMzxNR5JRBPgvOFdAyvl/wyl9tginLd6b2ux+2XRvBcptmKaSa6rAw
Sw/B6C7Dt+Lp3vk778PB+6iNrVYvQ8z8RXxO2KVUKjXWJPebj0prVNF5RmyrEPLN66jzprcOiiIa
xDJ04a++qriNMLmZ5SPoH04sgAVOzZz3fZ8zU8oK1WuHgLl6+EimT40idDj5MjE20NfzllilcjJS
i+7Dg5Hw73wEYvrp/p0/NtXYAk/GGyHPUEyGo/iZ4fVpN1T0umvH9HFw0I8vouAMNSdBa6IrjXtC
zHId8CmsHTiuMxWfSH7IeSkkKfryM7Hxy02+PNbCt/6pKkGLmBtdLmiOBIKF+gFo7G2hB1MCAWt3
NcrF/rR5wrMi228e8TuuvtULKgg12fra0IQXgUvRV99YcnEXaHwVZwz3SXwRw+GfYPrOd5Cdhw1J
u/Ut/85XY4jrrcocsfUDNBixrUm4avDi/naCeZumImih2DBvVN98FNdKC6rHHxTyu1xTc6dGQJ1y
YmFv2MFy4dwqdBjcC1nX9ivpB+NdgRzob+KtbUAD5lREc+Z0xF/yV843588IX71i+JXaweiwU/x3
3vU9nrV8F1sZbYzsQpWXlQWTfkhmUISM4La5Y2tykkWJeq0HjHAc1Oz1Sp3fPJKisJn5XL17X/XN
xQfzakuDHpLIQVNI33RqpkXNz4diRJfisac/PhsX91UM86G6Yu1xQMH0LrYq0izq0tkMac1XgSOh
K9JOLEBLLxnLPgJo6iKi6OtX/WdXC5DOlwMWVuWMGumJCng2VcVItvBynrkoRtMpytnV17/z4mel
K96qSdh+YVT19OMHLux3xE2dfcLLzcmEV4d0vCy3QdBv215CCjgqs0J9Z4mH5DbKc4k4/fqBRY0h
uvzlJX/JNzkVtHivbj6twowdflojvt8q2N/mM9tmwzofHX9ZoYMUK8QhhdHRGJUOxOPaJMYRnfh4
uDwctSC9yRzmR4g/yFmEZVxfiOfgV94Wz/KiLfNOxmos36wp81xX/fIe0xsxQOO1FELI5XdIsnsC
1rzaX3uIC7qhstet8mEjRClcHZN89TupV5tNpMKw3OQsYJsimbcwP0EOzDdbL1QjUIF+Mvi+/yFB
OrzrUZ3DAu5StiT+w7Hyv/NaNRVMuvz679iVexUiplLmCaEZiOXmUYA39iNxzPCd02C1xqBVksdI
1w85bygO4XOLX3gpHs5omr3UBFxnHfOp8bKaNbub6Jsv2enxjuvZWNz3f+cxG7RkHWPdB8Nznd2J
rocTmmu9LdQ+8XziOksDSd4tpeozmlYEM7/JOVF2tnZ4N5iFnpNzziLL1NZRQsl3Pt193ydlcAqn
9C8P0nirmFD5C4t5xrpGbCqQAJE+U+ZIacVH75jGKszRTHQZspoPcthDSl5vLAm+ZVHniSX0PT+z
z0Swvtd3QRu/lInubZZdv2IrE/bwWuM6bBb56Dxz+m8/tMt3Pgup3CCBjku2O+vPZCZB94Rv3mX4
JFr1VNmBgLBKZroA7cbZpZwq2Mm0ZmvxcOZffX1CJz9OjKRDlc/9duWrYqzfmb9jqO43vWf/9b9T
qPuIgy7PEA2vM00uw8Gii0P0Bn9Tr4i3zefvPP2tImRdzyyUWr1bjd3tAhNdV8xu76t8ZCIPoW4f
C2KHh3vCJDOIwYguNwxetwvmT9ZnMIz1lgTxxwjG2JYb+OrXl6+Hbt7LrgvhMPrEj/cj7+TMK+Hy
lDHxU21bjy8T6fCd91DNmVn+5YM3wMc/MMNNqmS2PmMFoPU1M/fnjs9xaaia9OQmsY6foe6bizXK
0DQblj40E/Wr4BqpFf3EZHuYsq4T70aqlvz508Mm6L1bIqqWwjFG1fZe00JWjpAsw9ffvDNLiocB
vwQFv8n4qOddP2RwUbwNM/R7FUw//3LC6H8AAAD//5x8SZOCwLbm/v6KG3dLvwCZMnk7JpnNVEDE
iI4OcEBRRIZMICPef3+BdbujF73qZVWFUiTnfNM5KhJ59Tvr/CIAmvLdI+UuUmsWcg/CZTn62PjN
v7g+ekGZEImMonm3+iY/5jA0dj3i1T7vp6A9Ff+eP4KNZf3x34q35LDizbDiDeC52w5HAvwwysQl
0j7RxCP+Ye3BfDjET03INy3Fx/uUzO+8i4G63Z2oBecKtKzsbtA7tibK1vkN4YStCd/NvqIR/34l
445NL5herzYBXH8DzAk+jvrLN14CFJLpUDxy+B4Di27nu11O30PPwbPu69T2nqo1rXygrf6Non0z
g/FzDFLQOjf2y4fZOAA6/fILNEfS9O+8OpfMmkZj4NY0j8p/56colmVGW+UU/fwX4RqGwEA1cYJC
1lAC3C9ik99+Azi1vPbTu+V86XwILzufoyYLTmErUd5UU1mXqHN8fsNpkbwWrP4Mh7tRLGfRzD3t
U90xOQXSJ2Q3+ryAnx7MRHiwpJXPlAPfONjhJQH0h/r2BJgeT7967dlLv3vqB9sp0ZrbrR8geT3h
77wWfPuw6ZG1AfTt8YPd/UlPNj0KLsrPD9qFlrH5wPsNMEWOoyG7MfabDwLxeByo/XGifl7PD4J9
9qB+d4hCKfTTBu64ZIvkULuVUx49OthF22Sdn1/ByOWqB5M7Non6vNzr9XwJfJvNgbqt+Krnfnn9
Oz8M1rz07/X/+m0F/Nf/+P/YKNj8vzcKaiX4ICX3k5KcA0WH9bgAugUUh0tlDBdoaN+SWj1uwxnq
QQXGg1xS/FKe1vQahQnG73dBluvJrdtIqWK4jdIbdifBq0W89V+g5swb9rqKA7MVdw2UNQSxa5/j
sGH3VwN5lQVEss+xNadw8GCCm556J//L5tCCBXxLroFD9ewBJn8OjaZK4EI2d9UMJz5AhapsU0Ld
dsJsib5bGSbppcaRdais+SR7AxyAb2HrvesZHYoogMk5K8jmFm+tCQtSoBZqvqXl8FWTZfsuBCik
aYX3X7VPlo0QIKAE6E3N+Qksqo9TBMfz8iUyTJuSBU1rgsN5shA8yzSZSjIGQAYvHiMte4KhhdMC
wBifKRJFVHaVZ++BmekTEV3LtDbax88hSkVKCE5ouPQgzkH4mmPq7h9Wv7mqmwa+7GdCXRqF5fxt
Ow88Lq8PRel7C5aTWKZwA+onkgxNLLvOLWX4zu8ldg9HMyTcXS0AO+4Ewmp+ZMvG8GU4RImK+Gpr
9YSL1QmW3LajhqOJ1jQJXQHWvxPKVwdrCrePCfD6rFDXk4JweX6/T6iE4nN9Xks4aoa3Zksgwdt7
9QTTYlaxVpjmjL0bs8qJnF+c9snOGxyJLui/x7q35chXB1LbagPm1rMcdVYCFTuFLoExTqkNH/Oq
6Ll3Z81BdmzhOFUGPeFkWy+LPHbg8bjdkTxAtV6u508Kg+1g4Ptl45eEV5ROXesHo/T9Zu22pTJs
MxVQe/fye4l7vQRY1PIBX29y3rNY3qtQGYlJpqB8MbIsOwGy/EoIV5Z6TQ5LFcOX4+3xzjBwOS+6
KsOkgCek3Ww1bFuqeABIpketsbfYlxWLB8EmD5Ek7V/W56lvYtApHMXhoe3BJMnXGyzCJ8ShpI3W
bCuPG6zjco8kJdZ7hoDoQZbfCUbJyQXil8Ecmra/x/YB1dZEXYuAauLPGJnzrZybcddAJBJM3d6L
67mpthD2e+JiXJzHnokDQMC3jiU2tW7oaWl1OpyfvUMG0zKTKmLeBZpKtUOfh3gJF3SwTP6BG44o
eR3X07lwn/ANOBkB/uPU0lDVEArD7UCktR4EVxwJ0FH2wUHUPuv5ahxvkOe/Bg3lOQSLLUEHFvnz
TeyN24LBwY0Anas1INDtbv1s8qENlQSm2NtZKaD4m8pANK0TemwKPxykOF3gTdwrVM+e93r44MKB
4t3kcYSCq8W8wWhhsCUGvhdsANOmlkT1KFUnfIvckU1Xv2vhJXcgUnV1X84+zQa4u99norGbGU5D
TiYQUd3Bp44k5TI8z+lff6jtFde95CwVqPZUpM7lwvqPt3wv4JzObzIy22Ti66THkPOFMzVa+8wW
V5FvalxKPQFK2IfL6clUCFU6Elm5hckcSaoKc/NWoMmc+4TRYzWoTB4PdH2eydyoWwSD3QvTwHnu
yg4e4Au2VdvhfFMs/fezGwhMXoOD9nchs4gEWgSPfOdRp0SbnsqVsoCDrh+pz9mbcuaQnsLMVC3S
S/UOLLVVCWAbCwk+ptEULkgKCRSGy4Ei1WnrKei7G+wTziC8vD+FyxwiCE4y6RHrNh6bGTm/4HWb
ehSfeZTQi9RW4DyAjEYcv7DJ58IBepP7oFZjdAk9PZwndMN1XHSdHbC0b65RxbwOqfU675Jp8T1b
tUIvINMrJuUESF6ovr/BOBB4PWFPXYuBm5ofjNpMtSY9LhvYal1Mjbhvy+VR1gM8y4mJZlwS66+e
wEFeHc9NqIfJ82VV3EwnjA8PsaQ5GvcqfzqM1HmTIJwfdm6ChxFBNLn815oipt/AUfFSmtJNHE7J
Vb0BSzdNGqmgsOiYPiuVChBT38zMeha8vQx39+tMZL769NMQfgIo4ed2Pf9jyQpc36DfOQu2HnWc
LOCUIXirdR57x8cA6NM9OhBxok7AXieMap1zg7xtMOpthJmNkbSo2u1xNslkniKLbrvtHj7sm0ge
bOkZs/0GqWu/E/7wEBOy+Ziv3+uR9OyK/quplQBP1UvF95N2YOv55jB+fwocXdI+JLYNJuh0xpFo
xmbHZrkjLWi2wweH5/idjD69EWBf70eMHEPuh5Q7V9BT3gN2nc2u7s6aksGHKSoIXDw9mbzutVc/
8uFI/WoXWfP3aBINTFWK83PolGxbv29gv50rGj0zN5kuX6uCotWnRLnx6/OT+xh83dzAV9cK62nI
mwUcx6mlBt6sDrVVMyB0Qkqmh7j0YwVnqBmS//nhKxs+x1kF087WaVCHMpg+z7mCHWiG9fm4NVPS
KYPfhd+QytAGRpZo64GMu4vY+A4ZWLL2WAHUZzXGO4tLZjUKOYDlWcIWbJW6LQx0AY8mzWi0Sa7h
oOcgB8Ux7ql1sq5s/qRRBZpXtsMhU4SyOZZHFdbxeU/EfmqS8QbYBKNjnKNnfGWMJdv8Be3BAdiq
36hfnK0CoT1xDJ1E0Qbv3U0RQQFNE+Wn+mut+mr/w+u/+puO/bRXm04w8OUWo3r44XXSZBZhXHRl
c+lINjgfHj01NNvshdK0W80YzzL1LoZRLuJGLzRXqmzqorsbfml85QByJQf73VgllN+G8Idf1Cxp
kLzV99aEalryRD2IVTJHc+LA0xEjpL0CnLBqODUw2nkN4lL+bZF2ERE05+KCuDRs6vFO1VTdB9/T
Wh9DyF7HWIUyJyp4rYdyLh3eAfoAd3i38vPy4fgW2EI00nJKEvCd3loOKMtiRI7nVzldsy6GreC5
NFTC0Bputq9rY6dH+LTr23qKhqpV6UyORNQ/Tv2dncsTvEw1o1GnV2yGOzOADyo+6PbhVdaSmakA
WsR8jKC3Dalgcx5o26lEc2srgFXKCUGnSj4UJ/czmErwWsD9IHtIe6RCMn0uZwJ8X8J/9TIKpZz+
zg97cT8D5jseBKYYJ9jecR5jlXOT4fuJAzJzsZ5stI+RQz27nOn2emvqZWgVD4x95+OfPutyXxWA
YyUc+aCtyyTutjOhG+cdRbQRLLb3BV192BcRSdY7SUZWLAEAku4h5XNtwmHyDBla2m4hyu1NwBy7
nxuUfP2APTvU+gEXvPqHR9L2Qxnp0uQJA10MEF31lZhfi/jvZ35nceV8v8Qy6DihwftlefYzkmdH
6+oLwC5SmoSVWuBB/K41/OOzRSx9D2JozDhoDMaGa/aMNQReDsby/mQNx16OIdjvZyRq/omR2mpF
KMyZjzbCfWN1tebBPz2xm8WttYFOl8FLFREkBAT1zJ3rPaR3UcD6dTkDlqn4pXb+/Yu3wv0Yzprg
6+CIfZO0IjqE7Ny+njCcPI58kwNgM7rLJlC5co82+b4FY1d7T9h7zRFNhk+Sfl+DPaQ9qrCdibrF
iizcgyNnhuQjXt414XeHAG5PQ0ATXr8kcwf9Bvz4YSK5VbLy5F+gFU0uPe3Bc9XvSQrk+o6o+66O
PY1fyg1QZ+tQ81L7oeBzIYHNwUlQc4yUhPVrojFwuUZd1nQ9sylRgTLYW7pdLKVkV7tCGjh6LU03
wC3X/ulAuilTJJ/DJiEiaRsQwLNGA92ZAfViMweWOHKIwyMpJz+0CmhUPSOclpls01Q7CO+62WLL
zcyS6Z88B/H2JFE8RVxJf3rVPe0f1LN5aA2mm2eQNtkXB0HTWAuvzC386SXFeNphF4m7JxS7osdO
mhdgKfrcgUtoFhT/9FuyVwXw5Ud19TdvQN2tuYDy8xGp6b9hT+mzTWFioC/GHr6zYe03eEk3Jl71
Rjk9+bsA+Idxod7yOvXT6kc1Wxtkwj+tT9jvM9CpygkhogpvI5yCxdyDbKYz4s8hBWQrL3tQKW7w
Vy/sfr9zIGytCOsfeyqH+AL3IDkNVxrDCwLT6Gyf8KSmLXVDPLCh1m8OJMv7SyQa9eV8p0umnTz8
Jlda+AlVUjmFq55BSqoo1gJeVwFuLy+DehnteuYc7AgmptDQLcd/yznqwB5ezoOCzevpU5MR5o0q
dnlP5Lif2fL0gxvUySug93ye+r74dkT1w3uEuNVvzjD5QqBNdk/NyJCT1832TXhwKn89Px6wIXll
UHKLlIYatthimHwBw2/v48DxkNUeWm0Ae98qaNBslnLevc8dXPsJTV0a1GQ0p4tW7DyF7r75LpyO
t6kDq/7F+D1WPRvH+gXqVzyR+y8veKNShUlt+9iYs30yRUq7B63VG39+efWbCyy7zURNaNJ+CS9O
DPzkxWFbCY16cc7nAt5269LmliAgcBkWgXBwGiKY3YaNsfu5/OqLTN71kYxXdHV+9//DA7YErUvA
/e2ZhEMKZT+9AqsP7qh5wJk1RbruQefMlhV/JDCa71AAaVv2SKPFt2SahGxo78c92oAjBotkeg5c
/R/a5J1nMVKaqRo99wY9DHC2JiUCHljxBVs0qdiHgEfx4ysckO3Qs2IyK3iZLgeicLe0blNpHoAK
/AybkZEnDHmaAFzFM2g6XnG52PMlVn2bz7F/P+/KNV+YoHC/tOh7HKWe1IHXQRS6Ova/2zohn5zj
YNlJEw5qXkrm3WFBgGykmqh3wNX9LpVieIwLjdRrPvK9X2JVe9TOhcwGlcuxJeEEo6QtcQjLdzld
LpYKwktfkTF8WP18vIYxYE98JQ9gNtZoxd0L9tu0RH1qf3q26m04P78OEqakSyYHEwHGIw2JGjRN
SNmVR/DDzjccHtDQs711QfCS7LZ49S8Js2FTAWmneng7lJdkMhvlz+8QwH+afl7SOINC8ZpxxJ8X
a44JSeHx2EnUvSA+fJFkG0D23F2RzHIxZN8bbYD4Xjd+3O4esnieHXUbDA9qpKMNaNwf9vAUXu/U
0F2QEEEPB+i6Jkd++D+De52D+hJwf/X+hc4z/fEPDaxhKamC8grKufukwVLbPfv2HwSAP0EkkWUI
p1U/AxXOyZ+fpy7XqT9/hfWHuNTsUd0CQF/D8MfvMytUT42lm4oAbnDJ4tu5BaqkXMjC1Csj4rrR
Kj5jlepd2vXL/EYRWPMcjPBIkqUC9gsSeiSY8uo+Yfw83ODqX/B97c/lE/IpYEcsYLPaPft24D4X
0J1pRDZZ29dLl5ZPkHw4nXpxf2BTgZANm/vlSC/a6cgo30cdbIrmRs0n/+ong1djwNDI0y0XBoyN
bqeC+3nJsNNPTsKudotgLVXdXz4olZoZwFtmZNRW+MoaSX/SoUvzmOKfX3G1bQVLzu3IIaMPi0LL
fcE85xC2p/Hb09dGgcC3tZzi57UpJ/CRG/gejzE15M4qZ3vncbD5NC4adhlXE9ZEBWzM20hk3vGS
zVXVXlDk909stLJcDmD9BAtpNyqB1kEPhbV/Rcwfeho9p6ZmjyR7QpWEIxKN8h3OPszSH79RvE/y
ngZNq8OfX9wtndUzXzFizW+En95+lN1Pbz+neqLuJLT1JN/TXFNf3yfa3Ksna9P5IINzaYmkE3i9
ZEk4FXD1O9QrznYo7BF3gRtSK0T94EP/l9fVb3XccdB7hwtr7EKjBadRfdL1etQEw4TgaooIHoWs
npL9IoDf9XjUl+WsjY9IW/MQNBs0TyalKhoQOuGAd480TUhXsAwOMaNEEOmbdRnZpFCxhpk67tVI
xtlJK/i4oRcO5mPDRnI4LdAoQo5I5iwwYl64FvCfek+mVR8uu6lW4QY8nmQRs3NP7AMoYPK4RFR/
mEI4dub3Aq7WENJ4fpZWx+/OAVTTM4+3ST6Xy0HRnd/5ot7LNuV3xTNobtmODHItlcP1ajqwPiUu
YeH06Kku7HUYnsuGCMZFDqcLrVNt5UeyhBpfMs7eLpALbxnVH2JcTwdzY8LczQ5IQHlrrXo1UI5K
kFJ7X7z65ZscMzCHCsEeTa/lpCvFDXJedMFX5zuULFPdBp6SzwG12TEslS81oNZqbYzRyJ3CqUCO
DTrwGug2jSZrya/xHsxvoBO1KRNr/uWHigP7P//IfnlA+TpC+sPzeTtWJlz7FXFzu6/n082/gVU/
YS+5TyUb066Ct/R8pqYqHPrJ1KcnyA+2TsMDiurJZJMIDufFIko3VuV8tCJTNWowYte7Bmw52+cI
tPrZxZ5y60viOlMAZxy21O9GPZH21/ICbrXJ4+1inUuWbPcNTAg6oOd1dpj0vsgy5D+PPXnIe9ua
tx8daT9+YBeaAEYv+hN0FTawHaQ06ZN4HwG7u3GENeW3JjaXyBAkz56A095jQ+tPNzjJvYB8bS+C
peU18ZfPEUmWjXL1my+AHkJOMwdbtRApbays9fjTt2wZjocBtNA84lCeezZ63l0E462rCKdWH/BN
wMgpiztVv/6tNxzysj+8ECz7aM3CM/eghQqbbEqzA238BbIazgugppidayZI3A3G26NEA2l+lpMU
Xybw8osA75pnHH42y0UA8d19I/51f4JhCD/ej0+psU+2/cwMxQbN/XbEtq/urYYbVB02bvbC7lsV
2GwcXgX81Y+t8HrYD8mQQkvhZezKspFsvk8z1fKruCVLquCa+Y4OQVeefLpruR2TDpr6hO3yDKjt
ZT2jP/2x5m3UW/lws/odmJuXAkevpbaWp9l18O1Lm796qlJ7LkASEIH619PBYhfOS9Vb0Hd/fPfT
Z7DT0BcxK/7W/e/9V76lhg4UMCI+l2FgXlwcOqkMlge1AnjM5ZmGa/677Py3CfVD86S7Xg8sKunF
AP1+oNggqlD3b1PpfvMgHKx6XayOlgpR7nwJIAepnNXty4PKKUI4U0cYTnyNLzC/38yfHy8J78yZ
dnzoNblGR7OU9E9eQEna8H94+DePWv3nOv/IwnG6HxsIiJrRX944R/bygr88bO33hG1kof3lz2s+
KoUkvKA92D9ySp3m6dViDpkH9ltWkRfK23C47oMb3MFrgf1Vn83ylZrQUjQZu1JXl3RUe6TYFv1Q
FD12Fjs90BPCIDap9dDm/mlzpSqpxB/JGCRbMOmL2v78DuEmIbam5/nuqSHuJSKL7ZQMr08Kf36T
2nv9nUxc5gpwG5AHDvcFCVd8eMI1H8auoYkJ3dbjBRhS+MHGEiA2ax+/gD99J7BtVG4ENA0/vKSh
fBr6qWa3HBxTXV3P/1TOv7xIdyIRGyL69OxEUAB/+dmPT8dPaldQg8JEHf6h9otan9pf/kzOx/FU
z49alqFTPx4I7F5JON0PegvX+RACKJ/7zyPJ/viFmnh3rpleTQ08ebs33eZ7jwnNg2Xa8dlWa3+H
jP7yNMG86hTz1RzS1e+Bxk1fGAXbOZnnBrwA6Mw7DVXWhsMecTe45pErX6Vs/f89dXGXiswLNyZs
SIYM/PLLKObKmunC3tQCjCIa9puyn1o6B9ra70g+giGcozAuwO9+5qNrW8JPL615wboRisBfPqHN
Q0st3fGSxhQ1Gaabc0ri2Er7+cS1T3gwuhgJmVhZa36fQaeuH2jSer6m3OmNoH3fxvivXuRqnrTA
dmxyWM+b9spoQgrFK9bfzsDmBLwhvE4KRFKb7fthfJMC5rvXExvHh/VvPh11E6z53yccrSYwQZSp
x/X6erh5JygVUUQ0xHhBZeMx6y9Qf4hbwvPOzEhE3wSi3P5S6yTBZArjR6EllyalFtnWYdMuYgS7
amcgkvLv3zwyh0b1ZRgBsu2XevIz9TdvXue9YOGTiYPuuByxxzttQncPTgbHUPEwPmkzWPPbVDOv
KkeWJ/9a+Voz4S34dtix2Acs5lfcQ2B8Q2oblzwcs6kjf/1kBwTVY2jB/MffSGzgB7DNkgrauU/O
RHgcr7/6z1X6bWrsvh0xWf2O/PPH2Aono9/85i/93fGxcfKrpCqQ4wDTN1yyoR+tHh/aFMCj9Dyh
OT58reX+2GbQfej7Fb8e1my+LUFd+QX/+GZe9EX94TlZ9Zu1ROK2AktWOBS9HZ5934qrw2Gor4Rr
syJc/aYNL+B1xgG5h+DXb0AsQo9w8Pkqv9NwjOA676UIkHc9XS6hCiXfPPzN64k7TCL45ZcB2Ub9
rEYW9783Cv7xz3/+z993FjTt9fZeFwPG2zz+x/9ZFfiP4lr8hyCIf19sQIaiuv3rP/+9gfCvb982
3/F/je3r9hn+9Z//FP9WDf41tmPx/r9+/Y/1Qv/1j/8GAAD//wMAjOe+STBBAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c06ec39e6e-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=mACXFdzUU5gzexc8CNhknuOeCkPvi2.Kyt6jXreWWqY-1712872371-1.0.1.1-br8VVQQdkEoEgc0jo246cgcSTuHzxZj_cHdgJ1uO_BD69hKbtayvEYoK7Hy3vPuWcJ1C3EGHKLND68GVNOuMMg;
path=/; expires=Thu, 11-Apr-24 22:22:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=NJaZiQKL21ZBfqRUZ8SnS3LjiBoDe0CTOT92mUjz28o-1712872371379-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-ada-002
openai-organization:
- langchain
openai-processing-ms:
- '18'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999998'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_50b04626d72e28d91cd233d90d4d9065
status:
code: 200
message: OK
- request:
body: '{"input": [[9642], [9642]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SZyRKqzJaF5/UUJ/4pFSEikJs7oxPpzESwwRqBIgJ2NJlA1stX6LlRFTUxQkka
Ye+1vrX57//48+efd14Xl+Gff/3551H1wz//+f3tmg3ZP//681//8efPnz///fv8fyuLZ15cr9Wr
/C3/baxe12L6519/xP/95f8W/evPP0oPCr3k97QbBzlxYSnXOts24aqbBvncQOQpPvMf7dTNl5Lb
cN4uW2ZfzUcyGlIpa8w7GHjyLms0WlwXYM+YhmczxLV4Dm8ygjmeCV7EPJjU22xqUYxtYhy2t2Aq
l5kASXq4MWdtapy+DvqonYgzMGd1fyZcV9RU/Qjpm5hS6gXjUayP8Nj4N6o81GM9VceLqB6uiwUV
vPMT9fvkUyFbPT7wbE41Hxv90cBCxyEeA2GH5qOj+GDHVo+lVON8Wu+9GAWLa8/W5bazuMTUPciP
Z07IfGgttstiQR3ZsmR6/+ytPi3UErnibOHV3baTcTDKCmYF90S/KS8+jhHpkWCAT65k4MF4csQY
hOVDo8rxKFlU6IwL+G9tS9VDZXKxeNYNeDZ7UVhvTM5eY/KGcXi+qCZvtt10Ll4Y0LnQmIEfn/zd
rd0Lip0cYy0b1gn31PqJCipZVNmgszWnh9aGK416EqYODUYcjIXsbUygaFYf9Wg2OEKP8zkl1q5s
Lep2YghGxVJmaO8yGadwqpB6fA946vL6ez/mPfAbjihfn/b5uBHOAmwf7ocFZHFNOompR2AqQ8Ss
rkkwmU1IYXhlI9mu7WPN7+eXqm2NS0anzemUj4swDVElQYgPx+MlnwLvXqDHxr2Ra7t78PF1CGR4
XBdPOg7BPZmK/c4FeZvcmblYoYCag0Ph+zzwaldafOXIRxfAN/fEeaVRPe7qRgBzu/OJpyz7bvJV
2sNnGYfMXhhmJ639OUWdaHlka+J7MGaeQcENDE67o91xengMJZQfc8a7fGXWg3DXelWKeIGV+X3v
eLl0bOgKR6dKUuo1X57ARloHD2JVs5WPnjrrqHDFG1nzqcvfa0MR/p4PB92ARsfXKhAC50mwtZeT
uVkdbGQqUYIFV5y6QRZkF143SyYhUDdYJZ9AhlPIUyxXLOK83NxMtB7cJcFXuQvmt0l92GnRxNyu
Fet5s/RKKLbZQMxL0AejOSQNetxpTLZd9smnWFiJ8MCmycySZcmYqlEPURW8SRgqVjA9Wu4vdBkb
jJS3qub661OhWZtVEmLTqZe7uk6RcLcD4lBjyKdVkFO01KOaednQ1ZRtPF3Rm+uR6X6kBN0zvWYo
3XsrRuaDH0yLUc7UOImOeO8v3WB6iO4FXsYBM3d5t/JxkHMXsjz28GxYgTUK2qEA+LgH/DjMOJia
1bGFOSEuwVqTJb0hlSpI69QlacUiRNteixGf5ZKkVHbq8duvsOkAY3jeWzTslhCh8zJ6MDKcFzlj
Z6eB8DQ/ifG8pcl4y3RbO7Odjpfhqs1Zo59cUMzzhnmzFSY8k9o9+tYXzZPZrlfCfW5U4e4ETBe1
ez1v2ztFLnlfmX/dW8FS0HIVHlftSbxz87a4rsyZZgsyZ9vTLUnGDoc6ks5Ixb9+b+U+Ef7q61Yy
b8GbjFkI+ZqXxLmZVTCWwkeCZFemBJ/Eupvf5tMHUitPFmiHqhvAN0Lo9orLCN4J+fytd3U71gt6
QHXQcXhkFayM/ZLs171dzwcjmGEnugeszFafzDU9V2hHdMac+jkmrYhVB2WCkNJ8O0hJv+kNG85X
c4MVC3RL+tRTD+ftqmWB0Al8TMqtDspWlJlDn2bNbpv3DC20LdUehzzgyHYE9FsfhkptjdWt6yGD
zqJl/wwDDpc1hpaXGCO2EXLWUAdDbwwicwNNSDg740ZdePs3O6P6kY+SuevhbJ4VPGvGAzF7niuU
R4dkkJXyktDMFUVoeYWJf2Fb/vC2Xqru98f373g5/ezeEhqbnGHxVIUBa/LlETI181mg1GYuCXfR
R/4UbRi+Jwc0mdL1iYzT3sS8G17WEH+iEZT73cWjtzl0f/utoarBtvejGXzryQFybwdC9seQj2fV
EWC7OlfM3IY6ooKWHdHdSCLciq2F+myoi189/rbz1d3WJW2im4pCErTWJO3vJrLqqSXOdf9JRvWj
q/C2vYIZ0drs+szzehiE7Yr4evFJ5t1y7tVPLUrktFk/OXvcqx4+zoISk7E+6S4fzVa7wtbJ2n/U
1jwPMkbLc7rA9+dNzimaFk+0iwqHEKVaWn3x7Br43g9yCKhTT6ez9YTyph7IhvUKYguN6KqXFvHP
r5L+mXYS3D/1QNblXCacBJUJUjQVzM3vBeq3fHbhiSvt9/z4fNK6UR03L4uQQRDr4T0WM8qmbUzF
rlXR3HhUR/l6KvFw2Ib5nE56pAlwK4kr+7q1cqNLgaKm0Oivfjgx9z26SKsZr3koWXNXnkf0vR68
grNgUfVYu6AJpCC21A7BMBjXEDF2fBBvc2r5KGjXAr3v+pME02RaHB1vItwb7YbnoN/Ug5EbPno/
niJW3cjruG+9MKxcnBJzSR41jUtPBdkpMixw75pQvljtwdn0b7LxqFXPTKR70F7piZhiO+bzHMcm
lPp8IF65tdEgtjsR2BN7JLw+NwHf0yn+6R8d004Khk01Y6RbzoUQvCvyQUURhmRWYqbflA1nEpuP
yOmyFcPm2AbzpOY9zJdMJ2Qa92g+rKTs188sfLU06XleXVAbOzKxk7HjfaMPDSi9oLBwhTH68QHo
YSiTjfsorbGQoQD7HnoEm8GeD5+aR+jwfmLyvd8Be9zbHm3SbU3cN0/zaby5EuxE/8DcxaPtZu7Z
FIhZ9eTHl5xLM0YdRjXWnG1mdZfP0lYrX7NYseSGxQ9JYkOz33HmG+apo5JihOpPj1VOUzRPBwW0
22B9iHe6netR/bgybI0iozIZ191cBGYMz9Mno0oTary/lgKGSyS3WIBc4rR52BjkOnGIrYddx7Zr
eMPYnBmmT07Rz39VL3j5bL1etwF/vVIbamHfkkDtH3w+iu8QBHtrEVOgq3r8uGUKMmkk4opAA34t
2YwkTziSkF1oPayNMYUqW9vMgUubzNK11tXLZZ9jkcohH4t93KMu2qtkK/cm53qxD6EwBGD6irzr
MZPk+fedLngx1IwPvazWF7Sn4qsz+Gi6kwS7wh0x+izuqC/PwlF9vfYzc92KIy73WQZaWYoMP4le
T4NfVdqshD3me7TnfAuTCffCPZFwxe1g2dmaD/vbeP7ygdmtHP9lo7dz4SR0omc+DomSwV3ufRLI
5xPnkhlEsDxnC7I9qkLQc28jgpJmHTHv8pXPnc2fGnWqiapLPAc0VS0JhMlgRA8Pr5qvnoOJdoU/
YtU6rOsR2dcU7NjoyWaa3smkj4rwq2e2/vp1v977InTWdcZau3ugqRSOJXoZJ0y8m8QCOjqLGK2j
HaXP6SJ/+/0QIgvsGq/Gt825Z8kCJL0y4BqmktNsKI9IZ2iDH9HHr+mvX1dlIf3tD16elTcKbQth
fnFvfPjx0M9fnFc61ryM+iNqr9WHLtCyDvgqwCLKT1hiBqE94rVeyoCk3YeZ7jJIZlj1gtrr8u7L
43ousm5QEZO8gemfVLW++0uAl5+G/fiWizyO0EK4bPHk4ATNZD+qf/mi8LSlxbJY/7d+W52749P1
xI4/3qAqXJqAc0nFylrf+1T8pBxNm0oNUfPWC2KeItda8aGRtZE34Zc/zGDcnOUQos17x46Wklnf
PADw2C99LBuHN+rh8RZAVY+AkVKbyarJl3vo5ySgg4kNa7yc1BbIu4qIfoi6nJ6LoYUwQhZVVqMQ
TBe+rmBD1g7bFIsD4qv9xUXf/mFYoDYa5Tov4VsvxA7Ox4QvyE2AwCAd0dn1nc8r0xbBNdAJPxfJ
sx5iYSGBuN9lWPC0QzA1NHFg5YYpsWr05IN1umNUt68FRp+sr6eT9jhCKL5iElQTQaN/dCrQ3PhI
MPeuOUeTZKrN5CNmao99MH/98S8fWrvSD/jhUQuglZVI1nm8sQZPHQtN0dfzXx4cpgMqwTiYAx1f
3duaIAlHkEcfU27jCPFSyDP111/bUa26iet+D7UyX7Cc3wvOjgT5gKl0wZXomBYfLnIMydiemRm9
go4XBs/QrtFkOpsjoNYu6xB9vMRi68NW5n0er2MIosxljhsxxHns9GoYKRadBXqquVmvJPj2A7Fw
LAefJ9kLP15gtl3frae8/ETo+38pLB5NPvPFYg+5f6Bs0w06n8/W5gJpMa+JqyV1x6O6jNE+NhLm
WIpqzdUxiUG51y5dnKU1HzFxWoRUOSH+tx7nR6s8IdgLRzzdUfv1NxVDmIQp8apbx6cyUjB88y7z
5rfRiZ9Fbv7yA+W/fkXbq4hKXCyY/vPHgFr4l/eJq35uvP/lZ/1s63T88vrIukGGw1VbMKO2t/XY
xMIFfvpsvN454tL1foHb+5qzwF/p6Ju3XNBhJriThSEYyzpoAaSlzjZOOdXdQhlFYO14YrfrsuCT
N9UuzPZ6Q9xT9UBcxPJ3XhD25Ku33XJ7vwI0ZNaJw9gz+F0PWr2OZywOvmiN70BqYJs9pS+PVgn1
ps8TGXIPxDiVQ8BELJewslofi3rY1Uy1I6oVwvQiRjk/u8/B2B5/PE5yPdcsZu6ooPD29cTPWAm/
eV+pQHaHhGHpRHIOidfCl2+Z+ejWQZ+Uax2qQykR86rYSDIby//55c8/0Vz2tgNeeonxEtVdPS3G
MQViiwZxzaLPmRyJIVwfbkTn6sotrtRrG1T94zHHHKWOohulQFuik29+7ebLaZjhfVI9+prVjo/c
IyJqZedC1plX13PmgqQuT5sWS9FTyKd08iQl0z+IWHsk8vlJkky5XlZHZtn8GbBd34l/rz8oFqQb
9485g3O9IiS8ydSiU9i0igrv3XZU10XQO1cnhfOu+e6PI97JfZaqCTg+Xubxy+LbdFsqy9O6xbfh
fMv/8tllThrcHjsnnw/+Zg+pJjssGd+dNRweL+eXJ/DYOTEaBacS4eAVWyxc5cAassU8gp3jhrgD
syzu39YZbE6pzr58FXA9tI5wq5UtnuEydl/+NZFnFhQL4trNZzNbSmiK6ZaR0aqD2TsGmfrZrnbE
1cN1MojQUvVNHoTQRZxYUxODim4QOuQS7QfEhssYI08PzsTg066bfvn+L5/rI6/Zz99//R2+nNL6
8TcE0iFiDqe3gH7qiYLeHRW2XdtSR4fHMoMFO4bM+OaH/rK+z9Cj/EW2n3785oWgQbtzUxG7a1U+
OcazBdn21swvrk43L9pFr3bRUf3qfcPH4WlGYHRLEZfLO+Hcn44RKLcIMzMypfzTfwoVfedXVNvm
YfDtl6fq77MFC2b10c0d1lVoUvlKzMg8JmN3Oh9BlW2Lff0i4YfL8Q3nekmwHJs94vtLmWq/fprm
N7WGztZc5bc+eKlJMi9SKqFimw4kpLKeSxZ4Kazjl8UCp+wSxpxlD73iUmbiQ5P3umUXWmrIAgkX
OrHmX72/aHggTrw/WbOIVRuMzxNR5JRBPgvOFdAyvl/wyl9tginLd6b2ux+2XRvBcptmKaSa6rAw
Sw/B6C7Dt+Lp3vk778PB+6iNrVYvQ8z8RXxO2KVUKjXWJPebj0prVNF5RmyrEPLN66jzprcOiiIa
xDJ04a++qriNMLmZ5SPoH04sgAVOzZz3fZ8zU8oK1WuHgLl6+EimT40idDj5MjE20NfzllilcjJS
i+7Dg5Hw73wEYvrp/p0/NtXYAk/GGyHPUEyGo/iZ4fVpN1T0umvH9HFw0I8vouAMNSdBa6IrjXtC
zHId8CmsHTiuMxWfSH7IeSkkKfryM7Hxy02+PNbCt/6pKkGLmBtdLmiOBIKF+gFo7G2hB1MCAWt3
NcrF/rR5wrMi228e8TuuvtULKgg12fra0IQXgUvRV99YcnEXaHwVZwz3SXwRw+GfYPrOd5Cdhw1J
u/Ut/85XY4jrrcocsfUDNBixrUm4avDi/naCeZumImih2DBvVN98FNdKC6rHHxTyu1xTc6dGQJ1y
YmFv2MFy4dwqdBjcC1nX9ivpB+NdgRzob+KtbUAD5lREc+Z0xF/yV843588IX71i+JXaweiwU/x3
3vU9nrV8F1sZbYzsQpWXlQWTfkhmUISM4La5Y2tykkWJeq0HjHAc1Oz1Sp3fPJKisJn5XL17X/XN
xQfzakuDHpLIQVNI33RqpkXNz4diRJfisac/PhsX91UM86G6Yu1xQMH0LrYq0izq0tkMac1XgSOh
K9JOLEBLLxnLPgJo6iKi6OtX/WdXC5DOlwMWVuWMGumJCng2VcVItvBynrkoRtMpytnV17/z4mel
K96qSdh+YVT19OMHLux3xE2dfcLLzcmEV4d0vCy3QdBv215CCjgqs0J9Z4mH5DbKc4k4/fqBRY0h
uvzlJX/JNzkVtHivbj6twowdflojvt8q2N/mM9tmwzofHX9ZoYMUK8QhhdHRGJUOxOPaJMYRnfh4
uDwctSC9yRzmR4g/yFmEZVxfiOfgV94Wz/KiLfNOxmos36wp81xX/fIe0xsxQOO1FELI5XdIsnsC
1rzaX3uIC7qhstet8mEjRClcHZN89TupV5tNpMKw3OQsYJsimbcwP0EOzDdbL1QjUIF+Mvi+/yFB
OrzrUZ3DAu5StiT+w7Hyv/NaNRVMuvz679iVexUiplLmCaEZiOXmUYA39iNxzPCd02C1xqBVksdI
1w85bygO4XOLX3gpHs5omr3UBFxnHfOp8bKaNbub6Jsv2enxjuvZWNz3f+cxG7RkHWPdB8Nznd2J
rocTmmu9LdQ+8XziOksDSd4tpeozmlYEM7/JOVF2tnZ4N5iFnpNzziLL1NZRQsl3Pt193ydlcAqn
9C8P0nirmFD5C4t5xrpGbCqQAJE+U+ZIacVH75jGKszRTHQZspoPcthDSl5vLAm+ZVHniSX0PT+z
z0Swvtd3QRu/lInubZZdv2IrE/bwWuM6bBb56Dxz+m8/tMt3Pgup3CCBjku2O+vPZCZB94Rv3mX4
JFr1VNmBgLBKZroA7cbZpZwq2Mm0ZmvxcOZffX1CJz9OjKRDlc/9duWrYqzfmb9jqO43vWf/9b9T
qPuIgy7PEA2vM00uw8Gii0P0Bn9Tr4i3zefvPP2tImRdzyyUWr1bjd3tAhNdV8xu76t8ZCIPoW4f
C2KHh3vCJDOIwYguNwxetwvmT9ZnMIz1lgTxxwjG2JYb+OrXl6+Hbt7LrgvhMPrEj/cj7+TMK+Hy
lDHxU21bjy8T6fCd91DNmVn+5YM3wMc/MMNNqmS2PmMFoPU1M/fnjs9xaaia9OQmsY6foe6bizXK
0DQblj40E/Wr4BqpFf3EZHuYsq4T70aqlvz508Mm6L1bIqqWwjFG1fZe00JWjpAsw9ffvDNLiocB
vwQFv8n4qOddP2RwUbwNM/R7FUw//3LC/wEAAP//7Nxbj6IwFAfw9/kUE1/ZCQhKy7y5oI5cBBlF
2WSz8QJeEKpYCjaZ774pms0+7+PmvJIQkhPS9vzPL52pVU/0O2J+4SCjf5n5/VRlVqPEE4zv98gO
vrfzL6n0MtyrKq2iqplaZR5HMXa/T0tf1su4rB2yWj/nj6hrWY/9T6y3VSjWm5tYb5AsJdPAU3DB
GVfvnlF4tezLB2uGmjD8PBpK3CUsiNJ63pzj6yfSR9MVs3CzR4RvrgmeRMT0l2J+U0nKyMTnfLZn
nnzO5nTK6wwvdrthhaQyQXzsFGO9zTcyBSvzOlwfYnymjsVGTTrc1JewlPCPgT1gw8lRt2qxHxii
f2P+LG8QLSJngcg44W0+zOkNsbrNL/zG0+pnXh1r5ol51Pk4sdjbPPNT/7PX44z0V17bf1VSzn10
Y4ZaY2WZswp9XHxe2+Ti4JrIRnve3TTbq43xdmpLzOTOyiUak0190RtobBwdL2591yYEif4scKdU
3TSqGU+MYp8G1crRCpcn7LhF7XlwqeLQ0sR+1g/lfByMZU1BZXhKjihg0ar9X0ueDdKJXgTDRWXk
SVLecJUdcVuve5AUvD4siYPtIS2Cj9lqMO+WvrPtt/3gcG0seRPKdo5MVZKYyxPO2/kgUqPoxobF
2CsbUT+MZssDs6+h52quvcjxVJqP/J5rJJs69g5XfPVGczE/3yEqxfoEz9PArPTjNj2J+lb4bOYh
+yBqdmrKe/bMDx2Rlz7e77Qq4OvbP4iCLogCEAUgCkAUgCgAUQCiAEQBiAIQBSAKQBSAKABRAKIA
RAGIAhAFIApAFIAoAFEAogBEAYgCEAUgCkAUgCgAUQCi4D8XBS+vrz/bOwtyskvOAgbQpKFvf6jA
23q3flMU9XGxQXVb75PO+1MgdC4lyS/0FyVZUtw676/qgxp0KKHr81+PX8SHvl5+AwAA//8DAIpg
UmUwQQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c07c2667a8-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=ss8Dfnw2dCnMKMCi.zfWS0BJk3Nya2MozNAf3ZEbAFc-1712872371-1.0.1.1-7VuhrvtkTtQGdmJrWFZXfwA8qU6jrfB.YvM3fzxOi5ZpTHcR_wJ5SDu__rNLjonYngTNON5JGX2h5xHlZPDQSQ;
path=/; expires=Thu, 11-Apr-24 22:22:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=J8niYZvhXxO6enLta_jyadIvgONlgnxKdomInKFTRQo-1712872371391-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-ada-002
openai-organization:
- langchain
openai-processing-ms:
- '17'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9997'
x-ratelimit-remaining-tokens:
- '9999879'
x-ratelimit-reset-requests:
- 12ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_8687041c6a37dc7f1917a53a9eecad5a
status:
code: 200
message: OK
- request:
body: '{"input": [[9642], [2822]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SZyRKqzJaF5/UUJ/4pFSEikJs7oxPpzESwwRqBIgJ2NJlA1stX6LlRFTUxQkka
Ye+1vrX57//48+efd14Xl+Gff/3551H1wz//+f3tmg3ZP//681//8efPnz///fv8fyuLZ15cr9Wr
/C3/baxe12L6519/xP/95f8W/evPP0oPCr3k97QbBzlxYSnXOts24aqbBvncQOQpPvMf7dTNl5Lb
cN4uW2ZfzUcyGlIpa8w7GHjyLms0WlwXYM+YhmczxLV4Dm8ygjmeCV7EPJjU22xqUYxtYhy2t2Aq
l5kASXq4MWdtapy+DvqonYgzMGd1fyZcV9RU/Qjpm5hS6gXjUayP8Nj4N6o81GM9VceLqB6uiwUV
vPMT9fvkUyFbPT7wbE41Hxv90cBCxyEeA2GH5qOj+GDHVo+lVON8Wu+9GAWLa8/W5bazuMTUPciP
Z07IfGgttstiQR3ZsmR6/+ytPi3UErnibOHV3baTcTDKCmYF90S/KS8+jhHpkWCAT65k4MF4csQY
hOVDo8rxKFlU6IwL+G9tS9VDZXKxeNYNeDZ7UVhvTM5eY/KGcXi+qCZvtt10Ll4Y0LnQmIEfn/zd
rd0Lip0cYy0b1gn31PqJCipZVNmgszWnh9aGK416EqYODUYcjIXsbUygaFYf9Wg2OEKP8zkl1q5s
Lep2YghGxVJmaO8yGadwqpB6fA946vL6ez/mPfAbjihfn/b5uBHOAmwf7ocFZHFNOompR2AqQ8Ss
rkkwmU1IYXhlI9mu7WPN7+eXqm2NS0anzemUj4swDVElQYgPx+MlnwLvXqDHxr2Ra7t78PF1CGR4
XBdPOg7BPZmK/c4FeZvcmblYoYCag0Ph+zzwaldafOXIRxfAN/fEeaVRPe7qRgBzu/OJpyz7bvJV
2sNnGYfMXhhmJ639OUWdaHlka+J7MGaeQcENDE67o91xengMJZQfc8a7fGXWg3DXelWKeIGV+X3v
eLl0bOgKR6dKUuo1X57ARloHD2JVs5WPnjrrqHDFG1nzqcvfa0MR/p4PB92ARsfXKhAC50mwtZeT
uVkdbGQqUYIFV5y6QRZkF143SyYhUDdYJZ9AhlPIUyxXLOK83NxMtB7cJcFXuQvmt0l92GnRxNyu
Fet5s/RKKLbZQMxL0AejOSQNetxpTLZd9smnWFiJ8MCmycySZcmYqlEPURW8SRgqVjA9Wu4vdBkb
jJS3qub661OhWZtVEmLTqZe7uk6RcLcD4lBjyKdVkFO01KOaednQ1ZRtPF3Rm+uR6X6kBN0zvWYo
3XsrRuaDH0yLUc7UOImOeO8v3WB6iO4FXsYBM3d5t/JxkHMXsjz28GxYgTUK2qEA+LgH/DjMOJia
1bGFOSEuwVqTJb0hlSpI69QlacUiRNteixGf5ZKkVHbq8duvsOkAY3jeWzTslhCh8zJ6MDKcFzlj
Z6eB8DQ/ifG8pcl4y3RbO7Odjpfhqs1Zo59cUMzzhnmzFSY8k9o9+tYXzZPZrlfCfW5U4e4ETBe1
ez1v2ztFLnlfmX/dW8FS0HIVHlftSbxz87a4rsyZZgsyZ9vTLUnGDoc6ks5Ixb9+b+U+Ef7q61Yy
b8GbjFkI+ZqXxLmZVTCWwkeCZFemBJ/Eupvf5tMHUitPFmiHqhvAN0Lo9orLCN4J+fytd3U71gt6
QHXQcXhkFayM/ZLs171dzwcjmGEnugeszFafzDU9V2hHdMac+jkmrYhVB2WCkNJ8O0hJv+kNG85X
c4MVC3RL+tRTD+ftqmWB0Al8TMqtDspWlJlDn2bNbpv3DC20LdUehzzgyHYE9FsfhkptjdWt6yGD
zqJl/wwDDpc1hpaXGCO2EXLWUAdDbwwicwNNSDg740ZdePs3O6P6kY+SuevhbJ4VPGvGAzF7niuU
R4dkkJXyktDMFUVoeYWJf2Fb/vC2Xqru98f373g5/ezeEhqbnGHxVIUBa/LlETI181mg1GYuCXfR
R/4UbRi+Jwc0mdL1iYzT3sS8G17WEH+iEZT73cWjtzl0f/utoarBtvejGXzryQFybwdC9seQj2fV
EWC7OlfM3IY6ooKWHdHdSCLciq2F+myoi189/rbz1d3WJW2im4pCErTWJO3vJrLqqSXOdf9JRvWj
q/C2vYIZ0drs+szzehiE7Yr4evFJ5t1y7tVPLUrktFk/OXvcqx4+zoISk7E+6S4fzVa7wtbJ2n/U
1jwPMkbLc7rA9+dNzimaFk+0iwqHEKVaWn3x7Br43g9yCKhTT6ez9YTyph7IhvUKYguN6KqXFvHP
r5L+mXYS3D/1QNblXCacBJUJUjQVzM3vBeq3fHbhiSvt9/z4fNK6UR03L4uQQRDr4T0WM8qmbUzF
rlXR3HhUR/l6KvFw2Ib5nE56pAlwK4kr+7q1cqNLgaKm0Oivfjgx9z26SKsZr3koWXNXnkf0vR68
grNgUfVYu6AJpCC21A7BMBjXEDF2fBBvc2r5KGjXAr3v+pME02RaHB1vItwb7YbnoN/Ug5EbPno/
niJW3cjruG+9MKxcnBJzSR41jUtPBdkpMixw75pQvljtwdn0b7LxqFXPTKR70F7piZhiO+bzHMcm
lPp8IF65tdEgtjsR2BN7JLw+NwHf0yn+6R8d004Khk01Y6RbzoUQvCvyQUURhmRWYqbflA1nEpuP
yOmyFcPm2AbzpOY9zJdMJ2Qa92g+rKTs188sfLU06XleXVAbOzKxk7HjfaMPDSi9oLBwhTH68QHo
YSiTjfsorbGQoQD7HnoEm8GeD5+aR+jwfmLyvd8Be9zbHm3SbU3cN0/zaby5EuxE/8DcxaPtZu7Z
FIhZ9eTHl5xLM0YdRjXWnG1mdZfP0lYrX7NYseSGxQ9JYkOz33HmG+apo5JihOpPj1VOUzRPBwW0
22B9iHe6netR/bgybI0iozIZ191cBGYMz9Mno0oTary/lgKGSyS3WIBc4rR52BjkOnGIrYddx7Zr
eMPYnBmmT07Rz39VL3j5bL1etwF/vVIbamHfkkDtH3w+iu8QBHtrEVOgq3r8uGUKMmkk4opAA34t
2YwkTziSkF1oPayNMYUqW9vMgUubzNK11tXLZZ9jkcohH4t93KMu2qtkK/cm53qxD6EwBGD6irzr
MZPk+fedLngx1IwPvazWF7Sn4qsz+Gi6kwS7wh0x+izuqC/PwlF9vfYzc92KIy73WQZaWYoMP4le
T4NfVdqshD3me7TnfAuTCffCPZFwxe1g2dmaD/vbeP7ygdmtHP9lo7dz4SR0omc+DomSwV3ufRLI
5xPnkhlEsDxnC7I9qkLQc28jgpJmHTHv8pXPnc2fGnWqiapLPAc0VS0JhMlgRA8Pr5qvnoOJdoU/
YtU6rOsR2dcU7NjoyWaa3smkj4rwq2e2/vp1v977InTWdcZau3ugqRSOJXoZJ0y8m8QCOjqLGK2j
HaXP6SJ/+/0QIgvsGq/Gt825Z8kCJL0y4BqmktNsKI9IZ2iDH9HHr+mvX1dlIf3tD16elTcKbQth
fnFvfPjx0M9fnFc61ryM+iNqr9WHLtCyDvgqwCLKT1hiBqE94rVeyoCk3YeZ7jJIZlj1gtrr8u7L
43ousm5QEZO8gemfVLW++0uAl5+G/fiWizyO0EK4bPHk4ATNZD+qf/mi8LSlxbJY/7d+W52749P1
xI4/3qAqXJqAc0nFylrf+1T8pBxNm0oNUfPWC2KeItda8aGRtZE34Zc/zGDcnOUQos17x46Wklnf
PADw2C99LBuHN+rh8RZAVY+AkVKbyarJl3vo5ySgg4kNa7yc1BbIu4qIfoi6nJ6LoYUwQhZVVqMQ
TBe+rmBD1g7bFIsD4qv9xUXf/mFYoDYa5Tov4VsvxA7Ox4QvyE2AwCAd0dn1nc8r0xbBNdAJPxfJ
sx5iYSGBuN9lWPC0QzA1NHFg5YYpsWr05IN1umNUt68FRp+sr6eT9jhCKL5iElQTQaN/dCrQ3PhI
MPeuOUeTZKrN5CNmao99MH/98S8fWrvSD/jhUQuglZVI1nm8sQZPHQtN0dfzXx4cpgMqwTiYAx1f
3duaIAlHkEcfU27jCPFSyDP111/bUa26iet+D7UyX7Cc3wvOjgT5gKl0wZXomBYfLnIMydiemRm9
go4XBs/QrtFkOpsjoNYu6xB9vMRi68NW5n0er2MIosxljhsxxHns9GoYKRadBXqquVmvJPj2A7Fw
LAefJ9kLP15gtl3frae8/ETo+38pLB5NPvPFYg+5f6Bs0w06n8/W5gJpMa+JqyV1x6O6jNE+NhLm
WIpqzdUxiUG51y5dnKU1HzFxWoRUOSH+tx7nR6s8IdgLRzzdUfv1NxVDmIQp8apbx6cyUjB88y7z
5rfRiZ9Fbv7yA+W/fkXbq4hKXCyY/vPHgFr4l/eJq35uvP/lZ/1s63T88vrIukGGw1VbMKO2t/XY
xMIFfvpsvN454tL1foHb+5qzwF/p6Ju3XNBhJriThSEYyzpoAaSlzjZOOdXdQhlFYO14YrfrsuCT
N9UuzPZ6Q9xT9UBcxPJ3XhD25Ku33XJ7vwI0ZNaJw9gz+F0PWr2OZywOvmiN70BqYJs9pS+PVgn1
ps8TGXIPxDiVQ8BELJewslofi3rY1Uy1I6oVwvQiRjk/u8/B2B5/PE5yPdcsZu6ooPD29cTPWAm/
eV+pQHaHhGHpRHIOidfCl2+Z+ejWQZ+Uax2qQykR86rYSDIby//55c8/0Vz2tgNeeonxEtVdPS3G
MQViiwZxzaLPmRyJIVwfbkTn6sotrtRrG1T94zHHHKWOohulQFuik29+7ebLaZjhfVI9+prVjo/c
IyJqZedC1plX13PmgqQuT5sWS9FTyKd08iQl0z+IWHsk8vlJkky5XlZHZtn8GbBd34l/rz8oFqQb
9485g3O9IiS8ydSiU9i0igrv3XZU10XQO1cnhfOu+e6PI97JfZaqCTg+Xubxy+LbdFsqy9O6xbfh
fMv/8tllThrcHjsnnw/+Zg+pJjssGd+dNRweL+eXJ/DYOTEaBacS4eAVWyxc5cAassU8gp3jhrgD
syzu39YZbE6pzr58FXA9tI5wq5UtnuEydl/+NZFnFhQL4trNZzNbSmiK6ZaR0aqD2TsGmfrZrnbE
1cN1MojQUvVNHoTQRZxYUxODim4QOuQS7QfEhssYI08PzsTg066bfvn+L5/rI6/Zz99//R2+nNL6
8TcE0iFiDqe3gH7qiYLeHRW2XdtSR4fHMoMFO4bM+OaH/rK+z9Cj/EW2n3785oWgQbtzUxG7a1U+
OcazBdn21swvrk43L9pFr3bRUf3qfcPH4WlGYHRLEZfLO+Hcn44RKLcIMzMypfzTfwoVfedXVNvm
YfDtl6fq77MFC2b10c0d1lVoUvlKzMg8JmN3Oh9BlW2Lff0i4YfL8Q3nekmwHJs94vtLmWq/fprm
N7WGztZc5bc+eKlJMi9SKqFimw4kpLKeSxZ4Kazjl8UCp+wSxpxlD73iUmbiQ5P3umUXWmrIAgkX
OrHmX72/aHggTrw/WbOIVRuMzxNR5JRBPgvOFdAyvl/wyl9tginLd6b2ux+2XRvBcptmKaSa6rAw
Sw/B6C7Dt+Lp3vk778PB+6iNrVYvQ8z8RXxO2KVUKjXWJPebj0prVNF5RmyrEPLN66jzprcOiiIa
xDJ04a++qriNMLmZ5SPoH04sgAVOzZz3fZ8zU8oK1WuHgLl6+EimT40idDj5MjE20NfzllilcjJS
i+7Dg5Hw73wEYvrp/p0/NtXYAk/GGyHPUEyGo/iZ4fVpN1T0umvH9HFw0I8vouAMNSdBa6IrjXtC
zHId8CmsHTiuMxWfSH7IeSkkKfryM7Hxy02+PNbCt/6pKkGLmBtdLmiOBIKF+gFo7G2hB1MCAWt3
NcrF/rR5wrMi228e8TuuvtULKgg12fra0IQXgUvRV99YcnEXaHwVZwz3SXwRw+GfYPrOd5Cdhw1J
u/Ut/85XY4jrrcocsfUDNBixrUm4avDi/naCeZumImih2DBvVN98FNdKC6rHHxTyu1xTc6dGQJ1y
YmFv2MFy4dwqdBjcC1nX9ivpB+NdgRzob+KtbUAD5lREc+Z0xF/yV843588IX71i+JXaweiwU/x3
3vU9nrV8F1sZbYzsQpWXlQWTfkhmUISM4La5Y2tykkWJeq0HjHAc1Oz1Sp3fPJKisJn5XL17X/XN
xQfzakuDHpLIQVNI33RqpkXNz4diRJfisac/PhsX91UM86G6Yu1xQMH0LrYq0izq0tkMac1XgSOh
K9JOLEBLLxnLPgJo6iKi6OtX/WdXC5DOlwMWVuWMGumJCng2VcVItvBynrkoRtMpytnV17/z4mel
K96qSdh+YVT19OMHLux3xE2dfcLLzcmEV4d0vCy3QdBv215CCjgqs0J9Z4mH5DbKc4k4/fqBRY0h
uvzlJX/JNzkVtHivbj6twowdflojvt8q2N/mM9tmwzofHX9ZoYMUK8QhhdHRGJUOxOPaJMYRnfh4
uDwctSC9yRzmR4g/yFmEZVxfiOfgV94Wz/KiLfNOxmos36wp81xX/fIe0xsxQOO1FELI5XdIsnsC
1rzaX3uIC7qhstet8mEjRClcHZN89TupV5tNpMKw3OQsYJsimbcwP0EOzDdbL1QjUIF+Mvi+/yFB
OrzrUZ3DAu5StiT+w7Hyv/NaNRVMuvz679iVexUiplLmCaEZiOXmUYA39iNxzPCd02C1xqBVksdI
1w85bygO4XOLX3gpHs5omr3UBFxnHfOp8bKaNbub6Jsv2enxjuvZWNz3f+cxG7RkHWPdB8Nznd2J
rocTmmu9LdQ+8XziOksDSd4tpeozmlYEM7/JOVF2tnZ4N5iFnpNzziLL1NZRQsl3Pt193ydlcAqn
9C8P0nirmFD5C4t5xrpGbCqQAJE+U+ZIacVH75jGKszRTHQZspoPcthDSl5vLAm+ZVHniSX0PT+z
z0Swvtd3QRu/lInubZZdv2IrE/bwWuM6bBb56Dxz+m8/tMt3Pgup3CCBjku2O+vPZCZB94Rv3mX4
JFr1VNmBgLBKZroA7cbZpZwq2Mm0ZmvxcOZffX1CJz9OjKRDlc/9duWrYqzfmb9jqO43vWf/9b9T
qPuIgy7PEA2vM00uw8Gii0P0Bn9Tr4i3zefvPP2tImRdzyyUWr1bjd3tAhNdV8xu76t8ZCIPoW4f
C2KHh3vCJDOIwYguNwxetwvmT9ZnMIz1lgTxxwjG2JYb+OrXl6+Hbt7LrgvhMPrEj/cj7+TMK+Hy
lDHxU21bjy8T6fCd91DNmVn+5YM3wMc/MMNNqmS2PmMFoPU1M/fnjs9xaaia9OQmsY6foe6bizXK
0DQblj40E/Wr4BqpFf3EZHuYsq4T70aqlvz508Mm6L1bIqqWwjFG1fZe00JWjpAsw9ffvDNLiocB
vwQFv8n4qOddP2RwUbwNM/R7FUw//3LC/wEAAP//nHxJk4LAtub+/oobd0u/AJkyeTsmmc1UQMSI
jg5wQFFEhkwgI95/f4F1u6MXveplVYVSJOd80zm6F4m8+p11fhEATfnukXIXqTULuQfhshx9bPzm
X1wfvaBMiERG0bxbfZMfcxgaux7xap/3U9Cein/PH8HGsv74b8VbcljxZljxBvDcbYcjAX4YZeIS
aZ9o4hH/sPZgPhzipybkm5bi431K5nfexUDd7k7UgnMFWlZ2N+gdWxNl6/yGcMLWhO9mX9GIf7+S
ccemF0yvV5sArr8B5gQfR/3lGy8BCsl0KB45fI+BRbfz3S6n76Hn4Fn3dWp7T9WaVj7QVv9G0b6Z
wfg5BilonRv75cNsHACdfvkFmiNp+ndenUtmTaMxcGuaR+W/81MUyzKjrXKKfv6LcA1DYKCaOEEh
aygB7hexyW+/AZxaXvvp3XK+dD6El53PUZMFp7CVKG+qqaxL1Dk+v+G0SF4LVn+Gw90olrNo5p72
qe6YnALpE7IbfV7ATw9mIjxY0spnyoFvHOzwkgD6Q317AkyPp1+99uyl3z31g+2UaM3t1g+QvJ7w
d14Lvn3Y9MjaAPr2+MHu/qQnmx4FF+XnB+1Cy9h84P0GmCLH0ZDdGPvNB4F4PA7U/jhRP6/nB8E+
e1C/O0ShFPppA3dcskVyqN3KKY8eHeyibbLOz69g5HLVg8kdm0R9Xu71er4Evs3mQN1WfNVzv7z+
nR8Ga1769/p//bYC/ut//H9sFGz+3xsFtRJ8kJL7SUnOgaLDelwA3QKKw6Uyhgs0tG9JrR634Qz1
oALjQS4pfilPa3qNwgTj97sgy/Xk1m2kVDHcRukNu5Pg1SLe+i9Qc+YNe13FgdmKuwbKGoLYtc9x
2LD7q4G8ygIi2efYmlM4eDDBTU+9k/9lc2jBAr4l18ChevYAkz+HRlMlcCGbu2qGEx+gQlW2KaFu
O2G2RN+tDJP0UuPIOlTWfJK9AQ7At7D13vWMDkUUwOScFWRzi7fWhAUpUAs139Jy+KrJsn0XAhTS
tML7r9ony0YIEFAC9Kbm/AQW1ccpguN5+RIZpk3JgqY1weE8WQieZZpMJRkDIIMXj5GWPcHQwmkB
YIzPFIkiKrvKs/fAzPSJiK5lWhvt4+cQpSIlBCc0XHoQ5yB8zTF19w+r31zVTQNf9jOhLo3Ccv62
nQcel9eHovS9BctJLFO4AfUTSYYmll3nljJ85/cSu4ejGRLurhaAHXcCYTU/smVj+DIcokRFfLW1
esLF6gRLbttRw9FEa5qErgDr3wnlq4M1hdvHBHh9VqjrSUG4PL/fJ1RC8bk+ryUcNcNbsyWQ4O29
eoJpMatYK0xzxt6NWeVEzi9O+2TnDY5EF/TfY93bcuSrA6lttQFz61mOOiuBip1Cl8AYp9SGj3lV
9Ny7s+YgO7ZwnCqDnnCyrZdFHjvweNzuSB6gWi/X8yeFwXYw8P2y8UvCK0qnrvWDUfp+s3bbUhm2
mQqovXv5vcS9XgIsavmArzc571ks71WojMQkU1C+GFmWnQBZfiWEK0u9JoeliuHL8fZ4Zxi4nBdd
lWFSwBPSbrYati1VPAAk06PW2Fvsy4rFg2CTh0iS9i/r89Q3MegUjuLw0PZgkuTrDRbhE+JQ0kZr
tpXHDdZxuUeSEus9Q0D0IMvvBKPk5ALxy2AOTdvfY/uAamuirkVANfFnjMz5Vs7NuGsgEgmmbu/F
9dxUWwj7PXExLs5jz8QBIOBbxxKbWjf0tLQ6Hc7P3iGDaZlJFTHvAk2l2qHPQ7yECzpYJv/ADUeU
vI7r6Vy4T/gGnIwA/3FqaahqCIXhdiDSWg+CK44E6Cj74CBqn/V8NY43yPNfg4byHILFlqADi/z5
JvbGbcHg4EaAztUaEOh2t342+dCGSgJT7O2sFFD8TWUgmtYJPTaFHw5SnC7wJu4VqmfPez18cOFA
8W7yOELB1WLeYLQw2BID3ws2gGlTS6J6lKoTvkXuyKar37XwkjsQqbq6L2efZgPc3e8z0djNDKch
JxOIqO7gU0eSchme5/SvP9T2iutecpYKVHsqUudyYf3HW74XcE7nNxmZbTLxddJjyPnCmRqtfWaL
q8g3NS6lngAl7MPl9GQqhCodiazcwmSOJFWFuXkr0GTOfcLosRpUJo8Huj7PZG7ULYLB7oVp4Dx3
ZQcP8AXbqu1wvimW/vvZDQQmr8FB+7uQWUQCLYJHvvOoU6JNT+VKWcBB14/U5+xNOXNIT2Fmqhbp
pXoHltqqBLCNhQQf02gKFySFBArD5UCR6rT1FPTdDfYJZxBe3p/CZQ4RBCeZ9Ih1G4/NjJxf8LpN
PYrPPEroRWorcB5ARiOOX9jkc+EAvcl9UKsxuoSeHs4TuuE6LrrODljaN9eoYl6H1Hqdd8m0+J6t
WqEXkOkVk3ICJC9U399gHAi8nrCnrsXATc0PRm2mWpMelw1stS6mRty35fIo6wGe5cREMy6J9VdP
4CCvjucm1MPk+bIqbqYTxoeHWNIcjXuVPx1G6rxJEM4POzfBw4ggmlz+a00R02/gqHgpTekmDqfk
qt6ApZsmjVRQWHRMn5VKBYipb2ZmPQveXoa7+3UmMl99+mkIPwGU8HO7nv+xZAWub9DvnAVbjzpO
FnDKELzVOo+942MA9OkeHYg4USdgrxNGtc65Qd42GPU2wszGSFpU7fY4m2QyT5FFt912Dx/2TSQP
tvSM2X6D1LXfCX94iAnZfMzX7/VIenZF/9XUSoCn6qXi+0k7sPV8cxi/PwWOLmkfEtsGE3Q640g0
Y7Njs9yRFjTb4YPDc/xORp/eCLCv9yNGjiH3Q8qdK+gp7wG7zmZXd2dNyeDDFBUELp6eTF732qsf
+XCkfrWLrPl7NIkGpirF+Tl0Srat3zew384VjZ6Zm0yXr1VB0epTotz49fnJfQy+bm7gq2uF9TTk
zQKO49RSA29Wh9qqGRA6ISXTQ1z6sYIz1AzJ//zwlQ2f46yCaWfrNKhDGUyf51zBDjTD+nzcminp
lMHvwm9IZWgDI0u09UDG3UVsfIcMLFl7rADqsxrjncUlsxqFHMDyLGELtkrdFga6gEeTZjTaJNdw
0HOQg+IY99Q6WVc2f9KoAs0r2+GQKULZHMujCuv4vCdiPzXJeANsgtExztEzvjLGkm3+gvbgAGzV
b9QvzlaB0J44hk6iaIP37qaIoICmifJT/bVWfbX/4fVf/U3HftqrTScY+HKLUT388DppMoswLrqy
uXQkG5wPj54amm32QmnarWaMZ5l6F8MoF3GjF5orVTZ10d0NvzS+cgC5koP9bqwSym9D+MMvapY0
SN7qe2tCNS15oh7EKpmjOXHg6YgR0l4BTlg1nBoY7bwGcSn/tki7iAiac3FBXBo29Xinaqrug+9p
rY8hZK9jrEKZExW81kM5lw7vAH2AO7xb+Xn5cHwLbCEaaTklCfhOby0HlGUxIsfzq5yuWRfDVvBc
GiphaA0329e1sdMjfNr1bT1FQ9WqdCZHIuofp/7OzuUJXqaa0ajTKzbDnRnABxUfdPvwKmvJzFQA
LWI+RtDbhlSwOQ+07VSiubUVwCrlhKBTJR+Kk/sZTCV4LeB+kD2kPVIhmT6XMwG+L+G/ehmFUk5/
54e9uJ8B8x0PAlOME2zvOI+xyrnJ8P3EAZm5WE822sfIoZ5dznR7vTX1MrSKB8a+8/FPn3W5rwrA
sRKOfNDWZRJ325nQjfOOItoIFtv7gq4+7IuIJOudJCMrlgAASfeQ8rk24TB5hgwtbbcQ5fYmYI7d
zw1Kvn7Anh1q/YALXv3DI2n7oYx0afKEgS4GiK76SsyvRfz3M7+zuHK+X2IZdJzQ4P2yPPsZybOj
dfUFYBcpTcJKLfAgftca/vHZIpa+BzE0Zhw0BmPDNXvGGgIvB2N5f7KGYy/HEOz3MxI1/8RIbbUi
FObMRxvhvrG6WvPgn57YzeLW2kCny+CliggSAoJ65s71HtK7KGD9upwBy1T8Ujv//sVb4X4MZ03w
dXDEvklaER1Cdm5fTxhOHke+yQGwGd1lE6hcuUebfN+Csau9J+y95ogmwydJv6/BHtIeVdjORN1i
RRbuwZEzQ/IRL++a8LtDALenIaAJr1+SuYN+A378MJHcKll58i/QiiaXnvbguer3JAVyfUfUfVfH
nsYv5Qaos3Woean9UPC5kMDm4CSoOUZKwvo10Ri4XKMua7qe2ZSoQBnsLd0ullKyq10hDRy9lqYb
4JZr/3Qg3ZQpks9hkxCRtA0I4Fmjge7MgHqxmQNLHDnE4ZGUkx9aBTSqnhFOy0y2aaodhHfdbLHl
ZmbJ9E+eg3h7kiieIq6kP73qnvYP6tk8tAbTzTNIm+yLg6BprIVX5hb+9JJiPO2wi8TdE4pd0WMn
zQuwFH3uwCU0C4p/+i3ZqwL48qO6+ps3oO7WXED5+YjU9N+wp/TZpjAx0BdjD9/ZsPYbvKQbE696
o5ye/F0A/MO4UG95nfpp9aOarQ0y4Z/WJ+z3GehU5YQQUYW3EU7BYu5BNtMZ8eeQArKVlz2oFDf4
qxd2v985ELZWhPWPPZVDfIF7kJyGK43hBYFpdLZPeFLTlrohHthQ6zcHkuX9JRKN+nK+0yXTTh5+
kyst/IQqqZzCVc8gJVUUawGvqwC3l5dBvYx2PXMOdgQTU2joluO/5Rx1YA8v50HB5vX0qckI80YV
u7wnctzPbHn6wQ3q5BXQez5PfV98O6L64T1C3Oo3Z5h8IdAmu6dmZMjJ62b7Jjw4lb+eHw/YkLwy
KLlFSkMNW2wxTL6A4bf3ceB4yGoPrTaAvW8VNGg2Sznv3ucOrv2Epi4NajKa00Urdp5Cd998F07H
29SBVf9i/B6rno1j/QL1K57I/ZcXvFGpwqS2fWzM2T6ZIqXdg9bqjT+/vPrNBZbdZqImNGm/hBcn
Bn7y4rCthEa9OOdzAW+7dWlzSxAQuAyLQDg4DRHMbsPG2P1cfvVFJu/6SMYrujq/+//hAVuC1iXg
/vZMwiGFsp9egdUHd9Q84MyaIl33oHNmy4o/EhjNdyiAtC17pNHiWzJNQja09+MebcARg0UyPQeu
/g9t8s6zGCnNVI2ee4MeBjhbkxIBD6z4gi2aVOxDwKP48RUOyHboWTGZFbxMlwNRuFtat6k0D0AF
fobNyMgThjxNAK7iGTQdr7hc7PkSq77N59i/n3flmi9MULhfWvQ9jlJP6sDrIApdHfvfbZ2QT85x
sOykCQc1LyXz7rAgQDZSTdQ74Op+l0oxPMaFRuo1H/neL7GqPWrnQmaDyuXYknCCUdKWOITlu5wu
F0sF4aWvyBg+rH4+XsMYsCe+kgcwG2u04u4F+21aoj61Pz1b9Tacn18HCVPSJZODiQDjkYZEDZom
pOzKI/hh5xsOD2jo2d66IHhJdlu8+peE2bCpgLRTPbwdyksymY3y53cI4D9NPy9pnEGheM044s+L
NceEpPB47CTqXhAfvkiyDSB77q5IZrkYsu+NNkB8rxs/bncPWTzPjroNhgc10tEGNO4Pe3gKr3dq
6C5IiKCHA3RdkyM//J/Bvc5BfQm4v3r/QueZ/viHBtawlFRBeQXl3H3SYKntnn37DwLAnyCSyDKE
06qfgQrn5M/PU5fr1J+/wvpDXGr2qG4BoK9h+OP3mRWqp8bSTUUAN7hk8e3cAlVSLmRh6pURcd1o
FZ+xSvUu7fplfqMIrHkORngkyVIB+wUJPRJMeXWfMH4ebnD1L/i+9ufyCfkUsCMWsFntnn07cJ8L
6M40Ipus7eulS8snSD6cTr24P7CpQMiGzf1ypBftdGSU76MONkVzo+aTf/WTwasxYGjk6ZYLA8ZG
t1PB/bxk2OknJ2FXu0WwlqruLx+USs0M4C0zMmorfGWNpD/p0KV5TPHPr7jatoIl53bkkNGHRaHl
vmCecwjb0/jt6WujQODbWk7x89qUE/jIDXyPx5gacmeVs73zONh8GhcNu4yrCWuiAjbmbSQy73jJ
5qpqLyjy+yc2WlkuB7B+goW0G5VA66CHwtq/IuYPPY2eU1OzR5I9oUrCEYlG+Q5nH2bpj98o3id5
T4Om1eHPL+6WzuqZrxix5jfCT28/yu6nt59TPVF3Etp6ku9prqmv7xNt7tWTtel8kMG5tETSCbxe
siScCrj6HeoVZzsU9oi7wA2pFaJ+8KH/y+vqtzruOOi9w4U1dqHRgtOoPul6PWqCYUJwNUUEj0JW
T8l+EcDvejzqy3LWxkekrXkImg2aJ5NSFQ0InXDAu0eaJqQrWAaHmFEiiPTNuoxsUqhYw0wd92ok
4+ykFXzc0AsH87FhIzmcFmgUIUckcxYYMS9cC/hPvSfTqg+X3VSrcAMeT7KI2bkn9gEUMHlcIqo/
TCEcO/N7AVdrCGk8P0ur43fnAKrpmcfbJJ/L5aDozu98Ue9lm/K74hk0t2xHBrmWyuF6NR1YnxKX
sHB69FQX9joMz2VDBOMih9OF1qm28iNZQo0vGWdvF8iFt4zqDzGup4O5MWHuZgckoLy1Vr0aKEcl
SKm9L1798k2OGZhDhWCPptdy0pXiBjkvuuCr8x1KlqluA0/J54Da7BiWypcaUGu1NsZo5E7hVCDH
Bh14DXSbRpO15Nd4D+Y30InalIk1//JDxYH9n39kvzygfB0h/eH5vB0rE679iri53dfz6ebfwKqf
sJfcp5KNaVfBW3o+U1MVDv1k6tMT5Adbp+EBRfVkskkEh/NiEaUbq3I+WpGpGjUYsetdA7ac7XME
Wv3sYk+59SVxnSmAMw5b6nejnkj7a3kBt9rk8XaxziVLtvsGJgQd0PM6O0x6X2QZ8p/HnjzkvW3N
24+OtB8/sAtNAKMX/Qm6ChvYDlKa9Em8j4Dd3TjCmvJbE5tLZAiSZ0/Aae+xofWnG5zkXkC+thfB
0vKa+MvniCTLRrn6zRdADyGnmYOtWoiUNlbWevzpW7YMx8MAWmgecSjPPRs97y6C8dZVhFOrD/gm
YOSUxZ2qX//WGw552R9eCJZ9tGbhmXvQQoVNNqXZgTb+AlkN5wVQU8zONRMk7gbj7VGigTQ/y0mK
LxN4+UWAd80zDj+b5SKA+O6+Ef+6P8EwhB/vx6fU2CfbfmaGYoPmfjti21f3VsMNqg4bN3th960K
bDYOrwL+6sdWeD3sh2RIoaXwMnZl2Ug236eZavlV3JIlVXDNfEeHoCtPPt213I5JB019wnZ5BtT2
sp7Rn/5Y8zbqrXy4Wf0OzM1LgaPXUlvL0+w6+PalzV89Vak9FyAJiED96+lgsQvnpeot6Ls/vvvp
M9hp6IuYFX/r/vf+K99SQwcKGBGfyzAwLy4OnVQGy4NaATzm8kzDNf9ddv7bhPqhedJdrwcWlfRi
gH4/UGwQVaj7t6l0v3kQDla9LlZHS4Uod74EkINUzur25UHlFCGcqSMMJ77GF5jfb+bPj5eEd+ZM
Oz70mlyjo1lK+icvoCRt+D88/JtHrf5znX9k4Tjdjw0ERM3oL2+cI3t5wV8etvZ7wjay0P7y5zUf
lUISXtAe7B85pU7z9Goxh8wD+y2ryAvlbThc98EN7uC1wP6qz2b5Sk1oKZqMXamrSzqqPVJsi34o
ih47i50e6AlhEJvUemhz/7S5UpVU4o9kDJItmPRFbX9+h3CTEFvT83z31BD3EpHFdkqG1yeFP79J
7b3+TiYucwW4DcgDh/uChCs+POGaD2PX0MSEbuvxAgwp/GBjCRCbtY9fwJ++E9g2KjcCmoYfXtJQ
Pg39VLNbDo6prq7nfyrnX16kO5GIDRF9enYiKIC//OzHp+MntSuoQWGiDv9Q+0WtT+0vfybn43iq
50cty9CpHw8Edq8knO4HvYXrfAgBlM/955Fkf/xCTbw710yvpgaevN2bbvO9x4TmwTLt+Gyrtb9D
Rn95mmBedYr5ag7p6vdA46YvjILtnMxzA14AdOadhiprw2GPuBtc88iVr1K2/v+eurhLReaFGxM2
JEMGfvllFHNlzXRhb2oBRhEN+03ZTy2dA23tdyQfwRDOURgX4Hc/89G1LeGnl9a8YN0IReAvn9Dm
oaWW7nhJY4qaDNPNOSVxbKX9fOLaJzwYXYyETKysNb/PoFPXDzRpPV9T7vRG0L5vY/xXL3I1T1pg
OzY5rOdNe2U0IYXiFetvZ2BzAt4QXicFIqnN9v0wvkkB893riY3jw/o3n466Cdb87xOOVhOYIMrU
43p9Pdy8E5SKKCIaYrygsvGY9ReoP8Qt4XlnZiSibwJRbn+pdZJgMoXxo9CSS5NSi2zrsGkXMYJd
tTMQSfn3bx6ZQ6P6MowA2fZLPfmZ+ps3r/NesPDJxEF3XI7Y4502obsHJ4NjqHgYn7QZrPltqplX
lSPLk3+tfK2Z8BZ8O+xY7AMW8yvuITC+IbWNSx6O2dSRv36yA4LqMbRg/uNvJDbwA9hmSQXt3Cdn
IjyO11/95yr9NjV2346YrH5H/vljbIWT0W9+85f+7vjYOPlVUhXIcYDpGy7Z0I9Wjw9tCuBRep7Q
HB++1nJ/bDPoPvT9il8PazbflqCu/IJ/fDMv+qL+8Jys+s1aInFbgSUrHIreDs++b8XV4TDUV8K1
WRGuftOGF/A644DcQ/DrNyAWoUc4+HyV32k4RnCd91IEyLueLpdQhZJvHv7m9cQdJhH88suAbKN+
ViOL+98bBf/45z//5+87C5r2enuviwHjbR7/4/+sCvxHcS3+QxDEvy82IENR3f71n//eQPjXt2+b
7/i/xvZ1+wz/+s9/in+rBv8a27F4/1+//sd6of/6x38DAAD//wMAjOe+STBBAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c08dd8fa56-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=2kFuauO.O5ci8J5HVplzLXEY8LVHZZkvJOJ8Hb0yhCc-1712872371-1.0.1.1-vqdyu9angaN80zBy2in5hm3j_Z4zO9c3m2hurmmbgsc9vnMJnR4h.OG9DGBqHcLdFMeMdqCjpjuar4uqPBkjIQ;
path=/; expires=Thu, 11-Apr-24 22:22:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=eC31E2qAfREFYM4a4l91ZiSzMkL5BHBdzxTqvrIRJWs-1712872371392-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-ada-002
openai-organization:
- langchain
openai-processing-ms:
- '18'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9995'
x-ratelimit-remaining-tokens:
- '9999928'
x-ratelimit-reset-requests:
- 24ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_142cf77ae8bf0fb2ea86ea3c36b88aac
status:
code: 200
message: OK
- request:
body: '{"input": [[9642], [2822]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SZyRKqzJaF5/UUJ/4pFSEikJs7oxPpzESwwRqBIgJ2NJlA1stX6LlRFTUxQkka
Ye+1vrX57//48+efd14Xl+Gff/3551H1wz//+f3tmg3ZP//681//8efPnz///fv8fyuLZ15cr9Wr
/C3/baxe12L6519/xP/95f8W/evPP0oPCr3k97QbBzlxYSnXOts24aqbBvncQOQpPvMf7dTNl5Lb
cN4uW2ZfzUcyGlIpa8w7GHjyLms0WlwXYM+YhmczxLV4Dm8ygjmeCV7EPJjU22xqUYxtYhy2t2Aq
l5kASXq4MWdtapy+DvqonYgzMGd1fyZcV9RU/Qjpm5hS6gXjUayP8Nj4N6o81GM9VceLqB6uiwUV
vPMT9fvkUyFbPT7wbE41Hxv90cBCxyEeA2GH5qOj+GDHVo+lVON8Wu+9GAWLa8/W5bazuMTUPciP
Z07IfGgttstiQR3ZsmR6/+ytPi3UErnibOHV3baTcTDKCmYF90S/KS8+jhHpkWCAT65k4MF4csQY
hOVDo8rxKFlU6IwL+G9tS9VDZXKxeNYNeDZ7UVhvTM5eY/KGcXi+qCZvtt10Ll4Y0LnQmIEfn/zd
rd0Lip0cYy0b1gn31PqJCipZVNmgszWnh9aGK416EqYODUYcjIXsbUygaFYf9Wg2OEKP8zkl1q5s
Lep2YghGxVJmaO8yGadwqpB6fA946vL6ez/mPfAbjihfn/b5uBHOAmwf7ocFZHFNOompR2AqQ8Ss
rkkwmU1IYXhlI9mu7WPN7+eXqm2NS0anzemUj4swDVElQYgPx+MlnwLvXqDHxr2Ra7t78PF1CGR4
XBdPOg7BPZmK/c4FeZvcmblYoYCag0Ph+zzwaldafOXIRxfAN/fEeaVRPe7qRgBzu/OJpyz7bvJV
2sNnGYfMXhhmJ639OUWdaHlka+J7MGaeQcENDE67o91xengMJZQfc8a7fGXWg3DXelWKeIGV+X3v
eLl0bOgKR6dKUuo1X57ARloHD2JVs5WPnjrrqHDFG1nzqcvfa0MR/p4PB92ARsfXKhAC50mwtZeT
uVkdbGQqUYIFV5y6QRZkF143SyYhUDdYJZ9AhlPIUyxXLOK83NxMtB7cJcFXuQvmt0l92GnRxNyu
Fet5s/RKKLbZQMxL0AejOSQNetxpTLZd9smnWFiJ8MCmycySZcmYqlEPURW8SRgqVjA9Wu4vdBkb
jJS3qub661OhWZtVEmLTqZe7uk6RcLcD4lBjyKdVkFO01KOaednQ1ZRtPF3Rm+uR6X6kBN0zvWYo
3XsrRuaDH0yLUc7UOImOeO8v3WB6iO4FXsYBM3d5t/JxkHMXsjz28GxYgTUK2qEA+LgH/DjMOJia
1bGFOSEuwVqTJb0hlSpI69QlacUiRNteixGf5ZKkVHbq8duvsOkAY3jeWzTslhCh8zJ6MDKcFzlj
Z6eB8DQ/ifG8pcl4y3RbO7Odjpfhqs1Zo59cUMzzhnmzFSY8k9o9+tYXzZPZrlfCfW5U4e4ETBe1
ez1v2ztFLnlfmX/dW8FS0HIVHlftSbxz87a4rsyZZgsyZ9vTLUnGDoc6ks5Ixb9+b+U+Ef7q61Yy
b8GbjFkI+ZqXxLmZVTCWwkeCZFemBJ/Eupvf5tMHUitPFmiHqhvAN0Lo9orLCN4J+fytd3U71gt6
QHXQcXhkFayM/ZLs171dzwcjmGEnugeszFafzDU9V2hHdMac+jkmrYhVB2WCkNJ8O0hJv+kNG85X
c4MVC3RL+tRTD+ftqmWB0Al8TMqtDspWlJlDn2bNbpv3DC20LdUehzzgyHYE9FsfhkptjdWt6yGD
zqJl/wwDDpc1hpaXGCO2EXLWUAdDbwwicwNNSDg740ZdePs3O6P6kY+SuevhbJ4VPGvGAzF7niuU
R4dkkJXyktDMFUVoeYWJf2Fb/vC2Xqru98f373g5/ezeEhqbnGHxVIUBa/LlETI181mg1GYuCXfR
R/4UbRi+Jwc0mdL1iYzT3sS8G17WEH+iEZT73cWjtzl0f/utoarBtvejGXzryQFybwdC9seQj2fV
EWC7OlfM3IY6ooKWHdHdSCLciq2F+myoi189/rbz1d3WJW2im4pCErTWJO3vJrLqqSXOdf9JRvWj
q/C2vYIZ0drs+szzehiE7Yr4evFJ5t1y7tVPLUrktFk/OXvcqx4+zoISk7E+6S4fzVa7wtbJ2n/U
1jwPMkbLc7rA9+dNzimaFk+0iwqHEKVaWn3x7Br43g9yCKhTT6ez9YTyph7IhvUKYguN6KqXFvHP
r5L+mXYS3D/1QNblXCacBJUJUjQVzM3vBeq3fHbhiSvt9/z4fNK6UR03L4uQQRDr4T0WM8qmbUzF
rlXR3HhUR/l6KvFw2Ib5nE56pAlwK4kr+7q1cqNLgaKm0Oivfjgx9z26SKsZr3koWXNXnkf0vR68
grNgUfVYu6AJpCC21A7BMBjXEDF2fBBvc2r5KGjXAr3v+pME02RaHB1vItwb7YbnoN/Ug5EbPno/
niJW3cjruG+9MKxcnBJzSR41jUtPBdkpMixw75pQvljtwdn0b7LxqFXPTKR70F7piZhiO+bzHMcm
lPp8IF65tdEgtjsR2BN7JLw+NwHf0yn+6R8d004Khk01Y6RbzoUQvCvyQUURhmRWYqbflA1nEpuP
yOmyFcPm2AbzpOY9zJdMJ2Qa92g+rKTs188sfLU06XleXVAbOzKxk7HjfaMPDSi9oLBwhTH68QHo
YSiTjfsorbGQoQD7HnoEm8GeD5+aR+jwfmLyvd8Be9zbHm3SbU3cN0/zaby5EuxE/8DcxaPtZu7Z
FIhZ9eTHl5xLM0YdRjXWnG1mdZfP0lYrX7NYseSGxQ9JYkOz33HmG+apo5JihOpPj1VOUzRPBwW0
22B9iHe6netR/bgybI0iozIZ191cBGYMz9Mno0oTary/lgKGSyS3WIBc4rR52BjkOnGIrYddx7Zr
eMPYnBmmT07Rz39VL3j5bL1etwF/vVIbamHfkkDtH3w+iu8QBHtrEVOgq3r8uGUKMmkk4opAA34t
2YwkTziSkF1oPayNMYUqW9vMgUubzNK11tXLZZ9jkcohH4t93KMu2qtkK/cm53qxD6EwBGD6irzr
MZPk+fedLngx1IwPvazWF7Sn4qsz+Gi6kwS7wh0x+izuqC/PwlF9vfYzc92KIy73WQZaWYoMP4le
T4NfVdqshD3me7TnfAuTCffCPZFwxe1g2dmaD/vbeP7ygdmtHP9lo7dz4SR0omc+DomSwV3ufRLI
5xPnkhlEsDxnC7I9qkLQc28jgpJmHTHv8pXPnc2fGnWqiapLPAc0VS0JhMlgRA8Pr5qvnoOJdoU/
YtU6rOsR2dcU7NjoyWaa3smkj4rwq2e2/vp1v977InTWdcZau3ugqRSOJXoZJ0y8m8QCOjqLGK2j
HaXP6SJ/+/0QIgvsGq/Gt825Z8kCJL0y4BqmktNsKI9IZ2iDH9HHr+mvX1dlIf3tD16elTcKbQth
fnFvfPjx0M9fnFc61ryM+iNqr9WHLtCyDvgqwCLKT1hiBqE94rVeyoCk3YeZ7jJIZlj1gtrr8u7L
43ousm5QEZO8gemfVLW++0uAl5+G/fiWizyO0EK4bPHk4ATNZD+qf/mi8LSlxbJY/7d+W52749P1
xI4/3qAqXJqAc0nFylrf+1T8pBxNm0oNUfPWC2KeItda8aGRtZE34Zc/zGDcnOUQos17x46Wklnf
PADw2C99LBuHN+rh8RZAVY+AkVKbyarJl3vo5ySgg4kNa7yc1BbIu4qIfoi6nJ6LoYUwQhZVVqMQ
TBe+rmBD1g7bFIsD4qv9xUXf/mFYoDYa5Tov4VsvxA7Ox4QvyE2AwCAd0dn1nc8r0xbBNdAJPxfJ
sx5iYSGBuN9lWPC0QzA1NHFg5YYpsWr05IN1umNUt68FRp+sr6eT9jhCKL5iElQTQaN/dCrQ3PhI
MPeuOUeTZKrN5CNmao99MH/98S8fWrvSD/jhUQuglZVI1nm8sQZPHQtN0dfzXx4cpgMqwTiYAx1f
3duaIAlHkEcfU27jCPFSyDP111/bUa26iet+D7UyX7Cc3wvOjgT5gKl0wZXomBYfLnIMydiemRm9
go4XBs/QrtFkOpsjoNYu6xB9vMRi68NW5n0er2MIosxljhsxxHns9GoYKRadBXqquVmvJPj2A7Fw
LAefJ9kLP15gtl3frae8/ETo+38pLB5NPvPFYg+5f6Bs0w06n8/W5gJpMa+JqyV1x6O6jNE+NhLm
WIpqzdUxiUG51y5dnKU1HzFxWoRUOSH+tx7nR6s8IdgLRzzdUfv1NxVDmIQp8apbx6cyUjB88y7z
5rfRiZ9Fbv7yA+W/fkXbq4hKXCyY/vPHgFr4l/eJq35uvP/lZ/1s63T88vrIukGGw1VbMKO2t/XY
xMIFfvpsvN454tL1foHb+5qzwF/p6Ju3XNBhJriThSEYyzpoAaSlzjZOOdXdQhlFYO14YrfrsuCT
N9UuzPZ6Q9xT9UBcxPJ3XhD25Ku33XJ7vwI0ZNaJw9gz+F0PWr2OZywOvmiN70BqYJs9pS+PVgn1
ps8TGXIPxDiVQ8BELJewslofi3rY1Uy1I6oVwvQiRjk/u8/B2B5/PE5yPdcsZu6ooPD29cTPWAm/
eV+pQHaHhGHpRHIOidfCl2+Z+ejWQZ+Uax2qQykR86rYSDIby//55c8/0Vz2tgNeeonxEtVdPS3G
MQViiwZxzaLPmRyJIVwfbkTn6sotrtRrG1T94zHHHKWOohulQFuik29+7ebLaZjhfVI9+prVjo/c
IyJqZedC1plX13PmgqQuT5sWS9FTyKd08iQl0z+IWHsk8vlJkky5XlZHZtn8GbBd34l/rz8oFqQb
9485g3O9IiS8ydSiU9i0igrv3XZU10XQO1cnhfOu+e6PI97JfZaqCTg+Xubxy+LbdFsqy9O6xbfh
fMv/8tllThrcHjsnnw/+Zg+pJjssGd+dNRweL+eXJ/DYOTEaBacS4eAVWyxc5cAassU8gp3jhrgD
syzu39YZbE6pzr58FXA9tI5wq5UtnuEydl/+NZFnFhQL4trNZzNbSmiK6ZaR0aqD2TsGmfrZrnbE
1cN1MojQUvVNHoTQRZxYUxODim4QOuQS7QfEhssYI08PzsTg066bfvn+L5/rI6/Zz99//R2+nNL6
8TcE0iFiDqe3gH7qiYLeHRW2XdtSR4fHMoMFO4bM+OaH/rK+z9Cj/EW2n3785oWgQbtzUxG7a1U+
OcazBdn21swvrk43L9pFr3bRUf3qfcPH4WlGYHRLEZfLO+Hcn44RKLcIMzMypfzTfwoVfedXVNvm
YfDtl6fq77MFC2b10c0d1lVoUvlKzMg8JmN3Oh9BlW2Lff0i4YfL8Q3nekmwHJs94vtLmWq/fprm
N7WGztZc5bc+eKlJMi9SKqFimw4kpLKeSxZ4Kazjl8UCp+wSxpxlD73iUmbiQ5P3umUXWmrIAgkX
OrHmX72/aHggTrw/WbOIVRuMzxNR5JRBPgvOFdAyvl/wyl9tginLd6b2ux+2XRvBcptmKaSa6rAw
Sw/B6C7Dt+Lp3vk778PB+6iNrVYvQ8z8RXxO2KVUKjXWJPebj0prVNF5RmyrEPLN66jzprcOiiIa
xDJ04a++qriNMLmZ5SPoH04sgAVOzZz3fZ8zU8oK1WuHgLl6+EimT40idDj5MjE20NfzllilcjJS
i+7Dg5Hw73wEYvrp/p0/NtXYAk/GGyHPUEyGo/iZ4fVpN1T0umvH9HFw0I8vouAMNSdBa6IrjXtC
zHId8CmsHTiuMxWfSH7IeSkkKfryM7Hxy02+PNbCt/6pKkGLmBtdLmiOBIKF+gFo7G2hB1MCAWt3
NcrF/rR5wrMi228e8TuuvtULKgg12fra0IQXgUvRV99YcnEXaHwVZwz3SXwRw+GfYPrOd5Cdhw1J
u/Ut/85XY4jrrcocsfUDNBixrUm4avDi/naCeZumImih2DBvVN98FNdKC6rHHxTyu1xTc6dGQJ1y
YmFv2MFy4dwqdBjcC1nX9ivpB+NdgRzob+KtbUAD5lREc+Z0xF/yV843588IX71i+JXaweiwU/x3
3vU9nrV8F1sZbYzsQpWXlQWTfkhmUISM4La5Y2tykkWJeq0HjHAc1Oz1Sp3fPJKisJn5XL17X/XN
xQfzakuDHpLIQVNI33RqpkXNz4diRJfisac/PhsX91UM86G6Yu1xQMH0LrYq0izq0tkMac1XgSOh
K9JOLEBLLxnLPgJo6iKi6OtX/WdXC5DOlwMWVuWMGumJCng2VcVItvBynrkoRtMpytnV17/z4mel
K96qSdh+YVT19OMHLux3xE2dfcLLzcmEV4d0vCy3QdBv215CCjgqs0J9Z4mH5DbKc4k4/fqBRY0h
uvzlJX/JNzkVtHivbj6twowdflojvt8q2N/mM9tmwzofHX9ZoYMUK8QhhdHRGJUOxOPaJMYRnfh4
uDwctSC9yRzmR4g/yFmEZVxfiOfgV94Wz/KiLfNOxmos36wp81xX/fIe0xsxQOO1FELI5XdIsnsC
1rzaX3uIC7qhstet8mEjRClcHZN89TupV5tNpMKw3OQsYJsimbcwP0EOzDdbL1QjUIF+Mvi+/yFB
OrzrUZ3DAu5StiT+w7Hyv/NaNRVMuvz679iVexUiplLmCaEZiOXmUYA39iNxzPCd02C1xqBVksdI
1w85bygO4XOLX3gpHs5omr3UBFxnHfOp8bKaNbub6Jsv2enxjuvZWNz3f+cxG7RkHWPdB8Nznd2J
rocTmmu9LdQ+8XziOksDSd4tpeozmlYEM7/JOVF2tnZ4N5iFnpNzziLL1NZRQsl3Pt193ydlcAqn
9C8P0nirmFD5C4t5xrpGbCqQAJE+U+ZIacVH75jGKszRTHQZspoPcthDSl5vLAm+ZVHniSX0PT+z
z0Swvtd3QRu/lInubZZdv2IrE/bwWuM6bBb56Dxz+m8/tMt3Pgup3CCBjku2O+vPZCZB94Rv3mX4
JFr1VNmBgLBKZroA7cbZpZwq2Mm0ZmvxcOZffX1CJz9OjKRDlc/9duWrYqzfmb9jqO43vWf/9b9T
qPuIgy7PEA2vM00uw8Gii0P0Bn9Tr4i3zefvPP2tImRdzyyUWr1bjd3tAhNdV8xu76t8ZCIPoW4f
C2KHh3vCJDOIwYguNwxetwvmT9ZnMIz1lgTxxwjG2JYb+OrXl6+Hbt7LrgvhMPrEj/cj7+TMK+Hy
lDHxU21bjy8T6fCd91DNmVn+5YM3wMc/MMNNqmS2PmMFoPU1M/fnjs9xaaia9OQmsY6foe6bizXK
0DQblj40E/Wr4BqpFf3EZHuYsq4T70aqlvz508Mm6L1bIqqWwjFG1fZe00JWjpAsw9ffvDNLiocB
vwQFv8n4qOddP2RwUbwNM/R7FUw//3LC6H8AAAD//5x8SZOCwLbm/v6KG3dLvwCZMnk7JpnNVEDE
iI4OcEBRRIZMICPef3+BdbujF73qZVWFUiTnfNM5KhJ59Tvr/CIAmvLdI+UuUmsWcg/CZTn62PjN
v7g+ekGZEImMonm3+iY/5jA0dj3i1T7vp6A9Ff+eP4KNZf3x34q35LDizbDiDeC52w5HAvwwysQl
0j7RxCP+Ye3BfDjET03INy3Fx/uUzO+8i4G63Z2oBecKtKzsbtA7tibK1vkN4YStCd/NvqIR/34l
445NL5herzYBXH8DzAk+jvrLN14CFJLpUDxy+B4Di27nu11O30PPwbPu69T2nqo1rXygrf6Non0z
g/FzDFLQOjf2y4fZOAA6/fILNEfS9O+8OpfMmkZj4NY0j8p/56colmVGW+UU/fwX4RqGwEA1cYJC
1lAC3C9ik99+Azi1vPbTu+V86XwILzufoyYLTmErUd5UU1mXqHN8fsNpkbwWrP4Mh7tRLGfRzD3t
U90xOQXSJ2Q3+ryAnx7MRHiwpJXPlAPfONjhJQH0h/r2BJgeT7967dlLv3vqB9sp0ZrbrR8geT3h
77wWfPuw6ZG1AfTt8YPd/UlPNj0KLsrPD9qFlrH5wPsNMEWOoyG7MfabDwLxeByo/XGifl7PD4J9
9qB+d4hCKfTTBu64ZIvkULuVUx49OthF22Sdn1/ByOWqB5M7Non6vNzr9XwJfJvNgbqt+Krnfnn9
Oz8M1rz07/X/+m0F/Nf/+P/YKNj8vzcKaiX4ICX3k5KcA0WH9bgAugUUh0tlDBdoaN+SWj1uwxnq
QQXGg1xS/FKe1vQahQnG73dBluvJrdtIqWK4jdIbdifBq0W89V+g5swb9rqKA7MVdw2UNQSxa5/j
sGH3VwN5lQVEss+xNadw8GCCm556J//L5tCCBXxLroFD9ewBJn8OjaZK4EI2d9UMJz5AhapsU0Ld
dsJsib5bGSbppcaRdais+SR7AxyAb2HrvesZHYoogMk5K8jmFm+tCQtSoBZqvqXl8FWTZfsuBCik
aYX3X7VPlo0QIKAE6E3N+Qksqo9TBMfz8iUyTJuSBU1rgsN5shA8yzSZSjIGQAYvHiMte4KhhdMC
wBifKRJFVHaVZ++BmekTEV3LtDbax88hSkVKCE5ouPQgzkH4mmPq7h9Wv7mqmwa+7GdCXRqF5fxt
Ow88Lq8PRel7C5aTWKZwA+onkgxNLLvOLWX4zu8ldg9HMyTcXS0AO+4Ewmp+ZMvG8GU4RImK+Gpr
9YSL1QmW3LajhqOJ1jQJXQHWvxPKVwdrCrePCfD6rFDXk4JweX6/T6iE4nN9Xks4aoa3Zksgwdt7
9QTTYlaxVpjmjL0bs8qJnF+c9snOGxyJLui/x7q35chXB1LbagPm1rMcdVYCFTuFLoExTqkNH/Oq
6Ll3Z81BdmzhOFUGPeFkWy+LPHbg8bjdkTxAtV6u508Kg+1g4Ptl45eEV5ROXesHo/T9Zu22pTJs
MxVQe/fye4l7vQRY1PIBX29y3rNY3qtQGYlJpqB8MbIsOwGy/EoIV5Z6TQ5LFcOX4+3xzjBwOS+6
KsOkgCek3Ww1bFuqeABIpketsbfYlxWLB8EmD5Ek7V/W56lvYtApHMXhoe3BJMnXGyzCJ8ShpI3W
bCuPG6zjco8kJdZ7hoDoQZbfCUbJyQXil8Ecmra/x/YB1dZEXYuAauLPGJnzrZybcddAJBJM3d6L
67mpthD2e+JiXJzHnokDQMC3jiU2tW7oaWl1OpyfvUMG0zKTKmLeBZpKtUOfh3gJF3SwTP6BG44o
eR3X07lwn/ANOBkB/uPU0lDVEArD7UCktR4EVxwJ0FH2wUHUPuv5ahxvkOe/Bg3lOQSLLUEHFvnz
TeyN24LBwY0Anas1INDtbv1s8qENlQSm2NtZKaD4m8pANK0TemwKPxykOF3gTdwrVM+e93r44MKB
4t3kcYSCq8W8wWhhsCUGvhdsANOmlkT1KFUnfIvckU1Xv2vhJXcgUnV1X84+zQa4u99norGbGU5D
TiYQUd3Bp44k5TI8z+lff6jtFde95CwVqPZUpM7lwvqPt3wv4JzObzIy22Ti66THkPOFMzVa+8wW
V5FvalxKPQFK2IfL6clUCFU6Elm5hckcSaoKc/NWoMmc+4TRYzWoTB4PdH2eydyoWwSD3QvTwHnu
yg4e4Au2VdvhfFMs/fezGwhMXoOD9nchs4gEWgSPfOdRp0SbnsqVsoCDrh+pz9mbcuaQnsLMVC3S
S/UOLLVVCWAbCwk+ptEULkgKCRSGy4Ei1WnrKei7G+wTziC8vD+FyxwiCE4y6RHrNh6bGTm/4HWb
ehSfeZTQi9RW4DyAjEYcv7DJ58IBepP7oFZjdAk9PZwndMN1XHSdHbC0b65RxbwOqfU675Jp8T1b
tUIvINMrJuUESF6ovr/BOBB4PWFPXYuBm5ofjNpMtSY9LhvYal1Mjbhvy+VR1gM8y4mJZlwS66+e
wEFeHc9NqIfJ82VV3EwnjA8PsaQ5GvcqfzqM1HmTIJwfdm6ChxFBNLn815oipt/AUfFSmtJNHE7J
Vb0BSzdNGqmgsOiYPiuVChBT38zMeha8vQx39+tMZL769NMQfgIo4ed2Pf9jyQpc36DfOQu2HnWc
LOCUIXirdR57x8cA6NM9OhBxok7AXieMap1zg7xtMOpthJmNkbSo2u1xNslkniKLbrvtHj7sm0ge
bOkZs/0GqWu/E/7wEBOy+Ziv3+uR9OyK/quplQBP1UvF95N2YOv55jB+fwocXdI+JLYNJuh0xpFo
xmbHZrkjLWi2wweH5/idjD69EWBf70eMHEPuh5Q7V9BT3gN2nc2u7s6aksGHKSoIXDw9mbzutVc/
8uFI/WoXWfP3aBINTFWK83PolGxbv29gv50rGj0zN5kuX6uCotWnRLnx6/OT+xh83dzAV9cK62nI
mwUcx6mlBt6sDrVVMyB0Qkqmh7j0YwVnqBmS//nhKxs+x1kF087WaVCHMpg+z7mCHWiG9fm4NVPS
KYPfhd+QytAGRpZo64GMu4vY+A4ZWLL2WAHUZzXGO4tLZjUKOYDlWcIWbJW6LQx0AY8mzWi0Sa7h
oOcgB8Ux7ql1sq5s/qRRBZpXtsMhU4SyOZZHFdbxeU/EfmqS8QbYBKNjnKNnfGWMJdv8Be3BAdiq
36hfnK0CoT1xDJ1E0Qbv3U0RQQFNE+Wn+mut+mr/w+u/+puO/bRXm04w8OUWo3r44XXSZBZhXHRl
c+lINjgfHj01NNvshdK0W80YzzL1LoZRLuJGLzRXqmzqorsbfml85QByJQf73VgllN+G8Idf1Cxp
kLzV99aEalryRD2IVTJHc+LA0xEjpL0CnLBqODUw2nkN4lL+bZF2ERE05+KCuDRs6vFO1VTdB9/T
Wh9DyF7HWIUyJyp4rYdyLh3eAfoAd3i38vPy4fgW2EI00nJKEvCd3loOKMtiRI7nVzldsy6GreC5
NFTC0Bputq9rY6dH+LTr23qKhqpV6UyORNQ/Tv2dncsTvEw1o1GnV2yGOzOADyo+6PbhVdaSmakA
WsR8jKC3Dalgcx5o26lEc2srgFXKCUGnSj4UJ/czmErwWsD9IHtIe6RCMn0uZwJ8X8J/9TIKpZz+
zg97cT8D5jseBKYYJ9jecR5jlXOT4fuJAzJzsZ5stI+RQz27nOn2emvqZWgVD4x95+OfPutyXxWA
YyUc+aCtyyTutjOhG+cdRbQRLLb3BV192BcRSdY7SUZWLAEAku4h5XNtwmHyDBla2m4hyu1NwBy7
nxuUfP2APTvU+gEXvPqHR9L2Qxnp0uQJA10MEF31lZhfi/jvZ35nceV8v8Qy6DihwftlefYzkmdH
6+oLwC5SmoSVWuBB/K41/OOzRSx9D2JozDhoDMaGa/aMNQReDsby/mQNx16OIdjvZyRq/omR2mpF
KMyZjzbCfWN1tebBPz2xm8WttYFOl8FLFREkBAT1zJ3rPaR3UcD6dTkDlqn4pXb+/Yu3wv0Yzprg
6+CIfZO0IjqE7Ny+njCcPI58kwNgM7rLJlC5co82+b4FY1d7T9h7zRFNhk+Sfl+DPaQ9qrCdibrF
iizcgyNnhuQjXt414XeHAG5PQ0ATXr8kcwf9Bvz4YSK5VbLy5F+gFU0uPe3Bc9XvSQrk+o6o+66O
PY1fyg1QZ+tQ81L7oeBzIYHNwUlQc4yUhPVrojFwuUZd1nQ9sylRgTLYW7pdLKVkV7tCGjh6LU03
wC3X/ulAuilTJJ/DJiEiaRsQwLNGA92ZAfViMweWOHKIwyMpJz+0CmhUPSOclpls01Q7CO+62WLL
zcyS6Z88B/H2JFE8RVxJf3rVPe0f1LN5aA2mm2eQNtkXB0HTWAuvzC386SXFeNphF4m7JxS7osdO
mhdgKfrcgUtoFhT/9FuyVwXw5Ud19TdvQN2tuYDy8xGp6b9hT+mzTWFioC/GHr6zYe03eEk3Jl71
Rjk9+bsA+Idxod7yOvXT6kc1Wxtkwj+tT9jvM9CpygkhogpvI5yCxdyDbKYz4s8hBWQrL3tQKW7w
Vy/sfr9zIGytCOsfeyqH+AL3IDkNVxrDCwLT6Gyf8KSmLXVDPLCh1m8OJMv7SyQa9eV8p0umnTz8
Jlda+AlVUjmFq55BSqoo1gJeVwFuLy+DehnteuYc7AgmptDQLcd/yznqwB5ezoOCzevpU5MR5o0q
dnlP5Lif2fL0gxvUySug93ye+r74dkT1w3uEuNVvzjD5QqBNdk/NyJCT1832TXhwKn89Px6wIXll
UHKLlIYatthimHwBw2/v48DxkNUeWm0Ae98qaNBslnLevc8dXPsJTV0a1GQ0p4tW7DyF7r75LpyO
t6kDq/7F+D1WPRvH+gXqVzyR+y8veKNShUlt+9iYs30yRUq7B63VG39+efWbCyy7zURNaNJ+CS9O
DPzkxWFbCY16cc7nAt5269LmliAgcBkWgXBwGiKY3YaNsfu5/OqLTN71kYxXdHV+9//DA7YErUvA
/e2ZhEMKZT+9AqsP7qh5wJk1RbruQefMlhV/JDCa71AAaVv2SKPFt2SahGxo78c92oAjBotkeg5c
/R/a5J1nMVKaqRo99wY9DHC2JiUCHljxBVs0qdiHgEfx4ysckO3Qs2IyK3iZLgeicLe0blNpHoAK
/AybkZEnDHmaAFzFM2g6XnG52PMlVn2bz7F/P+/KNV+YoHC/tOh7HKWe1IHXQRS6Ova/2zohn5zj
YNlJEw5qXkrm3WFBgGykmqh3wNX9LpVieIwLjdRrPvK9X2JVe9TOhcwGlcuxJeEEo6QtcQjLdzld
LpYKwktfkTF8WP18vIYxYE98JQ9gNtZoxd0L9tu0RH1qf3q26m04P78OEqakSyYHEwHGIw2JGjRN
SNmVR/DDzjccHtDQs711QfCS7LZ49S8Js2FTAWmneng7lJdkMhvlz+8QwH+afl7SOINC8ZpxxJ8X
a44JSeHx2EnUvSA+fJFkG0D23F2RzHIxZN8bbYD4Xjd+3O4esnieHXUbDA9qpKMNaNwf9vAUXu/U
0F2QEEEPB+i6Jkd++D+De52D+hJwf/X+hc4z/fEPDaxhKamC8grKufukwVLbPfv2HwSAP0EkkWUI
p1U/AxXOyZ+fpy7XqT9/hfWHuNTsUd0CQF/D8MfvMytUT42lm4oAbnDJ4tu5BaqkXMjC1Csj4rrR
Kj5jlepd2vXL/EYRWPMcjPBIkqUC9gsSeiSY8uo+Yfw83ODqX/B97c/lE/IpYEcsYLPaPft24D4X
0J1pRDZZ29dLl5ZPkHw4nXpxf2BTgZANm/vlSC/a6cgo30cdbIrmRs0n/+ong1djwNDI0y0XBoyN
bqeC+3nJsNNPTsKudotgLVXdXz4olZoZwFtmZNRW+MoaSX/SoUvzmOKfX3G1bQVLzu3IIaMPi0LL
fcE85xC2p/Hb09dGgcC3tZzi57UpJ/CRG/gejzE15M4qZ3vncbD5NC4adhlXE9ZEBWzM20hk3vGS
zVXVXlDk909stLJcDmD9BAtpNyqB1kEPhbV/Rcwfeho9p6ZmjyR7QpWEIxKN8h3OPszSH79RvE/y
ngZNq8OfX9wtndUzXzFizW+En95+lN1Pbz+neqLuJLT1JN/TXFNf3yfa3Ksna9P5IINzaYmkE3i9
ZEk4FXD1O9QrznYo7BF3gRtSK0T94EP/l9fVb3XccdB7hwtr7EKjBadRfdL1etQEw4TgaooIHoWs
npL9IoDf9XjUl+WsjY9IW/MQNBs0TyalKhoQOuGAd480TUhXsAwOMaNEEOmbdRnZpFCxhpk67tVI
xtlJK/i4oRcO5mPDRnI4LdAoQo5I5iwwYl64FvCfek+mVR8uu6lW4QY8nmQRs3NP7AMoYPK4RFR/
mEI4dub3Aq7WENJ4fpZWx+/OAVTTM4+3ST6Xy0HRnd/5ot7LNuV3xTNobtmODHItlcP1ajqwPiUu
YeH06Kku7HUYnsuGCMZFDqcLrVNt5UeyhBpfMs7eLpALbxnVH2JcTwdzY8LczQ5IQHlrrXo1UI5K
kFJ7X7z65ZscMzCHCsEeTa/lpCvFDXJedMFX5zuULFPdBp6SzwG12TEslS81oNZqbYzRyJ3CqUCO
DTrwGug2jSZrya/xHsxvoBO1KRNr/uWHigP7P//IfnlA+TpC+sPzeTtWJlz7FXFzu6/n082/gVU/
YS+5TyUb066Ct/R8pqYqHPrJ1KcnyA+2TsMDiurJZJMIDufFIko3VuV8tCJTNWowYte7Bmw52+cI
tPrZxZ5y60viOlMAZxy21O9GPZH21/ICbrXJ4+1inUuWbPcNTAg6oOd1dpj0vsgy5D+PPXnIe9ua
tx8daT9+YBeaAEYv+hN0FTawHaQ06ZN4HwG7u3GENeW3JjaXyBAkz56A095jQ+tPNzjJvYB8bS+C
peU18ZfPEUmWjXL1my+AHkJOMwdbtRApbays9fjTt2wZjocBtNA84lCeezZ63l0E462rCKdWH/BN
wMgpiztVv/6tNxzysj+8ECz7aM3CM/eghQqbbEqzA238BbIazgugppidayZI3A3G26NEA2l+lpMU
Xybw8osA75pnHH42y0UA8d19I/51f4JhCD/ej0+psU+2/cwMxQbN/XbEtq/urYYbVB02bvbC7lsV
2GwcXgX81Y+t8HrYD8mQQkvhZezKspFsvk8z1fKruCVLquCa+Y4OQVeefLpruR2TDpr6hO3yDKjt
ZT2jP/2x5m3UW/lws/odmJuXAkevpbaWp9l18O1Lm796qlJ7LkASEIH619PBYhfOS9Vb0Hd/fPfT
Z7DT0BcxK/7W/e/9V76lhg4UMCI+l2FgXlwcOqkMlge1AnjM5ZmGa/677Py3CfVD86S7Xg8sKunF
AP1+oNggqlD3b1PpfvMgHKx6XayOlgpR7nwJIAepnNXty4PKKUI4U0cYTnyNLzC/38yfHy8J78yZ
dnzoNblGR7OU9E9eQEna8H94+DePWv3nOv/IwnG6HxsIiJrRX944R/bygr88bO33hG1kof3lz2s+
KoUkvKA92D9ySp3m6dViDpkH9ltWkRfK23C47oMb3MFrgf1Vn83ylZrQUjQZu1JXl3RUe6TYFv1Q
FD12Fjs90BPCIDap9dDm/mlzpSqpxB/JGCRbMOmL2v78DuEmIbam5/nuqSHuJSKL7ZQMr08Kf36T
2nv9nUxc5gpwG5AHDvcFCVd8eMI1H8auoYkJ3dbjBRhS+MHGEiA2ax+/gD99J7BtVG4ENA0/vKSh
fBr6qWa3HBxTXV3P/1TOv7xIdyIRGyL69OxEUAB/+dmPT8dPaldQg8JEHf6h9otan9pf/kzOx/FU
z49alqFTPx4I7F5JON0PegvX+RACKJ/7zyPJ/viFmnh3rpleTQ08ebs33eZ7jwnNg2Xa8dlWa3+H
jP7yNMG86hTz1RzS1e+Bxk1fGAXbOZnnBrwA6Mw7DVXWhsMecTe45pErX6Vs/f89dXGXiswLNyZs
SIYM/PLLKObKmunC3tQCjCIa9puyn1o6B9ra70g+giGcozAuwO9+5qNrW8JPL615wboRisBfPqHN
Q0st3fGSxhQ1Gaabc0ri2Er7+cS1T3gwuhgJmVhZa36fQaeuH2jSer6m3OmNoH3fxvivXuRqnrTA
dmxyWM+b9spoQgrFK9bfzsDmBLwhvE4KRFKb7fthfJMC5rvXExvHh/VvPh11E6z53yccrSYwQZSp
x/X6erh5JygVUUQ0xHhBZeMx6y9Qf4hbwvPOzEhE3wSi3P5S6yTBZArjR6EllyalFtnWYdMuYgS7
amcgkvLv3zwyh0b1ZRgBsu2XevIz9TdvXue9YOGTiYPuuByxxzttQncPTgbHUPEwPmkzWPPbVDOv
KkeWJ/9a+Voz4S34dtix2Acs5lfcQ2B8Q2oblzwcs6kjf/1kBwTVY2jB/MffSGzgB7DNkgrauU/O
RHgcr7/6z1X6bWrsvh0xWf2O/PPH2Aono9/85i/93fGxcfKrpCqQ4wDTN1yyoR+tHh/aFMCj9Dyh
OT58reX+2GbQfej7Fb8e1my+LUFd+QX/+GZe9EX94TlZ9Zu1ROK2AktWOBS9HZ5934qrw2Gor4Rr
syJc/aYNL+B1xgG5h+DXb0AsQo9w8Pkqv9NwjOA676UIkHc9XS6hCiXfPPzN64k7TCL45ZcB2Ub9
rEYW9783Cv7xz3/+z993FjTt9fZeFwPG2zz+x/9ZFfiP4lr8hyCIf19sQIaiuv3rP/+9gfCvb982
3/F/je3r9hn+9Z//FP9WDf41tmPx/r9+/Y/1Qv/1j/8GAAD//wMAjOe+STBBAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c07af71754-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=1RVzeqm003kDD5kAk._MtZS1D7NNCIKXez_eMV0wWXs-1712872371-1.0.1.1-l6FtdYlkVNVDu7QU1IZ.FIzXNTCiHWY89UfOZMXxxXdEbFIOgMHZUoHY3hTrPoxp2xCeXl0jxOHyeSRlglZ4Bw;
path=/; expires=Thu, 11-Apr-24 22:22:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=8Ou2w20OaaDmDHijuPj32K17XBNlds4J9z79rZGaGU8-1712872371393-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-ada-002
openai-organization:
- langchain
openai-processing-ms:
- '19'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999998'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_88bfda0af149b8c1e82f346ebf4bd93a
status:
code: 200
message: OK
- request:
body: '{"input": [[9642], [2822]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SZyRKqzJaF5/UUJ/4pFSEikJs7oxPpzESwwRqBIgJ2NJlA1stX6LlRFTUxQkkF
Ye+1vrXzv//jz59/3nldXIZ//vXnn0fVD//85/ezazZk//zrz3/9x58/f/789+/1/60snnlxvVav
8rf8d7B6XYvpn3/9Ef/3k/9b9K8//yg9KPSS39NuHOTEhaVc62zbhKtuGuRzA5Gn+Mx/tFM3X0pu
w3m7bJl9NR/JaEilrDHvYODJu6zRaHFdgD1jGp7NENfiObzJCOZ4JngR82BSb7OpRTG2iXHY3oKp
XGYCJOnhxpy1qXH6OuijdiLOwJzV/ZlwXVFT9SOkb2JKqReMR7E+wmPj36jyUI/1VB0vonq4LhZU
8M5P1O+TT4Vs9fjAsznVfGz0RwMLHYd4DIQdmo+O4oMdWz2WUo3zab33YhQsrj1bl9vO4hJT9yA/
njkh86G12C6LBXVky5Lp/bO3+rRQS+SKs4VXd9tOxsEoK5gV3BP9prz4OEakR4IBPrmSgQfjyRFj
EJYPjSrHo2RRoTMu4L+1LVUPlcnF4lk34NnsRWG9MTl7jckbxuH5opq82XbTuXhhQOdCYwZ+fPJ3
t3YvKHZyjLVsWCfcU+snKqhkUWWDztacHlobrjTqSZg6NBhxMBaytzGBoll91KPZ4Ag9zueUWLuy
tajbiSEYFUuZob3LZJzCqULq8T3gqcvr7/2Y98BvOKJ8fdrn40Y4C7B9uB8WkMU16SSmHoGpDBGz
uibBZDYhheGVjWS7to81v59fqrY1LhmdNqdTPi7CNESVBCE+HI+XfAq8e4EeG/dGru3uwcfXIZDh
cV086TgE92Qq9jsX5G1yZ+ZihQJqDg6F7/PAq11p8ZUjH10A39wT55VG9birGwHM7c4nnrLsu8lX
aQ+fZRwye2GYnbT25xR1ouWRrYnvwZh5BgU3MDjtjnbH6eExlFB+zBnv8pVZD8Jd61Up4gVW5ve9
4+XSsaErHJ0qSanXfHkCG2kdPIhVzVY+euqso8IVb2TNpy5/rw1F+Hs+HHQDGh1fq0AInCfB1l5O
5mZ1sJGpRAkWXHHqBlmQXXjdLJmEQN1glXwCGU4hT7FcsYjzcnMz0XpwlwRf5S6Y3yb1YadFE3O7
VqznzdIrodhmAzEvQR+M5pA06HGnMdl22SefYmElwgObJjNLliVjqkY9RFXwJmGoWMH0aLm/0GVs
MFLeqprrr0+FZm1WSYhNp17u6jpFwt0OiEONIZ9WQU7RUo9q5mVDV1O28XRFb65HpvuREnTP9Jqh
dO+tGJkPfjAtRjlT4yQ64r2/dIPpIboXeBkHzNzl3crHQc5dyPLYw7NhBdYoaIcC4OMe8OMw42Bq
VscW5oS4BGtNlvSGVKogrVOXpBWLEG17LUZ8lkuSUtmpx2+/wqYDjOF5b9GwW0KEzsvowchwXuSM
nZ0GwtP8JMbzlibjLdNt7cx2Ol6GqzZnjX5yQTHPG+bNVpjwTGr36FtfNE9mu14J97lRhbsTMF3U
7vW8be8UueR9Zf51bwVLQctVeFy1J/HOzdviujJnmi3InG1PtyQZOxzqSDojFf/6vZX7RPirr1vJ
vAVvMmYh5GteEudmVsFYCh8Jkl2ZEnwS625+m08fSK08WaAdqm4A3wih2ysuI3gn5PO33tXtWC/o
AdVBx+GRVbAy9kuyX/d2PR+MYIad6B6wMlt9Mtf0XKEd0Rlz6ueYtCJWHZQJQkrz7SAl/aY3bDhf
zQ1WLNAt6VNPPZy3q5YFQifwMSm3OihbUWYOfZo1u23eM7TQtlR7HPKAI9sR0G99GCq1NVa3rocM
OouW/TMMOFzWGFpeYozYRshZQx0MvTGIzA00IeHsjBt14e3f7IzqRz5K5q6Hs3lW8KwZD8Tsea5Q
Hh2SQVbKS0IzVxSh5RUm/oVt+cPbeqm63x/fv9/L6Wf3ltDY5AyLpyoMWJMvj5Cpmc8CpTZzSbiL
PvKnaMPwPTmgyZSuT2Sc9ibm3fCyhvgTjaDc7y4evc2h+9tvDVUNtr0fzeBbTw6QezsQsj+GfDyr
jgDb1bli5jbUERW07IjuRhLhVmwt1GdDXfzq8Xecr+62LmkT3VQUkqC1Jml/N5FVTy1xrvtPMqof
XYW37RXMiNZm12ee18MgbFfE14tPMu+Wc69+alEip836ydnjXvXwcRaUmIz1SXf5aLbaFbZO1v6j
tuZ5kDFantMFvj9vck7RtHiiXVQ4hCjV0uqLZ9fA936QQ0CdejqdrSeUN/VANqxXEFtoRFe9tIh/
fpX0z7ST4P6pB7Iu5zLhJKhMkKKpYG5+L1C/5bMLT1xpv+fH55PWjeq4eVmEDIJYD++xmFE2bWMq
dq2K5sajOsrXU4mHwzbM53TSI02AW0lc2detlRtdChQ1hUZ/9cOJue/RRVrNeM1DyZq78jyi7/Xg
FZwFi6rH2gVNIAWxpXYIhsG4hoix44N4m1PLR0G7Fuh9158kmCbT4uh4E+HeaDc8B/2mHozc8NH7
8RSx6kZex33rhWHl4pSYS/KoaVx6KshOkWGBe9eE8sVqD86mf5ONR616ZiLdg/ZKT8QU2zGf5zg2
odTnA/HKrY0Gsd2JwJ7YI+H1uQn4nk7xT//omHZSMGyqGSPdci6E4F2RDyqKMCSzEjP9pmw4k9h8
RE6XrRg2xzaYJzXvYb5kOiHTuEfzYSVlv35m4aulSc/z6oLa2JGJnYwd7xt9aEDpBYWFK4zRjw9A
D0OZbNxHaY2FDAXY99Aj2Az2fPjUPEKH9xOT7/0O2OPe9miTbmvivnmaT+PNlWAn+gfmLh5tN3PP
pkDMqic/vuRcmjHqMKqx5mwzq7t8lrZa+ZrFiiU3LH5IEhua/Y4z3zBPHZUUI1R/eqxymqJ5Oiig
3QbrQ7zT7VyP6seVYWsUGZXJuO7mIjBjeJ4+GVWaUOP9tRQwXCK5xQLkEqfNw8Yg14lDbD3sOrZd
wxvG5swwfXKKfv6resHLZ+v1ug3465XaUAv7lgRq/+DzUXyHINhbi5gCXdXjxy1TkEkjEVcEGvBr
yWYkecKRhOxC62FtjClU2dpmDlzaZJauta5eLvsci1QO+Vjs4x510V4lW7k3OdeLfQiFIQDTV+Rd
j5kkz7/3dMGLoWZ86GW1vqA9FV+dwUfTnSTYFe6I0WdxR315Fo7q67WfmetWHHG5zzLQylJk+En0
ehr8qtJmJewx36M951uYTLgX7omEK24Hy87WfNjfxvOXD8xu5fgvG72dCyehEz3zcUiUDO5y75NA
Pp84l8wgguU5W5DtURWCnnsbEZQ064h5l6987mz+1KhTTVRd4jmgqWpJIEwGI3p4eNV89RxMtCv8
EavWYV2PyL6mYMdGTzbT9E4mfVSEXz2z9dev+/XeF6GzrjPW2t0DTaVwLNHLOGHi3SQW0NFZxGgd
7Sh9Thf52++HEFlg13g1vm3OPUsWIOmVAdcwlZxmQ3lEOkMb/Ig+fk1//boqC+lvf/DyrLxRaFsI
84t748OPh37+4rzSseZl1B9Re60+dIGWdcBXARZRfsISMwjtEa/1UgYk7T7MdJdBMsOqF9Rel3df
HtdzkXWDipjkDUz/pKr1/b4EePlp2I9vucjjCC2EyxZPDk7QTPaj+pcvCk9bWiyL9X/rt9W5Oz5d
T+z44w2qwqUJOJdUrKz1vU/FT8rRtKnUEDVvvSDmKXKtFR8aWRt5E375wwzGzVkOIdq8d+xoKZn1
zQMAj/3Sx7JxeKMeHm8BVPUIGCm1mayafLmHfk4COpjYsMbLSW2BvKuI6Ieoy+m5GFoII2RRZTUK
wXTh6wo2ZO2wTbE4IL7aX1z07R+GBWqjUa7zEr71QuzgfEz4gtwECAzSEZ1d3/m8Mm0RXAOd8HOR
POshFhYSiPtdhgVPOwRTQxMHVm6YEqtGTz5YpztGdftaYPTJ+no6aY8jhOIrJkE1ETT6R6cCzY2P
BHPvmnM0SabaTD5ipvbYB/PXH//yobUr/YAfHrUAWlmJZJ3HG2vw1LHQFH09/+XBYTqgEoyDOdDx
1b2tCZJwBHn0MeU2jhAvhTxTf/21HdWqm7ju91Ar8wXL+b3g7EiQD5hKF1yJjmnx4SLHkIztmZnR
K+h4YfAM7RpNprM5Amrtsg7Rx0sstj5sZd7n8TqGIMpc5rgRQ5zHTq+GkWLRWaCnmpv1SoJvPxAL
x3LweZK98OMFZtv13XrKy0+Evv+XwuLR5DNfLPaQ+wfKNt2g8/lsbS6QFvOauFpSdzyqyxjtYyNh
jqWo1lwdkxiUe+3SxVla8xETp0VIlRPif+txfrTKE4K9cMTTHbVff1MxhEmYEq+6dXwqIwXDN+8y
b34bnfhZ5OYvP1D+61e0vYqoxMWC6T9/DKiFf3mfuOrnxvtfftbPtk7HL6+PrBtkOFy1BTNqe1uP
TSxc4KfPxuudIy5d7xe4va85C/yVjr55ywUdZoI7WRiCsayDFkBa6mzjlFPdLZRRBNaOJ3a7Lgs+
eVPtwmyvN8Q9VQ/ERSx/5wVhT7562y239ytAQ2adOIw9g9/1oNXreMbi4IvW+A6kBrbZU/ryaJVQ
b/o8kSH3QIxTOQRMxHIJK6v1saiHXc1UO6JaIUwvYpTzs/scjO3xx+Mk13PNYuaOCgpvX0/8jJXw
m/eVCmR3SBiWTiTnkHgtfPmWmY9uHfRJudahOpQSMa+KjSSzsfyfX/78E81lbzvgpZcYL1Hd1dNi
HFMgtmgQ1yz6nMmRGML14UZ0rq7c4kq9tkHVPx5zzFHqKLpRCrQlOvnm126+nIYZ3ifVo69Z7fjI
PSKiVnYuZJ15dT1nLkjq8rRpsRQ9hXxKJ09SMv2DiLVHIp+fJMmU62V1ZJbNnwHb9Z349/qDYkG6
cf+YMzjXK0LCm0wtOoVNq6jw3m1HdV0EvXN1Ujjvmu/3ccQ7uc9SNQHHx8s8fll8m25LZXlat/g2
nG/5Xz67zEmD22Pn5PPB3+wh1WSHJeO7s4bD4+X88gQeOydGo+BUIhy8YouFqxxYQ7aYR7Bz3BB3
YJbF/ds6g80p1dmXrwKuh9YRbrWyxTNcxu7LvybyzIJiQVy7+WxmSwlNMd0yMlp1MHvHIFM/29WO
uHq4TgYRWqq+yYMQuogTa2piUNENQodcov2A2HAZY+TpwZkYfNp10y/f/+VzfeQ1+/n7r7/Dl1Na
P/6GQDpEzOH0FtBPPVHQu6PCtmtb6ujwWGawYMeQGd/80F/W9xl6lL/I9tOP37wQNGh3bipid63K
J8d4tiDb3pr5xdXp5kW76NUuOqpfvW/4ODzNCIxuKeJyeSec+9MxAuUWYWZGppR/+k+hou/8imrb
PAy+/fJU/X22YMGsPrq5w7oKTSpfiRmZx2TsTucjqLJtsa9fJPxwOb7hXC8JlmOzR3x/KVPt10/T
/KbW0Nmaq/zWBy81SeZFSiVUbNOBhFTWc8kCL4V1/LJY4JRdwpiz7KFXXMpMfGjyXrfsQksNWSDh
QifW/Kv3Fw0PxIn3J2sWsWqD8XkiipwyyGfBuQJaxvcLXvmrTTBl+c7UfvfDtmsjWG7TLIVUUx0W
ZukhGN1l+FY83Tt/5304eB+1sdXqZYiZv4jPCbuUSqXGmuR+81FpjSo6z4htFUK+eR113vTWQVFE
g1iGLvzVVxW3ESY3s3wE/cOJBbDAqZnzvu9zZkpZoXrtEDBXDx/J9KlRhA4nXybGBvp63hKrVE5G
atF9eDAS/p2PQEw/3b/zx6YaW+DJeCPkGYrJcBQ/M7w+7YaKXnftmD4ODvrxRRScoeYkaE10pXFP
iFmuAz6FtQPHdabiE8kPOS+FJEVffiY2frnJl8da+NY/VSVoEXOjywXNkUCwUD8Ajb0t9GBKIGDt
rka52J82T3hWZPvNI37H1bd6QQWhJltfG5rwInAp+uobSy7uAo2v4ozhPokvYjj8E0zf+Q6y87Ah
abe+5d/5agxxvVWZI7Z+gAYjtjUJVw1e3N9OMG/TVAQtFBvmjeqbj+JaaUH1+INCfpdrau7UCKhT
TizsDTtYLpxbhQ6DeyHr2n4l/WC8K5AD/U28tQ1owJyKaM6cjvhL/sr55vwZ4atXDL9SOxgddor/
zru+v2ct38VWRhsju1DlZWXBpB+SGRQhI7ht7tianGRRol7rASMcBzV7vVLnN4+kKGxmPlfv3ld9
c/HBvNrSoIckctAU0jedmmlR8/OhGNGleOzpj8/GxX0Vw3yorlh7HFAwvYutijSLunQ2Q1rzVeBI
6Iq0EwvQ0kvGso8AmrqIKPr6Vf/Z1QKk8+WAhVU5o0Z6ogKeTVUxki28nGcuitF0inJ29fXvvPhZ
6Yq3ahK2XxhVPf34gQv7HXFTZ5/wcnMy4dUhHS/LbRD027aXkAKOyqxQ31niIbmN8lwiTr9+YFFj
iC5/eclf8k1OBS3eq5tPqzBjh5/WiO+3Cva3+cy22bDOR8dfVuggxQpxSGF0NEalA/G4NolxRCc+
Hi4PRy1IbzKH+RHiD3IWYRnXF+I5+JW3xbO8aMu8k7EayzdryjzXVb+8x/RGDNB4LYUQcvkdkuye
gDWv9tce4oJuqOx1q3zYCFEKV8ckX/1O6tVmE6kwLDc5C9imSOYtzE+QA/PN1gvVCFSgnwy++z8k
SId3PapzWMBdypbEfzhW/ndeq6aCSZdf/x27cq9CxFTKPCE0A7HcPArwxn4kjhm+cxqs1hi0SvIY
6foh5w3FIXxu8QsvxcMZTbOXmoDrrGM+NV5Ws2Z3E33zJTs93nE9G4v7/u88ZoOWrGOs+2B4rrM7
0fVwQnOtt4XaJ55PXGdpIMm7pVR9RtOKYOY3OSfKztYO7waz0HNyzllkmdo6Sij5zqe7735SBqdw
Sv/yII23igmVv7CYZ6xrxKYCCRDpM2WOlFZ89I5prMIczUSXIav5IIc9pOT1xpLgWxZ1nlhC3/Mz
+0wE63t9F7TxS5no3mbZ9Su2MmEPrzWuw2aRj84zp//2Q7t857OQyg0S6Lhku7P+TGYSdE/45l2G
T6JVT5UdCAirZKYL0G6cXcqpgp1Ma7YWD2f+1dcndPLjxEg6VPncb1e+Ksb6nfk7hup+03v2X/87
hbqPOOjyDNHwOtPkMhwsujhEb/A39Yp423z+ztPfKkLW9cxCqdW71djdLjDRdcXs9r7KRybyEOr2
sSB2eLgnTDKDGIzocsPgdbtg/mR9BsNYb0kQf4xgjG25ga9+ffl66Oa97LoQDqNP/Hg/8k7OvBIu
TxkTP9W29fgykQ7feQ/VnJnlXz54A3z8AzPcpEpm6zNWAFpfM3N/7vgcl4aqSU9uEuv4Geq+uVij
DE2zYelDM1G/Cq6RWtFPTLaHKes68W6kasmfPz1sgt67JaJqKRxjVG3vNS1k5QjJMnz9zTuzpHgY
8EtQ8JuMj3re9UMGF8XbMEO/V8H08y8njCQqf/POd//CR5ryibByk5g1iakLMM8Hjxi//S+hCxuQ
KV3RQTJvVvdMDykExrbDC7VLu9F/n7J/7z+ipWX99b+v3tLdV2/6r96ghVBsSSjCizMuzaH2CscF
XtytCE27XVxpYrp8M3K4jcn0SNsYqevtiVkwlejN87YA9/A28fG7f0MFcW3C4xmVLFw8mv8BAAD/
/5x8S++CwLLn/nyKk7NlbkBe3dwdAvK2WwERk8kEfCAoIo9uoJP73W/wf2Yyi1nNUg0iRVX9HlUS
j3s2vWByu1kEcP0dMNv/2OrP33gJUIinY/7M4Hv0TbqbH1YxfY89By+6p1PLrVRzWvFAW/UbRYdm
BuPn5Cegte/s5w+zcQB0+vkXaA6l6d9+dSYZNQ1H36lpFhb/9k9RJMuMtso5/OkvwjUMgYFq4gSF
tKEEOF/EJq/9+nBqee3Hd4v52nkQXvceRw3mn4NWoryhJrIuUftUfYNpkdwWrPoMB/tRLGbRyFzt
Uz4wOfvSJ2B3Wl3Bjw+mIjya0opnypFvbGzzkgD6Y32vAKan8y9fe/bSH676wVZCtOZ+7wdIXhX8
xWvB9w+bnmnrQ88aP9g5nPV40yP/qvz0oJVrKZuPvNcAQ+Q4GrA7Y7/5IBBPp4FaHzvs5zV+EBzS
J/W6YxhIgZc0cM/FOyQH2r2YsvDZwS7cxev8/AZGLlNdGD+wQdTq+qjX+BL4NpojdVrxVc/98vq3
f+ivfunf8f/6bQX81//4/9go2Py/Nwpqxf8gJfPiglx8RYf1uAC6AxQHS7kdrnCrfQtq9rgNZqj7
JRiPckHxS6nM6TUKE4ze75wst7NTt6FSRnAXJnfsTIJbi3jnvUDNGXfsdiUHZjPqGihrCGLHukRB
wx6vBvIq84lkXSJzTuDgwhg3PXXP3pfNgQlz+JacLQ7UiwuY/Dk2miqBK9k8VCOYeB/lqrJLCHXa
CbMl/O5kGCfXGofmsTTns+wOcACeic33vmd0yEMfxpc0J5t7tDMnLEi+mqvZjhbDV42X3TsXoJAk
JT581T5eNoKPgOKjNzXmCphUH6cQjpflS2SYNAXzm9YAx8tkIniRaTwVZPSBDF48RlpagaGF0wLA
GF0oEkVUdKVrHYCR6hMRHdMwN9rHyyBKREoIjmmw9CDKQPCaI+ocnma/uambBr6sKqYODYNi/rad
C57X14ei5L0Dy1ksErgBdYWkrSYWXecUMnxnjwI7x5MREO6h5oCd9gJhNT+yZbP1ZDiEsYr4cmf2
hIvUCRbcrqNbWxPNaRK6HKyfE8qXR3MKds8J8PqsUMeV/GCpvt8KKoFYrfdrCUZt667eEojx7lFW
YFqMMtJyw5ixe2dmMZHLi9M+6WWDQ9EB/fdU95YceupAakttwNy6pq3Oiq9iO9clMEYJteBzXhk9
9+7M2U9PLRynckvPON7VyyKPHXg+7w8kD1Ctl9vlk0B/N2zx47rxCsIrSqeu+YNR8n6zdtdSGbap
Cqi1f3m9xL1eAsxr+YhvdznrWSQfVKiMxCCTX7wYWZa9AFl2I4QrCr0mx6WM4Mt2D3i/3eJiXnRV
hnEOz0i7W2rQtlRxAZAMl5pjb7IvyxcXgk0WIEk6vMxPpW8i0CkcxcGx7cEkybc7zIMK4kDSRnO2
lOcd1lFxQJIS6T1DQHQhyx4Eo/jsAPHLYAYNyztg64hqc6KOSUA58ReMjPlezM24byASCaZO70b1
3JQ7CPsDcTDOL2PPxAEg4JmnAhtaN/S0MDsdzlVvk8EwjbgMmXuFhlLu0ecpXoMFHU2Df+KGI0pW
R/V0yZ0KvgEnI8B/7FoayhpCYbgfibTmg+CIIwE6Sj/YD9uqnm/b0x3y/HdLA3kOwGJJ0IZ5Vr2J
tXFaMNi4EaB9MwcEuv29nw0+sKASwwS7ezMBFH8TGYiGeUbPTe4FgxQlC7yLB4XqafWohw/ObSg+
DB6HyL+ZzB22LfR3ZIsfORvAtKklUT1J5RnfQ2dk083rWnjNbIhUXT0Us0fTAe4fj5lo7G4E05CR
CYRUt/G5I3GxDNUl+asPtb3hupfspQTlgYrUvl5Z/3GX7xVckvlNRmYZTHyd9QhynnCh29a6sMVR
5LsaFVJPgBL0wXKumAqhSkciK/cgnkNJVWFm3HM0GXMfM3oqB5XJ45Gu9zOeG3WHoL9/Yerb1b7o
4BG+YFu2Hc42+dJ/P/uBwPg12OjwEFKTSKBF8MR3LrULtOmpXCoLOOr6iXqctSlmDukJTA3VJL1U
78FSm6UAdpEQ41MSTsGCpIBAYbgeKVLttp78vrvDPua2hJcP52CZAwTBWSY9Yt3GZTMjlxe87RKX
4guPYnqV2hJcBpDSkOMXNnlcMEB3cp7UbLZdTM9Pu4JOsI6LbrMNlvbNNaqY1QE1X5d9PC2ea6lm
4PpkekWkmADJctXzNhj7Aq/HrNK1CDiJ8cGoTVVz0qOiga3WRXQb9W2xPIt6gBc5NtCMC2L+5RM4
yqviuQv1MLmerIqb6Yzx8SkWNEPjQeXPx5Hab+IH89PKDPDchhBNDv81p5Dpd3BS3IQmdBMFU3xT
78DUDYOGKshNOiZVqVIBYuoZqVHPgnuQ4f5xm4nMl59+GoKPDyVc7db4nwqW4/oOvc5esPmso3gB
5xTBe63z2D09B0Ar52RDxIk6AQedMKp19h3y1pZRdyPMbAylRdXuz4tBJuMcmnTX7Q7wad1F8mRL
z5jlNUhd653wx6cYk83HeP2OR1LV5f1XU0sBnsuXih9n7cjW+GYwen9yHF6TPiCWBSZod9sT0bab
PZvljrSg2Q0fHFyidzx69E6AdXucMLK3cj8k3KWErvIesGNv9nV30ZQUPg1RQeDq6vHkdq+D+pGP
J+qV+9CcvyeDaGAqE5xdArtgu/p9B4fdXNKwSp14un7NEopmnxDlzq/3T+4j8HWyLb45ZlBPQ9Ys
4DROLd3izapQWzUFQickZHqKSz+WcIbaVvI+v/7Khs9pVsG0t3Tq14EMpk81l7ADzbDeH6dmSjKl
8LvwG1JutYGRJdy5IOUeIt5+hxQsaXsqAerTGuO9ycWzGgYcwPIsYRO2St3mW3QFzyZJabiJb8Gg
ZyAD+SnqqXk2b2z+JGEJmle6xwFThKI5FScV1tHlQMR+auLxDtgEw1OUoSq6McbiXfaC1mADbNZv
1C/2ToHQmjiGzqJogff+roggh4aBsnP9NVd+dfj167/8m079dFCbTtji6z1C9fDr13GTmoRx4Y3N
hS1Z4HJ89nSrWUYvFIbVatvxIlP3ut0Wi7jRc82RSos66OEEXxrdOIAcycZeN5Yx5XcB/PUvahTU
j9/qe2dANSl4oh7FMp7DObbh+YQR0l4+jlk5nBsY7t0GcQn/Nkm7iAgac35FXBI09figaqIe/O95
zY8hYK9TpEKZExW85kMxFzZvA32Ae7xf8Xn5cHwLLCEcaTHFMfhOby0DlKURIqfLq5huaRfBVnAd
GihBYA53y9O1sdNDfN73bT2FQ9mqdCYnIuofu/7O9rUCL0NNadjpJZvh3vDhk4pPunu6pbmkRiKA
FjEPI+juAipYnAvadirQ3FoKYKVyRtAu4w/F8eMCpgK8FvA4yi7SnokQT5/rhQDPk/BfvoxCISe/
+GE36mfAPNuFwBCjGFt7zmWstO8yfFfYJzMX6fFG+2wzqKfXC93d7k29DK3igrHvPPzjZ13mqQKw
zZgjH7RzmMTd9wZ0oqyjiDaCyQ6eoKtP6yoiyXzH8cjyxQdA0l2kfG5NMEzuVoamtl+Icn8TMEfO
5w4lTz9i1wq0fsA5r/71I2n3oYx0SVxBXxd9RFd+JWa3PPp7ze9Nrpgf10gGHSc0+LAsVT8jeba1
rr4C7CCliVmh+S7E71rDPzxbxMJzIYbbGfvNlrHhllaRhsDLxlg+nM3h1MsRBIfDjETNOzNSm60I
hTn10EZ4bMyu1lz4xyf2s7gzN9DuUngtQ4IEn6CeOXN9gPQhCli/LRfAUhW/1M57fPFOeJyCWRM8
HZywZ5BWRMeAXdpXBYPJ5cg3PgI2o4dsAJUrDmiTHVowdrVbwd5tTmjaeiTuDzU4QNqjElupqJss
T4MDOHFGQD7i9V0Tfn/04e48+DTm9Ws8d9BrwA8fJpKZBSvO3hWa4eTQ8wFUK3+PEyDXD0Sdd3nq
afRS7oDaO5sa19oLBI8LCGyOdoyaU6jErF8djYHLNOqwpuuZRYkKlMHa0d1iKgW7WSXSwMltabIB
TrHWTweSTZEg+RI0MRFJ2wAfXjTq6/YMqBsZGTDFkUMcHkkxeYGZw23ZM8JpqcE2TbmH8KEbLTad
1CiY/skyEO3OEsVTyBX0x1ed8+FJXYuH5mA4WQppk36x7zeNufDK3MIfX1K2lRV0obivoNjlPbaT
LAdL3mc2XAIjp/jH3+KDKoAvP6qrvnkD6uyMBRSfj0gN7w17Sqs2gfEWfTF28YMNa73Ba7Ix8Mo3
iqniHwLgn9srdZfXuZ9WPapZ2iATvjI/QX9IQacqZ4SIKry3weQvxgGkM50RfwkoIDt5OYBScfy/
fGGPx4MDQWuGWP9YUzFEV3gA8Xm40QheEZhGe1fBs5q01AnwwIZav9uQLO8vkWjYF/ODLql2dvGb
3GjuxVRJ5ASufAYpiaKYC3jdBLi7vrbUTWnXM/tohTA2hIbuOP5bzGEHDvB6GRRs3M6fmowwa1Sx
y3oiR/3Mlsrz71AnL58+snnq+/zbEdULHiHiVr05w/gLgTZZPTXCrRy/7pZnwKNdemv8eMCG+JVC
yckTGmjYZMvW4HMYfHsP+7aLzPbYagM4eGZO/WazFPP+fengWk9o6hK/JqMxXbV87yp0/832wXS6
Tx1Y+S/G77Hs2TjWL1C/ook8fn7BGxUqjGvLw9s5PcRTqLQH0Jr99k8vr3pzgUW3magBDdovwdWO
gBe/OGwpwbZe7Mslh/f9urS5IwgIXIpFIBzthghGt2Fj5Hyuv/wik3t7xuMN3ezf9f/6AVv81iHg
8XYNwiGFsh9fgeUHd9Q44tScQl13oX1hy9p/JDAa70AASVv0SKP5t2CahCxoHcYD2oATBotkuDZc
9R/aZJ1rMlIYiRpWhy09DnA2JyUELlj7CzZpXLIPAc/8h1fYJ7uhZ/lklPA6XY9E4e5J3SbSPAAV
eCk2wm0WM+RqAnAUd0uT8YaLxZqvkepZfIa9x2VfrP7CBIXHtUXf0yj1pPbdDqLA0bH33dUx+WQc
B4tOmrBf81I8748LAmQj1UR9AK7u94kUwVOUa6Re/ZHv4xqp2rO2r2TeUrkYWxJMMIzbAgeweBfT
9WqqILj2JRmDp9nPp1sQAVbhG3kCozFHM+pesN8lBeoT69OzlW/DufraSJjiLp5sTAQYjTQgqt80
AWU3HsEPu9xxcERDzw7mFcFrvN/hVb/EzIJNCaS96uLdUFzjyWiUP71DAP9p+nlJohQK+WvGIX9Z
zDkiJIGnUydR54r44EXinQ9Ztb8hmWViwL532gDxvW78ON0jYNE82+rOH550m4wWoFF/PMBzcHvQ
re6AmAh6MEDHMTjy6/8zeNQZqK8+95fvX2hXyQ9/qG8OS0EVlJVQzpyK+ktt9ezbfxAA3gSRRJYh
mFb+DFQ4x396njpcp/70Fdaf4lKzZ3n3AX0Nwx++zyxXXTWS7ioCuMEFi+6XFqiSciULU2+MiOtG
q1hFKtW7pOuX+Y1CsPo5GOGRxEsJrBck9EQw5dVDzPh5uMNVv+DHWp/LJ+ATwE5YwEa5r/p24D5X
0F1oSDZp29dLlxQViD+cTt2oP7IpR8iCzeN6olftfGKU78MONnlzp0bFv/ppy6sRYGjk6Y4LfMZG
p1PB47Kk2O4nO2Y3q0Wwlsruzx+UCs3w4T3dptRS+NIcSX/WoUOziOKfXnG0XQkLzunIMaVPk0LT
ecEs4xC2pvHb09dGgcCztIzi6tYUE/jIDXyPp4hu5c4sZmvvcrD5NA4a9ilXE9aEOWyM+0hk3nbj
zU3VXlDkDxXetrJcDGD9BwtpNyqB5lEPhLV+RcwfexpWU1OzZ5xWUCXBiMRt8Q5mD6bJD98oPsRZ
T/2m1eFPL+6XzuyZp2wjzWuEH99+Ft2Pb1dTPVFnEtp6kh9Jpqmvb4U2j7JibTIfZXApTJF0Aq8X
LA6mHK56h7r5xQqEA+KucENqhagffOz//Lr6rY57DrrvYGGNlWs05zSqT7pej5qwNSC4GSKCJyGt
p/iwCOB3Ph71RTFr4zPUVj8EzVuaxZNS5g0I7GDA+2eSxKTLWQqHiFEiiPTNupRsEqiYw0xt57aN
x9lOSvi8oxf251PDRnI8L3CbBxyRjFlgxLhyLeA/9YFMKz9c9lOtwg14VmQR00tPrCPIYfy8hlR/
GkIwdsb3Cm7mENBorgqz4/cXH6rJhce7OJuL5ajo9i++qHfTTfFd+xk0dmxPBrmWiuF2M2xYn2OH
sGB69lQXDjoMLkVDhO1VDqYrrRNtxUeyBBpfMM7aLZAL7inVn2JUT0djY8DMSY9IQFlrrnzVV06K
n1DrkL/65RufUjAHCsEuTW7FpCv5HXJueMU3+zsULFWdBp7jzxG16SkolC/dQq3V2gijkTsHU45s
C3TgNdBdEk7mkt2iA5jfQCdqU8Tm/PMPFRv2f/qR/fyA4nWC9NfP591YGnCtV8TN7aGez3fvDlb+
hN34MRVsTLoS3pPLhRqqcOwnQ58qkB0tnQZHFNaTwSYRHC+LSZRuLIv5ZIaGuq3BiB335rPlYl1C
0OoXB7vKvS+IY08+nHHQUq8b9Vg63IoruNcGj3eLeSlYvDs0MCboiKrbbDPpfZVlyH+eB/KUD5Y5
7z460n74wK40Boxe9Qp0Jd5iy09o3MfRIQRWd+cIa4pvTSwuliGIq56A88FlQ+tNdzjJvYA87SCC
peU18efPEUmWt8WqN18APYWMpjY2ayFU2khZ8/HHb9kynI4DaKFxwoE892x03YcIxntXEk4tP+Ab
g5FTFmcqf/Vbbzjkpn/9QjCtkzkLVeZCE+UW2RRGB9roC2Q1mBdADTG91EyQuDuMdieJ+tJcFZMU
XSfw8nIf75sqCj6b5SqA6OG8Ef96VGAYgo/7w1O6PcS7fmZbxQLN437ClqcezIYbVB02TvrCzlsV
2Lw9vnL4yx9L4fWgH+IhgabCy9iR5W28+VZGomU3cUeWRME182wdgq44e3TfcnsmHTW1gu1S+dRy
057RH/9Y/Tbqrni4WfUOzIxrjsPXUptLZXQdfHvS5i+fysSacxD7RKDe7Xw02ZVzE/Xu990f3v34
Gew09EXMjL51//v+FW/pVgcKGBGfydA3rg4O7EQGy5OaPjxl8kyD1f9d9t7bgPqxqei+132TSno+
QK8fKN4SVaj7t6F0v3kQ9le+LpYnU4Uos78EkKNUzOru5ULlHCKcqiMMJr7GV5g97sZPjxeEt+dU
Oz31mtzCk1FI+ifLoSRt+L9++DePWvXnOv9Ig3F6nBoIiJrSn984h9bygj8/bK33mG1kof35z6s/
KgUkuKIDODwzSu2mcmsxg8wFhx0ryQtlbTDcDv4d7uEtx97Kz2b5Rg1oKpqMHamrCzqqPVIsk34o
Cp97k52fqILQjwxqPrW5ryyuUCWVeCMZ/XgHJn1R25/eIdwkROZUXR6uGuBeIrLYTvHw+iTwpzep
ddDf8cSljgB3Pnni4JCTYO0PFVz9YexsNTGmu3q8gq0UfPB28RGbtY+Xwx+/E9guLDYCmoZfv6SB
fB76qWb3DJwSXV3jfy7mn1+k26GItyL69OxMkA9//tkPT8dPYpVQg8JEbf6p9otan9uf/0wup/Fc
z89alqFdP58I7F9xMD2OegvX+RACKJv7zzNO//CFGnh/qZleTg08u/s33WUHlwnNk6XaqWrLtb4D
Rn9+mmDcdIr5cg7oqvdA4yQvjPzdHM9zA14AdMaDBiprg+GAuDtc/cgVrxK2/n5XXZylJPPCjTEb
4iEFP/8yjLiiZrpwMDQfo5AG/abop5bOvrbWO5JPYAjmMIhy8Lue+eRYpvDjS6tfsG6EIvDnT2jz
0FJTt924MURNhsnmkpAoMpN+PnNtBY/bLkJCKpbm6t+n0K7rJ5q0nq8pd34jaD12Ef7LF7mcJ823
bIsc13jTXhkNSKF4w/rbHtgcgzeEt0mBSGrTQz+Mb5LDbP+q8Pb0NP+Np6NugNX/+wSj2fgGCFP1
tJ5fDzbvGCUiComGGC+obDyl/RXqT3FHeN6eGQnpm0CUWV9qniUYT0H0zLX42iTUJLs6aNpFDGFX
7reIJPz7N4/M4Lb8MowA2fVLPXmp+ps3r/NesPDxxEFnXE7Y5e02pvsnJ4NToLgYn7UZrP5tohk3
lSNLxb9WvNYMePe/HbZN9gGL8RUPEGy/AbW21ywY06kjf/Vk+QTVY2DC7IffSGzgB7DNkgjapY8v
RHiebr/8z1T6bWrsvG0xXvWO/NPH2Aymbb/5zV/6h+3h7dkr4zJHtg0Mb+uQDf1o9fjUJh+epOqM
5uj4NZfHc5dC56kf1v71NGfjbQrqii/4hzfzoi/qr5+Tlb+ZSyjuSrCkuU3R2+bZ9604OhyG+ka4
Ns2DVW9a8ApeF+yTRwB+9QbEPHAJB6tX8Z2GUwjXeS9FgLzr6XoNVCh5xvFvXk+cYRLBz7/0yS7s
ZzU0uf+9UfCPf/7zf/6eWdC0t/t7XQwY7/P4H/9nVeA/8lv+H4Ig/j3YgAx5ef/Xf/57A+Ff375t
vuP/GtvX/TP86z//Kf6tGvxrbMf8/X+9/Y/1RP/1j/8GAAD//wMAjOe+STBBAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c08cd2171e-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:51 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=5OrCIMrDXJuaI7E_fumscwCdlgwf.KELcNNXpqUhEow-1712872371-1.0.1.1-bNxmXGmOqEyX1SWd6RncfX6UbfadPSUR6nozzYuIFPlH.ZyTJa3ShucPXS_IcZgJ4_DUw9_D4zpH1AZ488gU2g;
path=/; expires=Thu, 11-Apr-24 22:22:51 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=4lJZBUnx2WP1QxsOjyiKsxhmmCLce_ojBS66oRdC3kU-1712872371411-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-ada-002
openai-organization:
- langchain
openai-processing-ms:
- '22'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9996'
x-ratelimit-remaining-tokens:
- '9999998'
x-ratelimit-reset-requests:
- 23ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_38eadc2dd736cb2e124bf1a9b9d456bd
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are assessing a submitted
answer on a given task or input based on a set of criteria. Here is the data:\n[BEGIN
DATA]\n***\n[Input]: {''context'': ''The man is not steering a car'', ''question'':
''Can we logically conclude for sure that the man is not steering a sedan?''}\n***\n[Submission]:
Yes\n***\n[Criteria]: usefulness: The prediction is useful if it is correct
and/or asks a useful followup question.\n***\n[Reference]: Yes\n***\n[END DATA]\nDoes
the submission meet the Criteria? First, write out in a step by step manner
your reasoning about each criterion to be sure that your conclusion is correct.
Avoid simply stating the correct answers at the outset. Then print only the
single character \"Y\" or \"N\" (without quotes or punctuation) on its own line
corresponding to the correct answer of whether the submission meets all criteria.
At the end, repeat just the letter again by itself on a new line."}], "model":
"gpt-4", "n": 1, "seed": 42, "stream": false, "temperature": 0.0}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA3xTXWvbQBB8169Y7ll2bSeta7+GNIRCH4opmLqY02klXS3tKbcrXBP838tK/ogb
6Ivgdndm5+ZGrwmA8blZgnGVFde09WjxsM/LSVk+fn15Lr/8ePhmH7vse/b0VHf2YFJFhOw3Ojmj
xi40bY3iAw1tF9EKKut0Pp19ns/u5rO+0YQca4WVrYzuR5NP07sTogreIZsl/EwAAF77r2qjHP+Y
JUzSc6VBZluiWV6GAEwMtVaMZfYslsSk16YLJEi93FWF4KIXjD4QFCGCVJ5BLO/AM3SMRVcTMqew
r7yrtJhj4QlzsAxSIbQRc+/0spChpxJciBGdgKX8Q4hgeadVeyKDItR12HctvHTIChtvaEPPNGx2
ljHtebnLGs+svJ5hY9bIG3OW0VhxFQ4CIhYYkRyCJd5jHMNKmRq0pANW/pWpawaNOnqzKQ/IQEFU
Ndj3WlPIOhmU+mGQ0OkDxEPvXoMoelu5+mrPRl3PSsr9PHqpMIK9mPZGpnr3f9NWFUYsQnzvmOq4
3doD1htam1MQjpcE1aFsY8g0bdTV9aVeePJcbSNaDqRpYQntAD8mAL/6pHY34TNtDE0rWwk7JCWc
Te4HPnP9Ka7dxcdTU4LY+g1qsUhOCg0fWLDZFp5KjG30fXBVZ3JM/gIAAP//AwDVv4w5rwMAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c84831cf45-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:56 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=_syfr2WeOGqh4FqMjdRC_PyiPJJmytQaW.sG5WImOdk-1712872376-1.0.1.1-x2S4qN84_7F0gwkvLqa5NEI7.ZZ.64w0QVDjWI_G5lJvvXsQHTeJHDimKd_Dcaq60svgo53hO9JJ.YkoRqAd7w;
path=/; expires=Thu, 11-Apr-24 22:22:56 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=bYWKw.k39cbtUhvvU7LCsaJlz8a_rASbTSogsEAWDgI-1712872376901-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- langchain
openai-processing-ms:
- '4275'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299759'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 48ms
x-request-id:
- req_e7f462a60f4b7e6f113754c217415d28
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are assessing a submitted
answer on a given task or input based on a set of criteria. Here is the data:\n[BEGIN
DATA]\n***\n[Input]: {''context'': ''The man is not steering a car'', ''question'':
''Can we logically conclude for sure that the man is not steering a jeep?''}\n***\n[Submission]:
Yes\n***\n[Criteria]: usefulness: The prediction is useful if it is correct
and/or asks a useful followup question.\n***\n[Reference]: Yes\n***\n[END DATA]\nDoes
the submission meet the Criteria? First, write out in a step by step manner
your reasoning about each criterion to be sure that your conclusion is correct.
Avoid simply stating the correct answers at the outset. Then print only the
single character \"Y\" or \"N\" (without quotes or punctuation) on its own line
corresponding to the correct answer of whether the submission meets all criteria.
At the end, repeat just the letter again by itself on a new line."}], "model":
"gpt-4", "n": 1, "seed": 42, "stream": false, "temperature": 0.0}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA2xTTU8bMRC951eMfMkloWygDc2lBy5F4ohKo4KQ453dNXhntp5ZhRTlv1f2bggI
LpY14/fh5/HLBMD40qzAuMaqa7sw/365Leur/va6LuTi39efRXn9uPl9g0X8dXVrZgnBm0d0ekCd
OG67gOqZhraLaBUTa7EsFhfLxdlykRstlxgSrO50fj4//VacjYiGvUMxK/gzAQB4yWvyRiU+mxWc
zg6VFkVsjWb1egjARA6pYqyIF7WkZnZsOiZFynZvGgQXvWL0TFBxBG28gFp5Ai/QC1Z9IBSZwbbx
rknFEitPWIIV0Aahi1h6ly4LG/RUg+MY0SlYKr9wBCtPqWpHMqg4BN72HfztURLs5I7u6Jp5OKaZ
1FPX6yxvs9tnTcrTZLe1lPbECqKIcSB3Nk6TYoYcmDPm0hJsEQLX3tkQdonQhb7EfF3pI4I2o+zn
3I+I3Y9ptpkMSL9pvWpKgGSLMausUaaHjFqrrsEhnYgVRiSH4+ETuEkBt2hJjrpvMvRyCPC9oEjq
loyDvfRA9mOWM9j0OrzheA9ClwYk7j55r49aESuOOAT/RrZF1AF9HBau3oxHxq/vaG3GMdu/zmfg
uou8SbNMfQiv9cqTl+YhohWmNIui3A3w/QTgPv+D/t1omy5y2+mD8hNSIlycng985vjljt2iuBi7
ymrDsXG2WExGi0Z2otg+VJ5qjF30+V8ko5P95D8AAAD//wMAUoMU+w4EAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c84a7b17ea-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:57 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=ER5kCl2QwvB_Y2ucvpkG05atInnH6AjdgOjLgc8xokQ-1712872377-1.0.1.1-rVbXxP4nlWdwi3njgiKLffIYeHNimi5.hCeFRfTUlDaKQ4968gUp3Mt3a3EUNfibsTjo2qn6rymT0YO.a3soUQ;
path=/; expires=Thu, 11-Apr-24 22:22:57 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=SkX7F6FLp8qjOYESeaPfJpl4I6ZRHKY9Xwq1WqiRYok-1712872377489-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- langchain
openai-processing-ms:
- '4856'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9998'
x-ratelimit-remaining-tokens:
- '299518'
x-ratelimit-reset-requests:
- 11ms
x-ratelimit-reset-tokens:
- 96ms
x-request-id:
- req_648bb00b6435e233b39fb4198b35cb96
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are assessing a submitted
answer on a given task or input based on a set of criteria. Here is the data:\n[BEGIN
DATA]\n***\n[Input]: {''context'': ''The man is not steering a car'', ''question'':
''Can we logically conclude for sure that the man is not steering a convertible?''}\n***\n[Submission]:
Yes\n***\n[Criteria]: usefulness: The prediction is useful if it is correct
and/or asks a useful followup question.\n***\n[Reference]: Yes\n***\n[END DATA]\nDoes
the submission meet the Criteria? First, write out in a step by step manner
your reasoning about each criterion to be sure that your conclusion is correct.
Avoid simply stating the correct answers at the outset. Then print only the
single character \"Y\" or \"N\" (without quotes or punctuation) on its own line
corresponding to the correct answer of whether the submission meets all criteria.
At the end, repeat just the letter again by itself on a new line."}], "model":
"gpt-4", "n": 1, "seed": 42, "stream": false, "temperature": 0.0}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA3xTTW/aQBC98ytGezaUjyikHBv1QFtVqsShpERoWY/tLfaMszMWVFH+e7XYmFCk
XnyYmfexT8+vAwDjU7MA4wqrrqrL4cfHQ5rb1Y+fX5dPR3qc1fm6+Xz/afnl+9O3o0kigne/0ekZ
NXJc1SWqZ2rXLqBVjKyT+WT6MJ/O5tPTouIUywjLax3eDcf3k1mHKNg7FLOAXwMAgNfTN3qjFI9m
AePkPKlQxOZoFv0RgAlcxomxIl7UkprksnRMinSyuyoQXPCKwTNBxgG08AJqZQ9eoBHMmpJQJIFD
4V0RhylmnjAFK6AFQh0w9S4+FnboKQfHIaBTsJR+4ABW9nFqOzLIuCz50NTw0qBE2GhDG1pSq+ys
YHLilWZXeZHI6wU2Zo2yMWcblVVXYGsgYIYBySFYkgOGEawiU4WW4oHVf21GmdbjCKL26lotZRQg
1ugc7K3fBHaNgvioqFf52XgpXYxXksqwQ0CvBYY+oDac/ySTtJn4zg4EfGl8wApJe5F3xpWhQtT3
puyoe2DAjMNtsvFebgHrDa1NV5i3vmkl53XgXWwlNWXZzzNPXoptQCtMsVWiXLfwtwHA86nRzVVJ
TR24qnWrvEeKhNPxXctnLj/PZTvpt8pqy8tiNn4YdBaN/BHFapt5yjHUwZ8aHo0O3gZ/AQAA//8D
AC63my3YAwAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c8ff0c67c1-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:57 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=uBgvN1hX_VJANk4lHzKX.WoO0Rj0YJtGRXb_vazGBf8-1712872377-1.0.1.1-Zap6pz5_HJl.iiXmFuzj_bFFNBfIPR4Nfvc7kKqdAfrOadJ6fmeqq0C4IOXQ5o8kzN5wZZd.XqGXKsU9wHv.kg;
path=/; expires=Thu, 11-Apr-24 22:22:57 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=VeIKfAPWTbksncZfm.gYlseahryqR4izpvBJftUHsbc-1712872377878-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- langchain
openai-processing-ms:
- '5140'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299406'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 118ms
x-request-id:
- req_cb91564c57dcc729944a2c358f37e6bd
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are assessing a submitted
answer on a given task or input based on a set of criteria. Here is the data:\n[BEGIN
DATA]\n***\n[Input]: {''context'': ''The man is not steering a car'', ''question'':
''Can we logically conclude for sure that the man is not steering a SUV?''}\n***\n[Submission]:
Yes\n***\n[Criteria]: usefulness: The prediction is useful if it is correct
and/or asks a useful followup question.\n***\n[Reference]: Yes\n***\n[END DATA]\nDoes
the submission meet the Criteria? First, write out in a step by step manner
your reasoning about each criterion to be sure that your conclusion is correct.
Avoid simply stating the correct answers at the outset. Then print only the
single character \"Y\" or \"N\" (without quotes or punctuation) on its own line
corresponding to the correct answer of whether the submission meets all criteria.
At the end, repeat just the letter again by itself on a new line."}], "model":
"gpt-4", "n": 1, "seed": 42, "stream": false, "temperature": 0.0}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA3yUTU/bQBCG7/kVoz0HShIgkCNckFq1lWiRUFOhzXrWHrLeNTvjGoT479XYCWko
7SWy5uPdZ77yPAIwVJgFGFdZcXUTDs4vu6K8vA9XX/18ffPlc3d2fdPJxfrkYxQyY81Iq3t0ss06
dKluAgqlOLhdRiuoqpP5ZHo2n87m095RpwKDppWNHBwfHJ1OZpuMKpFDNgv4MQIAeO5/lS0W+GgW
cDTeWmpktiWaxWsQgMkpqMVYZmKxUcx453QpCsYe91uF4DIJZkoRfMogFTGI5TUQQ8vo2xCReQxd
Ra5SY4GeIhZgGaRCaDIW5LRYWCHFElzKGZ2AjcWHlMHyWq12IwY+hZC6toGHFlnTDpdxGT+lNIRJ
L0qxaWXcf/a0jwIsVlCftALLHry2UXliUidiHp5xNi/NIWjA9gVlYCAPHYKzEUIqydkQnlTchbbA
vnRuMw768i/1CNffb3pg1ed2VZOI9iJyh1njl+YWeWm2/aqtuAqHTmX0mDE63IQrJDFQLMjtanvT
U+JtQ8facRKwgcrI0JFU+7I911Xq8BfmoXc9ILPqFAmHYnS09p0pwEUrwKR4srcVViN5sxx7bJJg
hdu5klc4YkCSCvPrGgwrwP9ZgIE1WLeG5N9j28EXxA+tDeSf3tbnc6qhRhSd014B23Fl9CnjX43R
HH5TcvJ/rH6ff7uMt2ZzQi+vtxdS2eS00juNbQivdk+RuLrLaDlFvTOW1AzpLyOAn/2Nt3tna5qc
6kbuJK0xquD06HjQM7u/k513cnK+8UoSG3aO2elstEE0/MSC9Z2nWGJuMvU3r6Cjl9FvAAAA//8D
ADVmmy7qBAAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c98fa8fada-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:58 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=U68_p30JFeIm5Owoo90aP0.kj2ohG.1.oIlb3PLoP4Y-1712872378-1.0.1.1-CC7vzfdWYR8N8Y1JZWDhkjWdAW96wsgoCAiDXupGcGR9VVbiqTt3t.8ihn6FJDy4ZaJZP7hmBzQa3SXDuE_AzQ;
path=/; expires=Thu, 11-Apr-24 22:22:58 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=fWTJWvhn9kk_Uws5fx.5UuC6ia_OBz2KRjT13I60iwM-1712872378694-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- langchain
openai-processing-ms:
- '5645'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '298818'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 236ms
x-request-id:
- req_d6f490eef32a3a1ea0e772b01490a76f
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are assessing a submitted
answer on a given task or input based on a set of criteria. Here is the data:\n[BEGIN
DATA]\n***\n[Input]: {''context'': ''The man is not steering a car'', ''question'':
''Can we logically conclude for sure that the man is not steering a subcompact?''}\n***\n[Submission]:
Yes\n***\n[Criteria]: usefulness: The prediction is useful if it is correct
and/or asks a useful followup question.\n***\n[Reference]: Yes\n***\n[END DATA]\nDoes
the submission meet the Criteria? First, write out in a step by step manner
your reasoning about each criterion to be sure that your conclusion is correct.
Avoid simply stating the correct answers at the outset. Then print only the
single character \"Y\" or \"N\" (without quotes or punctuation) on its own line
corresponding to the correct answer of whether the submission meets all criteria.
At the end, repeat just the letter again by itself on a new line."}], "model":
"gpt-4", "n": 1, "seed": 42, "stream": false, "temperature": 0.0}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA3RU0W7TUAx971dY9wWQutJ2Gxt9BCEmBEIae5komm5vnMbdzXV27dBN0/4dOU3b
jcJLFPn4HB/bcR4HAI4KNwMXKq+hbuLR+4/rYrn6dlVdyOWn1Sr51Ze8/jwuflx+H1+5oTF4scKg
W9YocN1EVOK0gUNGr2iqk7PJ9Pxsenw27YCaC4xGWzZ6dHI0fjc57hkVU0BxM/g5AAB47J7mLRV4
72YwHm4jNYr4JbrZLgnAZY4WcV6ERH1SN9yDgZNi6uxeVQghk2ImTlByBq1IQL3cAgm0gmUbE4oM
YV1RqCxYYEkJC/ACWiE0GQsK1iwskNISAueMQcGn4i1n8HJrUd+LQckx8rpt4K5FMdponubpK/Mm
TTtRSk2rw+61c3uvIOoVraRXmHfGa5/MT2IDEfOmTPB57kZgCdsK5kGASlgjBJ8g8pKCj/HBxENs
C+xalzbjRl//qy7twrbrg47AfFsZaRc1iRCnVwI+yRqzMefuGmXutpOrvYYKNzPLWGLGFLBPN7sk
QKmgsO9SX2ibZD/abmIXvMbfmId/pxWMG9e2Q/+PccOHVkHIquuL9e+YGe9ayggL1spGVJARBV4f
LHi/3YM6b0AZFgg16oFHW4IoxWgJgZNQgRmL7SdCJZA+63f38Vlw1I89Y8kZD6RrRJXnjfmOcD1P
164/gafd7UReNpkXdmepjXEXLymRVDcZvXCyOxHlZkN/GgD86m60fXF2rslcN3qjfIvJBKfj042e
2/8O9ujk5LxHldXHPXB8ejzoLTp5EMX6pqS0xNxk6m7WjA6eBn8AAAD//wMAeqqMQaoEAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c90a2cfa8e-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:59 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=AuLtIqw3koeAeosLE7yWn5UuBhwaU5jDvQYncv2C5dE-1712872379-1.0.1.1-F3anOuTMtC1hS_A3FJsGVc6ml4OKyolXnIe9qdv_arHnvvxCl62P8U6NKMy2KoZCvML3FasqtMgYQMAFzYVPyg;
path=/; expires=Thu, 11-Apr-24 22:22:59 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=8eAZ5kbDu9yMlIJrpQ4ITsevmTBX2nJczh_2Qy9R0Co-1712872379704-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- langchain
openai-processing-ms:
- '6964'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9997'
x-ratelimit-remaining-tokens:
- '298948'
x-ratelimit-reset-requests:
- 12ms
x-ratelimit-reset-tokens:
- 210ms
x-request-id:
- req_d8fac4c146c75db38073a635359e440d
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are assessing a submitted
answer on a given task or input based on a set of criteria. Here is the data:\n[BEGIN
DATA]\n***\n[Input]: {''context'': ''The man is not steering a sedan'', ''question'':
''Can we logically conclude for sure that the man is not steering a car?''}\n***\n[Submission]:
Yes\n***\n[Criteria]: usefulness: The prediction is useful if it is correct
and/or asks a useful followup question.\n***\n[Reference]: No\n***\n[END DATA]\nDoes
the submission meet the Criteria? First, write out in a step by step manner
your reasoning about each criterion to be sure that your conclusion is correct.
Avoid simply stating the correct answers at the outset. Then print only the
single character \"Y\" or \"N\" (without quotes or punctuation) on its own line
corresponding to the correct answer of whether the submission meets all criteria.
At the end, repeat just the letter again by itself on a new line."}], "model":
"gpt-4", "n": 1, "seed": 42, "stream": false, "temperature": 0.0}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4RUTU/cMBC9768Y+dLLsmUXBJQbqlShHpBaUaSqWyGvPUkMzkzqmXRBFf+9spP9
6oe45DAz7/m9+civCYAJ3lyCcY1V13bx6N37ta8/tKjiP1/XH5uHh/Wnk/7uzrdfrpyZZgSvHtDp
BjVz3HYRNTANaZfQKmbW+fl8cXG+ODlflETLHmOG1Z0enR4dn81PRkTDwaGYS/g2AQD4Vb5ZG3l8
MpdwPN1EWhSxNZrLbRGASRxzxFiRIGpJzXSXdEyKVOTeNgguBcUUmKDiBNoEAbXyCEGgF6z6SCgy
hXUTXJODHqtA6MEKaIPQJfTBZbOwwkA1OE4JnYIl/5YTWHnMUTuSQcUx8rrv4EePkmGzJS0pCwnU
9ZpfsBC5Ds7Gbc2GJVSwRnCWwDG52HssoqVPCNpYLYpaS5mFWEEUMQ3PO5umUIefSKWoNOFJX0MJ
eksz2EiUftUG1eyeZI0pI5bmK8rS7HUo0NiDGVwNDIMrfe4QuBqkCGc3r7w8haDgGYXeKBC6POsU
4jO0aAlyz/7lcwbX2WAfPaxwP+dDVWFC0n0pM7htMGHFCad/TnSk39g5aINIGUwULgpLYd6b/096
MK278hZxaL6gY/LQ2aRZlR6sJQZtMG0fT1g8ODyYwQ0fjGAY8WYRS5lyiW7XDm7zqld9yux5H6qQ
WtktxJ7Jg5mOOvY7tlf6t7fRiC3AmyXdmPEUX7Y3HLnuEq/yvVMf4zZeBQrS3Ce0wpTvVZS7Af4y
Afhe/hX9wfmbLnHb6b3yI1ImXByfDnxm91vaZecXizGrrDbuEicXZ5NRopFnUWzvq0A1pi6F8u/I
Qicvk98AAAD//wMAOmtnyjIFAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c9cc0ffa6a-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:53:00 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=ork2miUx0hShbPC0wyHSBcXH3gGl1BoSEH94qTDU_kI-1712872380-1.0.1.1-R1xbuKhNwhK4Pu5L5hkY2bz1AI25jW_Tz9NO1lqpo556lklrIe72oLhNzqTDrgDd4wCCzEP8Y7yTnqD1jjWW2Q;
path=/; expires=Thu, 11-Apr-24 22:23:00 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=B4KOjMeF5NjJT68S2KiOzpJ0V4RiYCEDdLU8Q1gxXVY-1712872380376-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- langchain
openai-processing-ms:
- '7488'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '298899'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 220ms
x-request-id:
- req_bd5ce83e3a3a782e956775851e4e1533
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are assessing a submitted
answer on a given task or input based on a set of criteria. Here is the data:\n[BEGIN
DATA]\n***\n[Input]: {''context'': ''The man is not steering a jeep.'', ''question'':
''Can we logically conclude for sure that the man is not steering a car?''}\n***\n[Submission]:
Yes\n***\n[Criteria]: usefulness: The prediction is useful if it is correct
and/or asks a useful followup question.\n***\n[Reference]: No\n***\n[END DATA]\nDoes
the submission meet the Criteria? First, write out in a step by step manner
your reasoning about each criterion to be sure that your conclusion is correct.
Avoid simply stating the correct answers at the outset. Then print only the
single character \"Y\" or \"N\" (without quotes or punctuation) on its own line
corresponding to the correct answer of whether the submission meets all criteria.
At the end, repeat just the letter again by itself on a new line."}], "model":
"gpt-4", "n": 1, "seed": 42, "stream": false, "temperature": 0.0}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA3yUTW/bMAyG7/kVhM5u1iTF2uYyDD1sh67AuqLDsAyFLNO2Wll0RTpOUPS/D5Kd
pNu6XgyDH68eSiSfJgDKFmoJytRaTNO6o/OLvqiuP62/3F5/NbffHz/P8upcLrf22yI8qixmUH6P
RnZZU0NN61As+cFtAmrBqDo7nc3PTueL03lyNFSgi2lVK0cnR8fvZ4sxoyZrkNUSfk4AAJ7SN7L5
AjdqCcfZztIgs65QLfdBACqQixalmS2L9qKyg9OQF/QJ96ZGMMEKBkseSgogtWUQzQ9gGTrGsnMe
mTPoa2vqaCywtB4L0AxSI7QBC2tisZCj9RUYCgGNgPbFOwqg+SFa9SgGJTlHfdfCY4cc06Yrv/KX
REOYJFHr206y9JtoNwIsWjAeqQVWCbzRPvJ4ik7EMBxzj9hOVwpixO6ICMEZrNSF9tAjOKqs0c5t
o7pxXYGpdu4CDgfIf+WNDh9G9UOh3GMAoeHydodm8YYqu0YP1ifFgCUG9AazKLxSV7RSU7iJSZYh
R6M7xtfqgYJwIPFo4nsH67bQoPZvk04Tp/QEOgzCTSddKhw3xnVs15iuP4ZxlzdWJL5sKiiDmnpc
x59E+wP5Ja71u/pzzVgADSzpagE3rdNDl+S0xsQRsKSA2d9NM4KPYonmY1HY6IugQ3xiY47x+6uI
Par/bads12gUgKTG0FvGKYxVvoR4TbRBHF7/MBVUvpiD+KZWEjTaqL7vAk9hkLE7tDcb/mrlr9Q4
k8/7YXZUtYHyOPi+c25vL623XN8F1Ew+Di4LtUP68wTgV1oa3R97QLWBmlbuhB7QR8H58cmgpw77
6eCdnY0rRQmJdgfH4uxkMiIq3rJgc1daX2Fog01LJIJOnie/AQAA//8DANvXF107BQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c90d1e7aca-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:53:00 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=kT_9.zCx2vcv8yWJc2whc8YfUJc7heIe9ud6otSJ6Ns-1712872380-1.0.1.1-TBIHTiko1UBuvE3mfIqcbK84p3lVH7ywvvnCITojtVicQS_Yai3BkS_mm.ByIEJURPDYbtZcbq6jYVecwuxMZw;
path=/; expires=Thu, 11-Apr-24 22:23:00 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=yKWEmm7IjfqDWJ38jHgqmyKijPNTaVbVlhcjDTA5vT4-1712872380904-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- langchain
openai-processing-ms:
- '8158'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9996'
x-ratelimit-remaining-tokens:
- '299119'
x-ratelimit-reset-requests:
- 21ms
x-ratelimit-reset-tokens:
- 176ms
x-request-id:
- req_ff43ca0dffccedb9c360ac383de4755f
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are assessing a submitted
answer on a given task or input based on a set of criteria. Here is the data:\n[BEGIN
DATA]\n***\n[Input]: {''context'': ''The man is not steering a SUV'', ''question'':
''Can we logically conclude for sure that the man is not steering a car?''}\n***\n[Submission]:
Yes\n***\n[Criteria]: usefulness: The prediction is useful if it is correct
and/or asks a useful followup question.\n***\n[Reference]: No\n***\n[END DATA]\nDoes
the submission meet the Criteria? First, write out in a step by step manner
your reasoning about each criterion to be sure that your conclusion is correct.
Avoid simply stating the correct answers at the outset. Then print only the
single character \"Y\" or \"N\" (without quotes or punctuation) on its own line
corresponding to the correct answer of whether the submission meets all criteria.
At the end, repeat just the letter again by itself on a new line."}], "model":
"gpt-4", "n": 1, "seed": 42, "stream": false, "temperature": 0.0}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA6RUwW7bMAy95ysIndOuSYqly2UYusOAAb10WzcsQ6HItK1VJl2RrlsU/fdBspMs
XXvaxYD59B4fKVKPEwDjC7MC42qrrmnD0bvzvqjOi/7DZfxOcdPb5aJdXj18vj2rrz6aaWLw5jc6
3bKOHTdtQPVMA+wiWsWkOlvO5mfL+WI5z0DDBYZEq1o9Oj06eTtbjIyavUMxK/g5AQB4zN/kjQq8
Nys4mW4jDYrYCs1qdwjARA4pYqyIF7WkZroHHZMiZbtfagQXvWL0TFByBK29gFq5AS/QCZZdIBSZ
Ql97V6dggaUnLMAKaI3QRiy8S8XCBj1V4DhGdAqWijccwcpNitpRDEoOgfuuhdsOJdGOYU1rSk48
tZ1CG/nOFyhgITu9z1Jg/yJk2yPmBda5jsZS+iFWEEWMQ9bLr9/WJgsks1uJgXVuCXqEwJV3NoSH
pOlCV2DuhHQRQWurmfmyurPx/drsCpBu03jV1BySHuOQ5gfK2mwbKF1VoagMyj2Cs/SfDmBjBQtg
yscqf4e07c7Q20/c4x3GacYjlhiRHB54vOC9xcSNNl3qcMPPq0r99/KsEn2lfE/jPBy/3iUbhKFg
HIpLs/f6tGxNNmhJwOue1yCONtAxFdDaqMBlDu1mfOciYskRp4e9268ClwfD/2J1/2Ye+XbIcrGm
CzOu3dNuXwNXbeRN2m3qQtjFS09e6uuIVpjSbopyO9CfJgC/8rvQHay6aSM3rV4r3yAlwfnJ6aBn
9k/QHp0tt6iy2rAHFsuzyWjRyIMoNtelpwpjG31+J5LRydPkDwAAAP//AwAJJr7THgUAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c8dfe6230e-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:53:01 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=pF1oEoYT_biORIRg8npH9A2.lKMM2.xf.nn7zeHZ6uo-1712872381-1.0.1.1-Sk3.VxqWZU17oDw1mje9SnxPZbjFyhJrl2f4XbybPby9vRp6.sgtaykCodfK.XVVXuBNkKPGUOKwhKKkSxIWYg;
path=/; expires=Thu, 11-Apr-24 22:23:01 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=NFO4KfBNR4Pi53icCxsLgA__JxG86cCzHbkUNCAjxdE-1712872381241-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- langchain
openai-processing-ms:
- '8511'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299508'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 98ms
x-request-id:
- req_16709a33494e9fdd5546d9d63f9d6d40
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are assessing a submitted
answer on a given task or input based on a set of criteria. Here is the data:\n[BEGIN
DATA]\n***\n[Input]: {''context'': ''The man is not steering a subcompact'',
''question'': ''Can we logically conclude for sure that the man is not steering
a car?''}\n***\n[Submission]: Yes\n***\n[Criteria]: usefulness: The prediction
is useful if it is correct and/or asks a useful followup question.\n***\n[Reference]:
No\n***\n[END DATA]\nDoes the submission meet the Criteria? First, write out
in a step by step manner your reasoning about each criterion to be sure that
your conclusion is correct. Avoid simply stating the correct answers at the
outset. Then print only the single character \"Y\" or \"N\" (without quotes
or punctuation) on its own line corresponding to the correct answer of whether
the submission meets all criteria. At the end, repeat just the letter again
by itself on a new line."}], "model": "gpt-4", "n": 1, "seed": 42, "stream":
false, "temperature": 0.0}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA6RU32/TQAx+719h3XNX2m5s0Bc0JiR+aTCEGIig6Xpxklsvdjhf1nXT/nd0l7bp
pAIPe4l0tr/Pn+3Y9wMAZXM1A2UqHUzduIOXZ8u8vPz+/vTj5fjN4afjBeEHvrs4vfty8fmbGkYE
z6/RhA1qZLhuHAbL1LmNRx0wsk5OJtMXJ9PDk2ly1Jyji7CyCQdHB+PjyeEaUbE1KGoGPwcAAPfp
G7VRjrdqBuPhxlKjiC5RzbZBAMqzixalRawETUENe6dhCkhJ7tcKwXgb0FsmKNhDqKxA0LIAK9AK
Fq0jFBnCsrKmisYcC0uYgxYIFULjMbcmFgtztFSCYe/RBNCUP2MPWhbRqtdkULBzvGwb+N2iRNgo
o4yiEEtNG6DxfGNzFNCQhN4mJtB9PCTVa58VyFIZtab4IA4gAdF3SaWdx2loEzKVeKLkDVMHPtME
SwTHpTXauVWkNq7NMfVDWo8QKh0Scn8So/2rTG3rkHZe2xBii0iW6Ls0P1AytW1j3TiL0hEvEYym
JwqAuRbMgSmFlfYGadOjJOwtL/EG/TC5PRbokQw+UnjOmYrNtQLSliVKeCQw5nyixutWAszR6FYQ
4sT/Ma8RnO68YqiGsGoQuIhcQxD+Oxhyxo6b0MQF8datoEZN+2TpANq5zfw8Fuyxa9S+UVra/uEC
didVrYOp9vZ3BO9iCuE+OG7Yf3bidT/RWD1ptxIrO8JE4k/c50fsBtBvNBc7O5xIzzM6V+tb8LA9
Io7LxvM8HhxqndvaC0tWqiuPWpjiwZDATQd/GAD8SseqfXR/VOO5bsJV4AVSJJyOn3d8qr+L+7yB
g3a942gyHqwlKllJwPqqsFSib7xNxysKHTwM/gAAAP//AwABx9QGswUAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c86b41cea8-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:53:01 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=EjjZ6swOGIec.kYPdg8ZPextPq.N1qH7ivgpBRJzzaA-1712872381-1.0.1.1-KAwilzyUeNnZ24dTfiHlDfI.qneKsJWrDfDnX9hFEjh5tA33nTSYL4qHUdk3kh5JtI2CDusx_AcZyfwek3q0EA;
path=/; expires=Thu, 11-Apr-24 22:23:01 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=Y9yA2wjAcZbIM3GL22xjFCS56KiVvRDd.RKqqpcnE6Y-1712872381873-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- langchain
openai-processing-ms:
- '9215'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299413'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 117ms
x-request-id:
- req_7339de803d169dfde5b473533a8e38f2
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are assessing a submitted
answer on a given task or input based on a set of criteria. Here is the data:\n[BEGIN
DATA]\n***\n[Input]: {''context'': ''The man is not steering a convertible'',
''question'': ''Can we logically conclude for sure that the man is not steering
a car?''}\n***\n[Submission]: Yes\n***\n[Criteria]: usefulness: The prediction
is useful if it is correct and/or asks a useful followup question.\n***\n[Reference]:
No\n***\n[END DATA]\nDoes the submission meet the Criteria? First, write out
in a step by step manner your reasoning about each criterion to be sure that
your conclusion is correct. Avoid simply stating the correct answers at the
outset. Then print only the single character \"Y\" or \"N\" (without quotes
or punctuation) on its own line corresponding to the correct answer of whether
the submission meets all criteria. At the end, repeat just the letter again
by itself on a new line."}], "model": "gpt-4", "n": 1, "seed": 42, "stream":
false, "temperature": 0.0}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA6RUTW/bMAy951cQOqdZkmbrmsswFEN3yi4Dgm0eAlmmbTWy6Il006Dofx8k53NF
twG9BAif3+Pjk8THAYCyhZqDMrUW07Tu4vpmU1T8aZYvl9eb29tq/P3Lckvrt83dcnGrhpFB+R0a
2bNGhprWoVjyPWwCasGoOrmaTN9fTS+vpgloqEAXaVUrF7OL8bvJ5Y5RkzXIag4/BgAAj+k3evMF
Pqg5jIf7SoPMukI1P3wEoAK5WFGa2bJoL2p4BA15QZ/sfq0RTLCCwZKHkgJIbRlE8xosQ8dYds4j
8xA2tTV1LBZYWo8FaAapEdqAhTVxWMjR+goMhYBGQPviDQXQvI5VvRODkpyjTdfCrw450kaZz3w0
Yn3bCbSB7m2BDBqS0YekBPr4PSTXO8wyZGmMRvv4x5MAC2Lomxry9xjE5g4zlYSi571Uz77RHjYI
jiprtHPbSDKuKzAFwl1AkFpLYr7QRYcPmToMwl3eWJGYkecNhr7NN+RM7XNsUPtn+cXkY6MNgtH+
lYYObgKWGNAbPHOzoOdmDq2j2uu6w13HAjka3TFCPNy/HU3yekNNqxMm+wyZYyxCqXKYYwg2nbtx
qMPRywnDMli/u4cj+HjaKmIaZNsiUBmdDoHp3K38z22CgrDHY3ovTKjDCD7Hu9q5AnI8xQpbpnHk
1Mv5Depn0Y7p2Cw+zH8/pYAlBRz+GcuJZewzO759Kk9ee9JZZH6hdlvj6bBuHFVtoDyuJt85d6iX
1luuVwE1k4+rhYXanv40APiZ1lp3tqlUG6hpZSW0Rh8Fp+NZr6eOG/QEneyWnhIS7Y7AbDIb7Cwq
3rJgsyqtrzC0waY1F40Onga/AQAA//8DAMHDaRbdBQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e34c95bd167bf-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:53:03 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=vXVspJxjv7dG_kCoW0uEt_NutH_3fgip.axpdjGUET0-1712872383-1.0.1.1-C9b_Msm4ykgJVsBWjTR52EyBZmOUvS.oTg3nbd6HDCCUuhnaqu5XggMqlgqBH_MZIxrnMZGPZM53FTBfb8_QLg;
path=/; expires=Thu, 11-Apr-24 22:23:03 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=jPm.coApYy2f9whIB1aUJxYT_aW50Xb6SbOn.5Q7tuE-1712872383220-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- langchain
openai-processing-ms:
- '10445'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '298925'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 214ms
x-request-id:
- req_638318e0893bfa0ea4a8c852b03809d7
status:
code: 200
message: OK
- request:
body: '{"messages": [{"content": "Is the output accurate with respect to the expected
output? Y/N\nOutput: Yes\nExpected: Yes", "role": "user"}], "model": "gpt-4o",
"n": 1, "parallel_tool_calls": false, "stream": false, "temperature": 0.7, "tool_choice":
{"type": "function", "function": {"name": "reference_accuracy"}}, "tools": [{"type":
"function", "function": {"name": "reference_accuracy", "description": "Whether
the output is accurate with respect to the expected output.", "parameters":
{"type": "object", "properties": {"score": {"type": "string", "enum": ["Y",
"N"], "description": "The score for the evaluation, one of Y, N."}}, "required":
["score"]}}}]}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xT24rbMBB991eIeY6Lk9i7id8W2i2UQi+79EKzGFkeO9rIkpBk2hDy70V21nLS
LNQPQsyZOWfmaHyICAFeQU6AbaljrRbx3ZfvdbYwv5VdZ/f7t99Wzw+Szh/LDz9c9xlmvkKVz8jc
S9Ubplot0HElB5gZpA496/x2maxWWZoue6BVFQpf1mgXpypeJIs0TlZxcnMq3CrO0EJOfkWEEHLo
T9+irPAP5CSZvURatJY2CPmYRAgYJXwEqLXcOiodzALIlHQofdeyE2ICOKVEwagQQXj4DpN78IkK
Ubzj6/tPXU0f3dcHtXxf3u0+ljfNfjfRG6j3um+o7iQb/ZngYzy/ECMEJG37WoM1GpQMC8pYZyjb
X7AQAtQ0XYvS+QngsAHLlMEN5Bv4uYEjnKUfo2v3p4khBuvOUnFy6hQ/jtYL1WijSnvhJNRccrst
DFLbTwTWKT1oe51eAbqzVwNtVKtd4dQOpSdc3w50EHYqgNkJc8pREcLzZDG7wlZU6CjvH3XcI0bZ
FqtQGvaJdhVXEyCazPxvM9e4h7m5bP6HPgCMoXZYFdpgxdn5wCHNoP/jXksbPe4bBru3Dtui5rJB
ow3vlx5qXaQZq7O0QooQHaO/AAAA//8DAERRdCH9AwAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8de411052c0a2320-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Nov 2024 09:30:43 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=46aW_1z3Yu96PFQG.9WdGafKDLThRQ2V2BH4Wc__v2k-1730885443-1.0.1.1-dyb20fuCsDK3FwOSUtTnfWmhmQbPJl2FAYfbORyKyICHL1woSnrX0KQ1fQOaD8ifq8ABkL0lDQdEb9zPCgTcMA;
path=/; expires=Wed, 06-Nov-24 10:00:43 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=AQfLeHBIv_Vjs.O8aHH2.5xm6D8_TmJg6.A65iJorpk-1730885443709-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- langchain
openai-processing-ms:
- '223'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9998'
x-ratelimit-remaining-tokens:
- '29999959'
x-ratelimit-reset-requests:
- 11ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_737e0742debe3c7d3d8504412cf9fbe7
status:
code: 200
message: OK
- request:
body: '{"messages": [{"content": "Is the output accurate with respect to the expected
output? Y/N\nOutput: Yes\nExpected: No", "role": "user"}], "model": "gpt-4o",
"n": 1, "parallel_tool_calls": false, "stream": false, "temperature": 0.7, "tool_choice":
{"type": "function", "function": {"name": "reference_accuracy"}}, "tools": [{"type":
"function", "function": {"name": "reference_accuracy", "description": "Whether
the output is accurate with respect to the expected output.", "parameters":
{"type": "object", "properties": {"score": {"type": "string", "enum": ["Y",
"N"], "description": "The score for the evaluation, one of Y, N."}}, "required":
["score"]}}}]}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xTUW/TMBB+z6+w7rlFaZqwLm/VJiqKBgJtbIKiyLtcUoNjG9sRVFX/O0rSJWkp
EnmwrPvuvu/u82UfMAYih5QBbrnHysjp8uNjkTyJeLn6NFNPb78sI3V3++4+2pkbfgOTpkI/fyf0
L1WvUFdGkhdadTBa4p4a1tnVPFwskjiet0Clc5JNWWn8NNbTKIziabiYhq+PhVstkByk7GvAGGP7
9mxaVDn9hpSFk5dIRc7xkiDtkxgDq2UTAe6ccJ4rD5MBRK08qaZrVUs5ArzWMkMu5SDcffvRffCJ
S5klvz6sP6/Ez2KNj2u8267eqPuHW40jvY56Z9qGilph788I7+PpmRhjoHjV1loqyJJCyjhibTnu
zlgYA27LuiLlmwlgvwGH2tIG0g2838ABTtIPwaX7t5EhloracXl06hg/9NZLXRqrn92Zk1AIJdw2
s8RdOxE4r02n3ei0ClCfvBoYqyvjM69/kGoIr686Ohh2agCTI+a153IIz8JocoEty8lz0T5qv0fI
cUv5UDrsE69zoUdAMJr572YucXdzC1X+D/0AIJLxlGfGUi7wdOAhzVLzx/0rrfe4bRjcznmqskKo
kqyxol16KEw2S67zxTyeIUJwCP4AAAD//wMAqYUqgf0DAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8de4110529839e56-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Nov 2024 09:30:43 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=loqAsRu86UykMKQMg3z8gB4BBDAuZ6Vca9ZfOTq66Qo-1730885443-1.0.1.1-YK976xVsj2Yu_OpQs.IoHyxvN0bSXd._..jfqU8pn1HtpMMHXCDbEUpMcqNTuhYvuuA95HQGI6Jq_vjEVd_SLQ;
path=/; expires=Wed, 06-Nov-24 10:00:43 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=LX36sP3b4IHGOqhbqRPeGY5sTMM8V.N5Kv_LqSGFIH8-1730885443866-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- langchain
openai-processing-ms:
- '380'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999960'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_43e9f6e698f554ded303aa5ff5fe57b8
status:
code: 200
message: OK
- request:
body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "Context:
The man is not steering a car\nQuestion: Can we logically conclude for sure
that the man is not steering a jeep?\nOutput: "}], "model": "claude-3-haiku-20240307",
"system": "Is the output accurate with respect to the context and question?
Y/N", "tool_choice": {"type": "tool", "name": "accuracy"}, "tools": [{"name":
"accuracy", "description": "Whether the output is accurate with respect to the
context and question.", "input_schema": {"type": "object", "properties": {"score":
{"type": "string", "enum": ["Y", "N"], "description": "The score for the evaluation,
one of Y, N."}, "explanation": {"type": "string", "description": "The explanation
for the score."}}, "required": ["score", "explanation"]}}]}'
headers: {}
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: !!binary |
H4sIAAAAAAAAA5ySTWsbMRCG/8owZ7msHZeke2ugpDQ5hKSUmiYYWRqvZcuarTRyshj/9yIt6Qcl
l5yE5h0982pmjugstrhP3bKZ3m3p2xe6ve3s/G5qF93209U1PaFCGXoqWZSS7ggVRvYloFNySXQQ
VLhnSx5bNF5nS5OzyUa7XZ7Mmtm8OWvOUaHhIBQE2x/HF6Iw+2VOBVl9lHteNtMPF8EuLs+v58Pq
5v5Gf57e775fDagw6H0tbEyO2pSIC30WbI+YDMeiLVAhPfdeBy2OA7Z4qRNZ4ACyIejcgQJUL88C
stECD1iEvQ7gEgQWSEIUXehAg9HxARU8ERgdiua5c0Z7PxSG8dkSrDlCypFG2uusLVH/Dj7Ws8ga
ShuA13CgjTOeFKyy1Fe1sILEFffito98cJYsWKaRHrMn4DxW7Tklt3LeyfCvFcPZW1jR/2a+bijS
miOpmsxZ+iwwtlfIDxBp7clIGoF/2vC2z+Pp9KgwCffLSDrV6fy1A1VI9DNTMIRtyN4rzHXn2uM4
6aXwjkLC9v2sUTj6/R2bzi9Op18AAAD//wMAkleKc9MCAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8de41107cea917e8-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Nov 2024 09:30:45 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
request-id:
- req_01Viqb8TwVsSAoVK9VoijQww
via:
- 1.1 google
status:
code: 200
message: OK
- request:
body: '{"max_tokens": 1024, "messages": [{"role": "user", "content": "Context:
The man is not steering a SUV\nQuestion: Can we logically conclude for sure
that the man is not steering a car?\nOutput: "}], "model": "claude-3-haiku-20240307",
"system": "Is the output accurate with respect to the context and question?
Y/N", "tool_choice": {"type": "tool", "name": "accuracy"}, "tools": [{"name":
"accuracy", "description": "Whether the output is accurate with respect to the
context and question.", "input_schema": {"type": "object", "properties": {"score":
{"type": "string", "enum": ["Y", "N"], "description": "The score for the evaluation,
one of Y, N."}, "explanation": {"type": "string", "description": "The explanation
for the score."}}, "required": ["score", "explanation"]}}]}'
headers: {}
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: !!binary |
H4sIAAAAAAAAA4SSQW/bMAyF/wrBs1M4SbNhvg0tstMKDG1WbGthMDJjK5VFT5TSGkH++yAH3brD
sJMgPerjo56OaBussNe2Ludr3ozD9ebL9f16f/Pp6kD3K75cYYFxHDhXsSq1jAUGcfmAVK1G8hEL
7KVhhxUaR6nh2XLWkX1Ks0W5uCyX5Xss0IiP7CNWP46vxCji6qQZOfnI+1SX89ur73bRt5/3z/Hd
t0B+vafOWyzQUz81NiYFMmO+54cUsTqiGglZu8EC+WVw5Cla8VjhXcfQ2gN7mCy8RNBIkRViRxEe
MHYMPXmwCl6yyBysb4HgdvP1AQvYpgixswqN8LnGs8mPEawboWfy8G+GoXABH/OSRYJtEGo4gKHI
rYTx7MJ641LDmltqASoT0fqdhH6aA4YgB9tw88cEOSfPkBSigJPWGnJuzDNOKNhJAE2Bzw3+Y/Cu
48BAgUFixwFyQAqyy6rCltW+mvsbZyS5Brb8G3iBp9NjgRplqAOTTgm8yXkSlH8m9oax8sm5AtP0
r6rjOc06yhN7xWq1KAuUFN+ezZcfTqdfAAAA//8DAGwLxC23AgAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8de411087a3ecfc8-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Nov 2024 09:30:45 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
request-id:
- req_01NPZFdoNbxrSN1nczYcmoZ8
via:
- 1.1 google
status:
code: 200
message: OK
version: 1
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/cassettes/58e9f031-846d-412e-82a3-7dcc7e6370d2.yaml | interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Say hello!"}], "model": "gpt-3.5-turbo", "stream":
false}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xSQU7DMBC85xWLzy0qhUDpBSE4tEdOCCEUufY2NXW8xt4ICurfkdO0SQVIXHyY
2RnPrP2VAQijxRSEWklWlbfD24fHJ8TXdXjLZ2t6GFfycn7/ecc6t+u5GCQFLV5R8V51qqjyFtmQ
29EqoGRMrmdX56PJJB9dXDRERRptkpWeh+en+ZDrsKDh6Gyct8oVGYVRTOE5AwD4as6U0Wn8EFMY
DfZIhTHKEsX0MAQgAtmECBmjiSwdi0FHKnKMrok9Q2vpBGb0Dko6mMNOABuqgUnLzU1fGHBZR5mC
u9raFt8eklgqfaBFbPkDvjTOxFURUEZy6dbI5EXDbjOAl6ZxfVRC+ECV54JpjS4ZjtvCottxR163
HBNL29Ps8SOzQiNLY2NvYUJJtULdKbvtylob6hFZr/LPLL9572obV/7HviOUQs+oCx9QG3XctxsL
mD7gX2OHFTeBRdxExqpYGldi8ME0X6B5yW32DQAA//8DAGEUbBABAwAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8de40748b8b1f96f-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Nov 2024 09:24:05 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=7H8sL8DtZS2fxJWGXN_bJrSxvLWln0HK6T7NTLmjAlA-1730885045-1.0.1.1-GdUVLAd0roeRTP8c_I6Gsptp04f0d79ExxGkBanvTOaylVI5yqfby.rcFMGa4rrwuNGsAqzBgHmdujksn9VbHQ;
path=/; expires=Wed, 06-Nov-24 09:54:05 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=sN.VwPuHTQSfBCNjZq7RmFh22sDf3zzfaQWwRFqc9co-1730885045169-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- langchain
openai-processing-ms:
- '440'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '50000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '49999971'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_dc7dd83e2d482e960ff45323e469452f
status:
code: 200
message: OK
- request:
body: '{"input": ["Hello! How can I assist you today?", "Hello!"], "model": "text-embedding-3-small",
"encoding_format": "base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA5yaW6+yzJft7/tTPHlv6UQOSlX97ziJnAsBEfsKEBEQkVMB1env3tGns3d2sq/6
ZiVLWcuymDXGmL/Jf/7bnz//dFld5NM///rzz6sap3/+/fvaPZ3Sf/715z/+7c+fP3/+8/fz/7my
aLPifq/e5e/y35vV+16s//zrD/t/Xvm/F/3rzz9LiDfvQM4RIAaUDJTG1YaljH8Mi/FaCoRS3yRx
d5RUtrobEhxg4RB7hz9gWZN0BG+UtDObH6JMuA9hBVVCP97A386AdZzQQNKqXonm8Txdri2fo8Rp
WawIaTEs+n7vQ/yseY91nrK6CcHaQ1rvF5yEBQ7X5BwUCIV8jr3qLg/cAmwGdsZtxqfnTVcJe3pr
UGhuNjnWJ9ket8JTDpvRXLBTnjz7ecw+LQwvbTSzRUnqbb9bN5AV8xsfn/nVXnd83MO06LAnMINk
k5W9zGAXeSesnh5hRl8DXyLnvWRYeW33bMyeUYeATGVvLq/ZQPP+ysPq5ibEOU91tjFh3kNV4QSs
M6I+sEA/NPCuTyJRD7uErvm5ZtD1uj2wGbaxvckqHKGtRTXOztAEAg9iD960rSdqbW3qBwoSD1ed
dcnxfeizEbdZAQOVdcil6OKMW7Mni9ArPHvreOrVRdBLHRlv+Yill10Nk5IlOhh2F97becdrLWBY
W+gQPcG8Z+OwplM4p3BdXEQcX7mFXfnxWXR/6AEuIq4MOcrOBXz4w37mQ0+mXGYvHbp5MZyZT4dr
zlPuG+CEm0RO4XQBPFn3G+LH3Cf37fMIKfwMjfgZhBYbpMzCSfPsBpzV8kBuKJHV9T0pDLLe6UKw
c8L29hZkDfkX40zuJY4Ab+91FnTc9UnMnk/o+pGiGNnaqSPy1bmrtEomD1ydKsf2rj8CNjg0G9rl
2scTgcpm23wOIAq6W0uiF3Gy7aPWOfqg7UyUwq/szyS9ePBOVX2GixHZwltMnF99Y+c+m9m2Hi0L
7IjreWLSqZSKcRqgSHzIRDneNHVLjWQBvb8XcNozj+G3HnjuLh72ZnWox2gKIUx080QybbiA7VKK
KfSejwJbqHzTzd7CEm01mxBFEg/heFyPLdwJ8fj381l0mBR4hdcDdhS1H6iOJQmFV9UkR+/tZ1N3
wwX4oOVMsnSTKf++kAa6ay8TbI8B4Cr7wYh517jkzDzZelFQkUKHdAg74OrXsybULCKhnWInFY2a
S49yD5NP9SbSGPu1cL03ysFyjAnbz1YZlsYTWOg8j5S4aJ+rC30tM2o/cMZ2slgDfZ55D4lMl+G7
1sQq592lAO326xPHkWRni9oMAWQbT8UF8Bew1N3cw7VeJJzrRTvMkzUw8HVCA/mdD26rEhbWg5fh
h18N6rIMQYmih8Pg8/u80vF88xjYJf3H2yfRe1hNcHEQ3F8LrBxvjbo1sN/Dg9yfCZaeR5tchZSF
jsLoHmsocSjsak+H63S8Ypl5RvX6diYD1p7fYY1e3Xq1PzcdyhduxKqAWyCs0zzChZkJlm8fv974
LdygSxjf44XBBFyZNMVvPTO7N6822RvQh2/ubGKd+i5YzC1YUOyKh1mcHk+wqqeoRVDRo1nAwQ2w
ox+XcJfrH+zhXWfz7K7zgfcGNnEWRwF8MKkO6gfAeOKT+9j08oE8BDx/JvixvodFaBUHjRUMsB1d
vIE/bZYCzt3V8/ivfk+ieVTQTx/PxwbQSS5PMxJkdcDSRjj1UzScj2SLbcjZU/Wal9CxR+t0umLF
XM7hynKVAWG4afNezjaVvraphM4azfh0Hph65VXDg9eNvonHaGtNf34yMl400+56reltUCQUbA5L
Hspjqjco2TE4qm6Fb+TMAvq+iQnMQJeS+65u1W0nfnJw2vd3XNglyhZGxwns+uhGpCKk9kL6ggcl
e1o8Mdmt9rIxNYtuvjJhwxpbe33tsA4/A9d6u+plZFx3O+XIfJ/uRDOnMtzSt7iHKlk/M702nUpc
7x0hJlfOWH6FZsZJ4StF33olRzgewFJXNEZyABXyuCW1OjWHbIFMuGfJPR4qe/3uLyj7JcH3t3oJ
V1kUPfitf+Lq7RSucRP4yI6lkNjEU4Gw/wQ67Fn/iouPS+vtoHc8ZKV3RxTPnkK6q3UNHe8KIWpB
Yns5bqYPcHHzsW3qZ7Adi/0IGpbT8aOwxp/etvB5MyVvp0HZ5j6laMBHfz/i+1CcQzpIFw15naGT
k1F19nrfLxuKrlGOi6FN7O/55mHNbjM5UuOtbtzrkIMtDY44x8MAFvGI9tC4XC5E40+7YX04igEf
/p4Ss7lHIWs/UgbKEzLJWcqOIX9sGgU9xFEi0jIhuoYfqAEr8m0id2qW8c+qb0GqcQq+72rdpuAu
++j6yTG5TRjVW30pNNj528mjzmUYeqPQK/FM32eiBIivNxp0FmqTgCE/v15fjwaiU8gaOGS4BWyK
Vc9w6Lccm8Z2GXhy1QNYsWNM7lqngTXW9ha8I3eHA+VVh9RXigrCrG+x/u5VSp9nxoHVdalxeo+L
bEnemwXg/ZQRO1n6etQF2MLndbaxm93gMBfevTm8Trth5p7HD6DWTDuktWWP1bdX0vWT2hZ8ebcE
3w/PGVBYLz7SwvSIDXyq7NVSHB/2vih4tLYCdTb2q4RYia+xd3PcYZJY1UNFyWfEwCrNVqaRFXR2
jzaJLqEV0mKzSnh9xhtW5yVXuVOnivC7vyTdcWjYvvsN2ZPv42vXsPYyCXWM7IOxJ8dXeaTsh48t
CHj2jB+CbIUrjAcFmvfaIk5zEMK/+eZGExcnKhvXc5JGe6REs4XTyomz5fv3v3rFNrAzm94WLoYD
RxuiJSfbpgdcB6jlnJU8simhG9ugBVKjm/CZeUbD168gdAu/xK6CJZX19UMMjit3JB7UTZW9c88e
dp6Qzfy5FgB9OXkCvnqC1fdzF65NaPVgl3HzN1881U1IGghXKS2xiZKnvYrqbYZo368zx8xnm++X
ukUjbtYZmWpJF0YsR+DckUuS9IOyJSzXDkUqA7F0eGsDx6a7Dg4wd7A/oQFQTa4U1LKIx4q/m9XF
HRcG8Ka2/P0+W2H0PlAPjYDdpvfBIkpWA6qHKOFo7x4BvfRLCZmFfZOv3oSbPoQSJHy48/ZrKw90
9bQWKYScseXeTjb1u3cPpVbBxD3oAViP2bMF/gG2RBezjP7yLKjhx8cn1hsB9Ss2BjNz0UhkTlJG
L74niV+/J16lvOrFu60GSgiM8N1dQbhxQ7OhxzUqiWJvxkBCCHuoq2dxFvp2COed+CxQrSf63/2l
IiAzMIhR4TSTu3CV0yiB/sqZWM95NeRet0MHa/ZJsSuZok0/7rP9+/898GZUCuCQQpqtwS9/DFOy
wBZIppaTSx8QdSlg0kFb6Sgx5fepnoAlzVCb+xOxyN0KF/y6brCpzXhG7BADXl33Fbg5H8fjv347
Oq0Mkb5vjuQ6dgqdpeveg/i9q4n1PQ/ja7lF0DnkNk7WKqD05UQpLPxXiR0SW8NinlkofqoSkwe7
C+1lvrMlirXo/dtfuinWMENJ6QiW/dzOKDSPC4yOt5X4pnBWVze+BaA/xQ3xVKZSR8e+5ih3T9hb
feGdzZIQRPCm44f37tyW0ilsE+C9DzZ5HPFjeFOZbvBZTSF2H3c7W+TzmUG/85viOQfc+X5boBvN
HjkKoh8Kh8XUgbeOt5mxzUVd2zaE0Fz2g8dcjAJMp87eA3VRXjO6SInNtyROxZNiJTig6pqtFyHV
oTw7Gj5pJ0f9rQdy02vD8tW3KWE7TURc0H7zQvIG6+DwHWQEP8IXOYbhxo9djtjpmWJZCK70Mxt+
idw107HMIsXud5EfodM9TYl3PCV0wqa2/PojLDE3l3In47kgGD5NEj/sg00X4xygX544Gq+buj3w
OQDUOlXkm18HgYjUg8w0yjgqhJxycZMG8HV+MvjEDKU6N3dpgcyemN7aqSBcjpscIGDuC5JE/p5u
ZUZbGIrph9hVdLe3YqYpzD6wJ5JyPqmjcQw6aGvHjnz3m9LDpWig8zxRT7zHRbieJsGDb4+qMyJV
aZObNToQSJEzL8uEwMKekQOGoNuR+yTENncyPht8HLvFY8rMCkdE1xnh8ijP7RocQ1Iesz2svaDD
2DsKw1YvUgv7ftawvcMmpX4FI3ARtwh75RXUyyd/x/DK3q8efTpm/dUjCYmGEeOrfmQHos+9Ab95
j8hBodQst9dj+KZ8NO9FsrcX8dGMEGs844HMJOr6cCzrYKoTwtr18qynSi8jaGmq7wnXVas/v3xz
mgU87wr9XtPFuAXw2L6Vbz7WKc8yYwO9fK+Sy6t80TeGtfHrrzzmmy9WLeT3v/tB5GMDALkyJg87
v3Zm0Kz+sH77VfjrJ5I48FXePnMz+uY7orw0BXTrJldIsfYQq6BXKOslKAfq3lBwUteaLRhQspBR
vhLibnFJF/6ReTB+XGfstaxHebIuG1yNs0LMOOXpollsgZDR3rAams6XBxQd+PZvRLrDF+VkcXNQ
36j5/L3f2QjGkwTLp6fjY+qK4XLYX1lgx0qIj2etBsSaQSeWLvsiFsoP6mIM9fbLQ9jYWaw99767
QahoEYlkjxvWX/2D+bH35o9Lh1nvHxGcw6OC8wRKNr0KAYtw5xVYua1CtvayLyGBcRKcqPdP9vWD
Dubx/o5TcXzbY+nNsfjzpx8vGQqB42Goio23fv1oSVzc/+od29bkqn/zZ3TfTPzLYwuclBG64i4h
p8jY7Dk1kg3YB2tPnImQcDSDfQ9zyvozPT57SnNNH2HkaDciXwI1WyJ35VGnwwO5ffMg1yQIgm89
z4con7MF6IcWJp/yTYz0cw9XhwMJHLi1wRrT9uFWwnMCN5T22HPbut7mQd6gcBpDfLNYh44FyCEw
VYI8geZ1tkw86cB0jkSi9sqzXkoDKDDq1vMvb4I1UHsIzf3q4i8vsAlagubXH3rilIU2f9GxCBu9
KLB+RBNdBL3T4J273rF1j5ls2b3LPXIsvsNSRMqBzIddBWvdtL163MJhOxwnC66B7mM9Vap6dUE7
QzSlM3a//GOzz+ibNzLe22t4o+R3XsLynf/NjyvL9Rb85S/32y8v4pETIWBmSpzFiNRBKrce8Ywl
YTngMCBADXKEZLBio7lM9WjYLgPvOvnq5+Vh83V8W6B3aMC8BNNHpU2rOlBoMhs78cMZ1pG1c3i8
GO7Meg+zpqZRlejp7hHxARSzZRnSEs679UBOj3BHlydHDWTsR85bwar8Tz+XuZKAYysxAe+11gjc
Qn15q9lZ9WYewwTuDEbDtixc7CVyGAsGhRBiU30t6mRyzz16JPQybzA61n/1OtHmgMgsqmxa7U8R
tBxrwljnUzqf28iBq2S/sN2sy9CnU+sctp2W4qSfN3WtXvqGvv3Pj0fYNLwHJVSjpcK/en1x8Gyh
59MvsJ5v7EC4mYrwpVc80Y30mf36PbiF77sH0XLJ+NTTNBSpEBL9XAt0Fbu3BQOie/ikXpRhZT4w
gk/2ps6A3fjhm78aZJafhVhJV9OF3TIJfPcfG189WpLpVcCFfwfYdq2ynlTD91H2FFzs3h8VXdxQ
sUR21F7EvWktXV+3tUcXUp6xynA+oI3+1KGZ6xfslmFdLzcuMUDYzYcvrztk6z53WLCiKMDXXMzC
7asAQDHzEzbFRgq//rqJX15KbPJa7Xk+p4zY+NKKJVNY1c25lSk6tXAiv3xMgyWIQXhpInwnlWTT
8rXO0E9UMNPziQX0BJwCVLXSfvkbUmf5XpfwCcuSpGnrhGtcF+3hfPE+WFnl1J489ZZDWfYDnGlR
N6yJMDTwBklDXHaI6WKuVw09+zeHjbV91nSaExE2nC4Tr+HbbD11tghVC0Xk2rk6XZgH48Bfvdqy
wNnrbCQluNPPRJTiTcJt0vY9XOp88rh5GtQvb0vED3rGONqbgj1++RWq4eB71Ze3bWJoG/BY2peZ
V81J3YppN8KvnmC9xm9KFXf78jEzxVogHmv+10/uQ2kmTmW+sl89//3d/faTwvlTx2BnQI3cej4B
gn+XJfgCjU7c+B2Gy/VpW6Adspo4aOEy2r2TFEwBJR59Px/hkkxTDtPEq4hpbFy9CsTpRGPhQywN
IArpOvcJDB1JxpLjXwA9nu4x9Erpg8NydYcV8q4Fj3a0w04PvHoaKF7AYnj9r15C3mCaGH15N/GX
dRyozVUdotJm//RooPmr9GDlogvG1asLt2DEFSyqs49PjZnY6z1JIUjhCrHbdrdwVeSPD7/82U36
a0PXzN5/ed4meYDAIlvyWezByR57YlVOHA6qKDKAjNPo0VNd2WsZrAZczUmYd8Nc0DXWFgsGXdb+
Pj9bLuFkwfHsV/jcilrNVrC3xPEcVB5bCJBu+tHc/3jRzy9V7u12PPjy1fnLf8M1bYYNXqoMYmcI
MrrCvIjEGZPnzH/7pWV3UnVYXtP9zD8bBJawPPSg8hzfG1xLqgUxd1uwoaQnNq5fNv3mM7BTyitO
jPBZU+N57+DdTSus7+9hxgfOlMD1LFyIcRZYlZ6AVgBWiLt5O94ae7uf5uZX39hq30HItW3IwNtj
XbG5i82a5RkUw6euZN5utrdsVDU3B998QOTPfMy2Lw8UV/7TE0vRQb2dj3wKa922sezTZKBVcBAh
ewp8rLeqMTzzTd5gdF9MfDFeB3vJjU8Jv/mQmF8/Xcn87OGvH5UeoheuNyZtoFvIL6zUip2teuU3
EO30YhZYi7GXQ4JmODJOhKNjoGXc23lZ8PrBET6t6ahOm1FJ4Hue8JGDbzp+50VidI1zYn/5NGu/
PB9GpUiIi2APtnt10+A9Zrj5IwoMXfnPLYaMtGB8fbGKzYmmK4FDVAOP7pN7PX6EGw8m3sbYVSqm
/svbv/3S3LExrRdur0dAV0OR6KEV2ivArALaSnOIPBbXbBW8mYWHzKtnRp8jm0jtC/7lIzJjvbM1
ZkwNcBxjz6gdO3WxDvLyP3n8iHfDMEeVhpahtYiED0a4gYQvAHhZz1mQQzvj2FTooH9g2pnxUy1c
z3G/QO41+fgmv9/16j53InidtwN22HqxJ0WaWlg/O5O4/DSry9ktY8RFckmMnNT19pt/GFtoEIcV
7xmXHs0efnkAsdktHug3jwDmcv3gr3+ADb0TC355xLxkjy38m8+Xupi+vCyuyZWR2QNqkYztNrEp
5fBtA1/+SU7qOaSjfRwNiNdtJV4uzdlCj8Ee9Keowclzn9rzOx580Tlgnxhf/53z5z399QcEB2VY
L4fbIYG7Zb+bWW7Zwg2H1h4qh7Dxlur6BCNphhJeRaBhudy74VjApIcnolnetvQjoK+Bqf7yDSvZ
ne11etodEJfTmZwSp6SfzIIQ/nhK2r63sP/yEvDTp5g4D/vHI1FANI8kR35WZ/OYJeI69NV80K6H
4cc74Nd/iVI4YcZ/6w+cy2LEVk3Xekvnzx40nCZ/5017SvsoaaA4wmEGP/809gflN+/8zrs4OqpN
HUBhN7gEx02V/fgoeOv7JzaEnW5/+nbK4YFpVWKp62ugz7EcAYfYiZiPd24v2TPv4OmepFi5ca29
fcx4E+RWf2MtKZLsux4RKHkzYo8/1PWs+g8FHvf+hq9hbAyrbYAA1IPznY8UMCOSlLPQb1n5y891
dXWIlsAZwxyr/nIa1ibhGKivh/uP96rCo8AKWG5iih2ytvUKljOLtppPiJaaSkZyzZsPh/Cke8K6
HkKKMNxg6CgyPuVMby/BYdzAt1/Aqgl1QHmuFw/JlRUI3lovXBIjkaCi0AfWjp/zQN+i76GUuj1W
u6tQbzQJi1//Q47Zvgk31EY5+s1btHd8qnmlCUpY0J00MwPu6XY6Nw4Y/B0mJ029Z8K3Xg5VDwxi
N2kdbnibJIBmrva4NwfqUUdkD9ehq3AybyWgh0vcQMl6nrH+bO6Ani3Tg3xav+b3OZHUWbouzi9P
E/XDbJQy8bABOUoEIh3eTb3gx5hDZpplYrYarteOrhA+WD8k13Hn28L3fCH7/TCwwnyycOaOYYc+
CZFndoPrMOXXRwB//PuqqShc1v1Sodun2eELfUQZazFIh9/5MMEblm3eGxgFphT3xK7BPtzecR2g
vGtdYtuGaf/lLy/mvmDt9BbVJU5zA247PSWyI+OMP7e5A+fxExGHxH09L3fkA1aIOiIvhzCkAWZL
+M/vqYD/+vf/xRMF3P//iYI5nmXsWecS0GT9NJBboIk9IozD+paABW1D3xHXP3P1pu1vmxj5B5Nc
ztO1Jln2EaHREcbbgYdgL0xximDk4RXrrXQFbE6aHKJN34j96AvKGpqhIazGA8HVlALqneXvQGxf
YL/N56Fzx1mCH8kOvM8aOl/CHfZQuC87nOiKDDiTaRnYvQaXWEvcg87rkA+t4+aS4yZPlPbOLQXc
KfGJ+mCHcKvikYfeWJhEwc9BJbJeKTDPGIxNjxo2Xx4XD9ZOV+BEGvlwzWSph1mVXHG4v2p0siJz
j455csB3M12zZX8INNTFrxsOYnMM5zEy9gCr0YALvVQBfyuPOXz0zICtIgvquT2oLXx/boiYtbnW
KzgOHsw0n2C3yvyBTlu3R7J35/HROBj2wr7OJbCvBiXYLid12ZdeAaXM22Pzzb4zbsd3Fpomy5i5
TFVtvs/6FOZ0QzNMlvMgPDuDQbJyTInFyVK9fPisBw/LHolk9kFIeTkukHR9X7x1qcuafI75DOOD
qhIcU03lhgiMsNx6Z6aVdKOCtJsCqMTm1RMT3GRje4xHxNyfisd14aveoiLj4aVBOsl6dAEUJT2D
eP34wOZUYbDMVhSgODAqr5WPMlgRqXN4eNOAXO9PQMe4PqaIHUWbGM+hytb3+51DgjAiRmEamaBV
jg6rx0PBGhz34fb7vm3Wi95y+OQh6z/DDZy9s4k15qzTdX7uWnhdu4Vowuc1UKvUIMxO+4A89rdF
XUEkJ8gp2JD4a1nVo2mdAvBMiwPReAYDIbseLJimM4/1Ia6z1Q1IC1fbS7B0mlq6iEnDIPEoip5w
TC/ZTIbNg1YZBNi4KA8wwa7dYIbYN84MzwsF5fNqACx20vw7H2OZ72doJu3qUQGcwZa9FQNJ9W4j
7plv1IU6x72I+dcHR8smD+yt6hpAOu2NnfQiqZMcIA16HduSILmIYGmP8QyL8Gzj/O40IX8K6R4W
dnEnhp+VNs3NgIE8VFkiH1pq07fDePB2mSeCTVAMXdq9FwSGWcHeS5RDway5EbT9OJO8mHIq6ARD
0UM7SJTFZbJhyZ8OGiJNITfhAVT6IFWB9rLMEDyYbr1uo+ShI2Np+CHtHZVFiWShU68Q7KvKWV0T
ij2Y3B53bMK2BnS1ggDdinuLk/g9h+vSQR9usjVjg4tjm3uht4EemSjgIjz64RhIbYeOYSfhAKae
uoDHs0dveIqwS0QOkO96AcocHz/eyVJvhtwwf8+XZ50lyoltEsHHzS2J/PLNYSJCkIOXKABiG/dz
uM7mJwft9sxxjqFM2ZeJC5APkuqt+tKARazTFLKpw+IgahDd0GWSILi36cwIljqwtjJtwNrFR687
Gh+bwLdsQJJ3MQkXi7OXXRq30AibjmReCodFpXsP9Vd+8/rLbgcWTWvEgw9rxlvcDGc8t78FUMmL
M1bKyhqIWyd79NU7rD/McFjlUZrhbtrl3ra4RbY+LrKDvvrlMe/sBnhBfM9ipgUEm5v7Gsg+lFr0
ue52WHEhHChV9svf/Yo/1ZmSU2hq0F67vYd2e6XmjGxIIXnSCct99bBpxO0M2NmmQR6B4YWbBL9E
JkEZjiXbVL/330GAzSHGW94Mqxb4PooMA+PMuK8hRaCt4LfeiI72WbhSaerFMDnnuBCBNPCv05MX
eSe9kRMZeDpr+l6Cj2ZYMRbmVF2S9hyh9rBU5HxDwTB99RrBNxGJd4gDdUoQ26Hrh1ewTFyWEgMn
MVyNWva4iDKASla6iH2EMbHRptPx4pUSLF6wIoVrHFWBqPcGBrfGxZkof8AC3WBGpamF3usueQNt
P1uL6PltEMOOrhlRlDWA4w6OJMj1K6AxpCxs9k9zXm16zYjxbgt40bmCKFf1pU4gMhNk4XtAXEY7
2+yHzzoYVX1EFMLJ2cIcVwccw14iSX7h6aDupBw1DK+SOFfHerRZXEK3Zu/4MtycYfED1IA3GQVv
bS0l5IRaS5FsJBE+3Z8ZWM5dUsDr1kDso/SWNecuyZFYazHOAvswkPkyVrCL3zdsOL1qT/tH1kHx
fLWJa0W9vYCIdVC0UxacRzu2/tjCMUc89QFR/Sc7rFsAW1CbEsBXfxpD6kbbjCBPZ3JMWUnlrvKr
QKLRnomynq72htdTA9Wywlgx+re93QV+gaZrlPhaCaeMRUvsQ2puKbnyjEvZTShz+JgbD9vGfc3I
Vux62KelSe6u7WXbtmd0eC/hDVvs6U1X9+kukDbBA1sv5Ax8cL0a8DvfwNKHMQb+FUsSOmutRr56
klH30VlgiHTFA916qtldWrRQp4ZHYv0qZONH1BIUydwFn4uuy9ZQXyrYHraKSMo1C2keGQmMDAtj
W25dmzN6uTmItR5jA+9cexOqWwMTaW6IEx0BXfP50IMi3s84RZxvb9o0RqLHYfXrz1vWM2Waw25g
TsQp1a6mFzvVkA6F0AOBfav5py2ksCyB7DHXRbdH7SAkUD3nGSn8g2Jz5TkpfvqDf37O967j/fTH
2w2smm3AeFqwbTIW66fzYtPeOaeINHcNKymrqNwJJpVYaO4VP471k66OUybIXG4P8vMftvU1C5Jh
tLCWSMdw2/a8hroA8TiHXAMoXqLtV09EWktlYC9r3cBDlnAET/rBpvHpE0MNvEQSdh2wh5t+klAt
BQq+UncBr7dELfSygoic7k8AZr61YohjSZkZvVTpurt9CdHoOOQiX2d7e8WGBKOoHLEe67LKyQHS
IVTLhdylVqaCP44S/OYDYklXCawiMxdwpI1PFHRUAZt27w2GBy2bSTWJdDG4Jw9++pzw8gDWg/Lw
Ybitz5ndgyzsdrT3QLV6nreUvhTy0UcOkF29NKxa1FGFiBMM6NvM1et3n+fQGdyHhVVlpDgfhcTe
ltvgQ1Re3tj6zOZAg5dVwXI/zfM+fL7V5SKZLBz1hCW3HNXZ9umlFiJuvOBgsOyaK/fO9jdP5Bmf
DD1tiQXGS94REwCXslUliAAO17vHPlbF5q3hS1zWV0nOX7/rU66MkUbcO4lM3lLXGvMWvKabh809
p1MuhKoCrbNTYPXB2tn03MkF6u3iQjRl96JDdH8qaEdj2xNFjdpTBfwRvuJ+xZavRPX0Xd9B08jN
233z1nKUoAHdXTDh0y6VBjZx3RleBl8iD5Wh6mwUnxiYVOOJmqjesPD4zIPpU76IH6FPSJUMJ+Dq
cytRT/AEONUOW1G7pgsx5KujbjYfdND6oCuRLoJis9yz3NCV80N864dh2Jgz60P75WfzhznBjFYX
UYGXu5UQo1nN+jM+ngb6LNIR52Vg13xz229w6ot1ZmRutkdbmRaI308OuwzzofPnkRlQig4MkRnl
BTZ1vejwMbce0b+bsYyDmcDXLTYI3vmsukyeK0H/+LmTk/t+2etXH9BXv3Akp9dw1ZNrjm5JomFb
BMqwksJj4flY+iTbhY9slbVChMc3Z3mcvSM2vUWTB7GInXn6IGvgtf1tgbL34GfqNabKDyLnwfsU
c+S0ykr9zRN7dBzyCUcgrepWXe869DPFJidR49S1Asn4O584IjkHltey+ujrP9h7/TcAAAD//0yc
Se+DsHbF9+9TVG8bPYUQgk13jCGMJkAmqaqAAAlDmI2N1O9ewb+tukw2gO3c+zvn3CA2xawsswvE
RardQ5VI8fKF9X3jl4nNjKXfeA+GZLige39Xgnm/fG2B6PVhAj/7CcjFEyBISmzgc92PRf07+Ay0
+JhMPZeAeJhlSoXzJ1aQLtRIIa+HGMLuWnAYtbfEytj24sPpPkhYLo5Msayfwao/JiDVo8XUo5hD
wTYTlzowKQ4edzxD96HH6Hzt3PhA318Zrv0L6zw2l4Vyig+dsfhibaxiZabDIsOXotyxdWdxPL2z
vBPan7S4Hzt1YhqbGhU8IxCxpT06i4Sc78FZLgeXdN9MIUPWmhDcH+H0dX80WAwacELxVDE2lbcO
js33ehbokuXTr5qs/hjvxhQkzX1AWsTkynymb44fym+CNP6dF+QewCf4cPUHSeePBFjsTzVY67Xb
Jr5TLBf1y4NMaGR0w6ZRkFymviDt7i2yijheaGY0O3DhM2M6oHlQZrmVZ6HcMQo2Dju5YDpJumz9
063hQwYH8T4y0GPTBKPfc+7zG+lLqHROiOXfy+1HKPEpPKFdiczi3MRzMD06SBsFIbTpfXEuPXDK
5CeSA79RcDX2IRAe5YiUzDj0uG5pCS5k8rDMtUQZ8VCJcId9BxuJoARTcr7Jwv0kIKzWryKesxsU
oRQwFcoy9l7MCX/lhSDsTy4bfvxi5b+74BVEw/4ELgXzMkLI28pTQeZQOYA+kMcKyyg9JwZd9sXK
WxGcvFifeCNagvnNtiYs2/fNPbxFt2B0fWaFflefkWG8YdGu9UwYcv6ENaVIgiOaw3njfZfY6RiP
CtU5WL3CC7qGvKgclYph4EcnXyQxYJ1A04s7nO0rwcq4q/thlnkKV72J3upuV8yYczyw8iG21IGJ
CTeeGLBHB2aaa8cqZnC1Upj0soJVOfQsUkoZ5PnW4qasKjNrsZ9FuvEJ0vaYtZZHV/LQmOMMyY2q
FrPFoC+IWtohJfRJP0feqwFRen+h8xfxAL+bisJWZlukjpJakDlKz/CuqBPKWEfqGfXTesDchxqy
Vt4YFXgIocz4e2w776ggUV9NcOM/hCsxoFFHOaHOvDeOJO4UTG4vRkJ7Tu2J19p7cPzZrA07zYin
WYEtGDtnL5/E2OaQOOKqoNivS5BG3ys+p+l7IXz9vAPOhPF0zM5dPF0irobc/Tm48Gfgnrhs3sAs
5o5I1Nx9sfK7DN/ccp72veH0c9jVPFSjsz71pHpYf/Vt6z/uSwZL977KCdz0vv5RDmDRom8tPEsw
oTPZlQUVLa0EQt18kBf83kUFm2kGIZku7hz8hILcx4sMu+uHw67tDsH84BoPar+j6VL/iYPSf1Ef
7jw7Qjf+nfdkyD4mv2f8x4RtS1EO0iAOYBmFcvVz9OAQl6oL0F2WNz1eEHBUIZw4h8GpoCNllqOh
3OohcrXaUmZ8LnIhgom03q/aY5Eg9cRT00T2xdIs9iKVEGz+XcQGrEJHiUB4lO948j6EA/QklYnw
+pg7pD38Wln7TwoOnmHiYO9ewCFi4wsPvphg6XF2Y/JOvoPgZz2eaELGfvDwCQKilweUveMqXpi5
neHeL2x0eaW0aIIpa6C1U0J8bo5Qmdb7gbKuFSiSC2zNc7ULgSUeT0hpv8HCXth9ArQkOk1c9LQC
knP2zIN3GaE0+nUx5gvB3fohMm/7DMzjvp7/eN8TolNAj4/ShMazJNgdvt+iZaxAFVa/D6FT0Cw4
lUgqDKLGI/eTTgW1fjCB5XAZsMZxVKG7iev4HSfpyJmrHhCh3InwnV4lHK9+5Z/+uTbERas+tJiV
t8ECdRtpWz+uoJHDs1HfsBU926CXxjSEsiRKSJuTLB7k1qRw4+uVD625Hi85BPPxhcQFLGB4OtoA
Jw4xE6tfvdX/cXjwsIYIvY5ZbBH0Kn2hezAUrf5LgE8vRAHHVyyWoqMVU/Qk64R8V2ExyYqAtt2l
hLXodm6LRnGhXLBEgt80EkJT/+vp9LEpeDbFdeO7gBJLU2FX8zWWfj0PKCm9CUz8yUJGS3zAKJUv
AmLZT2zvpcAiyUQ66FD+ivV7dywWDR0uwsY7pn19FMTUiuHURV8DubxnLkurJQMPu/iKdDZ4Wivv
5tBt2Bpf3ggVzMPbJfDxuPQoYOrfMosVu/LP18NOcT7Hx5KE522/3OPhlcUUvxMRXpLbB7uTAqxZ
Q34oCFSl+LmevyX4XCkkKkgnPrjERRu2UIUpbn/oZcm9NT86WQVXNzBcIln3gN1+H7Rv6ER70+qX
s61GsCuuCpZNa9+3jSCHwv1GNeQ41hSv/uMTdLjPJsG73vpx3IEddNJswnZ1223r0wDolyXWYkVR
CP/gTXg9HmVkBZofk5CLPOD8EnHaL4KgLLJMPPArwvhPrzBcfk4gME8KNsSos2YH2C6c+uMDqRmR
FfbAXT04+Mt1ascvBkuuIm7r1yhtSNWTSCURUGMxwDKVMKBc2rAgUMwzPsOHvIA7BCwUC4Fi66QT
izB3MRferI7/1nvaJ+0AsAH3bt5xUjDHEkhhtTprm/+3fMTsAt/Wr0Oql/YBVaH8hVplLNhJ0DcY
mofIwnk8z2jVI+BPj+qc+dj8TYs80g8r4A8ZsVPbY0FK23lCu9h3CGmFtKz7sQMbH2e4EmM8vm4+
PBl9Nx2/x19MDXzbwdquzi7VNS6gVXaE/Hw4dNPJfNyCptbSaeMxZOylg1UOvRTBv+cXOBAvD4Ur
WdvIKpdhd3ihN1vl+a1/LCfuHZBE1xlwG1z7jwfmMSlswUSZ7xZH6aQM13chComvULdEyyEYqm+s
wiN3VV0e9yFgstK3hX1lF+hsXphg2Z0TERqfR+8yQk0UAj2vA7fbycfo1V2COfctD2z+hyuCW0z3
SqpC98hHEytarbXqUUagtweHlJSn/ayxbgOvr3FNjNIxmM61zIIdp+juyYLFsvkTYKfOBlL53Fgo
2o8U5sg6IC0yzJitIBOCQ/+LsBYuu2XsfmYHV78XnbukK5b8HX/BaXEK7GjcMZ5OEadCTdy1WAb4
aVFvTFkojjxCF6SFMX302QAnvXEm0B/d4vh8vnfQe94ZjMyaial5+TTgNJ1tdxlVZ2HXeiMYIlWx
Ai8fsHAnhhMK1n64TfDRlQMrXhg4nzIHrXqhX/2SBpjRqUBb3jPrOsdAl/GOf/nM0WWcL6zIhcXi
4xkVrap/Gbj6wzg7fHswv0IzAuwB+thKjToehfuD2fIZ9/BCpMdR5ndw9X+wnXymfiHPORVe2Yu6
ewPseqzMXw8e+N6duG5itvVqoJl7PrpPjt7PL343gQh5FZYuMt+TYCwZ4MWihdTIXorZKHwOfONj
htzOT63RMMIENPqnQuYON4B8v456NE+ShcURa8WsVJEM7OQCkSK0UXA8iacGJr9njpX7WbEO/h5Q
2DeBPbGMGSik7ZYIzvkUIn313/7226/EFEt0R5YuOb9laDF3hBG6mgpZ++OfPnSz0x5M1V2UBXL5
SDgzayZY9JPwhHjJOfz8iEM/lq00Q4OdXkhkKafQaWeY4K/esY5SUPXMyXDX7AMsrTzM5g2YYQnc
HDv5L1PIPWCeMEDf73oeWzAyEDJw//mJfzw6XhPOBR8kN9jh9GqhvRGncPMbjP3QLeTDhPKfvn1S
b1r50LuDIh58lN6tR0+mz7Hc6id6Z7c2GFd9Dbd6pr4+AvjLL2nm3dCm75ansafw1vuie1r9FaLV
QgcIjr/IIMGwLFv+8QmNLe+8Ff0vDiMoqTNB2u7wjoc1D4Db9UWztKxZsd2Ij55Khi8jA5V29bPh
ut8ueHwqi37xnAgMM5zW5zEBPdcmC5MYIiTZqRPQWiEqhPNgY3HNPwf2PYsQQa6Y5v0p77vDWDBw
1aPIPecF6EorqkFZHSiy33YZLyyCHfCDIEBbP1xAbidwlBsV31eeJc00XOCcDyFa9ehCyY1+4TUX
b+g2J/tg9aP8bX2R6PlBPzSPCwMPQlhNY9eIMcMTrobiyCGk6DJWliQUI5g04TAtq3+3hOrtCQCn
ZijJfrd+uGdJA3La2O6xbtSA9Zkgh/Ph2LksV2uAIMtVwarfXCEcm2Be8yFQfXUZq9/mHNN31jTw
tK/2a791A1a8LTJMo/yKH6lNerr5mbfAAS6jWUzQOMY3EXZDzf6dx2k9nzxzebdYzr/rP3DvhBEC
w2lcZs3TjkoFGX7j4ywME0D4BzWFHBkHLGaVGZNERwyAuhlg+d3XwfI4RCFc8xF8LatlIU8BNjDc
i/OUWbmjNDQNGehQ7orDqyFZbHTIQ6HNkIZU56dbnXhbRGHzfzR2WCekPzaFeYra6cA4MJgb4WDC
fAzeyL2zY0yM+skCbOz27nLI835y8ghu648zpgkUmjrXSWhZ/YcvBz1QJuuNRJC2pYZVdNn3Q51+
Q0HahS2+nQodrP57At338+b+zFlcjqvfz3f0fsSiPp43vnX/d6LgH//2b/+xvbOgbt5ptQ4GjCkZ
//V/owL/Ov5rqKOq+nuxwTREefrPf/+fCYR/tn1Tt+N/jk2Z/oZ11OB/Zg3+OTZjVP3/7/+xXuq/
/vHfAAAA//8DABRmDy0yQQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8de4074cabab1761-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 06 Nov 2024 09:24:05 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=x5PII5RVMIqdfvKeVAkGagxJB2AxMCyJ0gTPb1N50IY-1730885045-1.0.1.1-ce_bjxr9cge40CXZs7EabhKOHinhSnhXms6y6V_wjvUDb9HpHZqJP2zpH.mRyYtuXzcXbUJe1E3z6rgQ9hgoRw;
path=/; expires=Wed, 06-Nov-24 09:54:05 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=J49wMkCLbhRcg317CjZxQQPvnk7_gQ9us.9URQLB6.E-1730885045549-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-allow-origin:
- '*'
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-3-small
openai-organization:
- langchain
openai-processing-ms:
- '77'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999989'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_6c0109ace538fa672e4397b3d3f2c170
status:
code: 200
message: OK
version: 1
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/cassettes/7dc87e56-553e-4b4d-8caf-161d5e8d1f8a.yaml | interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Say hello!"}], "model": "gpt-3.5-turbo", "stream":
false}'
headers: {}
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQMU/DMBSE9/yKh+cGNYlKmyxIUESRKhaWIoQix3FSU8fPil9UStX/jpykLSwe
7vyd73wMAJgqWQZMbDmJxuowfdyXGzFbPu9en9bmTUYPcbIuNrRc/Uzf2cQTWHxJQWfqVmBjtSSF
ZrBFKzlJnxrNo3gxj5O7pDcaLKX2WG0pTG5nIXVtgeE0imcjuUUlpGMZfAQAAMf+9B1NKb9ZBtPJ
WWmkc7yWLLtcAmAtaq8w7pxyxA2xydUUaEiavvZKao03sMI9CG7gBQYADtgBYckP92zkTpcHNda2
xcKXM53WF71SRrlt3kru0PhwR2gH/BQAfPbDun9dmW2xsZQT7qTxgfG4i12/8mqmo0dIXP9h0mCs
x9zBkWzySplatrZVw8jK5kW8KJK0qviCBafgFwAA//8DALhSFoDuAQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e348e5e061696-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:43 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=VxJGpPui1KukpkZoQI_29GiVZIAwfcg.Ji9vyPgAlUQ-1712872363-1.0.1.1-s05qTC1APPCLiLzFkW8lQtgrImQyiBO6KfsP3J_b7P_6Iz85yInQRPFUF5RJEIWug5j09StCDdKDJll2X1jsNw;
path=/; expires=Thu, 11-Apr-24 22:22:43 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=gpJP2m8H7zfXOMJfTV1U.z3UCHnLDNgvmmsFO3sYtm4-1712872363652-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-3.5-turbo-0125
openai-organization:
- langchain
openai-processing-ms:
- '231'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '2000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '1999971'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_47928d9333db220d2f5bb6ec837faed5
status:
code: 200
message: OK
- request:
body: '{"input": ["Hello! How can I assist you today?", "Hello!"], "model": "text-embedding-3-small",
"encoding_format": "base64"}'
headers: {}
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SWy66rvJaF+/UUW7tLSeEa23+PewgQzB1SLSCEhIQQbgZzXr601ilVqTpuTA/Z
8tQc3/C//uPPn7992dbV/PefP3/fz2n++58/tVsxF3//+fNf//Hnz58///pd/5+y7sr6dnt+ml/5
7+bzc6u3v//8Yf+38n+if/78lf0D54n2GjtLGTcWij1KsbzV93FTI79Goi2eSZjsh53atShD+c1f
iOFfB4fymW2Bj7fellUEcclmXvuEsVV63vg6hQ67dJGF3p6TEDuP+Z0GBV+hQ9Xt2BtIPVJGtnK4
LxH1NrlVolWEG4WFna/4mrY42vhSfSL0ohXWB0cZuTh1VYhGNGI3QadoeiiCBTsfOUTrserMwvt+
lCYixxjXnw94lq/rAB2985fDsyLtLnw3BqyG2mO3VbNxC2qeh8BoUk8IBLWcGv3tgfyonrA1LhFY
08VskDTLN6xUZlPO2GYn5KiO7o3utQBr5GcDvOeXkpyysHV2W5g6GOmSiK0zNEvhZX4r6BhXQIyi
z9s1mFoGoWN9x9qgJmBn6bRCM8lbfNfvZyAoAm9DbDADwVa8teM5WhkY6fqFqHs7gOleOzV06eqS
G3ykJa9x24qcHXuecJ0GjU57oyLzNatYM/hHuZRxb4OWBKIn5lXW8gWr2cj2vvMiPGHY0qBganip
ESJ6IRnaV5dXER0PfIhzCzUR2128Gn4TvC/iuik7N3l+jySYwoVdDjgSrHU+gpkzFCJ3e+oI30Ck
aEiqkFzrS72v2/HmHyOYddgFr6qdaTfmoN5didR1oETrW9ZUtDYFIY5w9cqtmwIdlQv0ST6fYyBs
MSeC8wk05ASlvKXiqIcoNcCXXFBTRRvYbyGope6GMUtMh60znaJvAKEnEYMt6fUWQlQb145kb+SO
K0FajGTw9InTL2+nF68dBUNOzgtv3GJHSJfehS6lLj5x+OzQeZUYoJaX3GPeJ03bIjKEiLcjhZyr
txbtmBMrcHj6AvZ//LInzs2DTZJcsDK5Q0t6S5fhOUIWqZlbAtbBKVJ4wWONbUH77PsyaA36mb+f
94nt8l6SGt5gPWJPsc4lm9U3FeKLI2Ftuw/j9rVlGb1OpUW0oPHBwu0HE5xcNiDF1VQ1FrLkBdOC
KuT8wiEQtHfdHS0gX0glEHanD4lJYalYDDZoivdJ5ncWmf1YYPUTWhH3eCgU6h/vQ7Ap+S0H9iSU
nEgk+MT6mrNyQGAhQBshlvcu9w3pMkVT369Ydz7WSPOo89Bs5yW+3ds0YvPcDxGbzA9cO4wNaPga
PfjaQg1fmWwF21YxAyzCRsZJO3blwlwcHjqQ+5JTNKQOn51zFnL3rsQRfxyjjRmeFTLvMoP9Ea/7
rByYI7Sf/NujD+EzUidMXPR47DWWPfGlUTm3RVgujE/OPW+A+XIZVmiW9OStCpdGfE8XExackWL9
YUX7Fog3HQ7nfMD6ybnsWzddTSi9LjPWH6QDLObqFXZZSrCydEG0wbal0OMHx5MO6AxYDeg1ZMbn
d4GSnQDywJUPh3w+Yw2tF2cHOmXR896tCz0dH+Mar2yHnnIYLox3uY6sic0els/jF2u635e8inMd
GC4+E510ailoeWQhOt6RB9HW//KDQvVQYPLLO2q0oYuyjxtiuWa8UrDaoQaG8PE9YTzI0dLKnIme
CUxwtS5Am6bks6D8fh+xPchHrZfOnI/k6/om1yUwW+F05RZEJpBj+5747UoL6kP7650XsRFptH8y
1MC8cllsZB+m3b27FcJzG32IXoRUo+jrW4g7H/NlO7vpvkrRU0bCKd/JLY3mnQILeCAZHk9czArr
rECxK3hCYkmqremiLb1cXeA66Q2HQ4McyhTYh+iu50S9c/u4DS/vCO6f9uiB57Q5GxtrLLpkdMGu
/+zGze6wCX/O9yTuZJWsPZwqFDlRRQxjadq9NIEMNUNilsPh/WkXIxVyZFhMiNV+PpfcGCcFCiNV
I2ZbSOCHHykSSKMSn5/e7bQN4wqxILPk9lifI2X5tw0wynOceTiJ9mw82nDRrw3RKjpHG4qoj9Zu
jYhMeA1wwylgoKT0Gb5z7b6vI+gpPBTOhyjibdZ2AEwZJZBZiOz4mbM19vUFCnIN8UlhQkCxJLLA
PF4NHJXZFO2GATqoZoHswZxRSgFXhQsnUzJwxtyDdp8lTkZhP5nEYQ+fkYq1zCNf0yucMigHP/7m
4e99Lg8+2pa9zjkgjWriTHuNYFXZywrtYk6IVQWHcisPqgt33LPEstdYE2A5qjDh3zapJsNo+fTw
UtHbhCY5Syva995yZeDxvUNOvVuWrBYPFDD3m4JT+WuWu6MEObpYFSaRK6Gd9nTRoezbtseb+sfp
c87wjlLxCYgDv4K2rqZoo+9nOBAv8KJxvc8sREdvtXAak3Xc1HO7QPtU11g15mQU2jYtYAbFhCS1
b4x7d+ltyCWMjP3zrY32H17CzKg/WAmx3u4F8lzIyFaLK7muwXbEjwG40qEkl1EetSleYffrf6xv
HuOQzmdYCbOffmEv6nekcaj1iNncGeuvtdlpwTo2LO3tiqOxX8BWKGuMam4wMJaHp7MxgevClTfp
T37F0SKxm4y8oWsxXubQmb9yFKKL9LwS/WHtYOOdQEUBwznken2dI5qRYwOHF92w3NJK46p3y8Bx
u6gkT0o0rqVmD/D2ZP3/8dvvPOcPUSRG7RutwKppCq+jHuBK8W1tZ13HhMHXsQg2IqFdu25a4Ork
Lq6+4jVaiuIlonnjbRy7ZTpu6WDa//bf2X2U415v7xTyJGqJ04R2uXbp7iGkyRuJ8+mqrU5xgzC4
NxMuoyIe2Xd1gHDb3Re2vkDWuPV7rYH8NXRil421s0KndLD077dFgpsAaAOnHDjfF8VaRg4RHSLH
BC9qLOReXR7a9u1YCL952mCjPz1KGsXSAisvJIsIy6DkdhR1iO2teQHJp9m3+L6uoB8eF3LNMuTQ
KN4m5C1PgA1p10dBPpEJHuPKxf4+jiN1K4VBPkg47MV4bveg9Y/gNw8LWsVgY+dBB87B57A8Yh9Q
YT7GYHSPMk4r1yg3OPsNNEe9I+5Pv7em0HR4zvDB4/qTUlLL0ztUkDLELg9Ozqpt2QLhonrEo25U
bj95DWAAP0Q+JKW2mY4Zgn69BNjB7wlQG7wYsNibTpKjfCgn/bG4R5s/VUROlve+HbaHhU5hk+CS
UhCtF1HnkReLDblc4xOY9AAuv//XhWWCcV/AM6iR6sUGCR6Ph7bVDebBI2dbXFX7V6MNfOXQybgT
ls83LeKO+NvD9BGs+N/8TIOgQ3fsN0QeerTTSR6f8JdnRjua5cQb0wCeW1yR9CIt+xrc+wY25bQR
xbmq+/Lp/QUePWoRZXLtlmZttsCQSMlyzOMUsO1k8aCPOdvbrIsPiPDdICKNbJIbEOV2DsrehsnQ
PolbKQgsinXN4feoOzi7PYJ9jQq9gCr/vmNbyW1A724Cj9iNPeILTVRS//Zq0BvAHltWOkd0zsoF
9o343wAAAP//LJld22pAFIZ/UAf5nnHoW8KMUOksKlESmsH8+n3x7vOiWGvN/dxrxsHnu+9Hb+BL
aGBvpEVjHmr2cr534ByvNfXUujYJAW6qjp25R4B4bzAcHCNa+Ro1H6P5y0egcGWfXni3zd/JNpkh
eHkptjjB98f2NinqcEkP+HpMCiBw3+9Sj/eAGnsvSkTFbRWQRuWdcEkxJmM3cRqcd9IHseelzH9c
6nNgRzZvAp5O5vO8KN4VMZzPOMavyR8na+/A36kwsS4agTkKg5PC7zFkeF+d9vUgFqmgPjnUYWPl
rau+8ArXpjjtMpgw6bor1IfxzLC1OfisH/dlqc4ZdjCOxiBvd3KUqvdauVKrrq41UbLXAIU6hFhX
pZDxWnAYVd26+fSo6nLOIK/HKqGsob5dX8z5IkwI2JpfUXO+R72ozgmCX0/T8ZkZBRPEn3KCN/m4
wUEnPhK6U7UBVhccIN4GIGF5eYhV8bu7L/NHNsfWSjq4/B+qWfiWz6/JrGBrlz1FR96ph9c3HmHX
6i2NLHxL5quOWngOegHBQr8x1m7OCJ4frkEEtyv9AX6GHVzOHzK1L3WZf78UnL1sS4/ofPIF377M
8NG1HyRM0q4mw1Wf1XgvMUJNa8fITcpHuOZTnb7FfCpoRCCAGwu7SucxpqRDCnQdpdhxRYWNh0pE
EOLnFXGh4dUM2ZWmtp/ohAt5zwNyNK472PW7I0UGMRLOejYImraTkI17FgGLXesFP3cHIvEQ0WTq
f6CTz62uYicBVU3IUytgfMUx2pbGjn3tkDuB7fQ5ka0T3+rpY3oneAiZjvdT65r84wtfMHulFr2L
H8788BWLwE1wXTTGTgZmOCwDWdh6NDj60B/am6xA980Csv3Gh3x8F+IGhueR4HyDI1Owx2Onyi/8
o8Fg2/233B8a1fFSgNd5wC35EhSDZeBLpVk+14rRXhWHy5H6wrusx+PTd2B6pj8cXGZs8jxXznDz
9mwa3D4CG3nCXVX7dbpgFF58wJbzGxDAoyUvvJnQSEagCia4ke2TL/rf4S5w8KoqDja2oZLMnLnT
wPI88Zr/f+kp0JSVn3Uhl012980OLucz3tlHlhPhFTawuQ0JfcAL349EsFp4VSUHCfaN84erhzN4
CCcdP9hzC4Z0H0sq/44ff/UwPg6apmbG64LzLf/1p2L+ZPCUZje8zovhfg5m5T20HsYTCfzuERw3
EI3xB8HTdPTH4UgJJPr+iwOmhCanRfEe1tm8x8FIcn8cpHmAqfzIaGDbExhmLqsA2gkSNfuWMHI3
dg0kbhkS8P715oj50wiXeqM2l5mAEfMpqHLTyrSY09TkFl4EoeH0RBwYyaf2IlewrrJm7bdk0mdQ
QIEe6mXedmyu2fRa+xE7QlonU9QfOrjwMr6OBjJpYQUQDHsfIqApdc9qhiWgA0mmZsM9zenMFAXu
bT2mZiCrYHxrvgZt8R0tfHb3CS7jlyq5rEBSkyS+EHV0A6ezsPgdhZiT9pY0uObH0Llu8hFOkaSu
ecw3qof/48XtHYa9/kbt8v2/vE9uKFr5zRwf6YlA17n/sIbtO2PuRSXQqRIBbclvNmlNZQTf37pY
eKhLphp1CP4ug0Kt3+WUzLQ/CpA3K44arXNKuvJkEFWklYH9ajj4A9CNQj09kxEbd+FXUwCQAb/j
R8E43D3yZb6M0C12I5m66Vuz18/cwcVvYM8SA3/mIr+AxzpNCLiLXsK+UlyoP5qp9HHdKmBKnWsJ
3xIPqGd0WzZFwNyp7JhKaOvYBhvLzxio3yCScIIdD3DFReGAJIkNEvJxv7xPVsBimh2Mr1oKWFeg
PfwY5nHhn5H92GmS1IsDrmQ6d3bCLz4Frj7GeEnPfuxaN4LnUSBrPZp0tq0MtvfHG++dBbiaSOBk
n2+vOH+5szku/kZt/INL7WC6+dP7HWdwf9s9sZFxF/YWKx2t8x5rucrlZMkjcPJnnuqhfu9/SXCO
4G6oL2iTWcdcLKWXprJx2FIPTuL/+d7sjAj7xc3oR3VTpHCpN7ItBaFf61VdeRP7Zb3OGwtcwmiD
cQCsfvJPxwZuexDjfeHd619XjoHqfXsfa5ZR1Wzc2J1yuaQN9eOmYWu9q0t/YTdzDj5b/V8ZOyeM
23tdz6sPOBFDohr3kPtRsIcWzF0b48ylV3O+hs87yBTNxU5naux3cXlH6c0LpegNRjAAfX9XDu9g
xOFnO5njF2lX9QaLnqItfvqTGVVXcOhfKc6jQFv5l8DEAhIByZXrmVfCK5Cs6r30r5r8urNZwE9a
POmdzUEygVN5Ui7U+GL9SXJAV/+5+ojouW3zCQTgBd0Lfa2+0Jx5JlpqBM489oTfM2GHOlMg+MQ6
Nbenxh8PQ7+BU26nNNmeHHNc+BUu/oc6e4kDEzi1Kfj435Gih03NKc/aCvbiq0GyWfTmmO6vkvJL
jif8WN7X7166nGofQIiGryQnI2nyHdzbZkx4cxgWnnwMcDoUMnYZ15osLitO3Z34E3br2q6F4lk3
0NunAw1Z8QZ0OW9h97B+VJsOFhD8nt3B9z5YNFrOO/Eyjxx8TZFJbcNJ6tV/gcYCNQ2vO76f9pV0
Bb9iOyHxfnzUTBTUDO4ucUW1W8YnbKgDTrlJVYL1m5yaa36AN/m8QXPrnPLVf8Jasr44Lg+on/TL
DcFnXGyxLQeHml6D7Qhau+rXeqnF2bauqrHNMD2aw9BP4jQPqvO8+xR7StdPXTkiaHmXFKOibxN2
aGgDi/oQ48W/9sxTri1YfDHWhfxijpYlR1CyyjdJgfhizG2yCm7c44xEWbr3IzwrHYCftqOebryS
3tC7DQjx945Gy6j66RQegvXzhDv2j9UX7f/63Zmktp/z6ebAuEornJwrq+Zd26uU5fmgTS1DNrqe
J8EXH+Q4Nc6aKXzajIAlbxLefXKLn+tn+JxcFaNaLtisbFGmLPxP1MUfz7tPJcCfZYh/vDYiXe7A
wpso0XKtXv3gkhK+NCT622fSx5dAU2dnHBmfZz0C8dbCm399YgOypOeNMczg/kqP1DQQl0x0x1Xg
8KgGovrtKx+bEBVQ7fca3pVSnIguSzbw+tUJ3kmNV3Oyp8Zw+lUlYtlp7n/L9cDSf8v9bX8aUNYq
TXnoaHh2Qc3WvG7uznvsrP7m3Hrzmg/X31M/V597cwsPPyJPzifX9so/HjSLOUyW+dWs/bb6wmQK
BKWEjSXX2Cuvvs/kcWzhMxNqIp4fKpgbOyQQf4YjvvmtlfNzaO/hst/BGu8M9a8SJwjWeW84aVv/
4PgJFFdVchpszzrjH9E9hcJERor8TQcm+/y1IDkJAyEd3dQTjb8xHMwW4WTZ54jBO9TAcr4h/vy5
14PreQq4Xij+7xuFMkRr3iTNU2PmZN3e3Pp56hM16cfPr74C6/fyqQdOJzC2CuHg5ig0hIN9DOjh
JFhw8U/Utnafftzy3xJAo9sRvnu25jhZhgWlK2qxwZ0l0N0Os6UKrrGneOHhuSAfA0TcviHAon4u
au5ngFwwN4QTCsscB0kZIAqmCF9i8qlnGW03wNzOEg71Zuz/eHD3TvfU+RY/NlWHKFaTSC1p+PHr
ZFqeH1j9izM5t1wMMq+BVK9jaqLHCcxHfhsDu69b7LNXs/SvhODC74S/feeaGbmH4BBvfnS37H8I
Ut1StvLdFhvcxmdjiS4b4NWHDzXrIDWprELr7/34B+0HRpVUHAia4bX44tyniy9VJjs/UA0PEyN8
eLtCcH9EVJeLxBx/Xy+AcxY6hMnpnIz8Yy9B//CoEay5CvxW3nvzHwtbLy9kQyxkBNpjpqNNzA39
eCKogh2e0d9+Y+VDsN5vD4Zb0vXRTYOLr8JZEhD2VQQzBZH2jXB67B8+O/fzTuUyDtHHRAbzFz0H
SUEYFYtfl/sl/0Vwq9UBDTBIcnF6ZgOY702H133bzGsXCHYeNOmujmXzzz/cjd3qi6Z84J7SBizz
jjq/A1/TX5Kc1n0n1d5alc87jkRg+pUltioSgPaZ3zLIPrO1+LV3P1twLICJBkKtLity1gdFC1k+
FNgP/cYfp1t5kued8sHoO2Y9++wVBxiy1uOg617172pjA8p+NOMsmnf+VLl9BdZ9S+KY0P/JNpTg
nUL9z99P2w+X/V3fDXO3Z0+e38Be4W/4lL2yRGDOnQCQows2iqJh0+Xw5FZfT3UWWvkQ0dhQwpw6
aFryzLjkCzjdkY61lu/81QcCmd9mWL+Xrs8+Uz7L9PaSacAf0X+elm6PBw5d/ZAv/IjUYXr3a55J
pritG3i7bUW6f59fNdMPaaGa6nHAoRK7Nb/seyBPFIWw6NmxOQFJBtquDqmF+FvOeUIVK5VZe9Sg
pDbH8/2mAcy9W7TZFoCR0qMSFDfcE1/P/dP/8xWTfTlgvX3dlnq4xHDhD9KfMUx6YSg1eLw/FKod
3JlNg9UrYPWje+vyYrOswxJG3l2ja55k99dTgqZtJfQsfjEQG07n1Ie8XfPhNRlcWr/UT800As1u
BDR7PE7QeRY+PU2OmowzF1Xq4gfwecelvVBrqgNPnVPSfwAAAP//nFpJ0rOwkrzL2/pFmMEgaclk
ZhAzeAcY2+ABzCBAEX33Dn//tld9AakkZWWWqtJmBtlhPfWgwFXIh3/zQZr4aoSwm/nEu9vWuPXc
u/6bx+HzoxF//R4oQXl6l+SMEXZYTp4CqEcoJ5JeD/H091/Oz/WHmO84VikYkzv8z58r4H/++/9w
FLD/t6MgSDgZywTfwf42hx7CAFrYX8Rp3E0J2PDTZkdirivbrZ0zvEVhQSbJXmUeLxyyOKhuxsFH
T8BXtL/zAcztasOKf8sBc78nNTz40Ub86ttQbnV6DYHUH4mTuyVYGxBO4KCfGpw+1WUcBMfXYKhX
rj8KsUtZl4u/ULoyRxwtDxlwuccrsKkqjxhnc3RGfb0GsBDeHjkL2kxpowgRWAMYEveWjvFWNDUH
3dPXImrDj/GsMK0Cx9uOsaNT02H6Q2BDf3k2+HYLOHXnb/cvbNsix0miad3cJ9YJZYEr4tu13Cqq
b62G9ux1walPJ3VCuiAB/3j/4tt+VwEbfdgaJtIyYWWK4m46bd0bqtv5QBze2rp19iofGplLsNM/
gnE199MJJXvIY2lXDLB2zuMJPmFBiR0fZ5XSz9JANhAF7FTMp+JyrrdR7InGsqqt6rDvXwctt5fD
AkcaVux47w8oToWS+JeD1FFTcN7Ad42J4F6JuvUhvxuUHtTEXxt47+YDgl+Ys1glthppKn9FoIc3
n3OWLa4vlD0Z1wya+Fr6G206QFqXmxD7YM8+jPhXvH4ywEGnnHWS2rcUrNdCPKDQ225YO8gYbJGd
RAjuzMO/D08Z7PFLvcPPhUQkr0RAif99lej7ihyiq1vr7OnnU0MwY0TszDIr7iK4+l98WFnMU7xd
urKE9as5+XA36pg7Ft0ByDtrYW/bdbofNvyG+CsR4vPXF6DbXYPw8tQiEpnlqm6cthWoavuENNRv
6TJGHx00T0Ukf/Hy1sGy4ei3HDbaqRv/rVdf7QLLYH5TGgTMARHnK/r03KZgqlPFhy+7DbEEw9tI
LJ7bYYeYD75dHD/meeZ8B8MdyAtzv/LVJNanBYb9e/NplETOLnwUE52scSdnoXiqK6O8VjFqtwHf
0kAe+adQPIGjFB9suecj/Vqtp8GsYN4kNVMRrGqdLdA9ew5uLPcZM6DqTpClekN0kbk7tBIVESq1
wRAnNKizt8niw/PnOxPDMW/jYPWfFV0HTsGWfZFj5tCdC7B8p4Xcsrmm7Jc/rmK7UUS8cIVg/Hqh
izzFVUjT1yCmB9I2aDy/EMGL63W7M0k++kBfw7HFuSp/DgIbGZlPcBCdQ5UmNfZhO9yu2PoM3bgG
dhuhZ3N947CxlnhPehjA5bss2AoumcMnteEi1+N4nEVJoE5IevfoI08SjtWrR1eLe3z/8Iy9MuYc
0rwCCfaeGeDbXq3d2oia+C+/zGWVKLvnZvLr2N3IWX9azvye9wQMuQqIV1zDeL/5lzvQIFvhhDYy
ZXbhWILffj7Dz0+w/eVb0ScMzsMHovSZzhJUrt9yYa9EHfkwmRegipnmP6Tv4ExOvblwMZmMFKHB
OrugZm/IpHVPcl2A41rRk48maE/+nB6PYFNTRhFmzTn64CrgiolzK4PS6Rti06a2Qz5dcUIM//Ww
Cax43NZ13WHxMSqf3+ymotZBNtGlvF/8U4QugDtsxlv8u38DwBdY2nJ9IzY/HrHx23/rub4HNyEJ
cBmykTrTUtDgFD0Z//TMFcqXMSih+KAzVh6vm7NT9+jCP/4PG8uPt6kxEgh7VOFwS0y6fevVRT1X
QKxf5OdIPfseoPJkYlyPdIu3LH238Ic3Yrdtpa78tChimlxrnJdXaWRipm3Fx5JdiG5tHJ2X2ZSg
vtMNe4NdxlSbtwRF49SSAGYRmFlZ+iIzpyIxZhx2Ez4/J7QluoLdSmbVaT31EZSu3NE/BfQAdrp+
V7GfKSYm7+t0DtvjCSyXtSXZ6XtWOQP8HBLC08M38TCA1X9GC3q8oe63S+2Pe7a1b6SxN5MYwpyP
pFG2CLIYTuRy+Obj+pd/cHuZC6U0r+a/81L53BBHeb7UJU+sAtVqGBPDe4cOV92qn4PkmxA92eVx
Zd5hAkjaSuT6BHw8Ij+4IyztGrk2nymeRAbfYUKZKw7oyR2pVV6fwHB7zhetUYm5utNKFDdMjGVP
rQAdrL6B9/YJcfzjq3d572skd2aGiwgJYFF9t4XKIb5g53pQRjKYVQ/B3NkE/86/3RLGRVesrDiQ
Y4YOP31DVVgAcl4fzEipXrfA4E2Aq8t3irfaixZ0bI/Lj38klZPkV4N++CXn1Mid9XEzehg9M/zH
p2B72/oKg9m846pijIrt1yyAmSGWpBFaj/L1Uaqh4k4+9tG2gllZbl+YurVFfvxabZO02LC+mgW2
R/vTrUaPVpjt5Q2fl9odGV/6mFBRaYqlh2yOXK2vEsqTt0aSWQrGjfsUNqjGSPJBxRgxO5bNG3qd
6ZPbcOKrBe1Jgd7pkOIwHPtqm/W1hctlb4lXWD/87qcCds4BY896ew6Xe0fpZHGHDMvxw3Mouw1P
SL+HjniGCbqtuA1fYPfMgi+fZ1DtwzwlIu1ilchevjvjgXxrWIWLSYy67TtKnVJDz9yIfXRwLh2X
fj4NvFix5PNGpDvTLvAFVK59SW6MoDjcZzRL8BzeZ6wam6Zy3jLZkNpS7tNiV0eamQ8benvHYM3L
V2f9VFuJZDPVMNawEjNIFxTRzawcN8/3g65Zdy8QSISGaCwvqbwRMTqc+ZONz3l2jrdJeptIKwcO
ly14jjR9aDvcs8+F/OGJGT9qD2VQs0RKbMGh4elXwY8vkZQ9EKpv7vES0g6ljEP+so5vU6I2Ms0y
ITKawbgwmZjBeSb8MvZvlVKtBzusXdcluZUvzvbHD4k0TdgQdVllygHp8C73K6m7Qe54ZpokGJVX
i3ivRAJU9w4t7ByIiTacf44ent//8ncZIizSVcoUBUxy35NSlEdAjZAEsItRu1BGKbvhqIgR+NWb
/i4FUszcH48I0S5UsRGvrspuS27Cr7NE/rBWd+ebzgIDH0tywddYzAGlPQigGVw+GFepVdH9Zbdw
Mudl2Qz1o667ZDF/+kIumtRVaxQHb9jzfYqjr+d0HBPCHfzqkyU9Wun4jRCxgSfBgag/PWTDlhdB
6NGbz6mb4nD7OPvwEr7upMCK5/TdImX/3rOwqa1u7pbp0OtsH2tzolNWb2MdziS4YnW4ueNEMrlF
wi1KiXqV391Irg8Fzbxo+4JrU2fyVGmFrNhsWHftVF1a3j0ICKmFvxpXQveL4GrQA82MrVspjcwd
oS8k6V0iRXWn6myOVglsHLDEGCR/3NN54wB7ub9Iqj8Hdd2FYwHaO7sRD5amw7T6UxFPSF+JrAmu
un6E6AmTAeXEPqSKwx0u647ELolxthojoF3IBJAy93p57W9YbcQvFfi8Hy5Eq2OjG/l8M9EqSWdc
31In5g6X0w6/ibIvkHnNYGKUeYURnVnsnfOvSuxbZcJZs47EnrsX2FD+suHSlphIjNs5VJSFGo5C
ZhI/zBh1S3xPgmU6XIl3cl7OppXbE7mn3sJB0+XqxhR5jaTPdP45LpRxrxqfgbR7BqTmslu13qpG
/MOvz+dH4myr5/lQ3nlrIS9kj7xqDAyEOuUWKn8slTtwZxu+vYwl59hQ4r2EDIMyHMw4dj7X7gkE
T4e7oDvEwyND96vdr/B3/zi1byzY5HUL0E9/sPI69N2moSADW3ftfd6q5YpOc5ZBIeuHBW0zHVc8
BBHk8WrgIs7UeGto66It3tkF+rgAq1LMECxmZxHr81zjZ9a0DEwrnixLX4NqDpR9Rz/+w7b3xuqq
NGsCtXLkfvG3VbdcThEMEkYmiigyHb2kpg/MxyAtyHrPzj99EM7L1T8oZd0xoMx1CPtjhc3L16/4
CCoK/MOLu2Y2pUca/ybwVUukuazinSs7HXaqmhG538i4ZEHwRbaeQv/TFF71q8d3NHWdRDRJ+Dpb
OCgBvBbPwd8fr5u6Fp+LDXPF+F8AAAD//0ycSw+ySpeF5+dXfPmm5kRuUkXPuIMgVQiomHQ6gIqC
yrUKqKT/ewff052eMuFS1FprP3tDRPqXu8Rz0TFJ2eoJpT64OUB8xZqt+JXxIa8u8HvO2Ch3UDfn
Aa/53Ji2n40rQ/dcYOtUltUMYpiCp7k8scnuGhBMer+DiFEePZkXVMvJj22ggUnH+Wzu2bzVl0h5
tUuH7W9YGMvblGRwK8GeQC8ZjOnV6pMCPoX50+NKXPMhLCNpj8Y70HvBeyscxEtXUMT2tC8vr7yG
QRckFK/+Ov6pt4hdY1QYTT+pWOwgXi4Y649S9Jmu1yGQcpTiQ243MRlkPwUBKEZsZBXvj/wrqoET
yJj6l3I2qGGfVHgSNoiqSaVXA9twG6Wfrpia+6LK2U+v61l643SbnqulW56yIk6OjKCQR9V84p9n
ReFbiyYdcCtxWGJJfgHVwOqmDMCsX1RB2RvPlIi+u60G+9Bm0M+oTVgJWbwU9RVBJ1HOSGxVVAlH
VRWUVl47oK4FWJsP6qCoYiZRR6yKWOCO3ATBxxXRFlWjT2LtAiEGoYtzPlUN0T3XHIwPpydWv5ri
s+haRfCnV/a5+eRkCzwCaSHFOFS3m4pxXhAC4j731P24XD7DdAeBtG8XIkyOzxio/Az+8p57OYb+
5Hh4ktk1V8i5fzz8xe9YBp9BGmCtXJu4232ygRN6rBPUnhkz6fR4gd/7YHxPcz7thbYBvPu54iDd
A59eES9ArXu12Fz5xtS/kA1/+pBeqdZzw7A/gEFJLeyap/RXn6eQCt6WHhwhq6aYnQiMg1qmgdWr
8dRVkaSsfkEf1WNnkG+vZso36g4EvttzLARYcOEdnq5kLuK2Hy5PtOykqpTw/ha+K4ZcuwFSvRzp
/ny/Maaq7hm4i3QlU9Z0+cBezQdObTghucW0n9f1gcW7kLChoW1FCnvSoVjmDtmSQ9DPaveR4U4S
ECHX98WfIUgHqJ90gr3Rg3EHj3oBFxTtqKvaPJj5V/RRxIc/4iB91RWjOz4Flj088dVTL+z98dAE
zFrYo9nulIrd36kOx3EUKcLHwWBPnCbQ5gwPyfpmZF/tGUXQKsMM36+bsl/wdY7kMosKUnIHw+Cu
QTmAfX2r8c9/uE7lPLD7yjoNoHavZv52UqFHdxx9mB42lksEa7ixLhJ2LNc35uXESuXWcjrGimv5
tCm34e4WRh4OJN/yhVXvwbq/cVYOQsz6owZhCT2OFAhLgG1DrlC6erPByGaf1V/FDGTkuKeFTF0g
rvlEXustqt33KGdjsgzKqxRnwtXz2I9ou4fg9B749YuTdz7H+MrBeuoPWJ3JXLU6eTRwrcep/Q2h
QcDBlGDz3le4iCrqL/stCcF5EncY22rMeC59FD/+QuDqv5P1DlT5ng8ZLsKxy8mxuCF4eJjhyj8e
YL5uP9OfvB+32S5erjvThi/1sFC1rZ+sMQ1mKlqEPHyQacPGVj5myje97fB+VEi1GOhQwgG6A7V9
MLMpHPe6vOUCBwe06fvlW29UqBdXjUarvjMUmAdFtHmEH+vzFj76JQTK4BywgSzqzzPXlvB0y050
Pzgta/P3Jvn5FUYgfuSDdpUXON41E2MD+GCh77SEKKFXbIUS549aYA3w6m4ZkVMUxqtfbsCa5/E5
bXJ/Ztc6UmTQLNhZeQMZzg8BHMhNoE49+/3Sp3Oh7A3hTX1XqeKZ19wGUjP6ok/LVDbvD3H24zX4
MDhfwIhw6EBm0iNW36FfsdS3TIgs/UP1iMiA+XVIQHLVfGzZYQS4kc4QGM8hpdZzF+fzjszdj0dQ
xCOxYgPmXaV6v1yqEeESz4O2wF3zIXtsqb5vMPc8cDLaxEeM6iDN5+etK2Fn2B/qtRhXvB5uCvi4
uD0OF/3LZhkJHLy8XyHFF9vOxeZl/tEnBNc89McfPp/3k+KrDfypxlGi2JG70OOoLhXDkbbAx0e8
k83LuBntJRtMGBzfDb56Vu/P+f4JAbtSD8nlcI5FKX9KMJchI9M79PvpkNcZ7OlN/+lbThxFT5RJ
1a01T5B85T8JGID4IKC7nHKyoIMO0/uDUPw8bfxJDI8NUCf1tx5GvOy+ngervNex3ddRPgepXICV
p5FXv1GM+RMeXSA6bk4vb0tjwnvzKaDpKQZ1Pdb5i1ofEJR68YJtY9YNQcfHBJo9wKTeaBRMU0wl
OA9Wgi9p/O5XvpmBH9/0lRcFLE+lDRiUzKJaLegM7CAQYOLMC1Uja/YXdFZLpREcStExd/KxvO4m
cGWzhT7JolXLIPsZ1JYnpM7sbfIJqdSFjf/tsB3e+5iZfvSBq39RY8O9jHH1X/jjBWs9AkRXTjPQ
CehCkSLU/vThn4JC/dNIXdKP1XSLlRQCXuzwvtM1JtwabQN2KNnjgus0n5hHPoLJ89KTJT9/89kU
rQ307HaPdm0rxdN+uoRyfw/a33rEzSygDrZpnOD9QxP973V5ZvD52djUaQDI5y82E+l7dD5/6hOm
l9Zd5p+iRSSBv8XzykNBLGUH7Cs3WM2oqA7KHV6u6FnfdjF5npiq8OV3QCUGPCMnPzdhl18tJBy8
k88pdXRQsm35wl5vcTErvwDCh8g6tIuq2VjWfAm64XSkh9x244l3+gT8+Mch107+6scm7EtSEB6g
BqzPi1P4uZIwAunST7OAGkhjJaOm5Y4G+eXBMAcOUsaqYmt+PoOVj2BbLvdsdp2bAHvq89g5772c
R7KZgkqpMno4jxtG09Dr4I9HuF3R/VlvkAbPF/WrjeiTtV8D00vWUKce0nzVMuG3P7E1PZN+eRd4
gCs/JBy7ooqfQ2UDoYw4atCayycdHksADrKP+J0ZMHHVGwV87iY9VO2zX/mspPi+GqPOfzqGcFRd
DurjJcDr+9HPO7JrQCLvKmxQqetZfG84yKJExMVXMitR5ILXH76g6XlWddZ54eDKa2nKmz2YpMTL
gMLDiDpO9PEp4UQOou2rQ4uuzYCI6tLB01nX6aGqST7vjuFdscjIkAK1TT/+8j6/dq+BRzhGd4+s
gccwjP7wKqYppAPLWfpQT6zk/Hc/wK/rA9ZAw6pfPvrpBfaBec/HzKxT0FXjGxvCsQErn5ekybF8
ihvHYnNlyDbgDhzAmDhZLDqXXQPda1JSc2MbPp9M/QKTq+ETpsURm7u8yqBYkgQHKx9c+dcC134a
RcM0Vy1/u+k/3kYP4ejFi6syF3o44qn2kLdgGFGpK7L71GhMay5mqBtTqIqpRM8Pe1j1YJ7gNXtd
sXblpT/1DihfMaXOazCqObhJOlQtHP/hQxx75hNUrVdJzc/rYbC7VhdQGD8vbPJVCwaPLzjoPnsV
OwqUeyJZEgKnZGmp/W3f/+jNz0/1LerYdIKmDkERzyTRKWHTJJUR6PwhwoXxvfST4zkTICzJ8SM/
tfG41tfwd32G/1V6dmmBDfcCPGPzldKcfWRKYNp0GgKuUuXLZN0IMOO4wiqThmqpZX4A1rbdUj0/
JkZ/1+o75OVixob/vfVjvMd3uDzKCOte7ftLUm2IXCT4Qb3guQ5QqmMHjYbdkWCq73w+iGWhKPWw
o/528ABz+Ez41ffYuuZBPM3wqEI7Tn3qLzLph8uGQnAZpIowDT36tuVjDtZ07V5zee13oip3oOPa
GTtBX+eL7xYdcK9GjP/4YVkWBWRRY9L0tqmraWMNBxgWh+TXX2QzafQ75O5mggvhvI1ZdO0jmCyx
iIN5jHv621/quRgI9Ro153nN7f653linBuMTNYOSXg9EWeuJZcxPKfhm6gPft99TT9a8ClY+h6a1
HuGPdlXC/o5btG0cCyz2iajg1z8UDriJ51sURoC8HJ36wcHOp0/qNpC/KFtsJyOKueVW6fDtlUd6
H6a5nx8JRDAbFYiEp8/FzbB/FcpLJsLqL6YxtLq7kXV/39DgqXY9q88zp9x2QYO4mNdy4SUeoJxi
08XHvC/Akl4WT8mdJ0/RqfPyee13AVrIMQ2i/Sf++f2vn0QzaDM2YWsYYI84g/zyeX96mhNMXtKR
/vyKq4iaKOh2sTBqeBM0RxSrSqTxN4ruQVUtsT4skIRiS+ZhgDF7WCcEbZDfsF5xYz5Xn1QAj/tr
g4BKypxi2kHoBBKmoT9EbAHBkSjcx/lSY93/xDMeJpBizqIIxNt8tO6vRNlvkpaeho8DRL20Cpjv
zRzR7qgy7j0NkYzDu0ht7esYi1qb6H8nCv7617/+8/fPgk9zu7/XwYDxPo9//9+owN/i38Mne7//
/NiADFl5//d//DOB8O+2bz7t+F9jU9+/wzpq8M+swb/HZsze///4X+up/vuv/wEAAP//AwCx1ZQX
MkEAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 872e3491aec7f96b-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Apr 2024 21:52:43 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=86wbuh5pk243ju9trndw.208JsB0RgsMlJzzka.XRjo-1712872363-1.0.1.1-71FT_fqDV6k5c6MFLAYEY.4lV1w2FBMCehcjxocck04EUPwsZbbSJXjDEbgrvgUr454H4H3GHxke4Y1iKmN9eg;
path=/; expires=Thu, 11-Apr-24 22:22:43 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=ld8W4oDgFAViXhWljDeLAQzCh21yCy.SBEJQBe4CuCY-1712872363901-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-3-small
openai-organization:
- langchain
openai-processing-ms:
- '22'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999989'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_4987e8aef364277a3f77fdcce5223997
status:
code: 200
message: OK
version: 1
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_async_client.py | import asyncio
import datetime
import uuid
import pytest
from pydantic import BaseModel
from langsmith import utils as ls_utils
from langsmith.async_client import AsyncClient
from langsmith.schemas import DataType, Run
@pytest.mark.asyncio
async def test_indexed_datasets():
    """End-to-end check of dataset indexing + similarity search.

    Creates a schema-constrained dataset, adds one example, indexes the
    dataset, then polls until ``similar_examples`` returns that example.
    The dataset is deleted afterwards regardless of outcome.
    """

    class InputsSchema(BaseModel):
        name: str  # type: ignore[annotation-unchecked]
        age: int  # type: ignore[annotation-unchecked]

    async with AsyncClient() as client:
        # Fix: previously `dataset` was unbound if create_dataset raised,
        # so the `finally` cleanup crashed with NameError and masked the
        # original failure. Guard the delete on successful creation.
        dataset = None
        try:
            dataset = await client.create_dataset(
                "test_dataset_for_integration_tests_" + uuid.uuid4().hex,
                inputs_schema_definition=InputsSchema.model_json_schema(),
            )
            example = await client.create_example(
                inputs={"name": "Alice", "age": 30},
                outputs={"hi": "hello"},
                dataset_id=dataset.id,
            )
            await client.index_dataset(dataset_id=dataset.id)

            async def check_similar_examples():
                # Indexing is eventually consistent; poll until one hit.
                examples = await client.similar_examples(
                    {"name": "Alice", "age": 30}, dataset_id=dataset.id, limit=1
                )
                return len(examples) == 1

            await wait_for(check_similar_examples, timeout=20)
            examples = await client.similar_examples(
                {"name": "Alice", "age": 30}, dataset_id=dataset.id, limit=1
            )
            assert examples[0].id == example.id
        finally:
            if dataset is not None:
                await client.delete_dataset(dataset_id=dataset.id)
# Helper function to wait for a condition
async def wait_for(condition, timeout=10):
    """Poll ``condition`` every 0.1s until it returns truthy.

    Args:
        condition: Zero-argument async callable. Exceptions it raises are
            treated as "not ready yet" and swallowed until the deadline.
        timeout: Seconds to keep polling before giving up.

    Raises:
        TimeoutError: If the condition is not met within ``timeout`` seconds.
    """
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    while True:
        try:
            if await condition():
                return
        except Exception:
            # Transient failures (e.g. eventual-consistency lookups) are
            # expected; keep polling until the deadline.
            pass
        # Fix: the deadline was previously checked only when the condition
        # raised, so a condition that kept returning False (never raising)
        # would loop forever past `timeout`. Check it on every iteration.
        if loop.time() > deadline:
            raise TimeoutError("Condition not met within the timeout period")
        await asyncio.sleep(0.1)
@pytest.fixture
async def async_client():
    """Provide a fresh ``AsyncClient`` per test.

    Clears the cached environment-variable lookups first so each test
    observes the current environment, and closes the client on teardown.
    """
    ls_utils.get_env_var.cache_clear()
    test_client = AsyncClient()
    yield test_client
    await test_client.aclose()
@pytest.mark.asyncio
async def test_create_run(async_client: AsyncClient):
    """Create a single LLM run, then poll until it is readable by id."""
    project_name = "__test_create_run" + uuid.uuid4().hex[:8]
    run_id = uuid.uuid4()
    started_at = datetime.datetime.now(datetime.timezone.utc)
    await async_client.create_run(
        name="test_run",
        inputs={"input": "hello"},
        run_type="llm",
        project_name=project_name,
        id=run_id,
        start_time=started_at,
    )

    async def check_run():
        # Run ingestion is asynchronous server-side; treat "not found"
        # as not-ready rather than failure.
        try:
            fetched = await async_client.read_run(run_id)
        except ls_utils.LangSmithError:
            return False
        return fetched.name == "test_run"

    await wait_for(check_run)
    fetched = await async_client.read_run(run_id)
    assert fetched.name == "test_run"
    assert fetched.inputs == {"input": "hello"}
@pytest.mark.asyncio
async def test_list_runs(async_client: AsyncClient):
    """Create three runs tagged with a unique metadata uid and list them back."""
    project_name = "__test_list_runs"
    run_ids = [uuid.uuid4() for _ in range(3)]
    # Unique per-test uid lets the metadata filter isolate this test's runs
    # inside a shared project name.
    meta_uid = str(uuid.uuid4())
    for i, run_id in enumerate(run_ids):
        await async_client.create_run(
            name=f"test_run_{i}",
            inputs={"input": f"hello_{i}"},
            run_type="llm",
            project_name=project_name,
            id=run_id,
            start_time=datetime.datetime.now(datetime.timezone.utc),
            end_time=datetime.datetime.now(datetime.timezone.utc),
            extra={"metadata": {"uid": meta_uid}},
        )
    filter_ = f'and(eq(metadata_key, "uid"), eq(metadata_value, "{meta_uid}"))'
    # Ingestion is eventually consistent; poll until all three runs are visible.
    async def check_runs():
        runs = [
            run
            async for run in async_client.list_runs(
                project_name=project_name, filter=filter_
            )
        ]
        return len(runs) == 3
    await wait_for(check_runs)
    runs = [
        run
        async for run in async_client.list_runs(
            project_name=project_name, filter=filter_
        )
    ]
    assert len(runs) == 3
    assert all(isinstance(run, Run) for run in runs)
@pytest.mark.asyncio
async def test_create_dataset(async_client: AsyncClient):
    """A freshly created key-value dataset echoes back its name and type."""
    name = "__test_create_dataset" + uuid.uuid4().hex[:8]
    created = await async_client.create_dataset(name, data_type=DataType.kv)
    # The server should report back exactly what we asked for.
    assert created.name == name
    assert created.data_type == DataType.kv
    # Clean up so repeated runs do not accumulate datasets.
    await async_client.delete_dataset(dataset_id=created.id)
@pytest.mark.asyncio
async def test_create_example(async_client: AsyncClient):
    """A newly created example round-trips its inputs and outputs."""
    ds_name = "__test_create_example" + uuid.uuid4().hex[:8]
    ds = await async_client.create_dataset(ds_name)
    created = await async_client.create_example(
        inputs={"input": "hello"}, outputs={"output": "world"}, dataset_id=ds.id
    )
    assert created.inputs == {"input": "hello"}
    assert created.outputs == {"output": "world"}
    # Remove the scratch dataset afterwards.
    await async_client.delete_dataset(dataset_id=ds.id)
@pytest.mark.asyncio
async def test_list_examples(async_client: AsyncClient):
    """Every example created in a dataset shows up when listing it."""
    ds_name = "__test_list_examples" + uuid.uuid4().hex[:8]
    ds = await async_client.create_dataset(ds_name)
    # Seed three distinct examples.
    for idx in range(3):
        await async_client.create_example(
            inputs={"input": f"hello_{idx}"},
            outputs={"output": f"world_{idx}"},
            dataset_id=ds.id,
        )
    listed = [ex async for ex in async_client.list_examples(dataset_id=ds.id)]
    assert len(listed) == 3
    await async_client.delete_dataset(dataset_id=ds.id)
@pytest.mark.asyncio
async def test_create_feedback(async_client: AsyncClient):
    """End-to-end feedback flow: direct feedback, presigned tokens, sharing.

    Creates a run, attaches feedback directly, then redeems one presigned
    feedback token twice (by token id and by URL), verifies both redemptions
    landed, and finally shares the run and checks the share took effect.

    The original version asserted ``len(presigned_feedbacks) == 2`` twice,
    checked ``value`` both via ``all(...)`` and again in a loop, and checked
    each score with ``in {0.8, 0.9}`` even though the final set-equality
    assertion subsumes it; the redundant assertions are removed here.
    """
    project_name = "__test_create_feedback" + uuid.uuid4().hex[:8]
    run_id = uuid.uuid4()
    await async_client.create_run(
        name="test_run",
        inputs={"input": "hello"},
        run_type="llm",
        project_name=project_name,
        id=run_id,
        start_time=datetime.datetime.now(datetime.timezone.utc),
    )
    feedback = await async_client.create_feedback(
        run_id=run_id,
        key="test_key",
        score=0.9,
        value="test_value",
        comment="test_comment",
    )
    assert feedback.run_id == run_id
    assert feedback.key == "test_key"
    assert feedback.score == 0.9
    assert feedback.value == "test_value"
    assert feedback.comment == "test_comment"
    # Redeem the same presigned token twice: once by token id, once by URL.
    token = await async_client.create_presigned_feedback_token(
        run_id=run_id, feedback_key="test_presigned_key"
    )
    await async_client.create_feedback_from_token(
        token.id, score=0.8, value="presigned_value", comment="presigned_comment"
    )
    await async_client.create_feedback_from_token(
        str(token.url), score=0.9, value="presigned_value", comment="presigned_comment"
    )
    # Feedback ingestion is eventually consistent; poll until both land.
    async def check_feedback():
        feedbacks = [
            feedback async for feedback in async_client.list_feedback(run_ids=[run_id])
        ]
        return sum(feedback.key == "test_presigned_key" for feedback in feedbacks) == 2
    await wait_for(check_feedback, timeout=10)
    feedbacks = [
        feedback async for feedback in async_client.list_feedback(run_ids=[run_id])
    ]
    presigned_feedbacks = [f for f in feedbacks if f.key == "test_presigned_key"]
    # Both token redemptions must be visible, each carrying its payload.
    assert len(presigned_feedbacks) == 2
    for feedback in presigned_feedbacks:
        assert feedback.value == "presigned_value"
        assert feedback.comment == "presigned_comment"
    # One redemption per score; set equality pins both values exactly.
    assert set(f.score for f in presigned_feedbacks) == {0.8, 0.9}
    shared_run_url = await async_client.share_run(run_id)
    run_is_shared = await async_client.run_is_shared(run_id)
    assert run_is_shared, f"Run isn't shared; failed link: {shared_run_url}"
@pytest.mark.asyncio
async def test_list_feedback(async_client: AsyncClient):
    """Attach three feedback entries to a run and list them all back."""
    project_name = "__test_list_feedback"
    run_id = uuid.uuid4()
    await async_client.create_run(
        name="test_run",
        inputs={"input": "hello"},
        run_type="llm",
        project_name=project_name,
        id=run_id,
        start_time=datetime.datetime.now(datetime.timezone.utc),
    )
    for i in range(3):
        await async_client.create_feedback(
            run_id=run_id,
            key=f"test_key_{i}",
            score=0.9,
            value=f"test_value_{i}",
            comment=f"test_comment_{i}",
        )
    # Feedback ingestion is eventually consistent; poll until all three land.
    async def check_feedbacks():
        feedbacks = [
            feedback async for feedback in async_client.list_feedback(run_ids=[run_id])
        ]
        return len(feedbacks) == 3
    await wait_for(check_feedbacks, timeout=10)
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_experiment_manager.py | import uuid
from langsmith.client import Client
from langsmith.evaluation._runner import _ExperimentManager
def test_experiment_manager_existing_name():
    """Starting an experiment whose generated name is taken picks a new name.

    Pre-creates a project under the manager's initially generated experiment
    name so that ``start()`` is forced to deduplicate while keeping the
    requested prefix.
    """
    client = Client()
    dataset_name = f"Test Dups: {str(uuid.uuid4())}"
    ds = client.create_dataset(dataset_name)
    client.create_example(inputs={"un": "important"}, dataset_id=ds.id)
    prefix = "Some Test Prefix"
    try:
        manager = _ExperimentManager(dataset_name, experiment=prefix, client=client)
        assert manager is not None
        original_name = manager._experiment_name
        assert original_name.startswith(prefix)
        # Occupy the generated name to force a collision on start().
        client.create_project(original_name, reference_dataset_id=ds.id)
        manager.start()
        new_name = manager._experiment_name
        # The deduplicated name keeps the prefix but must differ.
        assert new_name.startswith(prefix)
        assert new_name != original_name
    finally:
        client.delete_dataset(dataset_id=ds.id)
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_prompts.py | from typing import Literal
from uuid import uuid4
import pytest
from langchain_core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
PromptTemplate,
)
from langchain_core.runnables.base import RunnableSequence
import langsmith.schemas as ls_schemas
import langsmith.utils as ls_utils
from langsmith.client import (
Client,
convert_prompt_to_anthropic_format,
convert_prompt_to_openai_format,
)
@pytest.fixture
def langsmith_client() -> Client:
    """Client with generous (connect, read) timeouts for slow prompt endpoints."""
    return Client(timeout_ms=(50_000, 90_000))
@pytest.fixture
def prompt_template_1() -> ChatPromptTemplate:
    """Single-message chat template with one input variable (``topic``)."""
    return ChatPromptTemplate.from_template("tell me a joke about {topic}")
@pytest.fixture
def prompt_template_2() -> ChatPromptTemplate:
    """Two-message (system + human) chat template with a ``question`` variable."""
    return ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant."),
            ("human", "{question}"),
        ]
    )
@pytest.fixture
def prompt_template_3() -> PromptTemplate:
    """Plain (non-chat) string template with a ``text`` input variable."""
    return PromptTemplate.from_template("Summarize the following text: {text}")
@pytest.fixture
def prompt_with_model() -> dict:
    """Raw serialized manifest of a prompt piped into a model.

    A LangChain "lc" serialization of a RunnableSequence: ``first`` is a
    ChatPromptTemplate (system + human messages), ``last`` is a RunnableBinding
    around ChatOpenAI whose API key is stored as a secret reference. Used to
    exercise ``pull_prompt(..., include_model=True)``.
    """
    return {
        "id": ["langsmith", "playground", "PromptPlayground"],
        "lc": 1,
        "type": "constructor",
        "kwargs": {
            "last": {
                "id": ["langchain", "schema", "runnable", "RunnableBinding"],
                "lc": 1,
                "type": "constructor",
                "kwargs": {
                    "bound": {
                        "id": ["langchain", "chat_models", "openai", "ChatOpenAI"],
                        "lc": 1,
                        "type": "constructor",
                        "kwargs": {
                            "openai_api_key": {
                                "id": ["OPENAI_API_KEY"],
                                "lc": 1,
                                "type": "secret",
                            }
                        },
                    },
                    "kwargs": {},
                },
            },
            "first": {
                "id": ["langchain", "prompts", "chat", "ChatPromptTemplate"],
                "lc": 1,
                "type": "constructor",
                "kwargs": {
                    "messages": [
                        {
                            "id": [
                                "langchain",
                                "prompts",
                                "chat",
                                "SystemMessagePromptTemplate",
                            ],
                            "lc": 1,
                            "type": "constructor",
                            "kwargs": {
                                "prompt": {
                                    "id": [
                                        "langchain",
                                        "prompts",
                                        "prompt",
                                        "PromptTemplate",
                                    ],
                                    "lc": 1,
                                    "type": "constructor",
                                    "kwargs": {
                                        "template": "You are a chatbot.",
                                        "input_variables": [],
                                        "template_format": "f-string",
                                    },
                                }
                            },
                        },
                        {
                            "id": [
                                "langchain",
                                "prompts",
                                "chat",
                                "HumanMessagePromptTemplate",
                            ],
                            "lc": 1,
                            "type": "constructor",
                            "kwargs": {
                                "prompt": {
                                    "id": [
                                        "langchain",
                                        "prompts",
                                        "prompt",
                                        "PromptTemplate",
                                    ],
                                    "lc": 1,
                                    "type": "constructor",
                                    "kwargs": {
                                        "template": "{question}",
                                        "input_variables": ["question"],
                                        "template_format": "f-string",
                                    },
                                }
                            },
                        },
                    ],
                    "input_variables": ["question"],
                },
            },
        },
    }
@pytest.fixture
def chat_prompt_template():
    """System + user chat template used by the format-conversion tests."""
    return ChatPromptTemplate.from_messages(
        [
            ("system", "You are a chatbot"),
            ("user", "{question}"),
        ]
    )
def test_current_tenant_is_owner(langsmith_client: Client):
    """The client recognises its own tenant handle (and '-') as the owner."""
    handle = langsmith_client._get_settings().tenant_handle or "-"
    assert langsmith_client._current_tenant_is_owner(handle)
    # "-" is the shorthand for the current tenant.
    assert langsmith_client._current_tenant_is_owner("-")
    # A handle belonging to nobody must not be treated as the current tenant.
    assert not langsmith_client._current_tenant_is_owner("non_existent_owner")
def test_list_prompts(langsmith_client: Client):
    """Listing prompts honours the limit and returns the typed response."""
    listing = langsmith_client.list_prompts(limit=10, offset=0)
    assert isinstance(listing, ls_schemas.ListPromptsResponse)
    # No more repos than requested may come back.
    assert len(listing.repos) <= 10
def test_get_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate):
    """Pushing a prompt makes it retrievable under its repo handle."""
    name = f"test_prompt_{uuid4().hex[:8]}"
    push_url = langsmith_client.push_prompt(name, object=prompt_template_1)
    assert isinstance(push_url, str)
    assert langsmith_client._prompt_exists(name)
    fetched = langsmith_client.get_prompt(name)
    assert isinstance(fetched, ls_schemas.Prompt)
    assert fetched.repo_handle == name
    # Clean up the scratch prompt.
    langsmith_client.delete_prompt(name)
def test_prompt_exists(langsmith_client: Client, prompt_template_2: ChatPromptTemplate):
    """_prompt_exists is False for unknown handles and True right after a push."""
    missing = f"non_existent_{uuid4().hex[:8]}"
    assert not langsmith_client._prompt_exists(missing)
    present = f"existent_{uuid4().hex[:8]}"
    assert langsmith_client.push_prompt(present, object=prompt_template_2)
    assert langsmith_client._prompt_exists(present)
    langsmith_client.delete_prompt(present)
def test_update_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate):
    """Metadata updates (description, visibility, tags) round-trip via get."""
    prompt_name = f"test_prompt_{uuid4().hex[:8]}"
    langsmith_client.push_prompt(prompt_name, object=prompt_template_1)
    updated_data = langsmith_client.update_prompt(
        prompt_name,
        description="Updated description",
        is_public=True,
        tags=["test", "update"],
    )
    assert isinstance(updated_data, dict)
    updated_prompt = langsmith_client.get_prompt(prompt_name)
    assert isinstance(updated_prompt, ls_schemas.Prompt)
    assert updated_prompt.description == "Updated description"
    assert updated_prompt.is_public
    # Tag ordering is not guaranteed by the API; compare as sets.
    assert set(updated_prompt.tags) == set(["test", "update"])
    langsmith_client.delete_prompt(prompt_name)
def test_delete_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate):
    """Deleting a pushed prompt removes it from the registry."""
    name = f"test_prompt_{uuid4().hex[:8]}"
    langsmith_client.push_prompt(name, object=prompt_template_1)
    assert langsmith_client._prompt_exists(name)
    langsmith_client.delete_prompt(name)
    # The handle must be gone once deleted.
    assert not langsmith_client._prompt_exists(name)
def test_pull_prompt_object(
    langsmith_client: Client, prompt_template_1: ChatPromptTemplate
):
    """pull_prompt_commit returns the commit manifest for a pushed prompt."""
    name = f"test_prompt_{uuid4().hex[:8]}"
    langsmith_client.push_prompt(name, object=prompt_template_1)
    commit = langsmith_client.pull_prompt_commit(name)
    assert isinstance(commit, ls_schemas.PromptCommit)
    # The commit records which repo it belongs to.
    assert commit.repo == name
    langsmith_client.delete_prompt(name)
def test_pull_prompt(langsmith_client: Client, prompt_template_1: ChatPromptTemplate):
    """All supported pull identifiers resolve to the same prompt commit.

    Covers: bare name, "-/name" (private-owner shorthand), "handle/name",
    "handle/name:latest", and "name:<commit-hash>".
    """
    prompt_name = f"test_prompt_{uuid4().hex[:8]}"
    langsmith_client.push_prompt(prompt_name, object=prompt_template_1)
    # test pulling with just prompt name
    pulled_prompt = langsmith_client.pull_prompt(prompt_name)
    assert isinstance(pulled_prompt, ChatPromptTemplate)
    assert (
        pulled_prompt.metadata and pulled_prompt.metadata["lc_hub_repo"] == prompt_name
    )
    # test pulling with private owner (-) and name
    pulled_prompt_2 = langsmith_client.pull_prompt(f"-/{prompt_name}")
    assert pulled_prompt == pulled_prompt_2
    # test pulling with tenant handle and name
    tenant_handle = langsmith_client._get_settings().tenant_handle
    pulled_prompt_3 = langsmith_client.pull_prompt(f"{tenant_handle}/{prompt_name}")
    assert pulled_prompt.metadata and pulled_prompt_3.metadata
    # Same underlying commit regardless of how the prompt was addressed.
    assert (
        pulled_prompt.metadata["lc_hub_commit_hash"]
        == pulled_prompt_3.metadata["lc_hub_commit_hash"]
    )
    assert pulled_prompt_3.metadata["lc_hub_owner"] == tenant_handle
    # test pulling with handle, name and commit hash
    tenant_handle = langsmith_client._get_settings().tenant_handle
    pulled_prompt_4 = langsmith_client.pull_prompt(
        f"{tenant_handle}/{prompt_name}:latest"
    )
    assert pulled_prompt_3 == pulled_prompt_4
    # test pulling without handle, with commit hash
    assert pulled_prompt_4.metadata
    pulled_prompt_5 = langsmith_client.pull_prompt(
        f"{prompt_name}:{pulled_prompt_4.metadata['lc_hub_commit_hash']}"
    )
    assert pulled_prompt_5.metadata
    assert (
        pulled_prompt_4.metadata["lc_hub_commit_hash"]
        == pulled_prompt_5.metadata["lc_hub_commit_hash"]
    )
    langsmith_client.delete_prompt(prompt_name)
def test_push_and_pull_prompt(
    langsmith_client: Client, prompt_template_2: ChatPromptTemplate
):
    """Push/pull round-trip works; pushing under a foreign handle is rejected."""
    prompt_name = f"test_prompt_{uuid4().hex[:8]}"
    push_result = langsmith_client.push_prompt(prompt_name, object=prompt_template_2)
    assert isinstance(push_result, str)
    pulled_prompt = langsmith_client.pull_prompt(prompt_name)
    assert isinstance(pulled_prompt, ChatPromptTemplate)
    langsmith_client.delete_prompt(prompt_name)
    # should fail
    with pytest.raises(ls_utils.LangSmithUserError):
        langsmith_client.push_prompt(
            f"random_handle/{prompt_name}", object=prompt_template_2
        )
def test_pull_prompt_include_model(langsmith_client: Client, prompt_with_model: dict):
    """Pulling with include_model=True yields a prompt piped into a model.

    The stored manifest deserializes to a RunnableSequence whose first step
    is the prompt template; its hub metadata must point back at the repo the
    sequence was pulled from.
    """
    prompt_name = f"test_prompt_with_model_{uuid4().hex[:8]}"
    langsmith_client.push_prompt(prompt_name, object=prompt_with_model)
    pulled_prompt = langsmith_client.pull_prompt(prompt_name, include_model=True)
    assert isinstance(pulled_prompt, RunnableSequence)
    # Assert directly on the first step instead of the original
    # `if ...: ... else: assert False` branch; the failure message is kept.
    first = getattr(pulled_prompt, "first", None)
    assert first, "pulled_prompt.first should exist, incorrect prompt format"
    assert isinstance(first, BasePromptTemplate)
    assert first.metadata and first.metadata["lc_hub_repo"] == prompt_name
    langsmith_client.delete_prompt(prompt_name)
def test_like_unlike_prompt(
    langsmith_client: Client, prompt_template_1: ChatPromptTemplate
):
    """Liking bumps num_likes to 1; unliking drops it back to 0."""
    name = f"test_prompt_{uuid4().hex[:8]}"
    langsmith_client.push_prompt(name, object=prompt_template_1)
    langsmith_client.like_prompt(name)
    liked = langsmith_client.get_prompt(name)
    assert isinstance(liked, ls_schemas.Prompt)
    assert liked.num_likes == 1
    langsmith_client.unlike_prompt(name)
    unliked = langsmith_client.get_prompt(name)
    assert isinstance(unliked, ls_schemas.Prompt)
    assert unliked.num_likes == 0
    langsmith_client.delete_prompt(name)
def test_get_latest_commit_hash(
    langsmith_client: Client, prompt_template_1: ChatPromptTemplate
):
    """A freshly pushed prompt exposes a non-empty latest commit hash."""
    name = f"test_prompt_{uuid4().hex[:8]}"
    langsmith_client.push_prompt(name, object=prompt_template_1)
    # "-/" addresses the current tenant's private namespace.
    latest = langsmith_client._get_latest_commit_hash(f"-/{name}")
    assert isinstance(latest, str)
    assert len(latest) > 0
    langsmith_client.delete_prompt(name)
def test_create_prompt(langsmith_client: Client):
    """create_prompt (without a manifest) round-trips all metadata fields."""
    prompt_name = f"test_create_prompt_{uuid4().hex[:8]}"
    created_prompt = langsmith_client.create_prompt(
        prompt_name,
        description="Test description",
        readme="Test readme",
        tags=["test", "create"],
        is_public=False,
    )
    assert isinstance(created_prompt, ls_schemas.Prompt)
    assert created_prompt.repo_handle == prompt_name
    assert created_prompt.description == "Test description"
    assert created_prompt.readme == "Test readme"
    # Tag ordering is not guaranteed; compare as sets.
    assert set(created_prompt.tags) == set(["test", "create"])
    assert not created_prompt.is_public
    langsmith_client.delete_prompt(prompt_name)
def test_create_commit(
    langsmith_client: Client,
    prompt_template_2: ChatPromptTemplate,
    prompt_template_3: PromptTemplate,
):
    """Commits require an existing prompt and a well-formed manifest.

    Verifies that committing to a missing prompt raises, that a valid commit
    bumps the commit count, and that malformed manifests are rejected with a
    400. The original try/except blocks for the malformed manifests passed
    silently when no error was raised at all; ``pytest.raises`` makes the
    expected failures mandatory.
    """
    prompt_name = f"test_create_commit_{uuid4().hex[:8]}"
    # Committing before the prompt exists must fail loudly.
    with pytest.raises(ls_utils.LangSmithNotFoundError) as exc_info:
        langsmith_client.create_commit(prompt_name, object=prompt_template_2)
    assert str(exc_info.value) == "Prompt does not exist, you must create it first."
    langsmith_client.push_prompt(prompt_name, object=prompt_template_3)
    commit_url = langsmith_client.create_commit(prompt_name, object=prompt_template_2)
    assert isinstance(commit_url, str)
    assert prompt_name in commit_url
    prompt = langsmith_client.get_prompt(prompt_name)
    assert isinstance(prompt, ls_schemas.Prompt)
    # push_prompt created the first commit; create_commit added the second.
    assert prompt.num_commits == 2
    # A manifest without an "id" field is rejected by the server.
    with pytest.raises(ls_utils.LangSmithError) as exc_info:
        langsmith_client.create_commit(prompt_name, object={"hi": "hello"})
    assert "Manifest must have an id field" in str(exc_info.value)
    assert "400 Client Error" in str(exc_info.value)
    # A manifest with an unsupported type is rejected by the server.
    with pytest.raises(ls_utils.LangSmithError) as exc_info:
        langsmith_client.create_commit(prompt_name, object={"id": ["hi"]})
    assert "Manifest type hi is not supported" in str(exc_info.value)
    assert "400 Client Error" in str(exc_info.value)
    langsmith_client.delete_prompt(prompt_name)
def test_push_prompt(
    langsmith_client: Client,
    prompt_template_3: PromptTemplate,
    prompt_template_2: ChatPromptTemplate,
):
    """push_prompt creates a prompt and can update metadata and/or manifest.

    Three pushes: (1) create with manifest + metadata, (2) metadata only —
    commit count must not change, (3) manifest only.
    """
    prompt_name = f"test_push_new_{uuid4().hex[:8]}"
    url = langsmith_client.push_prompt(
        prompt_name,
        object=prompt_template_3,
        is_public=True,
        description="New prompt",
        tags=["new", "test"],
    )
    assert isinstance(url, str)
    assert prompt_name in url
    prompt = langsmith_client.get_prompt(prompt_name)
    assert isinstance(prompt, ls_schemas.Prompt)
    assert prompt.is_public
    assert prompt.description == "New prompt"
    assert "new" in prompt.tags
    assert "test" in prompt.tags
    assert prompt.num_commits == 1
    # test updating prompt metadata but not manifest
    url = langsmith_client.push_prompt(
        prompt_name,
        is_public=False,
        description="Updated prompt",
    )
    updated_prompt = langsmith_client.get_prompt(prompt_name)
    assert isinstance(updated_prompt, ls_schemas.Prompt)
    assert updated_prompt.description == "Updated prompt"
    assert not updated_prompt.is_public
    # Metadata-only push must not create a new commit.
    assert updated_prompt.num_commits == 1
    # test updating prompt manifest but not metadata
    url = langsmith_client.push_prompt(
        prompt_name,
        object=prompt_template_2,
    )
    assert isinstance(url, str)
    langsmith_client.delete_prompt(prompt_name)
@pytest.mark.parametrize("is_public,expected_count", [(True, 1), (False, 1)])
def test_list_prompts_filter(
    langsmith_client: Client,
    prompt_template_1: ChatPromptTemplate,
    is_public: bool,
    expected_count: int,
):
    """Filtering list_prompts by visibility + name query finds the new prompt."""
    prompt_name = f"test_list_filter_{uuid4().hex[:8]}"
    langsmith_client.push_prompt(
        prompt_name, object=prompt_template_1, is_public=is_public
    )
    # The unique name makes the query match exactly this prompt.
    response = langsmith_client.list_prompts(is_public=is_public, query=prompt_name)
    assert response.total == expected_count
    if expected_count > 0:
        assert response.repos[0].repo_handle == prompt_name
    langsmith_client.delete_prompt(prompt_name)
def test_update_prompt_archive(
    langsmith_client: Client, prompt_template_1: ChatPromptTemplate
):
    """is_archived can be toggled on and back off via update_prompt."""
    name = f"test_archive_{uuid4().hex[:8]}"
    langsmith_client.push_prompt(name, object=prompt_template_1)
    langsmith_client.update_prompt(name, is_archived=True)
    archived = langsmith_client.get_prompt(name)
    assert isinstance(archived, ls_schemas.Prompt)
    assert archived.is_archived
    # Flip it back and confirm the flag clears.
    langsmith_client.update_prompt(name, is_archived=False)
    restored = langsmith_client.get_prompt(name)
    assert isinstance(restored, ls_schemas.Prompt)
    assert not restored.is_archived
    langsmith_client.delete_prompt(name)
@pytest.mark.parametrize(
    "sort_field, sort_direction",
    [
        (ls_schemas.PromptSortField.updated_at, "desc"),
    ],
)
def test_list_prompts_sorting(
    langsmith_client: Client,
    prompt_template_1: ChatPromptTemplate,
    sort_field: ls_schemas.PromptSortField,
    sort_direction: Literal["asc", "desc"],
):
    """Prompts come back in the requested sort order.

    Creates three prompts, lists with sorting, and checks that the subset of
    results belonging to this test is ordered consistently with the
    direction (names embed creation order via the ``test_sort_{i}_`` prefix).
    """
    prompt_names = [f"test_sort_{i}_{uuid4().hex[:8]}" for i in range(3)]
    for name in prompt_names:
        langsmith_client.push_prompt(name, object=prompt_template_1)
    response = langsmith_client.list_prompts(
        sort_field=sort_field, sort_direction=sort_direction, limit=10
    )
    assert len(response.repos) >= 3
    # Restrict to our prompts; other tenants' prompts may interleave.
    sorted_names = [
        repo.repo_handle for repo in response.repos if repo.repo_handle in prompt_names
    ]
    assert sorted_names == sorted(sorted_names, reverse=(sort_direction == "desc"))
    for name in prompt_names:
        langsmith_client.delete_prompt(name)
def test_convert_to_openai_format(chat_prompt_template: ChatPromptTemplate):
    """A formatted chat prompt converts to the OpenAI messages payload."""
    invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"})
    res = convert_prompt_to_openai_format(
        invoked,
    )
    # Expected payload includes the converter's default model parameters.
    expected = {
        "messages": [
            {"content": "You are a chatbot", "role": "system"},
            {"content": "What is the meaning of life?", "role": "user"},
        ],
        "model": "gpt-3.5-turbo",
        "stream": False,
        "n": 1,
        "temperature": 0.7,
    }
    # Compare only the expected keys; extra keys in `res` are tolerated.
    assert {k: res[k] for k in expected.keys()} == expected
def test_convert_to_anthropic_format(chat_prompt_template: ChatPromptTemplate):
    """A formatted chat prompt converts to the Anthropic payload.

    The system message is lifted out of ``messages`` into the top-level
    ``system`` field, as the Anthropic Messages API requires.
    """
    invoked = chat_prompt_template.invoke({"question": "What is the meaning of life?"})
    res = convert_prompt_to_anthropic_format(invoked, {"model_name": "claude-2"})
    assert res == {
        "model": "claude-2",
        "max_tokens": 1024,
        "messages": [{"role": "user", "content": "What is the meaning of life?"}],
        "system": "You are a chatbot",
    }
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_client.py | """LangSmith langchain_client Integration Tests."""
import datetime
import io
import logging
import os
import random
import string
import sys
import time
import uuid
from datetime import timedelta
from typing import Any, Callable, Dict
from unittest import mock
from uuid import uuid4
import pytest
from freezegun import freeze_time
from pydantic import BaseModel
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from langsmith.client import ID_TYPE, Client
from langsmith.schemas import DataType
from langsmith.utils import (
LangSmithConnectionError,
LangSmithError,
get_env_var,
)
logger = logging.getLogger(__name__)
def wait_for(
    condition: Callable[[], bool], max_sleep_time: int = 120, sleep_time: int = 3
):
    """Poll ``condition`` until it returns truthy or ``max_sleep_time`` elapses.

    Args:
        condition: Zero-argument callable; exceptions it raises are treated
            the same as a falsy result (transient failures while polling).
        max_sleep_time: Total seconds to keep polling before giving up.
        sleep_time: Seconds to sleep between polls.

    Raises:
        ValueError: If the condition never returned truthy in time.
    """
    start_time = time.time()
    while time.time() - start_time < max_sleep_time:
        try:
            if condition():
                return
        except Exception:
            pass
        # Sleep after a falsy result as well as after an exception. The
        # previous version slept only in the except branch, so a condition
        # that simply returned False busy-spun for the whole window.
        time.sleep(sleep_time)
    total_time = time.time() - start_time
    raise ValueError(f"Callable did not return within {total_time}")
@pytest.fixture
def langchain_client() -> Client:
    """Fresh Client; clears the cached env lookup so tests see current vars."""
    get_env_var.cache_clear()
    return Client()
def test_datasets(langchain_client: Client) -> None:
    """Test datasets.

    Full CRUD pass: upload a CSV-backed dataset (which creates one example
    from the CSV row), then create, read, update (single and bulk), and
    delete examples, checking counts along the way.
    """
    csv_content = "col1,col2\nval1,val2"
    blob_data = io.BytesIO(csv_content.encode("utf-8"))
    description = "Test Dataset"
    input_keys = ["col1"]
    output_keys = ["col2"]
    filename = "".join(random.sample(string.ascii_lowercase, 10)) + ".csv"
    new_dataset = langchain_client.upload_csv(
        csv_file=(filename, blob_data),
        description=description,
        input_keys=input_keys,
        output_keys=output_keys,
    )
    assert new_dataset.id is not None
    assert new_dataset.description == description
    dataset = langchain_client.read_dataset(dataset_id=new_dataset.id)
    dataset_id = dataset.id
    dataset2 = langchain_client.read_dataset(dataset_id=dataset_id)
    assert dataset.id == dataset2.id
    datasets = list(langchain_client.list_datasets())
    assert len(datasets) > 0
    assert dataset_id in [dataset.id for dataset in datasets]
    # Test Example CRD
    example = langchain_client.create_example(
        inputs={"col1": "addedExampleCol1"},
        outputs={"col2": "addedExampleCol2"},
        dataset_id=new_dataset.id,
    )
    example_value = langchain_client.read_example(example.id)
    assert example_value.inputs is not None
    assert example_value.inputs["col1"] == "addedExampleCol1"
    assert example_value.outputs is not None
    assert example_value.outputs["col2"] == "addedExampleCol2"
    examples = list(
        langchain_client.list_examples(dataset_id=new_dataset.id)  # type: ignore
    )
    # 2 = one example from the uploaded CSV row + the one created above.
    assert len(examples) == 2
    assert example.id in [example.id for example in examples]
    langchain_client.update_example(
        example_id=example.id,
        inputs={"col1": "updatedExampleCol1"},
        outputs={"col2": "updatedExampleCol2"},
        metadata={"foo": "bar"},
    )
    updated_example = langchain_client.read_example(example.id)
    assert updated_example.id == example.id
    updated_example_value = langchain_client.read_example(updated_example.id)
    assert updated_example_value.inputs["col1"] == "updatedExampleCol1"
    assert updated_example_value.outputs is not None
    assert updated_example_value.outputs["col2"] == "updatedExampleCol2"
    assert (updated_example_value.metadata or {}).get("foo") == "bar"
    new_example = langchain_client.create_example(
        inputs={"col1": "newAddedExampleCol1"},
        outputs={"col2": "newAddedExampleCol2"},
        dataset_id=new_dataset.id,
    )
    example_value = langchain_client.read_example(new_example.id)
    assert example_value.inputs is not None
    assert example_value.inputs["col1"] == "newAddedExampleCol1"
    assert example_value.outputs is not None
    assert example_value.outputs["col2"] == "newAddedExampleCol2"
    # Bulk update: lists are positional, matching example_ids order.
    langchain_client.update_examples(
        example_ids=[new_example.id, example.id],
        inputs=[{"col1": "newUpdatedExampleCol1"}, {"col1": "newNewUpdatedExampleCol"}],
        outputs=[
            {"col2": "newUpdatedExampleCol2"},
            {"col2": "newNewUpdatedExampleCol2"},
        ],
        metadata=[{"foo": "baz"}, {"foo": "qux"}],
    )
    updated_example = langchain_client.read_example(new_example.id)
    assert updated_example.id == new_example.id
    assert updated_example.inputs["col1"] == "newUpdatedExampleCol1"
    assert updated_example.outputs is not None
    assert updated_example.outputs["col2"] == "newUpdatedExampleCol2"
    assert (updated_example.metadata or {}).get("foo") == "baz"
    updated_example = langchain_client.read_example(example.id)
    assert updated_example.id == example.id
    assert updated_example.inputs["col1"] == "newNewUpdatedExampleCol"
    assert updated_example.outputs is not None
    assert updated_example.outputs["col2"] == "newNewUpdatedExampleCol2"
    assert (updated_example.metadata or {}).get("foo") == "qux"
    langchain_client.delete_example(example.id)
    examples2 = list(
        langchain_client.list_examples(dataset_id=new_dataset.id)  # type: ignore
    )
    # CSV example + new_example remain after deleting `example`.
    assert len(examples2) == 2
    langchain_client.delete_dataset(dataset_id=dataset_id)
def test_list_examples(langchain_client: Client) -> None:
    """Test list_examples.

    Exercises pagination (offset/limit), split filtering (single and multi),
    moving an example between splits, and metadata/filter-expression queries.
    """
    # (text, label, split) triples; split may be a list, a string, or None.
    examples = [
        ("Shut up, idiot", "Toxic", ["train", "validation"]),
        ("You're a wonderful person", "Not toxic", "test"),
        ("This is the worst thing ever", "Toxic", ["train"]),
        ("I had a great day today", "Not toxic", "test"),
        ("Nobody likes you", "Toxic", "train"),
        ("This is unacceptable. I want to speak to the manager.", "Not toxic", None),
    ]
    dataset_name = "__test_list_examples" + uuid4().hex[:4]
    dataset = langchain_client.create_dataset(dataset_name=dataset_name)
    inputs, outputs, splits = zip(
        *[({"text": text}, {"label": label}, split) for text, label, split in examples]
    )
    langchain_client.create_examples(
        inputs=inputs, outputs=outputs, splits=splits, dataset_id=dataset.id
    )
    example_list = list(langchain_client.list_examples(dataset_id=dataset.id))
    assert len(example_list) == len(examples)
    example_list = list(
        langchain_client.list_examples(dataset_id=dataset.id, offset=1, limit=2)
    )
    assert len(example_list) == 2
    example_list = list(langchain_client.list_examples(dataset_id=dataset.id, offset=1))
    assert len(example_list) == len(examples) - 1
    # Split filters: "train" appears in 3 of the seeded examples.
    example_list = list(
        langchain_client.list_examples(dataset_id=dataset.id, splits=["train"])
    )
    assert len(example_list) == 3
    example_list = list(
        langchain_client.list_examples(dataset_id=dataset.id, splits=["validation"])
    )
    assert len(example_list) == 1
    example_list = list(
        langchain_client.list_examples(dataset_id=dataset.id, splits=["test"])
    )
    assert len(example_list) == 2
    # Multi-split filter is a union: train (3) + test (2) = 5.
    example_list = list(
        langchain_client.list_examples(dataset_id=dataset.id, splits=["train", "test"])
    )
    assert len(example_list) == 5
    # Move one "test" example into "train" and re-check the counts.
    langchain_client.update_example(
        example_id=[
            example.id
            for example in example_list
            if example.metadata is not None
            and "test" in example.metadata.get("dataset_split", [])
        ][0],
        split="train",
    )
    example_list = list(
        langchain_client.list_examples(dataset_id=dataset.id, splits=["test"])
    )
    assert len(example_list) == 1
    example_list = list(
        langchain_client.list_examples(dataset_id=dataset.id, splits=["train"])
    )
    assert len(example_list) == 4
    # Add an example with custom metadata for the metadata/filter queries.
    langchain_client.create_example(
        inputs={"text": "What's up!"},
        outputs={"label": "Not toxic"},
        metadata={"foo": "bar", "baz": "qux"},
        dataset_name=dataset_name,
    )
    example_list = list(langchain_client.list_examples(dataset_id=dataset.id))
    assert len(example_list) == len(examples) + 1
    example_list = list(
        langchain_client.list_examples(dataset_id=dataset.id, metadata={"foo": "bar"})
    )
    assert len(example_list) == 1
    example_list = list(
        langchain_client.list_examples(dataset_id=dataset.id, metadata={"baz": "qux"})
    )
    assert len(example_list) == 1
    example_list = list(
        langchain_client.list_examples(
            dataset_id=dataset.id, metadata={"foo": "bar", "baz": "qux"}
        )
    )
    assert len(example_list) == 1
    # A partially wrong metadata dict must match nothing.
    example_list = list(
        langchain_client.list_examples(
            dataset_id=dataset.id, metadata={"foo": "bar", "baz": "quux"}
        )
    )
    assert len(example_list) == 0
    # Filter-expression queries over metadata.
    example_list = list(
        langchain_client.list_examples(
            dataset_id=dataset.id, filter='exists(metadata, "baz")'
        )
    )
    assert len(example_list) == 1
    example_list = list(
        langchain_client.list_examples(
            dataset_id=dataset.id, filter='has("metadata", \'{"foo": "bar"}\')'
        )
    )
    assert len(example_list) == 1
    example_list = list(
        langchain_client.list_examples(
            dataset_id=dataset.id, filter='exists(metadata, "bazzz")'
        )
    )
    assert len(example_list) == 0
    langchain_client.delete_dataset(dataset_id=dataset.id)
@pytest.mark.slow
def test_similar_examples(langchain_client: Client) -> None:
    """Similarity search over an indexed dataset returns the requested limit."""
    inputs = [{"text": "how are you"}, {"text": "good bye"}, {"text": "see ya later"}]
    outputs = [
        {"response": "good how are you"},
        {"response": "ta ta"},
        {"response": "tootles"},
    ]
    dataset_name = "__test_similar_examples" + uuid4().hex[:4]
    # JSON Schemas constrain the shape of example inputs/outputs.
    dataset = langchain_client.create_dataset(
        dataset_name=dataset_name,
        inputs_schema={
            "$schema": "http://json-schema.org/draft-07/schema#",
            "type": "object",
            "properties": {
                "text": {"type": "string"},
            },
            "required": ["text"],
            "additionalProperties": False,
        },
        outputs_schema={
            "$schema": "http://json-schema.org/draft-07/schema#",
            "type": "object",
            "properties": {
                "response": {"type": "string"},
            },
            "required": ["response"],
            "additionalProperties": False,
        },
    )
    langchain_client.create_examples(
        inputs=inputs, outputs=outputs, dataset_id=dataset.id
    )
    langchain_client.index_dataset(dataset_id=dataset.id)
    # Need to wait for indexing to finish.
    time.sleep(5)
    similar_list = langchain_client.similar_examples(
        {"text": "howdy"}, limit=2, dataset_id=dataset.id
    )
    assert len(similar_list) == 2
    langchain_client.delete_dataset(dataset_id=dataset.id)
@pytest.mark.skip(reason="This test is flaky")
def test_persist_update_run(langchain_client: Client) -> None:
    """Test the persist and update methods work as expected."""
    project_name = "__test_persist_update_run" + uuid4().hex[:4]
    # Start from a clean project in case a previous run left one behind.
    if langchain_client.has_project(project_name):
        langchain_client.delete_project(project_name=project_name)
    try:
        start_time = datetime.datetime.now()
        revision_id = uuid4()
        run: dict = dict(
            id=uuid4(),
            name="test_run",
            run_type="llm",
            inputs={"text": "hello world"},
            project_name=project_name,
            api_url=os.getenv("LANGCHAIN_ENDPOINT"),
            start_time=start_time,
            extra={"extra": "extra"},
            revision_id=revision_id,
        )
        langchain_client.create_run(**run)
        # Mutate the payload and push the changes as an update.
        run["outputs"] = {"output": ["Hi"]}
        run["extra"]["foo"] = "bar"
        run["name"] = "test_run_updated"
        langchain_client.update_run(run["id"], **run)
        # Ingestion is eventually consistent; wait until the run is complete.
        wait_for(lambda: langchain_client.read_run(run["id"]).end_time is not None)
        stored_run = langchain_client.read_run(run["id"])
        assert stored_run.name == run["name"]
        assert stored_run.id == run["id"]
        assert stored_run.outputs == run["outputs"]
        assert stored_run.start_time == run["start_time"]
        # The server stores revision ids as strings.
        assert stored_run.revision_id == str(revision_id)
    finally:
        langchain_client.delete_project(project_name=project_name)
@pytest.mark.parametrize("uri", ["http://localhost:1981", "http://api.langchain.minus"])
def test_error_surfaced_invalid_uri(uri: str) -> None:
get_env_var.cache_clear()
client = Client(api_url=uri, api_key="test")
# expect connect error
with pytest.raises(LangSmithConnectionError):
client.create_run("My Run", inputs={"text": "hello world"}, run_type="llm")
def test_create_dataset(langchain_client: Client) -> None:
dataset_name = "__test_create_dataset" + uuid4().hex[:4]
if langchain_client.has_dataset(dataset_name=dataset_name):
langchain_client.delete_dataset(dataset_name=dataset_name)
dataset = langchain_client.create_dataset(dataset_name, data_type=DataType.llm)
ground_truth = "bcde"
example = langchain_client.create_example(
inputs={"input": "hello world"},
outputs={"output": ground_truth},
dataset_id=dataset.id,
)
initial_version = example.modified_at
loaded_dataset = langchain_client.read_dataset(dataset_name=dataset_name)
assert loaded_dataset.data_type == DataType.llm
example_2 = langchain_client.create_example(
inputs={"input": "hello world 2"},
outputs={"output": "fghi"},
dataset_id=dataset.id,
)
langchain_client.update_example(
example_id=example.id,
inputs={"input": "hello world"},
outputs={"output": "bcde"},
)
initial_examples = list(
langchain_client.list_examples(dataset_id=dataset.id, as_of=initial_version)
)
assert len(initial_examples) == 1
latest_examples = list(langchain_client.list_examples(dataset_id=dataset.id))
assert len(latest_examples) == 2
latest_tagged_examples = list(
langchain_client.list_examples(dataset_id=dataset.id, as_of="latest")
)
assert len(latest_tagged_examples) == 2
assert latest_tagged_examples == latest_examples
diffs = langchain_client.diff_dataset_versions(
loaded_dataset.id, from_version=initial_version, to_version="latest"
)
assert diffs.examples_added == [example_2.id]
assert diffs.examples_removed == []
assert diffs.examples_modified == [example.id]
langchain_client.delete_dataset(dataset_id=dataset.id)
def test_dataset_schema_validation(langchain_client: Client) -> None:
dataset_name = "__test_create_dataset" + uuid4().hex[:4]
if langchain_client.has_dataset(dataset_name=dataset_name):
langchain_client.delete_dataset(dataset_name=dataset_name)
class InputSchema(BaseModel):
input: str
class OutputSchema(BaseModel):
output: str
dataset = langchain_client.create_dataset(
dataset_name,
data_type=DataType.kv,
inputs_schema=InputSchema.model_json_schema(),
outputs_schema=OutputSchema.model_json_schema(),
)
# confirm we store the schema from the create request
assert dataset.inputs_schema == InputSchema.model_json_schema()
assert dataset.outputs_schema == OutputSchema.model_json_schema()
# create an example that matches the schema, which should succeed
langchain_client.create_example(
inputs={"input": "hello world"},
outputs={"output": "hello"},
dataset_id=dataset.id,
)
# create an example that does not match the input schema
with pytest.raises(LangSmithError):
langchain_client.create_example(
inputs={"john": 1},
outputs={"output": "hello"},
dataset_id=dataset.id,
)
# create an example that does not match the output schema
with pytest.raises(LangSmithError):
langchain_client.create_example(
inputs={"input": "hello world"},
outputs={"john": 1},
dataset_id=dataset.id,
)
# assert read API includes the schema definition
read_dataset = langchain_client.read_dataset(dataset_id=dataset.id)
assert read_dataset.inputs_schema == InputSchema.model_json_schema()
assert read_dataset.outputs_schema == OutputSchema.model_json_schema()
langchain_client.delete_dataset(dataset_id=dataset.id)
@freeze_time("2023-01-01")
def test_list_datasets(langchain_client: Client) -> None:
ds1n = "__test_list_datasets1" + uuid4().hex[:4]
ds2n = "__test_list_datasets2" + uuid4().hex[:4]
try:
dataset1 = langchain_client.create_dataset(
ds1n, data_type=DataType.llm, metadata={"foo": "barqux"}
)
dataset2 = langchain_client.create_dataset(ds2n, data_type=DataType.kv)
assert dataset1.url is not None
assert dataset2.url is not None
datasets = list(
langchain_client.list_datasets(dataset_ids=[dataset1.id, dataset2.id])
)
assert len(datasets) == 2
assert dataset1.id in [dataset.id for dataset in datasets]
assert dataset2.id in [dataset.id for dataset in datasets]
assert dataset1.data_type == DataType.llm
assert dataset2.data_type == DataType.kv
# Sub-filter on data type
datasets = list(langchain_client.list_datasets(data_type=DataType.llm.value))
assert len(datasets) > 0
assert dataset1.id in {dataset.id for dataset in datasets}
# Sub-filter on name
datasets = list(
langchain_client.list_datasets(
dataset_ids=[dataset1.id, dataset2.id], dataset_name=ds1n
)
)
assert len(datasets) == 1
# Sub-filter on metadata
datasets = list(
langchain_client.list_datasets(
dataset_ids=[dataset1.id, dataset2.id], metadata={"foo": "barqux"}
)
)
assert len(datasets) == 1
finally:
# Delete datasets
for name in [ds1n, ds2n]:
try:
langchain_client.delete_dataset(dataset_name=name)
except LangSmithError:
pass
@pytest.mark.skip(reason="This test is flaky")
def test_create_run_with_masked_inputs_outputs(
langchain_client: Client, monkeypatch: pytest.MonkeyPatch
) -> None:
project_name = "__test_create_run_with_masked_inputs_outputs" + uuid4().hex[:4]
monkeypatch.setenv("LANGCHAIN_HIDE_INPUTS", "true")
monkeypatch.setenv("LANGCHAIN_HIDE_OUTPUTS", "true")
if langchain_client.has_project(project_name):
langchain_client.delete_project(project_name=project_name)
try:
run_id = uuid4()
langchain_client.create_run(
id=run_id,
project_name=project_name,
name="test_run",
run_type="llm",
inputs={"prompt": "hello world"},
outputs={"generation": "hi there"},
start_time=datetime.datetime.now(datetime.timezone.utc),
end_time=datetime.datetime.now(datetime.timezone.utc),
hide_inputs=True,
hide_outputs=True,
)
run_id2 = uuid4()
langchain_client.create_run(
id=run_id2,
project_name=project_name,
name="test_run_2",
run_type="llm",
inputs={"messages": "hello world 2"},
start_time=datetime.datetime.now(datetime.timezone.utc),
hide_inputs=True,
)
langchain_client.update_run(
run_id2,
outputs={"generation": "hi there 2"},
end_time=datetime.datetime.now(datetime.timezone.utc),
hide_outputs=True,
)
wait_for(lambda: langchain_client.read_run(run_id).end_time is not None)
stored_run = langchain_client.read_run(run_id)
assert "hello" not in str(stored_run.inputs)
assert stored_run.outputs is not None
assert "hi" not in str(stored_run.outputs)
wait_for(lambda: langchain_client.read_run(run_id2).end_time is not None)
stored_run2 = langchain_client.read_run(run_id2)
assert "hello" not in str(stored_run2.inputs)
assert stored_run2.outputs is not None
assert "hi" not in str(stored_run2.outputs)
finally:
langchain_client.delete_project(project_name=project_name)
@freeze_time("2023-01-01")
def test_create_chat_example(
monkeypatch: pytest.MonkeyPatch, langchain_client: Client
) -> None:
from langchain.schema import FunctionMessage, HumanMessage
dataset_name = "__createChatExample-test-dataset"
try:
existing_dataset = langchain_client.read_dataset(dataset_name=dataset_name)
langchain_client.delete_dataset(dataset_id=existing_dataset.id)
except LangSmithError:
# If the dataset doesn't exist,
pass
dataset = langchain_client.create_dataset(dataset_name)
input = [HumanMessage(content="Hello, world!")]
generation = FunctionMessage(
name="foo",
content="",
additional_kwargs={"function_call": {"arguments": "args", "name": "foo"}},
)
# Create the example from messages
langchain_client.create_chat_example(input, generation, dataset_id=dataset.id)
# Read the example
examples = []
for example in langchain_client.list_examples(dataset_id=dataset.id):
examples.append(example)
assert len(examples) == 1
assert examples[0].inputs == {
"input": [
{
"type": "human",
"data": {"content": "Hello, world!"},
},
],
}
assert examples[0].outputs == {
"output": {
"type": "function",
"data": {
"content": "",
"additional_kwargs": {
"function_call": {"arguments": "args", "name": "foo"}
},
},
},
}
langchain_client.delete_dataset(dataset_id=dataset.id)
@pytest.mark.parametrize("use_multipart_endpoint", [True, False])
def test_batch_ingest_runs(
langchain_client: Client, use_multipart_endpoint: bool
) -> None:
_session = "__test_batch_ingest_runs"
trace_id = uuid4()
trace_id_2 = uuid4()
run_id_2 = uuid4()
current_time = datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S%fZ"
)
later_time = (
datetime.datetime.now(datetime.timezone.utc) + timedelta(seconds=1)
).strftime("%Y%m%dT%H%M%S%fZ")
"""
Here we create:
- run 1: a top level trace with inputs and outputs
- run 3: a top level trace with an error with inputs and outputs
- run 2: a child of run 1 with inputs, no outputs
and we update:
- run 2 (the child): to add outputs
"""
runs_to_create = [
{
"id": str(trace_id),
"session_name": _session,
"name": "run 1",
"run_type": "chain",
"dotted_order": f"{current_time}{str(trace_id)}",
"trace_id": str(trace_id),
"inputs": {"input1": 1, "input2": 2},
"outputs": {"output1": 3, "output2": 4},
},
{
"id": str(trace_id_2),
"session_name": _session,
"name": "run 3",
"run_type": "chain",
"dotted_order": f"{current_time}{str(trace_id_2)}",
"trace_id": str(trace_id_2),
"inputs": {"input1": 1, "input2": 2},
"error": "error",
},
{
"id": str(run_id_2),
"session_name": _session,
"name": "run 2",
"run_type": "chain",
"dotted_order": f"{current_time}{str(trace_id)}."
f"{later_time}{str(run_id_2)}",
"trace_id": str(trace_id),
"parent_run_id": str(trace_id),
"inputs": {"input1": 5, "input2": 6},
},
]
runs_to_update = [
{
"id": str(run_id_2),
"dotted_order": f"{current_time}{str(trace_id)}."
f"{later_time}{str(run_id_2)}",
"trace_id": str(trace_id),
"parent_run_id": str(trace_id),
"outputs": {"output1": 4, "output2": 5},
},
]
if use_multipart_endpoint:
langchain_client.multipart_ingest(create=runs_to_create, update=runs_to_update)
else:
langchain_client.batch_ingest_runs(create=runs_to_create, update=runs_to_update)
runs = []
wait = 4
for _ in range(15):
try:
runs = list(
langchain_client.list_runs(
project_name=_session,
run_ids=[str(trace_id), str(run_id_2), str(trace_id_2)],
)
)
if len(runs) == 3:
break
raise LangSmithError("Runs not created yet")
except LangSmithError:
time.sleep(wait)
wait += 1
else:
raise ValueError("Runs not created in time")
assert len(runs) == 3
# Write all the assertions here
assert len(runs) == 3
# Assert inputs and outputs of run 1
run1 = next(run for run in runs if run.id == trace_id)
assert run1.inputs == {"input1": 1, "input2": 2}
assert run1.outputs == {"output1": 3, "output2": 4}
# Assert inputs and outputs of run 2
run2 = next(run for run in runs if run.id == run_id_2)
assert run2.inputs == {"input1": 5, "input2": 6}
assert run2.outputs == {"output1": 4, "output2": 5}
# Assert inputs and outputs of run 3
run3 = next(run for run in runs if run.id == trace_id_2)
assert run3.inputs == {"input1": 1, "input2": 2}
assert run3.error == "error"
def test_multipart_ingest_empty(
langchain_client: Client, caplog: pytest.LogCaptureFixture
) -> None:
runs_to_create: list[dict] = []
runs_to_update: list[dict] = []
# make sure no warnings logged
with caplog.at_level(logging.WARNING, logger="langsmith.client"):
langchain_client.multipart_ingest(create=runs_to_create, update=runs_to_update)
assert not caplog.records
def test_multipart_ingest_create_then_update(
langchain_client: Client, caplog: pytest.LogCaptureFixture
) -> None:
_session = "__test_multipart_ingest_create_then_update"
trace_a_id = uuid4()
current_time = datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S%fZ"
)
runs_to_create: list[dict] = [
{
"id": str(trace_a_id),
"session_name": _session,
"name": "trace a root",
"run_type": "chain",
"dotted_order": f"{current_time}{str(trace_a_id)}",
"trace_id": str(trace_a_id),
"inputs": {"input1": 1, "input2": 2},
}
]
# make sure no warnings logged
with caplog.at_level(logging.WARNING, logger="langsmith.client"):
langchain_client.multipart_ingest(create=runs_to_create, update=[])
assert not caplog.records
runs_to_update: list[dict] = [
{
"id": str(trace_a_id),
"dotted_order": f"{current_time}{str(trace_a_id)}",
"trace_id": str(trace_a_id),
"outputs": {"output1": 3, "output2": 4},
}
]
with caplog.at_level(logging.WARNING, logger="langsmith.client"):
langchain_client.multipart_ingest(create=[], update=runs_to_update)
assert not caplog.records
def test_multipart_ingest_update_then_create(
langchain_client: Client, caplog: pytest.LogCaptureFixture
) -> None:
_session = "__test_multipart_ingest_update_then_create"
trace_a_id = uuid4()
current_time = datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S%fZ"
)
runs_to_update: list[dict] = [
{
"id": str(trace_a_id),
"dotted_order": f"{current_time}{str(trace_a_id)}",
"trace_id": str(trace_a_id),
"outputs": {"output1": 3, "output2": 4},
}
]
# make sure no warnings logged
with caplog.at_level(logging.WARNING, logger="langsmith.client"):
langchain_client.multipart_ingest(create=[], update=runs_to_update)
assert not caplog.records
runs_to_create: list[dict] = [
{
"id": str(trace_a_id),
"session_name": _session,
"name": "trace a root",
"run_type": "chain",
"dotted_order": f"{current_time}{str(trace_a_id)}",
"trace_id": str(trace_a_id),
"inputs": {"input1": 1, "input2": 2},
}
]
with caplog.at_level(logging.WARNING, logger="langsmith.client"):
langchain_client.multipart_ingest(create=runs_to_create, update=[])
assert not caplog.records
def test_multipart_ingest_create_wrong_type(
langchain_client: Client, caplog: pytest.LogCaptureFixture
) -> None:
_session = "__test_multipart_ingest_create_then_update"
trace_a_id = uuid4()
current_time = datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S%fZ"
)
runs_to_create: list[dict] = [
{
"id": str(trace_a_id),
"session_name": _session,
"name": "trace a root",
"run_type": "agent",
"dotted_order": f"{current_time}{str(trace_a_id)}",
"trace_id": str(trace_a_id),
"inputs": {"input1": 1, "input2": 2},
}
]
# make sure no warnings logged
with caplog.at_level(logging.WARNING, logger="langsmith.client"):
langchain_client.multipart_ingest(create=runs_to_create, update=[])
# this should 422
assert len(caplog.records) == 1, "Should get 1 warning for 422, not retried"
assert all("422" in record.message for record in caplog.records)
@freeze_time("2023-01-01")
def test_get_info() -> None:
langchain_client = Client(api_key="not-a-real-key")
info = langchain_client.info
assert info
assert info.version is not None # type: ignore
assert info.batch_ingest_config is not None # type: ignore
assert info.batch_ingest_config["size_limit"] > 0 # type: ignore
@pytest.mark.skip(reason="This test is flaky")
@pytest.mark.parametrize("add_metadata", [True, False])
@pytest.mark.parametrize("do_batching", [True, False])
def test_update_run_extra(add_metadata: bool, do_batching: bool) -> None:
langchain_client = Client()
run_id = uuid4()
run: Dict[str, Any] = {
"id": run_id,
"name": "run 1",
"start_time": datetime.datetime.now(datetime.timezone.utc),
"run_type": "chain",
"inputs": {"input1": 1, "input2": 2},
"outputs": {"output1": 3, "output2": 4},
"extra": {
"metadata": {
"foo": "bar",
}
},
"tags": ["tag1", "tag2"],
}
if do_batching:
run["trace_id"] = run_id
dotted_order = run["start_time"].strftime("%Y%m%dT%H%M%S%fZ") + str(run_id) # type: ignore
run["dotted_order"] = dotted_order
revision_id = uuid4()
langchain_client.create_run(**run, revision_id=revision_id) # type: ignore
def _get_run(run_id: ID_TYPE, has_end: bool = False) -> bool:
try:
r = langchain_client.read_run(run_id) # type: ignore
if has_end:
return r.end_time is not None
return True
except LangSmithError:
return False
wait_for(lambda: _get_run(run_id))
created_run = langchain_client.read_run(run_id)
assert created_run.metadata["foo"] == "bar"
assert created_run.metadata["revision_id"] == str(revision_id)
# Update the run
if add_metadata:
run["extra"]["metadata"]["foo2"] = "baz" # type: ignore
run["tags"] = ["tag3"]
langchain_client.update_run(run_id, **run) # type: ignore
wait_for(lambda: _get_run(run_id, has_end=True))
updated_run = langchain_client.read_run(run_id)
assert updated_run.metadata["foo"] == "bar" # type: ignore
assert updated_run.revision_id == str(revision_id)
if add_metadata:
updated_run.metadata["foo2"] == "baz" # type: ignore
assert updated_run.tags == ["tag3"]
else:
assert updated_run.tags == ["tag1", "tag2"]
assert updated_run.extra["runtime"] == created_run.extra["runtime"] # type: ignore
def test_surrogates():
chars = "".join(chr(cp) for cp in range(0, sys.maxunicode + 1))
trans_table = str.maketrans("", "", "")
all_chars = chars.translate(trans_table)
langchain_client = Client()
langchain_client.create_run(
name="test_run",
inputs={
"text": [
"Hello\ud83d\ude00",
"Python\ud83d\udc0d",
"Surrogate\ud834\udd1e",
"Example\ud83c\udf89",
"String\ud83c\udfa7",
"With\ud83c\udf08",
"Surrogates\ud83d\ude0e",
"Embedded\ud83d\udcbb",
"In\ud83c\udf0e",
"The\ud83d\udcd6",
"Text\ud83d\udcac",
"收花🙄·到",
]
},
run_type="llm",
end_time=datetime.datetime.now(datetime.timezone.utc),
)
langchain_client.create_run(
name="test_run",
inputs={
"text": all_chars,
},
run_type="llm",
end_time=datetime.datetime.now(datetime.timezone.utc),
)
def test_runs_stats():
langchain_client = Client()
# We always have stuff in the "default" project...
stats = langchain_client.get_run_stats(project_names=["default"], run_type="llm")
assert stats
def test_slow_run_read_multipart(
langchain_client: Client, caplog: pytest.LogCaptureFixture
):
myobj = {f"key_{i}": f"val_{i}" for i in range(500)}
id_ = str(uuid.uuid4())
current_time = datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S%fZ"
)
run_to_create = {
"id": id_,
"session_name": "default",
"name": "trace a root",
"run_type": "chain",
"dotted_order": f"{current_time}{id_}",
"trace_id": id_,
"inputs": myobj,
}
class CB:
def __init__(self):
self.called = 0
self.start_time = None
def __call__(self, monitor: MultipartEncoderMonitor):
self.called += 1
if not self.start_time:
self.start_time = time.time()
logger.debug(
f"[{self.called}]: {monitor.bytes_read} bytes,"
f" {time.time() - self.start_time:.2f} seconds"
" elapsed",
)
if self.called == 1:
time.sleep(6)
def create_encoder(*args, **kwargs):
encoder = MultipartEncoder(*args, **kwargs)
encoder = MultipartEncoderMonitor(encoder, CB())
return encoder
with caplog.at_level(logging.WARNING, logger="langsmith.client"):
with mock.patch(
"langsmith.client.rqtb_multipart.MultipartEncoder", create_encoder
):
langchain_client.create_run(**run_to_create)
time.sleep(1)
start_time = time.time()
while time.time() - start_time < 8:
myobj["key_1"]
assert not caplog.records
def test_examples_length_validation(langchain_client: Client) -> None:
"""Test that mismatched lengths raise ValueError for create and update examples."""
dataset_name = "__test_examples_length_validation" + uuid4().hex[:4]
dataset = langchain_client.create_dataset(dataset_name=dataset_name)
# Test create_examples validation
inputs = [{"text": "hello"}, {"text": "world"}]
outputs = [{"response": "hi"}] # One less than inputs
with pytest.raises(ValueError) as exc_info:
langchain_client.create_examples(
inputs=inputs, outputs=outputs, dataset_id=dataset.id
)
assert "Length of outputs (1) does not match length of inputs (2)" in str(
exc_info.value
)
# Create some valid examples for testing update
langchain_client.create_examples(
inputs=[{"text": "hello"}, {"text": "world"}],
outputs=[{"response": "hi"}, {"response": "earth"}],
dataset_id=dataset.id,
)
example_ids = [
example.id for example in langchain_client.list_examples(dataset_id=dataset.id)
]
# Test update_examples validation
with pytest.raises(ValueError) as exc_info:
langchain_client.update_examples(
example_ids=example_ids,
inputs=[{"text": "new hello"}], # One less than example_ids
outputs=[{"response": "new hi"}, {"response": "new earth"}],
)
assert "Length of inputs (1) does not match length of examples (2)" in str(
exc_info.value
)
# Clean up
langchain_client.delete_dataset(dataset_id=dataset.id)
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_llm_evaluator.py | import pytest
from langsmith import Client, aevaluate, evaluate
from langsmith.evaluation.llm_evaluator import (
CategoricalScoreConfig,
ContinuousScoreConfig,
LLMEvaluator,
)
def test_llm_evaluator_init() -> None:
evaluator = LLMEvaluator(
prompt_template="Is the response vague? Y/N\n{input}",
score_config=CategoricalScoreConfig(
key="vagueness",
choices=["Y", "N"],
description="Whether the response is vague. Y for yes, N for no.",
include_explanation=True,
),
)
assert evaluator is not None
assert evaluator.prompt.input_variables == ["input"]
assert evaluator.score_schema == {
"title": "vagueness",
"description": "Whether the response is vague. Y for yes, N for no.",
"type": "object",
"properties": {
"score": {
"type": "string",
"enum": ["Y", "N"],
"description": "The score for the evaluation, one of Y, N.",
},
"explanation": {
"type": "string",
"description": "The explanation for the score.",
},
},
"required": ["score", "explanation"],
}
# Try a continuous score
evaluator = LLMEvaluator(
prompt_template="Rate the response from 0 to 1.\n{input}",
score_config=ContinuousScoreConfig(
key="rating",
description="The rating of the response, from 0 to 1.",
include_explanation=False,
),
)
assert evaluator is not None
assert evaluator.prompt.input_variables == ["input"]
assert evaluator.score_schema == {
"title": "rating",
"description": "The rating of the response, from 0 to 1.",
"type": "object",
"properties": {
"score": {
"type": "number",
"minimum": 0,
"maximum": 1,
"description": "The score for the evaluation, "
"between 0 and 1, inclusive.",
},
},
"required": ["score"],
}
# Test invalid model
with pytest.raises(ValueError):
LLMEvaluator(
prompt_template="Rate the response from 0 to 1.\n{input}",
score_config=ContinuousScoreConfig(
key="rating",
description="The rating of the response, from 0 to 1.",
include_explanation=False,
),
model_provider="invalid",
)
evaluator = LLMEvaluator(
prompt_template="Rate the response from 0 to 1.\n{input} {output} {expected}",
score_config=ContinuousScoreConfig(
key="rating",
description="The rating of the response, from 0 to 1.",
include_explanation=False,
),
)
assert evaluator is not None
assert set(evaluator.prompt.input_variables) == {"input", "output", "expected"}
with pytest.raises(ValueError):
# Test invalid input variable without map_variables
LLMEvaluator(
prompt_template="Rate the response from 0 to 1.\n{input} {output} {hello}",
score_config=ContinuousScoreConfig(
key="rating",
description="The rating of the response, from 0 to 1.",
include_explanation=False,
),
)
evaluator = LLMEvaluator(
prompt_template="Rate the response from 0 to 1.\n{input} {output} {hello}",
score_config=ContinuousScoreConfig(
key="rating",
description="The rating of the response, from 0 to 1.",
include_explanation=False,
),
map_variables=lambda run, example: {"hello": "world"},
)
assert evaluator is not None
assert set(evaluator.prompt.input_variables) == {"input", "output", "hello"}
def test_from_model() -> None:
from langchain_openai import ChatOpenAI
evaluator = LLMEvaluator.from_model(
ChatOpenAI(),
prompt_template="Rate the response from 0 to 1.\n{input}",
score_config=ContinuousScoreConfig(
key="rating",
description="The rating of the response, from 0 to 1.",
include_explanation=False,
),
)
assert evaluator is not None
assert evaluator.prompt.input_variables == ["input"]
assert evaluator.score_schema == {
"title": "rating",
"description": "The rating of the response, from 0 to 1.",
"type": "object",
"properties": {
"score": {
"type": "number",
"minimum": 0,
"maximum": 1,
"description": "The score for the evaluation, "
"between 0 and 1, inclusive.",
},
},
"required": ["score"],
}
async def test_evaluate() -> None:
client = Client()
client.clone_public_dataset(
"https://beta.smith.langchain.com/public/06785303-0f70-4466-b637-f23d38c0f28e/d"
)
dataset_name = "Evaluate Examples"
def predict(inputs: dict) -> dict:
return {"answer": "Yes"}
async def apredict(inputs: dict) -> dict:
return {"answer": "Yes"}
reference_accuracy = LLMEvaluator(
prompt_template="Is the output accurate with respect to the expected output? "
"Y/N\nOutput: {output}\nExpected: {expected}",
score_config=CategoricalScoreConfig(
key="reference_accuracy",
choices=["Y", "N"],
description="Whether the output is accurate with respect to "
"the expected output.",
include_explanation=False,
),
)
accuracy = LLMEvaluator(
prompt_template=[
(
"system",
"Is the output accurate with respect to the context and "
"question? Y/N",
),
("human", "Context: {context}\nQuestion: {question}\nOutput: {output}"),
],
score_config=CategoricalScoreConfig(
key="accuracy",
choices=["Y", "N"],
description="Whether the output is accurate with respect to "
"the context and question.",
include_explanation=True,
),
map_variables=lambda run, example: {
"context": example.inputs.get("context", "") if example else "",
"question": example.inputs.get("question", "") if example else "",
"output": run.outputs.get("output", "") if run.outputs else "",
},
model_provider="anthropic",
model_name="claude-3-haiku-20240307",
)
results = evaluate(
predict,
data=dataset_name,
evaluators=[reference_accuracy, accuracy],
experiment_prefix=__name__ + "::test_evaluate.evaluate",
)
results.wait()
await aevaluate(
apredict,
data=dataset_name,
evaluators=[reference_accuracy, accuracy],
experiment_prefix=__name__ + "::test_evaluate.aevaluate",
)
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_context_propagation.py | import asyncio
import pytest
from httpx import AsyncClient
from uvicorn import Config, Server
from langsmith import traceable
from langsmith.run_helpers import get_current_run_tree
from tests.integration_tests.fake_server import fake_app
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="module")
async def fake_server():
config = Config(app=fake_app, loop="asyncio", port=8000, log_level="info")
server = Server(config=config)
asyncio.create_task(server.serve())
await asyncio.sleep(0.1)
yield
try:
await server.shutdown()
except RuntimeError:
pass
@traceable
async def the_parent_function():
async with AsyncClient(app=fake_app, base_url="http://localhost:8000") as client:
headers = {}
if span := get_current_run_tree():
headers.update(span.to_headers())
response = await client.post("/fake-route", headers=headers)
assert response.status_code == 200
return response.json()
@traceable
async def the_root_function(foo: str):
return await the_parent_function()
@pytest.mark.asyncio
async def test_tracing_fake_server(fake_server):
result = await the_root_function(
"test input",
langsmith_extra={
"metadata": {"some-cool-value": 42},
"tags": ["did-propagate"],
"project_name": "distributed-tracing",
},
)
assert result["message"] == "Fake route response"
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/conftest.py | import pytest
def pytest_addoption(parser):
parser.addoption(
"--runslow", action="store_true", default=False, help="run slow tests"
)
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
|
0 | lc_public_repos/langsmith-sdk/python/tests | lc_public_repos/langsmith-sdk/python/tests/integration_tests/test_runs.py | import asyncio
import time
import uuid
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from typing import AsyncGenerator, Generator, Optional, Sequence
import pytest # type: ignore
from langsmith import utils as ls_utils
from langsmith.client import Client
from langsmith.run_helpers import trace, traceable
from langsmith.run_trees import RunTree
@pytest.fixture
def langchain_client() -> Generator[Client, None, None]:
yield Client()
def poll_runs_until_count(
langchain_client: Client,
project_name: str,
count: int,
max_retries: int = 10,
sleep_time: int = 2,
require_success: bool = True,
filter_: Optional[str] = None,
):
retries = 0
while retries < max_retries:
try:
runs = list(
langchain_client.list_runs(project_name=project_name, filter=filter_)
)
if len(runs) == count:
if not require_success or all(
[run.status == "success" for run in runs]
):
return runs
except ls_utils.LangSmithError:
pass
time.sleep(sleep_time)
retries += 1
raise AssertionError(f"Failed to get {count} runs after {max_retries} attempts.")
def test_nested_runs(
langchain_client: Client,
):
project_name = "__My Tracer Project - test_nested_runs"
run_meta = uuid.uuid4().hex
@traceable(run_type="chain")
def my_run(text: str):
my_llm_run(text)
return text
@traceable(run_type="llm")
def my_llm_run(text: str):
return f"Completed: {text}"
@traceable(run_type="chain", tags=["foo", "bar"]) # type: ignore
def my_chain_run(text: str):
return my_run(text)
my_chain_run(
"foo",
langsmith_extra=dict(
project_name=project_name, metadata={"test_run": run_meta}
),
)
for _ in range(15):
try:
runs = list(
langchain_client.list_runs(
project_name=project_name,
filter=f"and(eq(metadata_key,'test_run'),eq(metadata_value,'{run_meta}'))",
)
)
assert len(runs) == 3
break
except (ls_utils.LangSmithError, AssertionError):
time.sleep(1)
else:
raise AssertionError("Failed to get runs after 15 attempts.")
assert len(runs) == 3
runs_dict = {run.name: run for run in runs}
assert runs_dict["my_chain_run"].parent_run_id is None
assert runs_dict["my_chain_run"].run_type == "chain"
assert runs_dict["my_chain_run"].tags == ["foo", "bar"]
assert runs_dict["my_run"].parent_run_id == runs_dict["my_chain_run"].id
assert runs_dict["my_run"].run_type == "chain"
assert runs_dict["my_llm_run"].parent_run_id == runs_dict["my_run"].id
assert runs_dict["my_llm_run"].run_type == "llm"
assert runs_dict["my_llm_run"].inputs == {"text": "foo"}
async def test_list_runs_multi_project(langchain_client: Client):
project_names = [
"__My Tracer Project - test_list_runs_multi_project",
"__My Tracer Project - test_list_runs_multi_project2",
]
@traceable(run_type="chain")
async def my_run(text: str):
return "Completed: " + text
run_meta = uuid.uuid4().hex
for project_name in project_names:
await my_run(
"foo",
langsmith_extra=dict(
project_name=project_name, metadata={"test_run": run_meta}
),
)
filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))'
poll_runs_until_count(langchain_client, project_names[0], 1, filter_=filter_)
runs = list(
langchain_client.list_runs(
project_name=project_names,
filter=filter_,
)
)
assert len(runs) == 2
assert all([run.outputs["output"] == "Completed: foo" for run in runs]) # type: ignore
assert runs[0].session_id != runs[1].session_id
async def test_nested_async_runs(langchain_client: Client):
    """Test nested runs with a mix of async and sync functions.

    Traces a chain -> chain -> (llm, tool) tree and verifies all four runs
    are recorded with the expected parent/child links, run types, and inputs.
    """
    project_name = "__My Tracer Project - test_nested_async_runs"

    @traceable(run_type="chain")
    async def my_run(text: str):
        await my_llm_run(text)
        my_sync_tool(text, my_arg=20)
        return text

    @traceable(run_type="llm")
    async def my_llm_run(text: str):
        # The function needn't accept a run
        await asyncio.sleep(0.2)
        return f"Completed: {text}"

    @traceable(run_type="tool")
    def my_sync_tool(text: str, *, my_arg: int = 10):
        return f"Completed: {text} {my_arg}"

    @traceable(run_type="chain")  # type: ignore
    async def my_chain_run(text: str):
        return await my_run(text)

    # Unique marker so the metadata filter only matches this test's runs.
    # NOTE: the original created a ThreadPoolExecutor here that was never
    # submitted to; that dead executor (and its no-op shutdown) is removed.
    meta = uuid.uuid4().hex
    await my_chain_run(
        "foo",
        langsmith_extra=dict(project_name=project_name, metadata={"test_run": meta}),
    )
    _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))'
    # Expect exactly 4 runs: my_chain_run -> my_run -> (my_llm_run, my_sync_tool).
    poll_runs_until_count(langchain_client, project_name, 4, filter_=_filter)
    runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter))
    assert len(runs) == 4
    runs_dict = {run.name: run for run in runs}
    assert runs_dict["my_chain_run"].parent_run_id is None
    assert runs_dict["my_chain_run"].run_type == "chain"
    assert runs_dict["my_run"].parent_run_id == runs_dict["my_chain_run"].id
    assert runs_dict["my_run"].run_type == "chain"
    assert runs_dict["my_llm_run"].parent_run_id == runs_dict["my_run"].id
    assert runs_dict["my_llm_run"].run_type == "llm"
    assert runs_dict["my_llm_run"].inputs == {"text": "foo"}
    assert runs_dict["my_sync_tool"].parent_run_id == runs_dict["my_run"].id
    assert runs_dict["my_sync_tool"].run_type == "tool"
    assert runs_dict["my_sync_tool"].inputs == {
        "text": "foo",
        "my_arg": 20,
    }
async def test_nested_async_runs_with_threadpool(langchain_client: Client):
    """Test nested runs with a mix of async and sync functions."""
    # NOTE(review): "threadpol" in the project name looks like a typo, but it
    # is only a project label — changing it would orphan existing projects.
    project_name = "__My Tracer Project - test_nested_async_runs_with_threadpol"

    @traceable(run_type="llm")
    async def async_llm(text: str):
        return f"Baby LLM: {text}"

    @traceable(run_type="llm")
    def my_llm_run(text: str):
        # The function needn't accept a run
        return f"Completed: {text}"

    @traceable(run_type="tool")
    def my_tool_run(text: str):
        # Runs the async LLM to completion on a fresh event loop; this is
        # executed inside thread-pool workers, which have no running loop.
        val = asyncio.run(async_llm(text))
        return f"Completed: {text} - val: {val}"

    @traceable(run_type="chain")
    def my_run(text: str, *, run_tree: Optional[RunTree] = None):
        # One direct llm child, plus 3 tool children fanned out to a worker
        # thread; the parent run_tree is threaded through langsmith_extra so
        # the cross-thread runs attach to the right parent.
        llm_run_result = my_llm_run(text)
        thread_pool = ThreadPoolExecutor(max_workers=1)
        for i in range(3):
            thread_pool.submit(
                my_tool_run,
                f"Child Tool {i}",
                langsmith_extra={
                    "run_tree": run_tree,
                    "metadata": getattr(run_tree, "metadata", {}),
                },
            )
        thread_pool.shutdown(wait=True)
        return llm_run_result

    # NOTE(review): this executor is never submitted to; the shutdown below
    # is a no-op. Kept byte-identical here; candidate for removal.
    executor = ThreadPoolExecutor(max_workers=1)

    @traceable(run_type="chain")
    async def my_chain_run(text: str, run_tree: RunTree):
        # Fan out 2 my_run children across threads, again passing run_tree
        # explicitly so parentage survives the thread boundary.
        thread_pool = ThreadPoolExecutor(max_workers=3)
        for i in range(2):
            thread_pool.submit(
                my_run,
                f"Child {i}",
                langsmith_extra=dict(run_tree=run_tree, metadata=run_tree.metadata),
            )
        thread_pool.shutdown(wait=True)
        return text

    # Unique marker so the metadata filter only matches this test's runs.
    meta = uuid.uuid4().hex
    await my_chain_run(
        "foo",
        langsmith_extra=dict(project_name=project_name, metadata={"test_run": meta}),
    )
    executor.shutdown(wait=True)
    filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))'
    # Expected run count: 1 my_chain_run + 2 my_run + 2 my_llm_run
    # + 6 my_tool_run + 6 async_llm = 17.
    poll_runs_until_count(langchain_client, project_name, 17, filter_=filter_)
    runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_))
    # Listing by trace_id must return the same set as listing by project.
    trace_runs = list(
        langchain_client.list_runs(
            trace_id=runs[0].trace_id, project_name=project_name, filter=filter_
        )
    )
    assert len(trace_runs) == 17
    assert len(runs) == 17
    # llm = 6 async_llm + 2 my_llm_run; tool = 6; chain = 1 + 2.
    assert sum([run.run_type == "llm" for run in runs]) == 8
    assert sum([run.name == "async_llm" for run in runs]) == 6
    assert sum([run.name == "my_llm_run" for run in runs]) == 2
    assert sum([run.run_type == "tool" for run in runs]) == 6
    assert sum([run.run_type == "chain" for run in runs]) == 3
    # sort by dotted_order
    runs = sorted(runs, key=lambda run: run.dotted_order)
    trace_runs = sorted(trace_runs, key=lambda run: run.dotted_order)
    assert runs == trace_runs
    # Check that all instances of async_llm have a parent with
    # the same name (my_tool_run)
    name_to_ids_map = defaultdict(list)
    for run in runs:
        name_to_ids_map[run.name].append(run.id)
    # Verify each level of the tree points at the correct parent by name.
    for run in runs:
        if run.name == "async_llm":
            assert run.parent_run_id in name_to_ids_map["my_tool_run"]
        if run.name == "my_tool_run":
            assert run.parent_run_id in name_to_ids_map["my_run"]
        if run.name == "my_llm_run":
            assert run.parent_run_id in name_to_ids_map["my_run"]
        if run.name == "my_run":
            assert run.parent_run_id in name_to_ids_map["my_chain_run"]
        if run.name == "my_chain_run":
            assert run.parent_run_id is None
async def test_context_manager(langchain_client: Client) -> None:
    # Exercises the `trace` context manager for building nested run trees by
    # hand, mixed with a @traceable-decorated coroutine.
    project_name = "__My Tracer Project - test_context_manager"

    @traceable(run_type="llm")
    async def my_llm(prompt: str) -> str:
        return f"LLM {prompt}"

    # Unique marker so the metadata filter only matches this test's runs.
    meta = uuid.uuid4().hex
    with trace(
        "my_context", "chain", project_name=project_name, metadata={"test_run": meta}
    ) as run_tree:
        await my_llm("foo")
        with trace("my_context2", "chain", run_tree=run_tree) as run_tree2:
            # Create (but don't yet await) two LLM coroutines; they are
            # gathered after the inner context closes.
            runs = [my_llm("baz"), my_llm("qux")]
            with trace("my_context3", "chain", run_tree=run_tree2):
                await my_llm("quux")
                await my_llm("corge")
            await asyncio.gather(*runs)
        # End the outermost tree explicitly with a custom output payload.
        run_tree.end(outputs={"End val": "my_context2"})
    _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{meta}"))'
    # 8 runs expected: 3 trace contexts + 5 my_llm calls.
    poll_runs_until_count(langchain_client, project_name, 8, filter_=_filter)
    runs_ = list(langchain_client.list_runs(project_name=project_name, filter=_filter))
    assert len(runs_) == 8
def test_sync_generator(langchain_client: Client):
    """A traced sync generator records all yielded values as the run output."""
    project_name = "__My Tracer Project - test_sync_generator"
    run_meta = uuid.uuid4().hex

    @traceable(run_type="chain")
    def my_generator(num: int) -> Generator[str, None, None]:
        for i in range(num):
            yield f"Yielded {i}"

    extra = dict(project_name=project_name, metadata={"test_run": run_meta})
    results = [chunk for chunk in my_generator(5, langsmith_extra=extra)]
    expected = [f"Yielded {i}" for i in range(5)]
    assert results == expected
    _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))'
    poll_runs_until_count(
        langchain_client, project_name, 1, max_retries=20, filter_=_filter
    )
    runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter))
    run = runs[0]
    assert run.run_type == "chain"
    assert run.name == "my_generator"
    # Without a reduce_fn, yields are collected into a list under "output".
    assert run.outputs == {"output": expected}
def test_sync_generator_reduce_fn(langchain_client: Client):
    """A sync generator traced with reduce_fn aggregates yields into one output."""
    project_name = "__My Tracer Project - test_sync_generator_reduce_fn"
    run_meta = uuid.uuid4().hex

    def reduce_fn(outputs: Sequence) -> dict:
        # Collapse all yielded chunks into a single space-joined string.
        return {"my_output": " ".join(outputs)}

    @traceable(run_type="chain", reduce_fn=reduce_fn)
    def my_generator(num: int) -> Generator[str, None, None]:
        for i in range(num):
            yield f"Yielded {i}"

    extra = dict(project_name=project_name, metadata={"test_run": run_meta})
    results = [chunk for chunk in my_generator(5, langsmith_extra=extra)]
    expected = [f"Yielded {i}" for i in range(5)]
    filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))'
    # The caller still sees the raw yields; only the recorded output is reduced.
    assert results == expected
    poll_runs_until_count(
        langchain_client, project_name, 1, max_retries=20, filter_=filter_
    )
    runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_))
    run = runs[0]
    assert run.run_type == "chain"
    assert run.name == "my_generator"
    assert run.outputs == {"my_output": " ".join(expected)}
async def test_async_generator(langchain_client: Client):
    """A traced async generator records all yielded values as the run output."""
    project_name = "__My Tracer Project - test_async_generator"
    run_meta = uuid.uuid4().hex

    @traceable(run_type="chain")
    async def my_async_generator(num: int) -> AsyncGenerator[str, None]:
        for i in range(num):
            # Simulated async work between yields.
            await asyncio.sleep(0.1)
            yield f"Async yielded {i}"

    extra = dict(project_name=project_name, metadata={"test_run": run_meta})
    results = []
    async for item in my_async_generator(5, langsmith_extra=extra):
        results.append(item)
    expected = [f"Async yielded {i}" for i in range(5)]
    assert results == expected
    _filter = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))'
    poll_runs_until_count(
        langchain_client, project_name, 1, max_retries=20, filter_=_filter
    )
    runs = list(langchain_client.list_runs(project_name=project_name, filter=_filter))
    run = runs[0]
    assert run.run_type == "chain"
    assert run.name == "my_async_generator"
    # Without a reduce_fn, yields are collected into a list under "output".
    assert run.outputs == {"output": expected}
async def test_async_generator_reduce_fn(langchain_client: Client):
    """An async generator traced with reduce_fn aggregates yields into one output."""
    project_name = "__My Tracer Project - test_async_generator_reduce_fn"
    run_meta = uuid.uuid4().hex

    def reduce_fn(outputs: Sequence) -> dict:
        # Collapse all yielded chunks into a single space-joined string.
        return {"my_output": " ".join(outputs)}

    @traceable(run_type="chain", reduce_fn=reduce_fn)
    async def my_async_generator(num: int) -> AsyncGenerator[str, None]:
        for i in range(num):
            # Simulated async work between yields.
            await asyncio.sleep(0.1)
            yield f"Async yielded {i}"

    extra = dict(project_name=project_name, metadata={"test_run": run_meta})
    results = []
    async for item in my_async_generator(5, langsmith_extra=extra):
        results.append(item)
    expected = [f"Async yielded {i}" for i in range(5)]
    # The caller still sees the raw yields; only the recorded output is reduced.
    assert results == expected
    filter_ = f'and(eq(metadata_key, "test_run"), eq(metadata_value, "{run_meta}"))'
    poll_runs_until_count(
        langchain_client, project_name, 1, max_retries=20, sleep_time=5, filter_=filter_
    )
    runs = list(langchain_client.list_runs(project_name=project_name, filter=filter_))
    run = runs[0]
    assert run.run_type == "chain"
    assert run.name == "my_async_generator"
    assert run.outputs == {"my_output": " ".join(expected)}
async def test_end_metadata_with_run_tree(langchain_client: Client):
    """Metadata and outputs passed to RunTree.end() survive a manual post()."""
    project_name = "__My Tracer Project - test_end_metadata_with_run_tree"
    run_id = uuid.uuid4()
    run_tree = RunTree(
        name="my_chain_run",
        id=run_id,
        run_type="chain",
        project_name=project_name,
    )
    # End first (attaching final metadata and outputs), then post the
    # already-finished run to the server.
    run_tree.end(metadata={"final_metadata": run_id.hex}, outputs={"result": "success"})
    run_tree.post()
    filter_ = f'eq(id, "{run_id}")'
    poll_runs_until_count(langchain_client, project_name, 1, filter_=filter_)
    matching = list(
        langchain_client.list_runs(project_name=project_name, filter=filter_)
    )
    run = matching[0]
    assert run.run_type == "chain"
    assert run.metadata["final_metadata"] == run_id.hex
    assert run.outputs == {"result": "success"}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.