index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langsmith-sdk/vendor/pyo3 | lc_public_repos/langsmith-sdk/vendor/pyo3/pyo3-macros/LICENSE-MIT | Copyright (c) 2023-present PyO3 Project and Contributors. https://github.com/PyO3
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
|
0 | lc_public_repos/langsmith-sdk/vendor/pyo3/pyo3-macros | lc_public_repos/langsmith-sdk/vendor/pyo3/pyo3-macros/src/lib.rs | //! This crate declares only the proc macro attributes, as a crate defining proc macro attributes
//! must not contain any other public items.
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
use proc_macro::TokenStream;
use proc_macro2::TokenStream as TokenStream2;
use pyo3_macros_backend::{
build_derive_from_pyobject, build_derive_into_pyobject, build_py_class, build_py_enum,
build_py_function, build_py_methods, pymodule_function_impl, pymodule_module_impl, PyClassArgs,
PyClassMethodsType, PyFunctionOptions, PyModuleOptions,
};
use quote::quote;
use syn::{parse_macro_input, Item};
/// A proc macro used to implement Python modules.
///
/// The name of the module will be taken from the function name, unless `#[pyo3(name = "my_name")]`
/// is also annotated on the function to override the name. **Important**: the module name should
/// match the `lib.name` setting in `Cargo.toml`, so that Python is able to import the module
/// without needing a custom import loader.
///
/// Functions annotated with `#[pymodule]` can also be annotated with the following:
///
/// | Annotation | Description |
/// | :- | :- |
/// | `#[pyo3(name = "...")]` | Defines the name of the module in Python. |
/// | `#[pyo3(submodule)]` | Skips adding a `PyInit_` FFI symbol to the compiled binary. |
/// | `#[pyo3(module = "...")]` | Defines the Python `dotted.path` to the parent module for use in introspection. |
/// | `#[pyo3(crate = "pyo3")]` | Defines the path to PyO3 to use code generated by the macro. |
///
/// For more on creating Python modules see the [module section of the guide][1].
///
/// Due to technical limitations on how `#[pymodule]` is implemented, a function marked
/// `#[pymodule]` cannot have a module with the same name in the same scope. (The
/// `#[pymodule]` implementation generates a hidden module with the same name containing
/// metadata about the module, which is used by `wrap_pymodule!`).
///
#[doc = concat!("[1]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/module.html")]
#[proc_macro_attribute]
pub fn pymodule(args: TokenStream, input: TokenStream) -> TokenStream {
let options = parse_macro_input!(args as PyModuleOptions);
let mut ast = parse_macro_input!(input as Item);
let expanded = match &mut ast {
Item::Mod(module) => {
match pymodule_module_impl(module, options) {
// #[pymodule] on a module will rebuild the original ast, so we don't emit it here
Ok(expanded) => return expanded.into(),
Err(e) => Err(e),
}
}
Item::Fn(function) => pymodule_function_impl(function, options),
unsupported => Err(syn::Error::new_spanned(
unsupported,
"#[pymodule] only supports modules and functions.",
)),
}
.unwrap_or_compile_error();
quote!(
#ast
#expanded
)
.into()
}
#[proc_macro_attribute]
pub fn pyclass(attr: TokenStream, input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as Item);
match item {
Item::Struct(struct_) => pyclass_impl(attr, struct_, methods_type()),
Item::Enum(enum_) => pyclass_enum_impl(attr, enum_, methods_type()),
unsupported => {
syn::Error::new_spanned(unsupported, "#[pyclass] only supports structs and enums.")
.into_compile_error()
.into()
}
}
}
/// A proc macro used to expose methods to Python.
///
/// Methods within a `#[pymethods]` block can be annotated with as well as the following:
///
/// | Annotation | Description |
/// | :- | :- |
/// | [`#[new]`][4] | Defines the class constructor, like Python's `__new__` method. |
/// | [`#[getter]`][5] and [`#[setter]`][5] | These define getters and setters, similar to Python's `@property` decorator. This is useful for getters/setters that require computation or side effects; if that is not the case consider using [`#[pyo3(get, set)]`][11] on the struct's field(s).|
/// | [`#[staticmethod]`][6]| Defines the method as a staticmethod, like Python's `@staticmethod` decorator.|
/// | [`#[classmethod]`][7] | Defines the method as a classmethod, like Python's `@classmethod` decorator.|
/// | [`#[classattr]`][9] | Defines a class variable. |
/// | [`#[args]`][10] | Deprecated way to define a method's default arguments and allows the function to receive `*args` and `**kwargs`. Use `#[pyo3(signature = (...))]` instead. |
/// | <nobr>[`#[pyo3(<option> = <value>)`][pyo3-method-options]</nobr> | Any of the `#[pyo3]` options supported on [`macro@pyfunction`]. |
///
/// For more on creating class methods,
/// see the [class section of the guide][1].
///
/// If the [`multiple-pymethods`][2] feature is enabled, it is possible to implement
/// multiple `#[pymethods]` blocks for a single `#[pyclass]`.
/// This will add a transitive dependency on the [`inventory`][3] crate.
///
#[doc = concat!("[1]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/class.html#instance-methods")]
#[doc = concat!("[2]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/features.html#multiple-pymethods")]
/// [3]: https://docs.rs/inventory/
#[doc = concat!("[4]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/class.html#constructor")]
#[doc = concat!("[5]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/class.html#object-properties-using-getter-and-setter")]
#[doc = concat!("[6]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/class.html#static-methods")]
#[doc = concat!("[7]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/class.html#class-methods")]
#[doc = concat!("[8]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/class.html#callable-objects")]
#[doc = concat!("[9]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/class.html#class-attributes")]
#[doc = concat!("[10]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/class.html#method-arguments")]
#[doc = concat!("[11]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/class.html#object-properties-using-pyo3get-set")]
#[proc_macro_attribute]
pub fn pymethods(attr: TokenStream, input: TokenStream) -> TokenStream {
let methods_type = if cfg!(feature = "multiple-pymethods") {
PyClassMethodsType::Inventory
} else {
PyClassMethodsType::Specialization
};
pymethods_impl(attr, input, methods_type)
}
/// A proc macro used to expose Rust functions to Python.
///
/// Functions annotated with `#[pyfunction]` can also be annotated with the following `#[pyo3]`
/// options:
///
/// | Annotation | Description |
/// | :- | :- |
/// | `#[pyo3(name = "...")]` | Defines the name of the function in Python. |
/// | `#[pyo3(text_signature = "...")]` | Defines the `__text_signature__` attribute of the function in Python. |
/// | `#[pyo3(pass_module)]` | Passes the module containing the function as a `&PyModule` first argument to the function. |
///
/// For more on exposing functions see the [function section of the guide][1].
///
/// Due to technical limitations on how `#[pyfunction]` is implemented, a function marked
/// `#[pyfunction]` cannot have a module with the same name in the same scope. (The
/// `#[pyfunction]` implementation generates a hidden module with the same name containing
/// metadata about the function, which is used by `wrap_pyfunction!`).
///
#[doc = concat!("[1]: https://pyo3.rs/v", env!("CARGO_PKG_VERSION"), "/function.html")]
#[proc_macro_attribute]
pub fn pyfunction(attr: TokenStream, input: TokenStream) -> TokenStream {
    let mut function = parse_macro_input!(input as syn::ItemFn);
    let options = parse_macro_input!(attr as PyFunctionOptions);
    // Emit the original function followed by the generated wrapper machinery.
    let generated = build_py_function(&mut function, options).unwrap_or_compile_error();
    quote!(
        #function
        #generated
    )
    .into()
}
/// Derive macro entry point for `IntoPyObject`; the `<false>` const parameter
/// distinguishes it from the `IntoPyObjectRef` variant below.
#[proc_macro_derive(IntoPyObject, attributes(pyo3))]
pub fn derive_into_py_object(item: TokenStream) -> TokenStream {
    let derive_input = parse_macro_input!(item as syn::DeriveInput);
    build_derive_into_pyobject::<false>(&derive_input)
        .unwrap_or_compile_error()
        .into()
}
#[proc_macro_derive(IntoPyObjectRef, attributes(pyo3))]
pub fn derive_into_py_object_ref(item: TokenStream) -> TokenStream {
let ast = parse_macro_input!(item as syn::DeriveInput);
let expanded =
pyo3_macros_backend::build_derive_into_pyobject::<true>(&ast).unwrap_or_compile_error();
quote!(
#expanded
)
.into()
}
/// Derive macro entry point for `FromPyObject`.
#[proc_macro_derive(FromPyObject, attributes(pyo3))]
pub fn derive_from_py_object(item: TokenStream) -> TokenStream {
    let derive_input = parse_macro_input!(item as syn::DeriveInput);
    build_derive_from_pyobject(&derive_input)
        .unwrap_or_compile_error()
        .into()
}
/// Shared expansion path for `#[pyclass]` applied to a struct: parses the
/// attribute arguments, then emits the (possibly modified) struct followed by
/// the generated class implementation.
fn pyclass_impl(
    attrs: TokenStream,
    mut ast: syn::ItemStruct,
    methods_type: PyClassMethodsType,
) -> TokenStream {
    let args = parse_macro_input!(attrs with PyClassArgs::parse_struct_args);
    let generated = build_py_class(&mut ast, args, methods_type).unwrap_or_compile_error();
    quote!(
        #ast
        #generated
    )
    .into()
}
/// Shared expansion path for `#[pyclass]` applied to an enum: parses the
/// attribute arguments, then emits the (possibly modified) enum followed by
/// the generated class implementation.
fn pyclass_enum_impl(
    attrs: TokenStream,
    mut ast: syn::ItemEnum,
    methods_type: PyClassMethodsType,
) -> TokenStream {
    let args = parse_macro_input!(attrs with PyClassArgs::parse_enum_args);
    let generated = build_py_enum(&mut ast, args, methods_type).unwrap_or_compile_error();
    quote!(
        #ast
        #generated
    )
    .into()
}
fn pymethods_impl(
attr: TokenStream,
input: TokenStream,
methods_type: PyClassMethodsType,
) -> TokenStream {
let mut ast = parse_macro_input!(input as syn::ItemImpl);
// Apply all options as a #[pyo3] attribute on the ItemImpl
// e.g. #[pymethods(crate = "crate")] impl Foo { }
// -> #[pyo3(crate = "crate")] impl Foo { }
let attr: TokenStream2 = attr.into();
ast.attrs.push(syn::parse_quote!( #[pyo3(#attr)] ));
let expanded = build_py_methods(&mut ast, methods_type).unwrap_or_compile_error();
quote!(
#ast
#expanded
)
.into()
}
/// Selects how generated methods are collected: `inventory`-based collection
/// when the `multiple-pymethods` feature is enabled, specialization otherwise.
fn methods_type() -> PyClassMethodsType {
    if !cfg!(feature = "multiple-pymethods") {
        PyClassMethodsType::Specialization
    } else {
        PyClassMethodsType::Inventory
    }
}
/// Convenience extension: unwrap a `syn::Result` into tokens, substituting a
/// `compile_error!` invocation for the error so macro expansion never panics.
trait UnwrapOrCompileError {
    fn unwrap_or_compile_error(self) -> TokenStream2;
}
impl UnwrapOrCompileError for syn::Result<TokenStream2> {
    fn unwrap_or_compile_error(self) -> TokenStream2 {
        match self {
            Ok(tokens) => tokens,
            Err(err) => err.into_compile_error(),
        }
    }
}
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/orjson/LICENSE-APACHE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/orjson/Cargo.toml | [package]
name = "orjson"
version = "3.10.11"
authors = ["ijl <ijl@mailbox.org>"]
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
edition = "2021"
resolver = "2"
rust-version = "1.72"
license = "Apache-2.0 OR MIT"
repository = "https://github.com/ijl/orjson"
homepage = "https://github.com/ijl/orjson"
readme = "README.md"
keywords = ["fast", "json", "dataclass", "dataclasses", "datetime", "rfc", "8259", "3339"]
include = [
"Cargo.toml",
"CHANGELOG.md",
"data",
"include",
"LICENSE-APACHE",
"LICENSE-MIT",
"pyproject.toml",
"README.md",
"src",
"test/*.py",
"test/requirements.txt",
]
[lib]
name = "orjson"
crate-type = ["cdylib", "rlib"]
[features]
# Do not disable this in production! Only useful if you want to use orjson inside `cargo test`.
default = ["extension-module"]
# Use SIMD intrinsics. This requires Rust on the nightly channel.
unstable-simd = []
# Include runtime-detected functions that use AVX512VL. Requires unstable-simd and amd64.
avx512 = []
no-panic = [
"itoa/no-panic",
"ryu/no-panic",
]
# Avoid bundling libgcc on musl.
unwind = ["unwinding"]
# Build yyjson as a backend and panic if it fails. The default is to attempt
# to build and on failure fall back to another backend.
yyjson = []
# Features detected by build.rs. Do not specify.
inline_int = []
intrinsics = []
optimize = []
# Exposed for testing purposes only, so it can be disabled during `cargo test`.
extension-module = ["pyo3-ffi/extension-module"]
[dependencies]
associative-cache = { version = "2", default-features = false }
bytecount = { version = "^0.6.7", default-features = false, features = ["runtime-dispatch-simd"] }
compact_str = { version = "0.8", default-features = false, features = ["serde"] }
encoding_rs = { version = "0.8", default-features = false }
half = { version = "2", default-features = false, features = ["std"] }
itoa = { version = "1", default-features = false }
itoap = { version = "1", default-features = false, features = ["std", "simd"] }
jiff = { version = "^0.1", default-features = false, features = ["alloc"] }
once_cell = { version = "1", default-features = false, features = ["alloc", "race"] }
pyo3-ffi = { path = "../pyo3/pyo3-ffi", default-features = false}
ryu = { version = "1", default-features = false }
serde = { version = "1", default-features = false }
serde_json = { version = "1", default-features = false, features = ["std", "float_roundtrip"] }
simdutf8 = { version = "0.1", default-features = false, features = ["std", "public_imp", "aarch64_neon"] }
smallvec = { version = "^1.11", default-features = false, features = ["union", "write"] }
unwinding = { version = "=0.2.2", default-features = false, features = ["unwinder"], optional = true }
uuid = { version = "1", default-features = false }
xxhash-rust = { version = "^0.8", default-features = false, features = ["xxh3"] }
[build-dependencies]
cc = { version = "1" }
pyo3-build-config = { path = "../pyo3/pyo3-build-config" }
version_check = { version = "0.9" }
[profile.dev]
codegen-units = 1
debug = 2
debug-assertions = true
incremental = false
lto = "off"
opt-level = 3
overflow-checks = true
[profile.release]
codegen-units = 1
debug = false
incremental = false
lto = "thin"
opt-level = 3
panic = "abort"
[profile.release.build-override]
opt-level = 0
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/orjson/requirements.txt | -r bench/requirements.txt
-r integration/requirements.txt
-r test/requirements.txt
maturin
mypy==1.13.0
ruff==0.7.1
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/orjson/CHANGELOG.md | # Changelog
## 3.10.11
### Changed
- Improve performance of UUIDs.
- Publish PyPI wheels with trusted publishing and PEP 740 attestations.
- Include text of licenses for vendored dependencies.
## 3.10.10
### Fixed
- Fix `int` serialization on `s390x`. This was introduced in 3.10.8.
### Changed
- Publish aarch64 manylinux_2_17 wheel for 3.13 to PyPI.
## 3.10.9
### Fixed
- Fix `int` serialization on 32-bit Python 3.8, 3.9, 3.10. This was
introduced in 3.10.8.
## 3.10.8
### Changed
- `int` serialization no longer chains `OverflowError` to the
`__cause__` attribute of `orjson.JSONEncodeError` when range exceeded.
- Compatibility with CPython 3.14 alpha 1.
- Improve performance.
## 3.10.7 - 2024-08-08
### Changed
- Improve performance of stable Rust amd64 builds.
## 3.10.6 - 2024-07-02
### Changed
- Improve performance.
## 3.10.5 - 2024-06-13
### Changed
- Improve performance.
## 3.10.4 - 2024-06-10
### Changed
- Improve performance.
## 3.10.3 - 2024-05-03
### Changed
- `manylinux` amd64 builds include runtime-detected AVX-512 `str`
implementation.
- Tests now compatible with numpy v2.
## 3.10.2 - 2024-05-01
### Fixed
- Fix crash serializing `str` introduced in 3.10.1.
### Changed
- Improve performance.
- Drop support for arm7.
## 3.10.1 - 2024-04-15
### Fixed
- Serializing `numpy.ndarray` with non-native endianness raises
`orjson.JSONEncodeError`.
### Changed
- Improve performance of serializing.
## 3.10.0 - 2024-03-27
### Changed
- Support serializing `numpy.float16` (`numpy.half`).
- sdist uses metadata 2.3 instead of 2.1.
- Improve Windows PyPI builds.
## 3.9.15 - 2024-02-23
### Fixed
- Implement recursion limit of 1024 on `orjson.loads()`.
- Use byte-exact read on `str` formatting SIMD path to avoid crash.
## 3.9.14 - 2024-02-14
### Fixed
- Fix crash serializing `str` introduced in 3.9.11.
### Changed
- Build now depends on Rust 1.72 or later.
## 3.9.13 - 2024-02-03
### Fixed
- Serialization `str` escape uses only 128-bit SIMD.
- Fix compatibility with CPython 3.13 alpha 3.
### Changed
- Publish `musllinux_1_2` instead of `musllinux_1_1` wheels.
- Serialization uses small integer optimization in CPython 3.12 or later.
## 3.9.12 - 2024-01-18
### Changed
- Update benchmarks in README.
### Fixed
- Minimal `musllinux_1_1` build due to sporadic CI failure.
## 3.9.11 - 2024-01-18
### Changed
- Improve performance of serializing. `str` is significantly faster. Documents
using `dict`, `list`, and `tuple` are somewhat faster.
## 3.9.10 - 2023-10-26
### Fixed
- Fix debug assert failure on 3.12 `--profile=dev` build.
## 3.9.9 - 2023-10-12
### Changed
- `orjson` module metadata explicitly marks subinterpreters as not supported.
## 3.9.8 - 2023-10-10
### Changed
- Improve performance.
- Drop support for Python 3.7.
## 3.9.7 - 2023-09-08
### Fixed
- Fix crash in `orjson.loads()` due to non-reentrant handling of persistent
buffer. This was introduced in 3.9.3.
- Handle some FFI removals in CPython 3.13.
## 3.9.6 - 2023-09-07
### Fixed
- Fix numpy reference leak on unsupported array dtype.
- Fix numpy.datetime64 reference handling.
### Changed
- Minor performance improvements.
## 3.9.5 - 2023-08-16
### Fixed
- Remove futex from module import and initialization path.
## 3.9.4 - 2023-08-07
### Fixed
- Fix hash builder using default values.
- Fix non-release builds of orjson copying large deserialization buffer
from stack to heap. This was introduced in 3.9.3.
## 3.9.3 - 2023-08-06
### Fixed
- Fix compatibility with CPython 3.12.
### Changed
- Support i686/x86 32-bit Python installs on Windows.
## 3.9.2 - 2023-07-07
### Fixed
- Fix the `__cause__` exception on `orjson.JSONEncodeError` possibly being
denormalized, i.e., of type `str` instead of `Exception`.
## 3.9.1 - 2023-06-09
### Fixed
- Fix memory leak on chained tracebacks of exceptions raised in `default`. This
was introduced in 3.8.12.
## 3.9.0 - 2023-06-01
### Added
- `orjson.Fragment` includes already-serialized JSON in a document.
## 3.8.14 - 2023-05-25
### Changed
- PyPI `manylinux` wheels are compiled for `x86-64` instead of `x86-64-v2`.
## 3.8.13 - 2023-05-23
### Changed
- Source distribution contains all source code required for an offline build.
- PyPI macOS wheels use a `MACOSX_DEPLOYMENT_TARGET` of 10.15 instead of 11.
- Build uses maturin v1.
## 3.8.12 - 2023-05-07
### Changed
- Exceptions raised in `default` are now chained as the `__cause__` attribute
on `orjson.JSONEncodeError`.
## 3.8.11 - 2023-04-27
### Changed
- `orjson.loads()` on an empty document has a specific error message.
- PyPI `manylinux_2_28_x86_64` wheels are compiled for `x86-64-v2`.
- PyPI macOS wheels are only `universal2` and compiled for
`x86-64-v2` and `apple-m1`.
## 3.8.10 - 2023-04-09
### Fixed
- Fix compatibility with CPython 3.12.0a7.
- Fix compatibility with big-endian architectures.
- Fix crash in serialization.
### Changed
- Publish musllinux 3.11 wheels.
- Publish s390x wheels.
## 3.8.9 - 2023-03-28
### Fixed
- Fix parallel initialization of orjson.
## 3.8.8 - 2023-03-20
### Changed
- Publish ppc64le wheels.
## 3.8.7 - 2023-02-28
### Fixed
- Use serialization backend introduced in 3.8.4 only on well-tested
platforms such as glibc, macOS by default.
## 3.8.6 - 2023-02-09
### Fixed
- Fix crash serializing when using musl libc.
### Changed
- Make `python-dateutil` optional in tests.
- Handle failure to load system timezones in tests.
## 3.8.5 - 2023-01-10
### Fixed
- Fix `orjson.dumps()` invalid output on Windows.
## 3.8.4 - 2023-01-04
### Changed
- Improve performance.
## 3.8.3 - 2022-12-02
### Fixed
- `orjson.dumps()` accepts `option=None` per `Optional[int]` type.
## 3.8.2 - 2022-11-20
### Fixed
- Fix tests on 32-bit for `numpy.intp` and `numpy.uintp`.
### Changed
- Build now depends on rustc 1.60 or later.
- Support building with maturin 0.13 or 0.14.
## 3.8.1 - 2022-10-25
### Changed
- Build maintenance for Python 3.11.
## 3.8.0 - 2022-08-27
### Changed
- Support serializing `numpy.int16` and `numpy.uint16`.
## 3.7.12 - 2022-08-14
### Fixed
- Fix datetime regression tests for tzinfo 2022b.
### Changed
- Improve performance.
## 3.7.11 - 2022-07-31
### Fixed
- Revert `dict` iterator implementation introduced in 3.7.9.
## 3.7.10 - 2022-07-30
### Fixed
- Fix serializing `dict` with deleted final item. This was introduced in 3.7.9.
## 3.7.9 - 2022-07-29
### Changed
- Improve performance of serializing.
- Improve performance of serializing pretty-printed (`orjson.OPT_INDENT_2`)
to be much nearer to compact.
- Improve performance of deserializing `str` input.
- orjson now requires Rust 1.57 instead of 1.54 to build.
## 3.7.8 - 2022-07-19
### Changed
- Build makes best effort instead of requiring "--features".
- Build using maturin 0.13.
## 3.7.7 - 2022-07-06
### Changed
- Support Python 3.11.
## 3.7.6 - 2022-07-03
### Changed
- Handle unicode changes in CPython 3.12.
- Build PyPI macOS wheels on 10.15 instead of 12 for compatibility.
## 3.7.5 - 2022-06-28
### Fixed
- Fix issue serializing dicts that had keys popped and replaced. This was
introduced in 3.7.4.
## 3.7.4 - 2022-06-28
### Changed
- Improve performance.
### Fixed
- Fix deallocation of `orjson.JSONDecodeError`.
## 3.7.3 - 2022-06-23
### Changed
- Improve build.
- Publish aarch64 musllinux wheels.
## 3.7.2 - 2022-06-07
### Changed
- Improve deserialization performance.
## 3.7.1 - 2022-06-03
### Fixed
- Type stubs for `orjson.JSONDecodeError` now inherit from
`json.JSONDecodeError` instead of `ValueError`
- Null-terminate the internal buffer of `orjson.dumps()` output.
## 3.7.0 - 2022-06-03
### Changed
- Improve deserialization performance significantly through the use of a new
backend. PyPI wheels for manylinux_2_28 and macOS have it enabled. Packagers
are advised to see the README.
## 3.6.9 - 2022-06-01
### Changed
- Improve serialization and deserialization performance.
## 3.6.8 - 2022-04-15
### Fixed
- Fix serialization of `numpy.datetime64("NaT")` to raise on an
unsupported type.
## 3.6.7 - 2022-02-14
### Changed
- Improve performance of deserializing almost-empty documents.
- Publish arm7l `manylinux_2_17` wheels to PyPI.
- Publish amd64 `musllinux_1_1` wheels to PyPI.
### Fixed
- Fix build requiring `python` on `PATH`.
## 3.6.6 - 2022-01-21
### Changed
- Improve performance of serializing `datetime.datetime` using `tzinfo` that
are `zoneinfo.ZoneInfo`.
### Fixed
- Fix invalid indexing in line and column number reporting in
`JSONDecodeError`.
- Fix `orjson.OPT_STRICT_INTEGER` not raising an error on
values exceeding a 64-bit integer maximum.
## 3.6.5 - 2021-12-05
### Fixed
- Fix build on macOS aarch64 CPython 3.10.
- Fix build issue on 32-bit.
## 3.6.4 - 2021-10-01
### Fixed
- Fix serialization of `dataclass` inheriting from `abc.ABC` and
using `__slots__`.
- Decrement refcount for numpy `PyArrayInterface`.
- Fix build on recent versions of Rust nightly.
## 3.6.3 - 2021-08-20
### Fixed
- Fix build on aarch64 using the Rust stable channel.
## 3.6.2 - 2021-08-17
### Changed
- `orjson` now compiles on Rust stable 1.54.0 or above. Use of some SIMD
usage is now disabled by default and packagers are advised to add
`--cargo-extra-args="--features=unstable-simd"` to the `maturin build` command
if they continue to use nightly.
- `orjson` built with `--features=unstable-simd` adds UTF-8 validation
implementations that use AVX2 or SSE4.2.
- Drop support for Python 3.6.
## 3.6.1 - 2021-08-04
### Changed
- `orjson` now includes a `pyi` type stubs file.
- Publish manylinux_2_24 wheels instead of manylinux2014.
### Fixed
- Fix compilation on latest Rust nightly.
## 3.6.0 - 2021-07-08
### Added
- `orjson.dumps()` serializes `numpy.datetime64` instances as RFC 3339
strings.
## 3.5.4 - 2021-06-30
### Fixed
- Fix memory leak serializing `datetime.datetime` with `tzinfo`.
- Fix wrong error message when serializing an unsupported numpy type
without default specified.
### Changed
- Publish python3.10 and python3.9 manylinux_2_24 wheels.
## 3.5.3 - 2021-06-01
### Fixed
- `orjson.JSONDecodeError` now has `pos`, `lineno`, and `colno`.
- Fix build on recent versions of Rust nightly.
## 3.5.2 - 2021-04-15
### Changed
- Improve serialization and deserialization performance.
- `orjson.dumps()` serializes individual `numpy.bool_` objects.
## 3.5.1 - 2021-03-06
### Changed
- Publish `universal2` wheels for macOS supporting Apple Silicon (aarch64).
## 3.5.0 - 2021-02-24
### Added
- `orjson.loads()` supports reading from `memoryview` objects.
### Fixed
- `datetime.datetime` and `datetime.date` zero pad years less than 1000 to
four digits.
- sdist pins maturin 0.9.0 to avoid breaks in later 0.9.x.
### Changed
- `orjson.dumps()` when given a non-C contiguous `numpy.ndarray` has
an error message suggesting to use `default`.
## 3.4.8 - 2021-02-04
### Fixed
- aarch64 manylinux2014 wheels are now compatible with glibc 2.17.
### Changed
- Fix build warnings on ppcle64.
## 3.4.7 - 2021-01-19
### Changed
- Use vectorcall APIs for method calls on python3.9 and above.
- Publish python3.10 wheels for Linux on amd64 and aarch64.
## 3.4.6 - 2020-12-07
### Fixed
- Fix compatibility with debug builds of CPython.
## 3.4.5 - 2020-12-02
### Fixed
- Fix deserializing long strings on processors without AVX2.
## 3.4.4 - 2020-11-25
### Changed
- `orjson.dumps()` serializes integers up to a 64-bit unsigned integer's
maximum. It was previously the maximum of a 64-bit signed integer.
## 3.4.3 - 2020-10-30
### Fixed
- Fix regression in parsing similar `dict` keys.
## 3.4.2 - 2020-10-29
### Changed
- Improve deserialization performance.
- Publish Windows python3.9 wheel.
- Disable unsupported SIMD features on non-x86, non-ARM targets
## 3.4.1 - 2020-10-20
### Fixed
- Fix `orjson.dumps.__module__` and `orjson.loads.__module__` not being the
`str` "orjson".
### Changed
- Publish macos python3.9 wheel.
- More packaging documentation.
## 3.4.0 - 2020-09-25
### Added
- Serialize `numpy.uint8` and `numpy.int8` instances.
### Fixed
- Fix serializing `numpy.empty()` instances.
### Changed
- No longer publish `manylinux1` wheels due to tooling dropping support.
## 3.3.1 - 2020-08-17
### Fixed
- Fix failure to deserialize some latin1 strings on some platforms. This
was introduced in 3.2.0.
- Fix annotation of optional parameters on `orjson.dumps()` for `help()`.
### Changed
- Publish `manylinux2014` wheels for amd64 in addition to `manylinux1`.
## 3.3.0 - 2020-07-24
### Added
- `orjson.dumps()` now serializes individual numpy floats and integers, e.g.,
`numpy.float64(1.0)`.
- `orjson.OPT_PASSTHROUGH_DATACLASS` causes `orjson.dumps()` to pass
`dataclasses.dataclass` instances to `default`.
## 3.2.2 - 2020-07-13
### Fixed
- Fix serializing `dataclasses.dataclass` that have no attributes.
### Changed
- Improve deserialization performance of `str`.
## 3.2.1 - 2020-07-03
### Fixed
- Fix `orjson.dumps(..., **{})` raising `TypeError` on python3.6.
## 3.2.0 - 2020-06-30
### Added
- `orjson.OPT_APPEND_NEWLINE` appends a newline to output.
### Changed
- Improve deserialization performance of `str`.
## 3.1.2 - 2020-06-23
### Fixed
- Fix serializing zero-dimension `numpy.ndarray`.
## 3.1.1 - 2020-06-20
### Fixed
- Fix repeated serialization of `str` that are ASCII-only and have a legacy
(non-compact) layout.
## 3.1.0 - 2020-06-08
### Added
- `orjson.OPT_PASSTHROUGH_SUBCLASS` causes `orjson.dumps()` to pass
subclasses of builtin types to `default` so the caller can customize the
output.
- `orjson.OPT_PASSTHROUGH_DATETIME` causes `orjson.dumps()` to pass
`datetime` objects to `default` so the caller can customize the
output.
## 3.0.2 - 2020-05-27
### Changed
- `orjson.dumps()` does not serialize `dataclasses.dataclass` attributes
that begin with a leading underscore, e.g., `_attr`. This is because of the
Python idiom that a leading underscore marks an attribute as "private."
- `orjson.dumps()` does not serialize `dataclasses.dataclass` attributes that
are `InitVar` or `ClassVar` whether using `__slots__` or not.
## 3.0.1 - 2020-05-19
### Fixed
- `orjson.dumps()` raises an exception if the object to be serialized
is not given as a positional argument. `orjson.dumps({})` is intended and ok
while `orjson.dumps(obj={})` is an error. This makes it consistent with the
documentation, `help()` annotation, and type annotation.
- Fix orphan reference in exception creation that leaks memory until the
garbage collector runs.
### Changed
- Improve serialization performance marginally by using the fastcall/vectorcall
calling convention on python3.7 and above.
- Reduce build time.
## 3.0.0 - 2020-05-01
### Added
- `orjson.dumps()` serializes subclasses of `str`, `int`, `list`, and `dict`.
### Changed
- `orjson.dumps()` serializes `dataclasses.dataclass` and `uuid.UUID`
instances by default. The options `OPT_SERIALIZE_DATACLASS` and
`OPT_SERIALIZE_UUID` can still be specified but have no effect.
## 2.6.8 - 2020-04-30
### Changed
- The source distribution vendors a forked dependency.
## 2.6.7 - 2020-04-30
### Fixed
- Fix integer overflows in debug builds.
### Changed
- The source distribution sets the recommended RUSTFLAGS in `.cargo/config`.
## 2.6.6 - 2020-04-24
### Fixed
- Import `numpy` only on first use of `OPT_SERIALIZE_NUMPY` to reduce
interpreter start time when not used.
- Reduce build time by half.
## 2.6.5 - 2020-04-08
### Fixed
- Fix deserialization raising `JSONDecodeError` on some valid negative
floats with large exponents.
## 2.6.4 - 2020-04-08
### Changed
- Improve deserialization performance of floats by about 40%.
## 2.6.3 - 2020-04-01
### Changed
- Serialize `enum.Enum` objects.
- Minor performance improvements.
## 2.6.2 - 2020-03-27
### Changed
- Publish python3.9 `manylinux2014` wheel instead of `manylinux1` for `x86_64`.
- Publish python3.9 `manylinux2014` wheel for `aarch64`.
### Fixed
- Fix compilation failure on 32-bit.
## 2.6.1 - 2020-03-19
### Changed
- Serialization is 10-20% faster and uses about 50% less memory by writing
directly to the returned `bytes` object.
## 2.6.0 - 2020-03-10
### Added
- `orjson.dumps()` pretty prints with an indentation of two spaces if
`option=orjson.OPT_INDENT_2` is specified.
## 2.5.2 - 2020-03-07
### Changed
- Publish `manylinux2014` wheels for `aarch64`.
- numpy support now includes `numpy.uint32` and `numpy.uint64`.
## 2.5.1 - 2020-02-24
### Changed
- `manylinux1` wheels for 3.6, 3.7, and 3.8 are now compliant with the spec by
not depending on glibc 2.18.
## 2.5.0 - 2020-02-19
### Added
- `orjson.dumps()` serializes `dict` keys of type other than `str` if
`option=orjson.OPT_NON_STR_KEYS` is specified.
## 2.4.0 - 2020-02-14
### Added
- `orjson.dumps()` serializes `numpy.ndarray` instances if
`option=orjson.OPT_SERIALIZE_NUMPY` is specified.
### Fixed
- Fix `dataclasses.dataclass` attributes that are `dict` to be affected by
`orjson.OPT_SORT_KEYS`.
## 2.3.0 - 2020-02-12
### Added
- `orjson.dumps()` serializes `dict` instances sorted by keys, equivalent to
`sort_keys` in other implementations, if `option=orjson.OPT_SORT_KEYS` is
specified.
### Changed
- `dataclasses.dataclass` instances without `__slots__` now serialize faster.
### Fixed
- Fix documentation on `default`, in particular documenting the need to raise
an exception if the type cannot be handled.
## 2.2.2 - 2020-02-10
### Changed
- Performance improvements to serializing a list containing elements of the
same type.
## 2.2.1 - 2020-02-04
### Fixed
- `orjson.loads()` rejects floats that do not have a digit following
the decimal, e.g., `-2.`, `2.e-3`.
### Changed
- Build Linux, macOS, and Windows wheels on more recent distributions.
## 2.2.0 - 2020-01-22
### Added
- `orjson.dumps()` serializes `uuid.UUID` instances if
`option=orjson.OPT_SERIALIZE_UUID` is specified.
### Changed
- Minor performance improvements.
- Publish Python 3.9 wheel for Linux.
## 2.1.4 - 2020-01-08
### Fixed
- Specify a text signature for `orjson.loads()`.
### Changed
- Improve documentation.
## 2.1.3 - 2019-11-12
### Changed
- Publish Python 3.8 wheels for macOS and Windows.
## 2.1.2 - 2019-11-07
### Changed
- The recursion limit of `default` on `orjson.dumps()` has been increased from
5 to 254.
## 2.1.1 - 2019-10-29
### Changed
- Publish `manylinux1` wheels instead of `manylinux2010`.
## 2.1.0 - 2019-10-24
### Added
- `orjson.dumps()` serializes `dataclasses.dataclass` instances if
`option=orjson.OPT_SERIALIZE_DATACLASS` is specified.
- `orjson.dumps()` accepts `orjson.OPT_UTC_Z` to serialize UTC as "Z" instead
of "+00:00".
- `orjson.dumps()` accepts `orjson.OPT_OMIT_MICROSECONDS` to not serialize
the `microseconds` attribute of `datetime.datetime` and `datetime.time`
instances.
- `orjson.loads()` accepts `bytearray`.
### Changed
- Drop support for Python 3.5.
- Publish `manylinux2010` wheels instead of `manylinux1`.
## 2.0.11 - 2019-10-01
### Changed
- Publish Python 3.8 wheel for Linux.
## 2.0.10 - 2019-09-25
### Changed
- Performance improvements and lower memory usage in deserialization
by creating only one `str` object for repeated map keys.
## 2.0.9 - 2019-09-22
### Changed
- Minor performance improvements.
### Fixed
- Fix inaccurate zero padding in serialization of microseconds on
`datetime.time` objects.
## 2.0.8 - 2019-09-18
### Fixed
- Fix inaccurate zero padding in serialization of microseconds on
`datetime.datetime` objects.
## 2.0.7 - 2019-08-29
### Changed
- Publish PEP 517 source distribution.
### Fixed
- `orjson.dumps()` raises `JSONEncodeError` on circular references.
## 2.0.6 - 2019-05-11
### Changed
- Performance improvements.
## 2.0.5 - 2019-04-19
### Fixed
- Fix inaccuracy in deserializing some `float` values, e.g.,
31.245270191439438 was parsed to 31.24527019143944. Serialization was
unaffected.
## 2.0.4 - 2019-04-02
### Changed
- `orjson.dumps()` now serializes `datetime.datetime` objects without a
`tzinfo` rather than raising `JSONEncodeError`.
## 2.0.3 - 2019-03-23
### Changed
- `orjson.loads()` uses SSE2 to validate `bytes` input.
## 2.0.2 - 2019-03-12
### Changed
- Support Python 3.5.
## 2.0.1 - 2019-02-05
### Changed
- Publish Windows wheel.
## 2.0.0 - 2019-01-28
### Added
- `orjson.dumps()` accepts a `default` callable to serialize arbitrary
types.
- `orjson.dumps()` accepts `datetime.datetime`, `datetime.date`,
and `datetime.time`. Each is serialized to an RFC 3339 string.
- `orjson.dumps(..., option=orjson.OPT_NAIVE_UTC)` allows serializing
`datetime.datetime` objects that do not have a timezone set as UTC.
- `orjson.dumps(..., option=orjson.OPT_STRICT_INTEGER)` available to
raise an error on integer values outside the 53-bit range of all JSON
implementations.
### Changed
- `orjson.dumps()` no longer accepts `bytes`.
## 1.3.1 - 2019-01-03
### Fixed
- Handle invalid UTF-8 in str.
## 1.3.0 - 2019-01-02
### Changed
- Performance improvements of 15-25% on serialization, 10% on deserialization.
## 1.2.1 - 2018-12-31
### Fixed
- Fix memory leak in deserializing dict.
## 1.2.0 - 2018-12-16
### Changed
- Performance improvements.
## 1.1.0 - 2018-12-04
### Changed
- Performance improvements.
### Fixed
- Dict key can only be str.
## 1.0.1 - 2018-11-26
### Fixed
- pyo3 bugfix update.
## 1.0.0 - 2018-11-23
### Added
- `orjson.dumps()` function.
- `orjson.loads()` function.
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/orjson/CONTRIBUTING.md | orjson is stable. Contributions will be reviewed only if they are well
documented, tested to the same standard as the rest of the library, not
a breaking change, and important to other users.
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/orjson/build.rs | // SPDX-License-Identifier: (Apache-2.0 OR MIT)
use std::env;
/// Cargo build script for orjson.
///
/// Emits `cargo:` directives that: declare rebuild triggers, register
/// custom cfg flags for `--check-cfg`, forward the pyo3 interpreter
/// configuration, enable nightly-only or target-specific features when
/// the active toolchain supports them, and compile the vendored yyjson
/// C backend unless it is disabled.
fn main() {
    // Static directives: rebuild triggers (the script itself, the
    // vendored C sources, and every environment variable that
    // influences the build) plus the custom cfg registrations.
    for directive in [
        "cargo:rerun-if-changed=build.rs",
        "cargo:rerun-if-changed=include/yyjson/*",
        "cargo:rerun-if-env-changed=CC",
        "cargo:rerun-if-env-changed=CFLAGS",
        "cargo:rerun-if-env-changed=LDFLAGS",
        "cargo:rerun-if-env-changed=ORJSON_DISABLE_AVX512",
        "cargo:rerun-if-env-changed=ORJSON_DISABLE_SIMD",
        "cargo:rerun-if-env-changed=ORJSON_DISABLE_YYJSON",
        "cargo:rerun-if-env-changed=RUSTFLAGS",
        "cargo:rustc-check-cfg=cfg(intrinsics)",
        "cargo:rustc-check-cfg=cfg(optimize)",
        "cargo:rustc-check-cfg=cfg(Py_3_10)",
        "cargo:rustc-check-cfg=cfg(Py_3_11)",
        "cargo:rustc-check-cfg=cfg(Py_3_12)",
        "cargo:rustc-check-cfg=cfg(Py_3_13)",
        "cargo:rustc-check-cfg=cfg(Py_3_14)",
        "cargo:rustc-check-cfg=cfg(Py_3_8)",
        "cargo:rustc-check-cfg=cfg(Py_3_9)",
        "cargo:rustc-check-cfg=cfg(Py_GIL_DISABLED)",
    ] {
        println!("{directive}");
    }

    // Forward the detected Python interpreter configuration from pyo3.
    let interpreter_config = pyo3_build_config::get();
    for line in interpreter_config.build_script_outputs() {
        println!("{line}");
    }

    // Nightly-only feature gates, enabled when the current toolchain
    // reports support for them.
    if version_check::supports_feature("core_intrinsics") == Some(true) {
        println!("cargo:rustc-cfg=feature=\"intrinsics\"");
    }
    if version_check::supports_feature("optimize_attribute") == Some(true) {
        println!("cargo:rustc-cfg=feature=\"optimize\"");
    }

    #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
    if env::var("ORJSON_DISABLE_SIMD").is_err() {
        // auto build unstable SIMD on nightly
        if version_check::supports_feature("portable_simd") == Some(true) {
            println!("cargo:rustc-cfg=feature=\"unstable-simd\"");
        }
        // auto build AVX512 on x86-64-v4 or supporting native targets
        #[cfg(all(target_arch = "x86_64", target_feature = "avx512vl"))]
        if version_check::supports_feature("stdarch_x86_avx512") == Some(true)
            && env::var("ORJSON_DISABLE_AVX512").is_err()
        {
            println!("cargo:rustc-cfg=feature=\"avx512\"");
        }
    }

    // 64-bit x86/ARM targets get the inline integer representation.
    #[cfg(all(
        target_pointer_width = "64",
        any(target_arch = "x86_64", target_arch = "aarch64")
    ))]
    println!("cargo:rustc-cfg=feature=\"inline_int\"");

    // Compile the vendored yyjson backend unless explicitly disabled.
    // Disabling it while also requesting --features=yyjson is a
    // contradiction and fails the build.
    if env::var("ORJSON_DISABLE_YYJSON").is_ok() {
        if env::var("CARGO_FEATURE_YYJSON").is_ok() {
            panic!("ORJSON_DISABLE_YYJSON and --features=yyjson both enabled.")
        }
    } else {
        let compile_result = cc::Build::new()
            .file("include/yyjson/yyjson.c")
            .include("include/yyjson")
            .define("YYJSON_DISABLE_NON_STANDARD", "1")
            .define("YYJSON_DISABLE_UTF8_VALIDATION", "1")
            .define("YYJSON_DISABLE_UTILS", "1")
            .define("YYJSON_DISABLE_WRITER", "1")
            .try_compile("yyjson");
        if compile_result.is_ok() {
            println!("cargo:rustc-cfg=feature=\"yyjson\"");
        } else if env::var("CARGO_FEATURE_YYJSON").is_ok() {
            // A failed C build is tolerated (falls back to the Rust
            // backend) unless the feature was explicitly requested.
            panic!("yyjson was enabled but the build failed. To build with a different backend do not specify the feature.")
        }
    }
}
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/orjson/README.md | # orjson
orjson is a fast, correct JSON library for Python. It
[benchmarks](https://github.com/ijl/orjson?tab=readme-ov-file#performance) as the fastest Python
library for JSON and is more correct than the standard json library or other
third-party libraries. It serializes
[dataclass](https://github.com/ijl/orjson?tab=readme-ov-file#dataclass),
[datetime](https://github.com/ijl/orjson?tab=readme-ov-file#datetime),
[numpy](https://github.com/ijl/orjson?tab=readme-ov-file#numpy), and
[UUID](https://github.com/ijl/orjson?tab=readme-ov-file#uuid) instances natively.
Its features and drawbacks compared to other Python JSON libraries:
* serializes `dataclass` instances 40-50x as fast as other libraries
* serializes `datetime`, `date`, and `time` instances to RFC 3339 format,
e.g., "1970-01-01T00:00:00+00:00"
* serializes `numpy.ndarray` instances 4-12x as fast with 0.3x the memory
usage of other libraries
* pretty prints 10x to 20x as fast as the standard library
* serializes to `bytes` rather than `str`, i.e., is not a drop-in replacement
* serializes `str` without escaping unicode to ASCII, e.g., "好" rather than
"\\\u597d"
* serializes `float` 10x as fast and deserializes twice as fast as other
libraries
* serializes subclasses of `str`, `int`, `list`, and `dict` natively,
requiring `default` to specify how to serialize others
* serializes arbitrary types using a `default` hook
* has strict UTF-8 conformance, more correct than the standard library
* has strict JSON conformance in not supporting NaN/Infinity/-Infinity
* has an option for strict JSON conformance on 53-bit integers with default
support for 64-bit
* does not provide `load()` or `dump()` functions for reading from/writing to
file-like objects
orjson supports CPython 3.8, 3.9, 3.10, 3.11, 3.12, 3.13, and 3.14.
It distributes amd64/x86_64, aarch64/armv8, arm7, POWER/ppc64le, and s390x
wheels for Linux, amd64 and aarch64 wheels for macOS, and amd64
and i686/x86 wheels for Windows.
orjson does not and will not support PyPy, embedded Python builds for
Android/iOS, or PEP 554 subinterpreters.
Releases follow semantic versioning and serializing a new object type
without an opt-in flag is considered a breaking change.
orjson is licensed under both the Apache 2.0 and MIT licenses. The
repository and issue tracker is
[github.com/ijl/orjson](https://github.com/ijl/orjson), and patches may be
submitted there. There is a
[CHANGELOG](https://github.com/ijl/orjson/blob/master/CHANGELOG.md)
available in the repository.
1. [Usage](https://github.com/ijl/orjson?tab=readme-ov-file#usage)
1. [Install](https://github.com/ijl/orjson?tab=readme-ov-file#install)
2. [Quickstart](https://github.com/ijl/orjson?tab=readme-ov-file#quickstart)
3. [Migrating](https://github.com/ijl/orjson?tab=readme-ov-file#migrating)
4. [Serialize](https://github.com/ijl/orjson?tab=readme-ov-file#serialize)
1. [default](https://github.com/ijl/orjson?tab=readme-ov-file#default)
2. [option](https://github.com/ijl/orjson?tab=readme-ov-file#option)
3. [Fragment](https://github.com/ijl/orjson?tab=readme-ov-file#fragment)
5. [Deserialize](https://github.com/ijl/orjson?tab=readme-ov-file#deserialize)
2. [Types](https://github.com/ijl/orjson?tab=readme-ov-file#types)
1. [dataclass](https://github.com/ijl/orjson?tab=readme-ov-file#dataclass)
2. [datetime](https://github.com/ijl/orjson?tab=readme-ov-file#datetime)
3. [enum](https://github.com/ijl/orjson?tab=readme-ov-file#enum)
4. [float](https://github.com/ijl/orjson?tab=readme-ov-file#float)
5. [int](https://github.com/ijl/orjson?tab=readme-ov-file#int)
6. [numpy](https://github.com/ijl/orjson?tab=readme-ov-file#numpy)
7. [str](https://github.com/ijl/orjson?tab=readme-ov-file#str)
8. [uuid](https://github.com/ijl/orjson?tab=readme-ov-file#uuid)
3. [Testing](https://github.com/ijl/orjson?tab=readme-ov-file#testing)
4. [Performance](https://github.com/ijl/orjson?tab=readme-ov-file#performance)
1. [Latency](https://github.com/ijl/orjson?tab=readme-ov-file#latency)
2. [Memory](https://github.com/ijl/orjson?tab=readme-ov-file#memory)
3. [Reproducing](https://github.com/ijl/orjson?tab=readme-ov-file#reproducing)
5. [Questions](https://github.com/ijl/orjson?tab=readme-ov-file#questions)
6. [Packaging](https://github.com/ijl/orjson?tab=readme-ov-file#packaging)
7. [License](https://github.com/ijl/orjson?tab=readme-ov-file#license)
## Usage
### Install
To install a wheel from PyPI, install the `orjson` package.
In `requirements.in` or `requirements.txt` format, specify:
```txt
orjson>=3.10,<4
```
In Poetry, specify:
```toml
orjson = "^3"
```
To build a wheel, see [packaging](https://github.com/ijl/orjson?tab=readme-ov-file#packaging).
### Quickstart
This is an example of serializing, with options specified, and deserializing:
```python
>>> import orjson, datetime, numpy
>>> data = {
"type": "job",
"created_at": datetime.datetime(1970, 1, 1),
"status": "🆗",
"payload": numpy.array([[1, 2], [3, 4]]),
}
>>> orjson.dumps(data, option=orjson.OPT_NAIVE_UTC | orjson.OPT_SERIALIZE_NUMPY)
b'{"type":"job","created_at":"1970-01-01T00:00:00+00:00","status":"\xf0\x9f\x86\x97","payload":[[1,2],[3,4]]}'
>>> orjson.loads(_)
{'type': 'job', 'created_at': '1970-01-01T00:00:00+00:00', 'status': '🆗', 'payload': [[1, 2], [3, 4]]}
```
### Migrating
orjson version 3 serializes more types than version 2. Subclasses of `str`,
`int`, `dict`, and `list` are now serialized. This is faster and more similar
to the standard library. It can be disabled with
`orjson.OPT_PASSTHROUGH_SUBCLASS`.`dataclasses.dataclass` instances
are now serialized by default and cannot be customized in a
`default` function unless `option=orjson.OPT_PASSTHROUGH_DATACLASS` is
specified. `uuid.UUID` instances are serialized by default.
For any type that is now serialized,
implementations in a `default` function and options enabling them can be
removed but do not need to be. There was no change in deserialization.
To migrate from the standard library, the largest difference is that
`orjson.dumps` returns `bytes` and `json.dumps` returns a `str`. Users with
`dict` objects using non-`str` keys should specify
`option=orjson.OPT_NON_STR_KEYS`. `sort_keys` is replaced by
`option=orjson.OPT_SORT_KEYS`. `indent` is replaced by
`option=orjson.OPT_INDENT_2` and other levels of indentation are not
supported.
### Serialize
```python
def dumps(
__obj: Any,
default: Optional[Callable[[Any], Any]] = ...,
option: Optional[int] = ...,
) -> bytes: ...
```
`dumps()` serializes Python objects to JSON.
It natively serializes
`str`, `dict`, `list`, `tuple`, `int`, `float`, `bool`, `None`,
`dataclasses.dataclass`, `typing.TypedDict`, `datetime.datetime`,
`datetime.date`, `datetime.time`, `uuid.UUID`, `numpy.ndarray`, and
`orjson.Fragment` instances. It supports arbitrary types through `default`. It
serializes subclasses of `str`, `int`, `dict`, `list`,
`dataclasses.dataclass`, and `enum.Enum`. It does not serialize subclasses
of `tuple` to avoid serializing `namedtuple` objects as arrays. To avoid
serializing subclasses, specify the option `orjson.OPT_PASSTHROUGH_SUBCLASS`.
The output is a `bytes` object containing UTF-8.
The global interpreter lock (GIL) is held for the duration of the call.
It raises `JSONEncodeError` on an unsupported type. This exception message
describes the invalid object with the error message
`Type is not JSON serializable: ...`. To fix this, specify
[default](https://github.com/ijl/orjson?tab=readme-ov-file#default).
It raises `JSONEncodeError` on a `str` that contains invalid UTF-8.
It raises `JSONEncodeError` on an integer that exceeds 64 bits by default or,
with `OPT_STRICT_INTEGER`, 53 bits.
It raises `JSONEncodeError` if a `dict` has a key of a type other than `str`,
unless `OPT_NON_STR_KEYS` is specified.
It raises `JSONEncodeError` if the output of `default` recurses to handling by
`default` more than 254 levels deep.
It raises `JSONEncodeError` on circular references.
It raises `JSONEncodeError` if a `tzinfo` on a datetime object is
unsupported.
`JSONEncodeError` is a subclass of `TypeError`. This is for compatibility
with the standard library.
If the failure was caused by an exception in `default` then
`JSONEncodeError` chains the original exception as `__cause__`.
#### default
To serialize a subclass or arbitrary types, specify `default` as a
callable that returns a supported type. `default` may be a function,
lambda, or callable class instance. To specify that a type was not
handled by `default`, raise an exception such as `TypeError`.
```python
>>> import orjson, decimal
>>>
def default(obj):
if isinstance(obj, decimal.Decimal):
return str(obj)
raise TypeError
>>> orjson.dumps(decimal.Decimal("0.0842389659712649442845"))
JSONEncodeError: Type is not JSON serializable: decimal.Decimal
>>> orjson.dumps(decimal.Decimal("0.0842389659712649442845"), default=default)
b'"0.0842389659712649442845"'
>>> orjson.dumps({1, 2}, default=default)
orjson.JSONEncodeError: Type is not JSON serializable: set
```
The `default` callable may return an object that itself
must be handled by `default` up to 254 times before an exception
is raised.
It is important that `default` raise an exception if a type cannot be handled.
Python otherwise implicitly returns `None`, which appears to the caller
like a legitimate value and is serialized:
```python
>>> import orjson, json, rapidjson
>>>
def default(obj):
if isinstance(obj, decimal.Decimal):
return str(obj)
>>> orjson.dumps({"set":{1, 2}}, default=default)
b'{"set":null}'
>>> json.dumps({"set":{1, 2}}, default=default)
'{"set":null}'
>>> rapidjson.dumps({"set":{1, 2}}, default=default)
'{"set":null}'
```
#### option
To modify how data is serialized, specify `option`. Each `option` is an integer
constant in `orjson`. To specify multiple options, mask them together, e.g.,
`option=orjson.OPT_STRICT_INTEGER | orjson.OPT_NAIVE_UTC`.
##### OPT_APPEND_NEWLINE
Append `\n` to the output. This is a convenience and optimization for the
pattern of `dumps(...) + "\n"`. `bytes` objects are immutable and this
pattern copies the original contents.
```python
>>> import orjson
>>> orjson.dumps([])
b"[]"
>>> orjson.dumps([], option=orjson.OPT_APPEND_NEWLINE)
b"[]\n"
```
##### OPT_INDENT_2
Pretty-print output with an indent of two spaces. This is equivalent to
`indent=2` in the standard library. Pretty printing is slower and the output
larger. orjson is the fastest compared library at pretty printing and has
much less of a slowdown to pretty print than the standard library does. This
option is compatible with all other options.
```python
>>> import orjson
>>> orjson.dumps({"a": "b", "c": {"d": True}, "e": [1, 2]})
b'{"a":"b","c":{"d":true},"e":[1,2]}'
>>> orjson.dumps(
{"a": "b", "c": {"d": True}, "e": [1, 2]},
option=orjson.OPT_INDENT_2
)
b'{\n "a": "b",\n "c": {\n "d": true\n },\n "e": [\n 1,\n 2\n ]\n}'
```
If displayed, the indentation and linebreaks appear like this:
```json
{
"a": "b",
"c": {
"d": true
},
"e": [
1,
2
]
}
```
This measures serializing the github.json fixture as compact (52KiB) or
pretty (64KiB):
| Library | compact (ms) | pretty (ms) | vs. orjson |
|------------|----------------|---------------|--------------|
| orjson | 0.03 | 0.04 | 1 |
| ujson | 0.18 | 0.19 | 4.6 |
| rapidjson | 0.1 | 0.12 | 2.9 |
| simplejson | 0.25 | 0.89 | 21.4 |
| json | 0.18 | 0.71 | 17 |
This measures serializing the citm_catalog.json fixture, more of a worst
case due to the amount of nesting and newlines, as compact (489KiB) or
pretty (1.1MiB):
| Library | compact (ms) | pretty (ms) | vs. orjson |
|------------|----------------|---------------|--------------|
| orjson | 0.59 | 0.71 | 1 |
| ujson | 2.9 | 3.59 | 5 |
| rapidjson | 1.81 | 2.8 | 3.9 |
| simplejson | 10.43 | 42.13 | 59.1 |
| json | 4.16 | 33.42 | 46.9 |
This can be reproduced using the `pyindent` script.
##### OPT_NAIVE_UTC
Serialize `datetime.datetime` objects without a `tzinfo` as UTC. This
has no effect on `datetime.datetime` objects that have `tzinfo` set.
```python
>>> import orjson, datetime
>>> orjson.dumps(
datetime.datetime(1970, 1, 1, 0, 0, 0),
)
b'"1970-01-01T00:00:00"'
>>> orjson.dumps(
datetime.datetime(1970, 1, 1, 0, 0, 0),
option=orjson.OPT_NAIVE_UTC,
)
b'"1970-01-01T00:00:00+00:00"'
```
##### OPT_NON_STR_KEYS
Serialize `dict` keys of type other than `str`. This allows `dict` keys
to be one of `str`, `int`, `float`, `bool`, `None`, `datetime.datetime`,
`datetime.date`, `datetime.time`, `enum.Enum`, and `uuid.UUID`. For comparison,
the standard library serializes `str`, `int`, `float`, `bool` or `None` by
default. orjson benchmarks as being faster at serializing non-`str` keys
than other libraries. This option is slower for `str` keys than the default.
```python
>>> import orjson, datetime, uuid
>>> orjson.dumps(
{uuid.UUID("7202d115-7ff3-4c81-a7c1-2a1f067b1ece"): [1, 2, 3]},
option=orjson.OPT_NON_STR_KEYS,
)
b'{"7202d115-7ff3-4c81-a7c1-2a1f067b1ece":[1,2,3]}'
>>> orjson.dumps(
{datetime.datetime(1970, 1, 1, 0, 0, 0): [1, 2, 3]},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_NAIVE_UTC,
)
b'{"1970-01-01T00:00:00+00:00":[1,2,3]}'
```
These types are generally serialized how they would be as
values, e.g., `datetime.datetime` is still an RFC 3339 string and respects
options affecting it. The exception is that `int` serialization does not
respect `OPT_STRICT_INTEGER`.
This option has the risk of creating duplicate keys. This is because non-`str`
objects may serialize to the same `str` as an existing key, e.g.,
`{"1": true, 1: false}`. The last key to be inserted to the `dict` will be
serialized last and a JSON deserializer will presumably take the last
occurrence of a key (in the above, `false`). The first value will be lost.
This option is compatible with `orjson.OPT_SORT_KEYS`. If sorting is used,
note the sort is unstable and will be unpredictable for duplicate keys.
```python
>>> import orjson, datetime
>>> orjson.dumps(
{"other": 1, datetime.date(1970, 1, 5): 2, datetime.date(1970, 1, 3): 3},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS
)
b'{"1970-01-03":3,"1970-01-05":2,"other":1}'
```
This measures serializing 589KiB of JSON comprising a `list` of 100 `dict`
in which each `dict` has both 365 randomly-sorted `int` keys representing epoch
timestamps as well as one `str` key and the value for each key is a
single integer. In "str keys", the keys were converted to `str` before
serialization, and orjson still specifies `option=orjson.OPT_NON_STR_KEYS`
(which is always somewhat slower).
| Library | str keys (ms) | int keys (ms) | int keys sorted (ms) |
|------------|-----------------|-----------------|------------------------|
| orjson | 1.53 | 2.16 | 4.29 |
| ujson | 3.07 | 5.65 | |
| rapidjson | 4.29 | | |
| simplejson | 11.24 | 14.50 | 21.86 |
| json | 7.17 | 8.49 | |
ujson is blank for sorting because it segfaults. json is blank because it
raises `TypeError` on attempting to sort before converting all keys to `str`.
rapidjson is blank because it does not support non-`str` keys. This can
be reproduced using the `pynonstr` script.
##### OPT_OMIT_MICROSECONDS
Do not serialize the `microsecond` field on `datetime.datetime` and
`datetime.time` instances.
```python
>>> import orjson, datetime
>>> orjson.dumps(
datetime.datetime(1970, 1, 1, 0, 0, 0, 1),
)
b'"1970-01-01T00:00:00.000001"'
>>> orjson.dumps(
datetime.datetime(1970, 1, 1, 0, 0, 0, 1),
option=orjson.OPT_OMIT_MICROSECONDS,
)
b'"1970-01-01T00:00:00"'
```
##### OPT_PASSTHROUGH_DATACLASS
Passthrough `dataclasses.dataclass` instances to `default`. This allows
customizing their output but is much slower.
```python
>>> import orjson, dataclasses
>>>
@dataclasses.dataclass
class User:
id: str
name: str
password: str
def default(obj):
if isinstance(obj, User):
return {"id": obj.id, "name": obj.name}
raise TypeError
>>> orjson.dumps(User("3b1", "asd", "zxc"))
b'{"id":"3b1","name":"asd","password":"zxc"}'
>>> orjson.dumps(User("3b1", "asd", "zxc"), option=orjson.OPT_PASSTHROUGH_DATACLASS)
TypeError: Type is not JSON serializable: User
>>> orjson.dumps(
User("3b1", "asd", "zxc"),
option=orjson.OPT_PASSTHROUGH_DATACLASS,
default=default,
)
b'{"id":"3b1","name":"asd"}'
```
##### OPT_PASSTHROUGH_DATETIME
Passthrough `datetime.datetime`, `datetime.date`, and `datetime.time` instances
to `default`. This allows serializing datetimes to a custom format, e.g.,
HTTP dates:
```python
>>> import orjson, datetime
>>>
def default(obj):
if isinstance(obj, datetime.datetime):
return obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
raise TypeError
>>> orjson.dumps({"created_at": datetime.datetime(1970, 1, 1)})
b'{"created_at":"1970-01-01T00:00:00"}'
>>> orjson.dumps({"created_at": datetime.datetime(1970, 1, 1)}, option=orjson.OPT_PASSTHROUGH_DATETIME)
TypeError: Type is not JSON serializable: datetime.datetime
>>> orjson.dumps(
{"created_at": datetime.datetime(1970, 1, 1)},
option=orjson.OPT_PASSTHROUGH_DATETIME,
default=default,
)
b'{"created_at":"Thu, 01 Jan 1970 00:00:00 GMT"}'
```
This does not affect datetimes in `dict` keys if using OPT_NON_STR_KEYS.
##### OPT_PASSTHROUGH_SUBCLASS
Passthrough subclasses of builtin types to `default`.
```python
>>> import orjson
>>>
class Secret(str):
pass
def default(obj):
if isinstance(obj, Secret):
return "******"
raise TypeError
>>> orjson.dumps(Secret("zxc"))
b'"zxc"'
>>> orjson.dumps(Secret("zxc"), option=orjson.OPT_PASSTHROUGH_SUBCLASS)
TypeError: Type is not JSON serializable: Secret
>>> orjson.dumps(Secret("zxc"), option=orjson.OPT_PASSTHROUGH_SUBCLASS, default=default)
b'"******"'
```
This does not affect serializing subclasses as `dict` keys if using
OPT_NON_STR_KEYS.
##### OPT_SERIALIZE_DATACLASS
This is deprecated and has no effect in version 3. In version 2 this was
required to serialize `dataclasses.dataclass` instances. For more, see
[dataclass](https://github.com/ijl/orjson?tab=readme-ov-file#dataclass).
##### OPT_SERIALIZE_NUMPY
Serialize `numpy.ndarray` instances. For more, see
[numpy](https://github.com/ijl/orjson?tab=readme-ov-file#numpy).
##### OPT_SERIALIZE_UUID
This is deprecated and has no effect in version 3. In version 2 this was
required to serialize `uuid.UUID` instances. For more, see
[UUID](https://github.com/ijl/orjson?tab=readme-ov-file#UUID).
##### OPT_SORT_KEYS
Serialize `dict` keys in sorted order. The default is to serialize in an
unspecified order. This is equivalent to `sort_keys=True` in the standard
library.
This can be used to ensure the order is deterministic for hashing or tests.
It has a substantial performance penalty and is not recommended in general.
```python
>>> import orjson
>>> orjson.dumps({"b": 1, "c": 2, "a": 3})
b'{"b":1,"c":2,"a":3}'
>>> orjson.dumps({"b": 1, "c": 2, "a": 3}, option=orjson.OPT_SORT_KEYS)
b'{"a":3,"b":1,"c":2}'
```
This measures serializing the twitter.json fixture unsorted and sorted:
| Library | unsorted (ms) | sorted (ms) | vs. orjson |
|------------|-----------------|---------------|--------------|
| orjson | 0.32 | 0.54 | 1 |
| ujson | 1.6 | 2.07 | 3.8 |
| rapidjson | 1.12 | 1.65 | 3.1 |
| simplejson | 2.25 | 3.13 | 5.8 |
| json | 1.78 | 2.32 | 4.3 |
The benchmark can be reproduced using the `pysort` script.
The sorting is not collation/locale-aware:
```python
>>> import orjson
>>> orjson.dumps({"a": 1, "ä": 2, "A": 3}, option=orjson.OPT_SORT_KEYS)
b'{"A":3,"a":1,"\xc3\xa4":2}'
```
This is the same sorting behavior as the standard library, rapidjson,
simplejson, and ujson.
`dataclass` instances also serialize as maps, but this option has no effect on them.
##### OPT_STRICT_INTEGER
Enforce 53-bit limit on integers. The limit is otherwise 64 bits, the same as
the Python standard library. For more, see [int](https://github.com/ijl/orjson?tab=readme-ov-file#int).
##### OPT_UTC_Z
Serialize a UTC timezone on `datetime.datetime` instances as `Z` instead
of `+00:00`.
```python
>>> import orjson, datetime, zoneinfo
>>> orjson.dumps(
datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=zoneinfo.ZoneInfo("UTC")),
)
b'"1970-01-01T00:00:00+00:00"'
>>> orjson.dumps(
datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=zoneinfo.ZoneInfo("UTC")),
option=orjson.OPT_UTC_Z
)
b'"1970-01-01T00:00:00Z"'
```
#### Fragment
`orjson.Fragment` includes already-serialized JSON in a document. This is an
efficient way to include JSON blobs from a cache, JSONB field, or separately
serialized object without first deserializing to Python objects via `loads()`.
```python
>>> import orjson
>>> orjson.dumps({"key": "zxc", "data": orjson.Fragment(b'{"a": "b", "c": 1}')})
b'{"key":"zxc","data":{"a": "b", "c": 1}}'
```
It does no reformatting: `orjson.OPT_INDENT_2` will not affect a
compact blob nor will a pretty-printed JSON blob be rewritten as compact.
The input must be `bytes` or `str` and given as a positional argument.
This raises `orjson.JSONEncodeError` if a `str` is given and the input is
not valid UTF-8. It otherwise does no validation and it is possible to
write invalid JSON. This does not escape characters. The implementation is
tested to not crash if given invalid strings or invalid JSON.
This is similar to `RawJSON` in rapidjson.
### Deserialize
```python
def loads(__obj: Union[bytes, bytearray, memoryview, str]) -> Any: ...
```
`loads()` deserializes JSON to Python objects. It deserializes to `dict`,
`list`, `int`, `float`, `str`, `bool`, and `None` objects.
`bytes`, `bytearray`, `memoryview`, and `str` input are accepted. If the input
exists as a `memoryview`, `bytearray`, or `bytes` object, it is recommended to
pass these directly rather than creating an unnecessary `str` object. That is,
`orjson.loads(b"{}")` instead of `orjson.loads(b"{}".decode("utf-8"))`. This
has lower memory usage and lower latency.
The input must be valid UTF-8.
orjson maintains a cache of map keys for the duration of the process. This
causes a net reduction in memory usage by avoiding duplicate strings. The
keys must be at most 64 bytes to be cached and 2048 entries are stored.
The global interpreter lock (GIL) is held for the duration of the call.
It raises `JSONDecodeError` if given an invalid type or invalid
JSON. This includes if the input contains `NaN`, `Infinity`, or `-Infinity`,
which the standard library allows, but is not valid JSON.
It raises `JSONDecodeError` if a combination of array or object recurses
1024 levels deep.
`JSONDecodeError` is a subclass of `json.JSONDecodeError` and `ValueError`.
This is for compatibility with the standard library.
## Types
### dataclass
orjson serializes instances of `dataclasses.dataclass` natively. It serializes
instances 40-50x as fast as other libraries and avoids a severe slowdown seen
in other libraries compared to serializing `dict`.
It is supported to pass all variants of dataclasses, including dataclasses
using `__slots__`, frozen dataclasses, those with optional or default
attributes, and subclasses. There is a performance benefit to not
using `__slots__`.
| Library | dict (ms) | dataclass (ms) | vs. orjson |
|------------|-------------|------------------|--------------|
| orjson | 1.40 | 1.60 | 1 |
| ujson | | | |
| rapidjson | 3.64 | 68.48 | 42 |
| simplejson | 14.21 | 92.18 | 57 |
| json | 13.28 | 94.90 | 59 |
This measures serializing 555KiB of JSON, orjson natively and other libraries
using `default` to serialize the output of `dataclasses.asdict()`. This can be
reproduced using the `pydataclass` script.
Dataclasses are serialized as maps, with every attribute serialized and in
the order given on class definition:
```python
>>> import dataclasses, orjson, typing
@dataclasses.dataclass
class Member:
id: int
active: bool = dataclasses.field(default=False)
@dataclasses.dataclass
class Object:
id: int
name: str
members: typing.List[Member]
>>> orjson.dumps(Object(1, "a", [Member(1, True), Member(2)]))
b'{"id":1,"name":"a","members":[{"id":1,"active":true},{"id":2,"active":false}]}'
```
### datetime
orjson serializes `datetime.datetime` objects to
[RFC 3339](https://tools.ietf.org/html/rfc3339) format,
e.g., "1970-01-01T00:00:00+00:00". This is a subset of ISO 8601 and is
compatible with `isoformat()` in the standard library.
```python
>>> import orjson, datetime, zoneinfo
>>> orjson.dumps(
datetime.datetime(2018, 12, 1, 2, 3, 4, 9, tzinfo=zoneinfo.ZoneInfo("Australia/Adelaide"))
)
b'"2018-12-01T02:03:04.000009+10:30"'
>>> orjson.dumps(
datetime.datetime(2100, 9, 1, 21, 55, 2).replace(tzinfo=zoneinfo.ZoneInfo("UTC"))
)
b'"2100-09-01T21:55:02+00:00"'
>>> orjson.dumps(
datetime.datetime(2100, 9, 1, 21, 55, 2)
)
b'"2100-09-01T21:55:02"'
```
`datetime.datetime` supports instances with a `tzinfo` that is `None`,
`datetime.timezone.utc`, a timezone instance from the python3.9+ `zoneinfo`
module, or a timezone instance from the third-party `pendulum`, `pytz`, or
`dateutil`/`arrow` libraries.
It is fastest to use the standard library's `zoneinfo.ZoneInfo` for timezones.
`datetime.time` objects must not have a `tzinfo`.
```python
>>> import orjson, datetime
>>> orjson.dumps(datetime.time(12, 0, 15, 290))
b'"12:00:15.000290"'
```
`datetime.date` objects will always serialize.
```python
>>> import orjson, datetime
>>> orjson.dumps(datetime.date(1900, 1, 2))
b'"1900-01-02"'
```
Errors with `tzinfo` result in `JSONEncodeError` being raised.
To disable serialization of `datetime` objects specify the option
`orjson.OPT_PASSTHROUGH_DATETIME`.
To use "Z" suffix instead of "+00:00" to indicate UTC ("Zulu") time, use the option
`orjson.OPT_UTC_Z`.
To assume datetimes without timezone are UTC, use the option `orjson.OPT_NAIVE_UTC`.
### enum
orjson serializes enums natively. Options apply to their values.
```python
>>> import enum, datetime, orjson
>>>
class DatetimeEnum(enum.Enum):
EPOCH = datetime.datetime(1970, 1, 1, 0, 0, 0)
>>> orjson.dumps(DatetimeEnum.EPOCH)
b'"1970-01-01T00:00:00"'
>>> orjson.dumps(DatetimeEnum.EPOCH, option=orjson.OPT_NAIVE_UTC)
b'"1970-01-01T00:00:00+00:00"'
```
Enums with members that are not supported types can be serialized using
`default`:
```python
>>> import enum, orjson
>>>
class Custom:
def __init__(self, val):
self.val = val
def default(obj):
if isinstance(obj, Custom):
return obj.val
raise TypeError
class CustomEnum(enum.Enum):
ONE = Custom(1)
>>> orjson.dumps(CustomEnum.ONE, default=default)
b'1'
```
### float
orjson serializes and deserializes double precision floats with no loss of
precision and consistent rounding.
`orjson.dumps()` serializes NaN, Infinity, and -Infinity, which are not
compliant JSON, as `null`:
```python
>>> import orjson, ujson, rapidjson, json
>>> orjson.dumps([float("NaN"), float("Infinity"), float("-Infinity")])
b'[null,null,null]'
>>> ujson.dumps([float("NaN"), float("Infinity"), float("-Infinity")])
OverflowError: Invalid Inf value when encoding double
>>> rapidjson.dumps([float("NaN"), float("Infinity"), float("-Infinity")])
'[NaN,Infinity,-Infinity]'
>>> json.dumps([float("NaN"), float("Infinity"), float("-Infinity")])
'[NaN, Infinity, -Infinity]'
```
### int
orjson serializes and deserializes 64-bit integers by default. The range
supported is a signed 64-bit integer's minimum (-9223372036854775807) to
an unsigned 64-bit integer's maximum (18446744073709551615). This
is widely compatible, but there are implementations
that only support 53-bits for integers, e.g.,
web browsers. For those implementations, `dumps()` can be configured to
raise a `JSONEncodeError` on values exceeding the 53-bit range.
```python
>>> import orjson
>>> orjson.dumps(9007199254740992)
b'9007199254740992'
>>> orjson.dumps(9007199254740992, option=orjson.OPT_STRICT_INTEGER)
JSONEncodeError: Integer exceeds 53-bit range
>>> orjson.dumps(-9007199254740992, option=orjson.OPT_STRICT_INTEGER)
JSONEncodeError: Integer exceeds 53-bit range
```
### numpy
orjson natively serializes `numpy.ndarray` and individual
`numpy.float64`, `numpy.float32`, `numpy.float16` (`numpy.half`),
`numpy.int64`, `numpy.int32`, `numpy.int16`, `numpy.int8`,
`numpy.uint64`, `numpy.uint32`, `numpy.uint16`, `numpy.uint8`,
`numpy.uintp`, `numpy.intp`, `numpy.datetime64`, and `numpy.bool`
instances.
orjson is compatible with both numpy v1 and v2.
orjson is faster than all compared libraries at serializing
numpy instances. Serializing numpy data requires specifying
`option=orjson.OPT_SERIALIZE_NUMPY`.
```python
>>> import orjson, numpy
>>> orjson.dumps(
numpy.array([[1, 2, 3], [4, 5, 6]]),
option=orjson.OPT_SERIALIZE_NUMPY,
)
b'[[1,2,3],[4,5,6]]'
```
The array must be a contiguous C array (`C_CONTIGUOUS`) and one of the
supported datatypes.
Note a difference between serializing `numpy.float32` using `ndarray.tolist()`
or `orjson.dumps(..., option=orjson.OPT_SERIALIZE_NUMPY)`: `tolist()` converts
to a `double` before serializing and orjson's native path does not. This
can result in different rounding.
`numpy.datetime64` instances are serialized as RFC 3339 strings and
datetime options affect them.
```python
>>> import orjson, numpy
>>> orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172"),
option=orjson.OPT_SERIALIZE_NUMPY,
)
b'"2021-01-01T00:00:00.172000"'
>>> orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172"),
option=(
orjson.OPT_SERIALIZE_NUMPY |
orjson.OPT_NAIVE_UTC |
orjson.OPT_OMIT_MICROSECONDS
),
)
b'"2021-01-01T00:00:00+00:00"'
```
If an array is not a contiguous C array, contains an unsupported datatype,
or contains a `numpy.datetime64` using an unsupported representation
(e.g., picoseconds), orjson falls through to `default`. In `default`,
`obj.tolist()` can be specified.
If an array is not in the native endianness, e.g., an array of big-endian values
on a little-endian system, `orjson.JSONEncodeError` is raised.
If an array is malformed, `orjson.JSONEncodeError` is raised.
This measures serializing 92MiB of JSON from a `numpy.ndarray` with
dimensions of `(50000, 100)` and `numpy.float64` values:
| Library | Latency (ms) | RSS diff (MiB) | vs. orjson |
|------------|----------------|------------------|--------------|
| orjson | 194 | 99 | 1.0 |
| ujson | | | |
| rapidjson | 3,048 | 309 | 15.7 |
| simplejson | 3,023 | 297 | 15.6 |
| json | 3,133 | 297 | 16.1 |
This measures serializing 100MiB of JSON from a `numpy.ndarray` with
dimensions of `(100000, 100)` and `numpy.int32` values:
| Library | Latency (ms) | RSS diff (MiB) | vs. orjson |
|------------|----------------|------------------|--------------|
| orjson | 178 | 115 | 1.0 |
| ujson | | | |
| rapidjson | 1,512 | 551 | 8.5 |
| simplejson | 1,606 | 504 | 9.0 |
| json | 1,506 | 503 | 8.4 |
This measures serializing 105MiB of JSON from a `numpy.ndarray` with
dimensions of `(100000, 200)` and `numpy.bool` values:
| Library | Latency (ms) | RSS diff (MiB) | vs. orjson |
|------------|----------------|------------------|--------------|
| orjson | 157 | 120 | 1.0 |
| ujson | | | |
| rapidjson | 710 | 327 | 4.5 |
| simplejson | 931 | 398 | 5.9 |
| json | 996 | 400 | 6.3 |
In these benchmarks, orjson serializes natively, ujson is blank because it
does not support a `default` parameter, and the other libraries serialize
`ndarray.tolist()` via `default`. The RSS column measures peak memory
usage during serialization. This can be reproduced using the `pynumpy` script.
orjson does not have an installation or compilation dependency on numpy. The
implementation is independent, reading `numpy.ndarray` using
`PyArrayInterface`.
### str
orjson is strict about UTF-8 conformance. This is stricter than the standard
library's json module, which will serialize and deserialize UTF-16 surrogates,
e.g., "\ud800", that are invalid UTF-8.
If `orjson.dumps()` is given a `str` that does not contain valid UTF-8,
`orjson.JSONEncodeError` is raised. If `loads()` receives invalid UTF-8,
`orjson.JSONDecodeError` is raised.
orjson and rapidjson are the only compared JSON libraries to consistently
error on bad input.
```python
>>> import orjson, ujson, rapidjson, json
>>> orjson.dumps('\ud800')
JSONEncodeError: str is not valid UTF-8: surrogates not allowed
>>> ujson.dumps('\ud800')
UnicodeEncodeError: 'utf-8' codec ...
>>> rapidjson.dumps('\ud800')
UnicodeEncodeError: 'utf-8' codec ...
>>> json.dumps('\ud800')
'"\\ud800"'
>>> orjson.loads('"\\ud800"')
JSONDecodeError: unexpected end of hex escape at line 1 column 8: line 1 column 1 (char 0)
>>> ujson.loads('"\\ud800"')
''
>>> rapidjson.loads('"\\ud800"')
ValueError: Parse error at offset 1: The surrogate pair in string is invalid.
>>> json.loads('"\\ud800"')
'\ud800'
```
To make a best effort at deserializing bad input, first decode `bytes` using
the `replace` or `lossy` argument for `errors`:
```python
>>> import orjson
>>> orjson.loads(b'"\xed\xa0\x80"')
JSONDecodeError: str is not valid UTF-8: surrogates not allowed
>>> orjson.loads(b'"\xed\xa0\x80"'.decode("utf-8", "replace"))
'���'
```
### uuid
orjson serializes `uuid.UUID` instances to
[RFC 4122](https://tools.ietf.org/html/rfc4122) format, e.g.,
"f81d4fae-7dec-11d0-a765-00a0c91e6bf6".
``` python
>>> import orjson, uuid
>>> orjson.dumps(uuid.UUID('f81d4fae-7dec-11d0-a765-00a0c91e6bf6'))
b'"f81d4fae-7dec-11d0-a765-00a0c91e6bf6"'
>>> orjson.dumps(uuid.uuid5(uuid.NAMESPACE_DNS, "python.org"))
b'"886313e1-3b8a-5372-9b90-0c9aee199e5d"'
```
## Testing
The library has comprehensive tests. There are tests against fixtures in the
[JSONTestSuite](https://github.com/nst/JSONTestSuite) and
[nativejson-benchmark](https://github.com/miloyip/nativejson-benchmark)
repositories. It is tested to not crash against the
[Big List of Naughty Strings](https://github.com/minimaxir/big-list-of-naughty-strings).
It is tested to not leak memory. It is tested to not crash
against and not accept invalid UTF-8. There are integration tests
exercising the library's use in web servers (gunicorn using multiprocess/forked
workers) and when
multithreaded. It also uses some tests from the ultrajson library.
orjson is the most correct of the compared libraries. This graph shows how each
library handles a combined 342 JSON fixtures from the
[JSONTestSuite](https://github.com/nst/JSONTestSuite) and
[nativejson-benchmark](https://github.com/miloyip/nativejson-benchmark) tests:
| Library | Invalid JSON documents not rejected | Valid JSON documents not deserialized |
|------------|---------------------------------------|-----------------------------------------|
| orjson | 0 | 0 |
| ujson | 31 | 0 |
| rapidjson | 6 | 0 |
| simplejson | 10 | 0 |
| json | 17 | 0 |
This shows that all libraries deserialize valid JSON but only orjson
correctly rejects the given invalid JSON fixtures. Errors are largely due to
accepting invalid strings and numbers.
The graph above can be reproduced using the `pycorrectness` script.
## Performance
Serialization and deserialization performance of orjson is better than
ultrajson, rapidjson, simplejson, or json. The benchmarks are done on
fixtures of real data:
* twitter.json, 631.5KiB, results of a search on Twitter for "一", containing
CJK strings, dictionaries of strings and arrays of dictionaries, indented.
* github.json, 55.8KiB, a GitHub activity feed, containing dictionaries of
strings and arrays of dictionaries, not indented.
* citm_catalog.json, 1.7MiB, concert data, containing nested dictionaries of
strings and arrays of integers, indented.
* canada.json, 2.2MiB, coordinates of the Canadian border in GeoJSON
format, containing floats and arrays, indented.
### Latency


#### twitter.json serialization
| Library | Median latency (milliseconds) | Operations per second | Relative (latency) |
|------------|---------------------------------|-------------------------|----------------------|
| orjson | 0.1 | 8377 | 1 |
| ujson | 0.9 | 1088 | 7.3 |
| rapidjson | 0.8 | 1228 | 6.8 |
| simplejson | 1.9 | 531 | 15.6 |
| json | 1.4 | 744 | 11.3 |
#### twitter.json deserialization
| Library | Median latency (milliseconds) | Operations per second | Relative (latency) |
|------------|---------------------------------|-------------------------|----------------------|
| orjson | 0.6 | 1811 | 1 |
| ujson | 1.2 | 814 | 2.1 |
| rapidjson | 2.1 | 476 | 3.8 |
| simplejson | 1.6 | 626 | 3 |
| json | 1.8 | 557 | 3.3 |
#### github.json serialization
| Library | Median latency (milliseconds) | Operations per second | Relative (latency) |
|------------|---------------------------------|-------------------------|----------------------|
| orjson | 0.01 | 104424 | 1 |
| ujson | 0.09 | 10594 | 9.8 |
| rapidjson | 0.07 | 13667 | 7.6 |
| simplejson | 0.2 | 5051 | 20.6 |
| json | 0.14 | 7133 | 14.6 |
#### github.json deserialization
| Library | Median latency (milliseconds) | Operations per second | Relative (latency) |
|------------|---------------------------------|-------------------------|----------------------|
| orjson | 0.05 | 20069 | 1 |
| ujson | 0.11 | 8913 | 2.3 |
| rapidjson | 0.13 | 8077 | 2.6 |
| simplejson | 0.11 | 9342 | 2.1 |
| json | 0.11 | 9291 | 2.2 |
#### citm_catalog.json serialization
| Library | Median latency (milliseconds) | Operations per second | Relative (latency) |
|------------|---------------------------------|-------------------------|----------------------|
| orjson | 0.3 | 3757 | 1 |
| ujson | 1.7 | 598 | 6.3 |
| rapidjson | 1.3 | 768 | 4.9 |
| simplejson | 8.3 | 120 | 31.1 |
| json | 3 | 331 | 11.3 |
#### citm_catalog.json deserialization
| Library | Median latency (milliseconds) | Operations per second | Relative (latency) |
|------------|---------------------------------|-------------------------|----------------------|
| orjson | 1.4 | 730 | 1 |
| ujson | 2.6 | 384 | 1.9 |
| rapidjson | 4 | 246 | 3 |
| simplejson | 3.7 | 271 | 2.7 |
| json | 3.7 | 267 | 2.7 |
#### canada.json serialization
| Library | Median latency (milliseconds) | Operations per second | Relative (latency) |
|------------|---------------------------------|-------------------------|----------------------|
| orjson | 2.4 | 410 | 1 |
| ujson | 9.6 | 104 | 3.9 |
| rapidjson | 28.7 | 34 | 11.8 |
| simplejson | 49.3 | 20 | 20.3 |
| json | 30.6 | 32 | 12.6 |
#### canada.json deserialization
| Library | Median latency (milliseconds) | Operations per second | Relative (latency) |
|------------|---------------------------------|-------------------------|----------------------|
| orjson | 3 | 336 | 1 |
| ujson | 7.1 | 141 | 2.4 |
| rapidjson | 20.1 | 49 | 6.7 |
| simplejson | 16.8 | 59 | 5.6 |
| json | 18.2 | 55 | 6.1 |
### Memory
orjson as of 3.7.0 has higher baseline memory usage than other libraries
due to a persistent buffer used for parsing. Incremental memory usage when
deserializing is similar to the standard library and other third-party
libraries.
This measures, in the first column, RSS after importing a library and reading
the fixture, and in the second column, increases in RSS after repeatedly
calling `loads()` on the fixture.
#### twitter.json
| Library | import, read() RSS (MiB) | loads() increase in RSS (MiB) |
|------------|----------------------------|---------------------------------|
| orjson | 15.7 | 3.4 |
| ujson | 16.4 | 3.4 |
| rapidjson | 16.6 | 4.4 |
| simplejson | 14.5 | 1.8 |
| json | 13.9 | 1.8 |
#### github.json
| Library | import, read() RSS (MiB) | loads() increase in RSS (MiB) |
|------------|----------------------------|---------------------------------|
| orjson | 15.2 | 0.4 |
| ujson | 15.4 | 0.4 |
| rapidjson | 15.7 | 0.5 |
| simplejson | 13.7 | 0.2 |
| json | 13.3 | 0.1 |
#### citm_catalog.json
| Library | import, read() RSS (MiB) | loads() increase in RSS (MiB) |
|------------|----------------------------|---------------------------------|
| orjson | 16.8 | 10.1 |
| ujson | 17.3 | 10.2 |
| rapidjson | 17.6 | 28.7 |
| simplejson | 15.8 | 30.1 |
| json | 14.8 | 20.5 |
#### canada.json
| Library | import, read() RSS (MiB) | loads() increase in RSS (MiB) |
|------------|----------------------------|---------------------------------|
| orjson | 17.2 | 22.1 |
| ujson | 17.4 | 18.3 |
| rapidjson | 18 | 23.5 |
| simplejson | 15.7 | 21.4 |
| json | 15.4 | 20.4 |
### Reproducing
The above was measured using Python 3.11.9 on Linux (amd64) with
orjson 3.10.6, ujson 5.10.0, python-rapidjson 1.18, and simplejson 3.19.2.
The latency results can be reproduced using the `pybench` and `graph`
scripts. The memory results can be reproduced using the `pymem` script.
## Questions
### Why can't I install it from PyPI?
Probably `pip` needs to be upgraded to version 20.3 or later to support
the latest manylinux_x_y or universal2 wheel formats.
### "Cargo, the Rust package manager, is not installed or is not on PATH."
This happens when there are no binary wheels (like manylinux) for your
platform on PyPI. You can install [Rust](https://www.rust-lang.org/) through
`rustup` or a package manager and then it will compile.
### Will it deserialize to dataclasses, UUIDs, decimals, etc or support object_hook?
No. This requires a schema specifying what types are expected and how to
handle errors etc. This is addressed by data validation libraries a
level above this.
### Will it serialize to `str`?
No. `bytes` is the correct type for a serialized blob.
### Will it support NDJSON or JSONL?
No. [orjsonl](https://github.com/umarbutler/orjsonl) may be appropriate.
### Will it support JSON5 or RJSON?
No, it supports RFC 8259.
## Packaging
To package orjson requires at least [Rust](https://www.rust-lang.org/) 1.72
and the [maturin](https://github.com/PyO3/maturin) build tool. The recommended
build command is:
```sh
maturin build --release --strip
```
It benefits from also having a C build environment to compile a faster
deserialization backend. See this project's `manylinux_2_28` builds for an
example using clang and LTO.
The project's own CI tests against `nightly-2024-09-25` and stable 1.72. It
is prudent to pin the nightly version because that channel can introduce
breaking changes.
orjson is tested for amd64 on Linux and cross-compiles for aarch64, arm7,
ppc64le, and s390x. It is tested for either aarch64 or amd64 on macOS and
cross-compiles for the other, depending on version. For Windows it is
tested on amd64 and i686.
There are no runtime dependencies other than libc.
The source distribution on PyPI contains all dependencies' source and can be
built without network access. The file can be downloaded from
`https://files.pythonhosted.org/packages/source/o/orjson/orjson-${version}.tar.gz`.
orjson's tests are included in the source distribution on PyPI. The
requirements to run the tests are specified in `test/requirements.txt`. The
tests should be run as part of the build. It can be run with
`pytest -q test`.
## License
orjson was written by ijl <<ijl@mailbox.org>>, copyright 2018 - 2024, available
to you under either the Apache 2 license or MIT license at your choice.
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/orjson/LICENSE-MIT | Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/orjson/pyproject.toml | [project]
name = "orjson"
repository = "https://github.com/ijl/orjson"
requires-python = ">=3.8"
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python",
"Programming Language :: Rust",
"Typing :: Typed",
]
[project.urls]
Documentation = "https://github.com/ijl/orjson"
Changelog = "https://github.com/ijl/orjson/blob/master/CHANGELOG.md"
[build-system]
build-backend = "maturin"
requires = ["maturin>=1,<2"]
[tool.maturin]
python-source = "pysrc"
include = [
{ format = "sdist", path = ".cargo/*" },
{ format = "sdist", path = "build.rs" },
{ format = "sdist", path = "Cargo.lock" },
{ format = "sdist", path = "include/**/*" },
]
[tool.ruff]
line-length = 88
target-version = "py38"
[tool.ruff.lint]
select = [
"I",
]
ignore = [
"E501", # line too long
"F601", # Dictionary key literal ... repeated
]
[tool.ruff.lint.isort]
known-first-party = ["orjson"]
[tool.mypy]
python_version = "3.8"
[[tool.mypy.overrides]]
module = ["dateutil", "pytz", "simplejson", "ujson"]
ignore_missing_imports = true
|
0 | lc_public_repos/langsmith-sdk/vendor | lc_public_repos/langsmith-sdk/vendor/orjson/Cargo.lock | # This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "associative-cache"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b993cd767a2bc7307dd87622311ca22c44329cc7a21366206bfa0896827b2bad"
[[package]]
name = "bytecount"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce"
[[package]]
name = "castaway"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5"
dependencies = [
"rustversion",
]
[[package]]
name = "cc"
version = "1.1.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f"
dependencies = [
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "compact_str"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6050c3a16ddab2e412160b31f2c871015704239bca62f72f6e5f0be631d3f644"
dependencies = [
"castaway",
"cfg-if",
"itoa",
"rustversion",
"ryu",
"serde",
"static_assertions",
]
[[package]]
name = "crunchy"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
[[package]]
name = "encoding_rs"
version = "0.8.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3"
dependencies = [
"cfg-if",
]
[[package]]
name = "gimli"
version = "0.30.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2e1d97fbe9722ba9bbd0c97051c2956e726562b61f86a25a4360398a40edfc9"
[[package]]
name = "half"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888"
dependencies = [
"cfg-if",
"crunchy",
]
[[package]]
name = "itoa"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
dependencies = [
"no-panic",
]
[[package]]
name = "itoap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9028f49264629065d057f340a86acb84867925865f73bbf8d47b4d149a7e88b8"
[[package]]
name = "jiff"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d9d414fc817d3e3d62b2598616733f76c4cc74fbac96069674739b881295c8"
[[package]]
name = "libc"
version = "0.2.161"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1"
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "no-panic"
version = "0.1.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8540b7d99a20166178b42a05776aef900cdbfec397f861dfc7819bf1d7760b3d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "once_cell"
version = "1.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
[[package]]
name = "orjson"
version = "3.10.11"
dependencies = [
"associative-cache",
"bytecount",
"cc",
"compact_str",
"encoding_rs",
"half",
"itoa",
"itoap",
"jiff",
"once_cell",
"pyo3-build-config",
"pyo3-ffi",
"ryu",
"serde",
"serde_json",
"simdutf8",
"smallvec",
"unwinding",
"uuid",
"version_check",
"xxhash-rust",
]
[[package]]
name = "proc-macro2"
version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3-build-config"
version = "0.23.0-dev"
dependencies = [
"once_cell",
"target-lexicon",
]
[[package]]
name = "pyo3-ffi"
version = "0.23.0-dev"
dependencies = [
"libc",
"pyo3-build-config",
]
[[package]]
name = "quote"
version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rustversion"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248"
[[package]]
name = "ryu"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
dependencies = [
"no-panic",
]
[[package]]
name = "serde"
version = "1.0.214"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.214"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.132"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "simdutf8"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
[[package]]
name = "smallvec"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "syn"
version = "2.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e89275301d38033efb81a6e60e3497e734dfcc62571f2854bf4b16690398824c"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "target-lexicon"
version = "0.12.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1"
[[package]]
name = "unicode-ident"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe"
[[package]]
name = "unwinding"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc55842d0db6329a669d55a623c674b02d677b16bfb2d24857d4089d41eba882"
dependencies = [
"gimli",
]
[[package]]
name = "uuid"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a"
[[package]]
name = "version_check"
version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "xxhash-rust"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a5cbf750400958819fb6178eaa83bee5cd9c29a26a40cc241df8c70fdd46984"
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_fake.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import random
import pytest
import orjson
try:
from faker import Faker
except ImportError:
Faker = None # type: ignore
NUM_LOOPS = 10
NUM_SHUFFLES = 10
NUM_ENTRIES = 250
FAKER_LOCALES = [
"ar_AA",
"fi_FI",
"fil_PH",
"he_IL",
"ja_JP",
"th_TH",
"tr_TR",
"uk_UA",
"vi_VN",
]
class TestFaker:
@pytest.mark.skipif(Faker is None, reason="faker not available")
def test_faker(self):
fake = Faker(FAKER_LOCALES)
profile_keys = list(
set(fake.profile().keys()) - {"birthdate", "current_location"}
)
for _ in range(0, NUM_LOOPS):
data = [
{
"person": fake.profile(profile_keys),
"emoji": fake.emoji(),
"text": fake.paragraphs(),
}
for _ in range(0, NUM_ENTRIES)
]
for _ in range(0, NUM_SHUFFLES):
random.shuffle(data)
output = orjson.dumps(data)
assert orjson.loads(output) == data
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_circular.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import orjson
class TestCircular:
def test_circular_dict(self):
"""
dumps() circular reference dict
"""
obj = {} # type: ignore
obj["obj"] = obj
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj)
def test_circular_dict_sort_keys(self):
"""
dumps() circular reference dict OPT_SORT_KEYS
"""
obj = {} # type: ignore
obj["obj"] = obj
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj, option=orjson.OPT_SORT_KEYS)
def test_circular_dict_non_str_keys(self):
"""
dumps() circular reference dict OPT_NON_STR_KEYS
"""
obj = {} # type: ignore
obj["obj"] = obj
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj, option=orjson.OPT_NON_STR_KEYS)
def test_circular_list(self):
"""
dumps() circular reference list
"""
obj = [] # type: ignore
obj.append(obj) # type: ignore
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj)
def test_circular_nested(self):
"""
dumps() circular reference nested dict, list
"""
obj = {} # type: ignore
obj["list"] = [{"obj": obj}]
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj)
def test_circular_nested_sort_keys(self):
"""
dumps() circular reference nested dict, list OPT_SORT_KEYS
"""
obj = {} # type: ignore
obj["list"] = [{"obj": obj}]
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj, option=orjson.OPT_SORT_KEYS)
def test_circular_nested_non_str_keys(self):
"""
dumps() circular reference nested dict, list OPT_NON_STR_KEYS
"""
obj = {} # type: ignore
obj["list"] = [{"obj": obj}]
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj, option=orjson.OPT_NON_STR_KEYS)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_sort_keys.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import orjson
from .util import read_fixture_obj
class TestDictSortKeys:
# citm_catalog is already sorted
def test_twitter_sorted(self):
"""
twitter.json sorted
"""
obj = read_fixture_obj("twitter.json.xz")
assert list(obj.keys()) != sorted(list(obj.keys()))
serialized = orjson.dumps(obj, option=orjson.OPT_SORT_KEYS)
val = orjson.loads(serialized)
assert list(val.keys()) == sorted(list(val.keys()))
def test_canada_sorted(self):
"""
canada.json sorted
"""
obj = read_fixture_obj("canada.json.xz")
assert list(obj.keys()) != sorted(list(obj.keys()))
serialized = orjson.dumps(obj, option=orjson.OPT_SORT_KEYS)
val = orjson.loads(serialized)
assert list(val.keys()) == sorted(list(val.keys()))
def test_github_sorted(self):
"""
github.json sorted
"""
obj = read_fixture_obj("github.json.xz")
for each in obj:
assert list(each.keys()) != sorted(list(each.keys()))
serialized = orjson.dumps(obj, option=orjson.OPT_SORT_KEYS)
val = orjson.loads(serialized)
for each in val:
assert list(each.keys()) == sorted(list(each.keys()))
def test_utf8_sorted(self):
"""
UTF-8 sorted
"""
obj = {"a": 1, "ä": 2, "A": 3}
assert list(obj.keys()) != sorted(list(obj.keys()))
serialized = orjson.dumps(obj, option=orjson.OPT_SORT_KEYS)
val = orjson.loads(serialized)
assert list(val.keys()) == sorted(list(val.keys()))
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_issue221.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import orjson
@pytest.mark.parametrize(
"input",
[
b'"\xc8\x93',
b'"\xc8',
],
)
def test_invalid(input):
with pytest.raises(orjson.JSONDecodeError):
orjson.loads(input)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_issue331.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import datetime
import orjson
from .util import read_fixture_bytes
FIXTURE_ISSUE_335 = {
"pfkrpavmb": "maxyjzmvacdwjfiifmzwbztjmnqdsjesykpf",
"obtsdcnmi": "psyucdnwjr",
"ghsccsccdwep": 1673954411550,
"vyqvkq": "ilfcrjas",
"drfobem": {
"mzqwuvwsglxx": 1673954411550,
"oup": "mmimyli",
"pxfepg": {
"pnqjr": "ylttscz",
"rahfmy": "xrcsutu",
"rccgrkom": "fbt",
"xulnoryigkhtoybq": "hubxdjrnaq",
"vdwriwvlgu": datetime.datetime(
2023, 1, 15, 15, 23, 38, 686000, tzinfo=datetime.timezone.utc
),
"fhmjsszqmxwfruiq": "fzghfrbjxqccf",
"dyiurstuzhu": None,
"tdovgfimofmclc": datetime.datetime(
2023, 1, 15, 15, 23, 38, 686000, tzinfo=datetime.timezone.utc
),
"iyxkgbwxdlrdc": datetime.datetime(
2023, 1, 17, 11, 19, 55, 761000, tzinfo=datetime.timezone.utc
),
"jnjtckehsrtwhgzuhksmclk": ["tlejijcpbjzygepptbxgrugcbufncyupnivbljzhxe"],
"zewoojzsiykjf": datetime.datetime(
2023, 1, 17, 11, 17, 46, 140960, tzinfo=datetime.timezone.utc
),
"muzabbfnxptvqwzbeilkz": False,
"wdiuepootdqyniogblxgwkgcqezutcesb": None,
"lzkthufcerqnxdypdts": datetime.datetime(
2023, 1, 17, 11, 19, 56, 73000, tzinfo=datetime.timezone.utc
),
"epukgzafaubmn": 50000.0,
"cdpeessdedncodoajdqsos": 50000.0,
"adxucexfjgfwxo": "jwuoomwdrfklgt",
"sotxdizdpuunbssidop": None,
"lxmgvysiltbzfkjne": None,
"wyeaarjbilfmjbfzjuzv": None,
"cwlcgx": -1272.22,
"oniptvyaub": -1275.75,
"hqsfeelokxlwnha": datetime.datetime(
2023, 1, 17, 11, 19, 55, 886000, tzinfo=datetime.timezone.utc
),
"nuidlcyrxcrkyytgrnmc": -733.5,
"wmofdeftonjcdnkg": -737.03,
"bnsttxjfxxgxphfiguqew": datetime.datetime(
2023, 1, 17, 11, 19, 55, 886000, tzinfo=datetime.timezone.utc
),
"audhoqqxjliwnsqttwsadmwwv": -737.03,
"badwwjzugwtdkbsamckoljfrrumtrt": datetime.datetime(
2023, 1, 17, 11, 19, 55, 886000, tzinfo=datetime.timezone.utc
),
"zlbggbbjgsugkgkqjycxwdx": -1241.28,
"fxueeffryeafcxtkfzdmlmgu": -538.72,
"yjmapfqummrsyujkosmixumjgfkwd": datetime.datetime(
2023, 1, 16, 22, 59, 59, 999999, tzinfo=datetime.timezone.utc
),
"qepdxlodjetleseyminybdvitcgd": None,
"ltokvpltajwbn": datetime.date(2023, 1, 17),
"ifzhionnrpeoorsupiniwbljek": datetime.datetime(
2023, 1, 17, 11, 19, 49, 113000, tzinfo=datetime.timezone.utc
),
"ljmmehacdawrlbhlhthm": -1241.28,
"jnwffrtloedorwctsclshnpwjq": -702.56,
"yhgssmtrmrcqhsdaekvoxyv": None,
"nfzljididdzkofkrjfxdloygjxfhhoe": None,
"mpctjlifbrgugaugiijj": None,
"ckknohnsefzknbvnmwzlxlajsckl": None,
"rfehqmeeslkcfbptrrghvivcrx": None,
"nqeovbshknctkgkcytzbhfuvpcyamfrafi": None,
"lptomdhvkvjnegsanzshqecas": 0,
"vkbijuitbghlywkeojjf": None,
"hzzmtggbqdglch": "xgehztikx",
"yhmplqyhbndcfdafjvvr": False,
"oucaxvjhjapayexuqwvnnls": None,
"xbnagbhttfloffstxyr": 1673954411.5502248,
"eiqrshvbjmlyzqle": {
"dkayiglkkhfrvbliqy": ["ohjuifj"],
"grqcjzqdiaslqaxhcqg": ["fuuxwsu"],
},
"uflenvgkk": {
"ehycwsz": {
"jeikui": "noxawd",
"gkrefq": "hfonlfp",
"xkxs": "jzt",
"ztpmv": "mpscuot",
"zagmfzmgh": "pdculhh",
"jgzsrpukwqoln": 100000.0,
"vlqzkxbwc": datetime.datetime(
2023,
1,
17,
11,
19,
50,
867000,
tzinfo=datetime.timezone.utc,
),
"cchovdmelbchcgvtg": -30.94,
"xvznnjfpwtdujqrh": 0.92059,
"tmsqwiiopyhlcovcxhojuzzyac": 1.0862009,
"tfzkaimjrpsbeswnrxeo": 0.0,
"isqjxmjupeiboufeaavkdj": -9.76,
"ywjqjiasfuifyqmz": 0.0,
"uvtlmdrk": 0.92028,
"dquzguej": None,
"guudreveynvhvhihegoybqrmejkj": datetime.datetime(
2023, 1, 17, 11, 19, 56, 73000, tzinfo=datetime.timezone.utc
),
"agvnijfztpbpatej": "zym",
"mqsozcvnuvueixszfz": [
{
"oepzcayabl": "givcnhztbdmili",
"rhhaorqbiziqvyhglecqw": True,
"paxvrmateisxfqs": 1.0862009,
"bydrnmhvj": {
"kwqlickvqv": "beinfgmofalgytujorwxqfvlxtbeujmqwrdqzkfpul",
"cxdikf": "dfpbnpe",
"dnnhiy": "reeenz",
"tx": datetime.datetime(
2023,
1,
17,
11,
19,
56,
73000,
tzinfo=datetime.timezone.utc,
),
"tck": datetime.date(2023, 1, 17),
"nvt": 0.92064,
"enc": 0.92059,
"icginezbybhcs": 1673954396073,
"gfamgxmknxirghgmtxl": 1673954411.5492423,
},
}
],
"dqiabsyky": {
"hxzdtwunrr": "fozhshbmijhujcznqykxtlaxfbtdpzvwvjtyuqzlyw",
"tmpscl": "tbivvoa",
"vjjjvl": "arukeb",
"fm": datetime.datetime(
2023,
1,
17,
11,
19,
56,
73000,
tzinfo=datetime.timezone.utc,
),
"rjq": datetime.date(2023, 1, 17),
"oax": 0.92064,
"gdv": 0.92059,
"vousomtllbpsh": 1673954396073,
"pgiblyqswxvwkpmpyay": 1673954411.5492423,
},
"gebil": [
{
"bzrjh": 0.92065,
"izmljcvqinm": 3.25,
"legczrbxlrmcep": None,
}
],
"eqg": [
{
"yngp": "kako",
"udntq": {
"wzygahsmwd": "hplammnltegchpaorxaremhymtqtxdpfzzoyouimnw",
"iofcbwwgu": datetime.datetime(
2023,
1,
17,
11,
19,
50,
867000,
tzinfo=datetime.timezone.utc,
),
"nengib": "zpyilz",
"sorpcw": "ixhzipg",
"kruw": "taq",
"vaqaj": "kravspj",
"omkjhzkxp": "watatag",
"ckwtjcqkjxmdn": 100000.0,
"kpjtgiuhfqx": 3.25,
"upkgqboyyg": 0.92065,
"gkshzyqtpmolnybr": 0.92065,
"oeiueaildnobcyzzpqwjwivkgj": 1.0861891,
"hiheqtjxyjnweryve": 0.0,
"wntcyohtaeylkylp": 0.0,
"jmebuufukzzymohzynpxzp": -9.76,
"rblubytyjuvbeurwrqmz": 0.0,
"xpscrgcnratymu": None,
},
}
],
"kpmgmiqgmswzebawzciss": -0.7,
"ktggnjdemtfxhnultritqokgbjucktdiooic": 0.92058,
"oawdfonaymcwwvmszhdlemjcnb": datetime.datetime(
2023,
1,
17,
11,
19,
55,
886000,
tzinfo=datetime.timezone.utc,
),
"bwfkzqjqqjdgbfbbjwoxhweihipy": "lzvn",
"feslxjpurrukajwellwjqww": 0.0,
"ptuysyuuhwkfqlugjlxkohwanzijtzknupfikp": None,
"gquuleqhpsbyiluhijdddreenggl": datetime.datetime(
2023,
1,
17,
11,
19,
50,
867000,
tzinfo=datetime.timezone.utc,
),
"auhxrvhvvtszkkkpyhbhvpjlypjoyz": "vqdxfdvgxqcu",
}
},
},
"qbov": "vylhkevwf",
"uidiyv": {
"qkyoj": {
"cclzxqbosqmj": 1673954395761,
"rzijfrywwwcr": 1,
"toujesmzk": "afnu",
"aqmpunnlt": "nyreokscjljpfcrrstxgvwddphymgzkvuolbigqhla",
"ofrjrk": "rlffwrw",
"legyfjl": {
"byalenyqro": "tbzhyxo",
"qxrtujt": 0.92028,
"onmhbvy": 0,
"cbhmp": "vqrkzbg",
},
}
},
},
}
def test_issue331_1_pretty():
as_bytes = read_fixture_bytes("issue331_1.json.xz")
as_obj = orjson.loads(as_bytes)
for _ in range(1000):
assert orjson.loads(orjson.dumps(as_obj, option=orjson.OPT_INDENT_2)) == as_obj
def test_issue331_1_compact():
as_bytes = read_fixture_bytes("issue331_1.json.xz")
as_obj = orjson.loads(as_bytes)
for _ in range(1000):
assert orjson.loads(orjson.dumps(as_obj)) == as_obj
def test_issue331_2_pretty():
as_bytes = read_fixture_bytes("issue331_2.json.xz")
as_obj = orjson.loads(as_bytes)
for _ in range(1000):
assert orjson.loads(orjson.dumps(as_obj, option=orjson.OPT_INDENT_2)) == as_obj
def test_issue331_2_compact():
as_bytes = read_fixture_bytes("issue331_2.json.xz")
as_obj = orjson.loads(as_bytes)
for _ in range(1000):
assert orjson.loads(orjson.dumps(as_obj)) == as_obj
def test_issue335_compact():
for _ in range(1000):
assert orjson.dumps(FIXTURE_ISSUE_335)
def test_issue335_pretty():
for _ in range(1000):
assert orjson.dumps(FIXTURE_ISSUE_335, option=orjson.OPT_INDENT_2)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/util.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import lzma
import os
from pathlib import Path
from typing import Any, Dict
import orjson
dirname = os.path.join(os.path.dirname(__file__), "../data")
STR_CACHE: Dict[str, str] = {}
OBJ_CACHE: Dict[str, Any] = {}
def read_fixture_bytes(filename, subdir=None):
if subdir is None:
path = Path(dirname, filename)
else:
path = Path(dirname, subdir, filename)
if path.suffix == ".xz":
contents = lzma.decompress(path.read_bytes())
else:
contents = path.read_bytes()
return contents
def read_fixture_str(filename, subdir=None):
if filename not in STR_CACHE:
STR_CACHE[filename] = read_fixture_bytes(filename, subdir).decode("utf-8")
return STR_CACHE[filename]
def read_fixture_obj(filename):
if filename not in OBJ_CACHE:
OBJ_CACHE[filename] = orjson.loads(read_fixture_str(filename))
return OBJ_CACHE[filename]
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_fragment.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import orjson
try:
import pandas
except ImportError:
pandas = None # type: ignore
from .util import read_fixture_bytes
class TestFragment:
def test_fragment_fragment_eq(self):
assert orjson.Fragment(b"{}") != orjson.Fragment(b"{}")
def test_fragment_fragment_not_mut(self):
fragment = orjson.Fragment(b"{}")
with pytest.raises(AttributeError):
fragment.contents = b"[]"
assert orjson.dumps(fragment) == b"{}"
def test_fragment_repr(self):
assert repr(orjson.Fragment(b"{}")).startswith("<orjson.Fragment object at ")
def test_fragment_fragment_bytes(self):
assert orjson.dumps(orjson.Fragment(b"{}")) == b"{}"
assert orjson.dumps(orjson.Fragment(b"[]")) == b"[]"
assert orjson.dumps([orjson.Fragment(b"{}")]) == b"[{}]"
assert orjson.dumps([orjson.Fragment(b'{}"a\\')]) == b'[{}"a\\]'
def test_fragment_fragment_str(self):
assert orjson.dumps(orjson.Fragment("{}")) == b"{}"
assert orjson.dumps(orjson.Fragment("[]")) == b"[]"
assert orjson.dumps([orjson.Fragment("{}")]) == b"[{}]"
assert orjson.dumps([orjson.Fragment('{}"a\\')]) == b'[{}"a\\]'
def test_fragment_fragment_str_empty(self):
assert orjson.dumps(orjson.Fragment("")) == b""
def test_fragment_fragment_str_str(self):
assert orjson.dumps(orjson.Fragment('"str"')) == b'"str"'
def test_fragment_fragment_str_emoji(self):
assert orjson.dumps(orjson.Fragment('"🐈"')) == b'"\xf0\x9f\x90\x88"'
def test_fragment_fragment_str_array(self):
n = 8096
obj = [orjson.Fragment('"🐈"')] * n
ref = b"[" + b",".join((b'"\xf0\x9f\x90\x88"' for _ in range(0, n))) + b"]"
assert orjson.dumps(obj) == ref
def test_fragment_fragment_str_invalid(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(orjson.Fragment("\ud800")) # type: ignore
def test_fragment_fragment_bytes_invalid(self):
assert orjson.dumps(orjson.Fragment(b"\\ud800")) == b"\\ud800"
def test_fragment_fragment_none(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps([orjson.Fragment(None)]) # type: ignore
def test_fragment_fragment_args_zero(self):
with pytest.raises(TypeError):
orjson.dumps(orjson.Fragment())
def test_fragment_fragment_args_two(self):
with pytest.raises(TypeError):
orjson.dumps(orjson.Fragment(b"{}", None)) # type: ignore
def test_fragment_fragment_keywords(self):
with pytest.raises(TypeError):
orjson.dumps(orjson.Fragment(contents=b"{}")) # type: ignore
def test_fragment_fragment_arg_and_keywords(self):
with pytest.raises(TypeError):
orjson.dumps(orjson.Fragment(b"{}", contents=b"{}")) # type: ignore
@pytest.mark.skipif(pandas is None, reason="pandas is not installed")
class TestFragmentPandas:
def test_fragment_pandas(self):
"""
Fragment pandas.DataFrame.to_json()
"""
def default(value):
if isinstance(value, pandas.DataFrame):
return orjson.Fragment(value.to_json(orient="records"))
raise TypeError
val = pandas.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]})
assert (
orjson.dumps({"data": val}, default=default)
== b'{"data":[{"foo":1,"bar":4},{"foo":2,"bar":5},{"foo":3,"bar":6}]}'
)
class TestFragmentParsing:
def _run_test(self, filename: str):
data = read_fixture_bytes(filename, "parsing")
orjson.dumps(orjson.Fragment(data))
def test_fragment_y_array_arraysWithSpace(self):
self._run_test("y_array_arraysWithSpaces.json")
def test_fragment_y_array_empty_string(self):
self._run_test("y_array_empty-string.json")
def test_fragment_y_array_empty(self):
self._run_test("y_array_empty.json")
def test_fragment_y_array_ending_with_newline(self):
self._run_test("y_array_ending_with_newline.json")
def test_fragment_y_array_false(self):
self._run_test("y_array_false.json")
def test_fragment_y_array_heterogeneou(self):
self._run_test("y_array_heterogeneous.json")
def test_fragment_y_array_null(self):
self._run_test("y_array_null.json")
def test_fragment_y_array_with_1_and_newline(self):
self._run_test("y_array_with_1_and_newline.json")
def test_fragment_y_array_with_leading_space(self):
self._run_test("y_array_with_leading_space.json")
def test_fragment_y_array_with_several_null(self):
self._run_test("y_array_with_several_null.json")
def test_fragment_y_array_with_trailing_space(self):
self._run_test("y_array_with_trailing_space.json")
def test_fragment_y_number(self):
self._run_test("y_number.json")
def test_fragment_y_number_0e_1(self):
self._run_test("y_number_0e+1.json")
def test_fragment_y_number_0e1(self):
self._run_test("y_number_0e1.json")
def test_fragment_y_number_after_space(self):
self._run_test("y_number_after_space.json")
def test_fragment_y_number_double_close_to_zer(self):
self._run_test("y_number_double_close_to_zero.json")
def test_fragment_y_number_int_with_exp(self):
self._run_test("y_number_int_with_exp.json")
def test_fragment_y_number_minus_zer(self):
self._run_test("y_number_minus_zero.json")
def test_fragment_y_number_negative_int(self):
self._run_test("y_number_negative_int.json")
def test_fragment_y_number_negative_one(self):
self._run_test("y_number_negative_one.json")
def test_fragment_y_number_negative_zer(self):
self._run_test("y_number_negative_zero.json")
def test_fragment_y_number_real_capital_e(self):
self._run_test("y_number_real_capital_e.json")
def test_fragment_y_number_real_capital_e_neg_exp(self):
self._run_test("y_number_real_capital_e_neg_exp.json")
def test_fragment_y_number_real_capital_e_pos_exp(self):
self._run_test("y_number_real_capital_e_pos_exp.json")
def test_fragment_y_number_real_exponent(self):
self._run_test("y_number_real_exponent.json")
def test_fragment_y_number_real_fraction_exponent(self):
self._run_test("y_number_real_fraction_exponent.json")
def test_fragment_y_number_real_neg_exp(self):
self._run_test("y_number_real_neg_exp.json")
def test_fragment_y_number_real_pos_exponent(self):
self._run_test("y_number_real_pos_exponent.json")
def test_fragment_y_number_simple_int(self):
self._run_test("y_number_simple_int.json")
def test_fragment_y_number_simple_real(self):
self._run_test("y_number_simple_real.json")
def test_fragment_y_object(self):
self._run_test("y_object.json")
def test_fragment_y_object_basic(self):
self._run_test("y_object_basic.json")
def test_fragment_y_object_duplicated_key(self):
self._run_test("y_object_duplicated_key.json")
def test_fragment_y_object_duplicated_key_and_value(self):
self._run_test("y_object_duplicated_key_and_value.json")
def test_fragment_y_object_empty(self):
self._run_test("y_object_empty.json")
def test_fragment_y_object_empty_key(self):
self._run_test("y_object_empty_key.json")
def test_fragment_y_object_escaped_null_in_key(self):
self._run_test("y_object_escaped_null_in_key.json")
def test_fragment_y_object_extreme_number(self):
self._run_test("y_object_extreme_numbers.json")
def test_fragment_y_object_long_string(self):
self._run_test("y_object_long_strings.json")
def test_fragment_y_object_simple(self):
self._run_test("y_object_simple.json")
def test_fragment_y_object_string_unicode(self):
self._run_test("y_object_string_unicode.json")
def test_fragment_y_object_with_newline(self):
self._run_test("y_object_with_newlines.json")
def test_fragment_y_string_1_2_3_bytes_UTF_8_sequence(self):
self._run_test("y_string_1_2_3_bytes_UTF-8_sequences.json")
def test_fragment_y_string_accepted_surrogate_pair(self):
self._run_test("y_string_accepted_surrogate_pair.json")
def test_fragment_y_string_accepted_surrogate_pairs(self):
self._run_test("y_string_accepted_surrogate_pairs.json")
def test_fragment_y_string_allowed_escape(self):
self._run_test("y_string_allowed_escapes.json")
def test_fragment_y_string_backslash_and_u_escaped_zer(self):
self._run_test("y_string_backslash_and_u_escaped_zero.json")
def test_fragment_y_string_backslash_doublequote(self):
self._run_test("y_string_backslash_doublequotes.json")
def test_fragment_y_string_comment(self):
self._run_test("y_string_comments.json")
def test_fragment_y_string_double_escape_a(self):
self._run_test("y_string_double_escape_a.json")
def test_fragment_y_string_double_escape_(self):
self._run_test("y_string_double_escape_n.json")
def test_fragment_y_string_escaped_control_character(self):
self._run_test("y_string_escaped_control_character.json")
def test_fragment_y_string_escaped_noncharacter(self):
self._run_test("y_string_escaped_noncharacter.json")
def test_fragment_y_string_in_array(self):
self._run_test("y_string_in_array.json")
def test_fragment_y_string_in_array_with_leading_space(self):
self._run_test("y_string_in_array_with_leading_space.json")
def test_fragment_y_string_last_surrogates_1_and_2(self):
self._run_test("y_string_last_surrogates_1_and_2.json")
def test_fragment_y_string_nbsp_uescaped(self):
self._run_test("y_string_nbsp_uescaped.json")
def test_fragment_y_string_nonCharacterInUTF_8_U_10FFFF(self):
self._run_test("y_string_nonCharacterInUTF-8_U+10FFFF.json")
def test_fragment_y_string_nonCharacterInUTF_8_U_FFFF(self):
self._run_test("y_string_nonCharacterInUTF-8_U+FFFF.json")
def test_fragment_y_string_null_escape(self):
self._run_test("y_string_null_escape.json")
def test_fragment_y_string_one_byte_utf_8(self):
self._run_test("y_string_one-byte-utf-8.json")
def test_fragment_y_string_pi(self):
self._run_test("y_string_pi.json")
def test_fragment_y_string_reservedCharacterInUTF_8_U_1BFFF(self):
self._run_test("y_string_reservedCharacterInUTF-8_U+1BFFF.json")
def test_fragment_y_string_simple_ascii(self):
self._run_test("y_string_simple_ascii.json")
# JSONTestSuite "y_" (must-accept) fragments: strings with escapes/surrogates,
# lone scalar values, and structural whitespace that a conforming JSON parser
# is required to accept. Each generated test just feeds the named fixture file
# to the shared _run_test harness; the filename encodes the case being checked.
def test_fragment_y_string_space(self):
    self._run_test("y_string_space.json")
def test_fragment_y_string_surrogates_U_1D11E_MUSICAL_SYMBOL_G_CLEF(self):
    self._run_test("y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json")
def test_fragment_y_string_three_byte_utf_8(self):
    self._run_test("y_string_three-byte-utf-8.json")
def test_fragment_y_string_two_byte_utf_8(self):
    self._run_test("y_string_two-byte-utf-8.json")
def test_fragment_y_string_u_2028_line_sep(self):
    self._run_test("y_string_u+2028_line_sep.json")
def test_fragment_y_string_u_2029_par_sep(self):
    self._run_test("y_string_u+2029_par_sep.json")
def test_fragment_y_string_uEscape(self):
    self._run_test("y_string_uEscape.json")
def test_fragment_y_string_uescaped_newline(self):
    self._run_test("y_string_uescaped_newline.json")
def test_fragment_y_string_unescaped_char_delete(self):
    self._run_test("y_string_unescaped_char_delete.json")
def test_fragment_y_string_unicode(self):
    self._run_test("y_string_unicode.json")
def test_fragment_y_string_unicodeEscapedBackslash(self):
    self._run_test("y_string_unicodeEscapedBackslash.json")
def test_fragment_y_string_unicode_2(self):
    self._run_test("y_string_unicode_2.json")
def test_fragment_y_string_unicode_U_10FFFE_nonchar(self):
    self._run_test("y_string_unicode_U+10FFFE_nonchar.json")
def test_fragment_y_string_unicode_U_1FFFE_nonchar(self):
    self._run_test("y_string_unicode_U+1FFFE_nonchar.json")
def test_fragment_y_string_unicode_U_200B_ZERO_WIDTH_SPACE(self):
    self._run_test("y_string_unicode_U+200B_ZERO_WIDTH_SPACE.json")
def test_fragment_y_string_unicode_U_2064_invisible_plu(self):
    self._run_test("y_string_unicode_U+2064_invisible_plus.json")
def test_fragment_y_string_unicode_U_FDD0_nonchar(self):
    self._run_test("y_string_unicode_U+FDD0_nonchar.json")
def test_fragment_y_string_unicode_U_FFFE_nonchar(self):
    self._run_test("y_string_unicode_U+FFFE_nonchar.json")
def test_fragment_y_string_unicode_escaped_double_quote(self):
    self._run_test("y_string_unicode_escaped_double_quote.json")
def test_fragment_y_string_utf8(self):
    self._run_test("y_string_utf8.json")
def test_fragment_y_string_with_del_character(self):
    self._run_test("y_string_with_del_character.json")
# Lone top-level scalars ("lonely" values) must parse as valid JSON documents.
def test_fragment_y_structure_lonely_false(self):
    self._run_test("y_structure_lonely_false.json")
def test_fragment_y_structure_lonely_int(self):
    self._run_test("y_structure_lonely_int.json")
def test_fragment_y_structure_lonely_negative_real(self):
    self._run_test("y_structure_lonely_negative_real.json")
def test_fragment_y_structure_lonely_null(self):
    self._run_test("y_structure_lonely_null.json")
def test_fragment_y_structure_lonely_string(self):
    self._run_test("y_structure_lonely_string.json")
def test_fragment_y_structure_lonely_true(self):
    self._run_test("y_structure_lonely_true.json")
def test_fragment_y_structure_string_empty(self):
    self._run_test("y_structure_string_empty.json")
def test_fragment_y_structure_trailing_newline(self):
    self._run_test("y_structure_trailing_newline.json")
def test_fragment_y_structure_true_in_array(self):
    self._run_test("y_structure_true_in_array.json")
def test_fragment_y_structure_whitespace_array(self):
    self._run_test("y_structure_whitespace_array.json")
# JSONTestSuite "n_" (must-reject) fragments, part 1: malformed arrays,
# truncated literals, and malformed numbers. A conforming parser must raise
# an error for every one of these fixtures. Some test names look truncated
# (e.g. "..._just_minu") — that is an artifact of the name-generation step;
# the fixture filename string is authoritative.
def test_fragment_n_array_1_true_without_comma(self):
    self._run_test("n_array_1_true_without_comma.json")
def test_fragment_n_array_a_invalid_utf8(self):
    self._run_test("n_array_a_invalid_utf8.json")
def test_fragment_n_array_colon_instead_of_comma(self):
    self._run_test("n_array_colon_instead_of_comma.json")
def test_fragment_n_array_comma_after_close(self):
    self._run_test("n_array_comma_after_close.json")
def test_fragment_n_array_comma_and_number(self):
    self._run_test("n_array_comma_and_number.json")
def test_fragment_n_array_double_comma(self):
    self._run_test("n_array_double_comma.json")
def test_fragment_n_array_double_extra_comma(self):
    self._run_test("n_array_double_extra_comma.json")
def test_fragment_n_array_extra_close(self):
    self._run_test("n_array_extra_close.json")
def test_fragment_n_array_extra_comma(self):
    self._run_test("n_array_extra_comma.json")
def test_fragment_n_array_incomplete(self):
    self._run_test("n_array_incomplete.json")
def test_fragment_n_array_incomplete_invalid_value(self):
    self._run_test("n_array_incomplete_invalid_value.json")
def test_fragment_n_array_inner_array_no_comma(self):
    self._run_test("n_array_inner_array_no_comma.json")
def test_fragment_n_array_invalid_utf8(self):
    self._run_test("n_array_invalid_utf8.json")
def test_fragment_n_array_items_separated_by_semicol(self):
    self._run_test("n_array_items_separated_by_semicolon.json")
def test_fragment_n_array_just_comma(self):
    self._run_test("n_array_just_comma.json")
def test_fragment_n_array_just_minu(self):
    self._run_test("n_array_just_minus.json")
def test_fragment_n_array_missing_value(self):
    self._run_test("n_array_missing_value.json")
def test_fragment_n_array_newlines_unclosed(self):
    self._run_test("n_array_newlines_unclosed.json")
def test_fragment_n_array_number_and_comma(self):
    self._run_test("n_array_number_and_comma.json")
def test_fragment_n_array_number_and_several_comma(self):
    self._run_test("n_array_number_and_several_commas.json")
def test_fragment_n_array_spaces_vertical_tab_formfeed(self):
    self._run_test("n_array_spaces_vertical_tab_formfeed.json")
def test_fragment_n_array_star_inside(self):
    self._run_test("n_array_star_inside.json")
def test_fragment_n_array_unclosed(self):
    self._run_test("n_array_unclosed.json")
def test_fragment_n_array_unclosed_trailing_comma(self):
    self._run_test("n_array_unclosed_trailing_comma.json")
def test_fragment_n_array_unclosed_with_new_line(self):
    self._run_test("n_array_unclosed_with_new_lines.json")
def test_fragment_n_array_unclosed_with_object_inside(self):
    self._run_test("n_array_unclosed_with_object_inside.json")
# Truncated keyword literals (fals, nul, tru) must be rejected.
def test_fragment_n_incomplete_false(self):
    self._run_test("n_incomplete_false.json")
def test_fragment_n_incomplete_null(self):
    self._run_test("n_incomplete_null.json")
def test_fragment_n_incomplete_true(self):
    self._run_test("n_incomplete_true.json")
def test_fragment_n_multidigit_number_then_00(self):
    self._run_test("n_multidigit_number_then_00.json")
# Malformed numbers: bad signs, bare dots, empty exponents, hex, leading
# zeros, NaN/Infinity, and invalid UTF-8 embedded in numeric tokens.
def test_fragment_n_number__(self):
    self._run_test("n_number_++.json")
def test_fragment_n_number_1(self):
    self._run_test("n_number_+1.json")
def test_fragment_n_number_Inf(self):
    self._run_test("n_number_+Inf.json")
def test_fragment_n_number_01(self):
    self._run_test("n_number_-01.json")
def test_fragment_n_number_1_0(self):
    self._run_test("n_number_-1.0..json")
def test_fragment_n_number_2(self):
    self._run_test("n_number_-2..json")
def test_fragment_n_number_negative_NaN(self):
    self._run_test("n_number_-NaN.json")
def test_fragment_n_number_negative_1(self):
    self._run_test("n_number_.-1.json")
def test_fragment_n_number_2e_3(self):
    self._run_test("n_number_.2e-3.json")
def test_fragment_n_number_0_1_2(self):
    self._run_test("n_number_0.1.2.json")
def test_fragment_n_number_0_3e_(self):
    self._run_test("n_number_0.3e+.json")
def test_fragment_n_number_0_3e(self):
    self._run_test("n_number_0.3e.json")
def test_fragment_n_number_0_e1(self):
    self._run_test("n_number_0.e1.json")
def test_fragment_n_number_0_capital_E_(self):
    self._run_test("n_number_0_capital_E+.json")
def test_fragment_n_number_0_capital_E(self):
    self._run_test("n_number_0_capital_E.json")
def test_fragment_n_number_0e_(self):
    self._run_test("n_number_0e+.json")
def test_fragment_n_number_0e(self):
    self._run_test("n_number_0e.json")
def test_fragment_n_number_1_0e_(self):
    self._run_test("n_number_1.0e+.json")
def test_fragment_n_number_1_0e_2(self):
    self._run_test("n_number_1.0e-.json")
def test_fragment_n_number_1_0e(self):
    self._run_test("n_number_1.0e.json")
def test_fragment_n_number_1_000(self):
    self._run_test("n_number_1_000.json")
def test_fragment_n_number_1eE2(self):
    self._run_test("n_number_1eE2.json")
def test_fragment_n_number_2_e_3(self):
    self._run_test("n_number_2.e+3.json")
def test_fragment_n_number_2_e_3_2(self):
    self._run_test("n_number_2.e-3.json")
def test_fragment_n_number_2_e3_3(self):
    self._run_test("n_number_2.e3.json")
def test_fragment_n_number_9_e_(self):
    self._run_test("n_number_9.e+.json")
def test_fragment_n_number_negative_Inf(self):
    self._run_test("n_number_Inf.json")
def test_fragment_n_number_NaN(self):
    self._run_test("n_number_NaN.json")
def test_fragment_n_number_U_FF11_fullwidth_digit_one(self):
    self._run_test("n_number_U+FF11_fullwidth_digit_one.json")
def test_fragment_n_number_expressi(self):
    self._run_test("n_number_expression.json")
def test_fragment_n_number_hex_1_digit(self):
    self._run_test("n_number_hex_1_digit.json")
def test_fragment_n_number_hex_2_digit(self):
    self._run_test("n_number_hex_2_digits.json")
def test_fragment_n_number_infinity(self):
    self._run_test("n_number_infinity.json")
def test_fragment_n_number_invalid_(self):
    self._run_test("n_number_invalid+-.json")
def test_fragment_n_number_invalid_negative_real(self):
    self._run_test("n_number_invalid-negative-real.json")
def test_fragment_n_number_invalid_utf_8_in_bigger_int(self):
    self._run_test("n_number_invalid-utf-8-in-bigger-int.json")
def test_fragment_n_number_invalid_utf_8_in_exponent(self):
    self._run_test("n_number_invalid-utf-8-in-exponent.json")
def test_fragment_n_number_invalid_utf_8_in_int(self):
    self._run_test("n_number_invalid-utf-8-in-int.json")
def test_fragment_n_number_minus_infinity(self):
    self._run_test("n_number_minus_infinity.json")
def test_fragment_n_number_minus_sign_with_trailing_garbage(self):
    self._run_test("n_number_minus_sign_with_trailing_garbage.json")
def test_fragment_n_number_minus_space_1(self):
    self._run_test("n_number_minus_space_1.json")
def test_fragment_n_number_neg_int_starting_with_zer(self):
    self._run_test("n_number_neg_int_starting_with_zero.json")
def test_fragment_n_number_neg_real_without_int_part(self):
    self._run_test("n_number_neg_real_without_int_part.json")
def test_fragment_n_number_neg_with_garbage_at_end(self):
    self._run_test("n_number_neg_with_garbage_at_end.json")
def test_fragment_n_number_real_garbage_after_e(self):
    self._run_test("n_number_real_garbage_after_e.json")
def test_fragment_n_number_real_with_invalid_utf8_after_e(self):
    self._run_test("n_number_real_with_invalid_utf8_after_e.json")
def test_fragment_n_number_real_without_fractional_part(self):
    self._run_test("n_number_real_without_fractional_part.json")
def test_fragment_n_number_starting_with_dot(self):
    self._run_test("n_number_starting_with_dot.json")
def test_fragment_n_number_with_alpha(self):
    self._run_test("n_number_with_alpha.json")
def test_fragment_n_number_with_alpha_char(self):
    self._run_test("n_number_with_alpha_char.json")
def test_fragment_n_number_with_leading_zer(self):
    self._run_test("n_number_with_leading_zero.json")
# JSONTestSuite "n_" (must-reject) fragments, part 2: malformed objects
# (missing/duplicated separators, unquoted or non-string keys, comments,
# trailing garbage) and malformed strings (bad escapes, lone surrogates,
# unescaped control characters, single quotes).
def test_fragment_n_object_bad_value(self):
    self._run_test("n_object_bad_value.json")
def test_fragment_n_object_bracket_key(self):
    self._run_test("n_object_bracket_key.json")
def test_fragment_n_object_comma_instead_of_col(self):
    self._run_test("n_object_comma_instead_of_colon.json")
def test_fragment_n_object_double_col(self):
    self._run_test("n_object_double_colon.json")
def test_fragment_n_object_emoji(self):
    self._run_test("n_object_emoji.json")
def test_fragment_n_object_garbage_at_end(self):
    self._run_test("n_object_garbage_at_end.json")
def test_fragment_n_object_key_with_single_quote(self):
    self._run_test("n_object_key_with_single_quotes.json")
def test_fragment_n_object_lone_continuation_byte_in_key_and_trailing_comma(self):
    self._run_test("n_object_lone_continuation_byte_in_key_and_trailing_comma.json")
def test_fragment_n_object_missing_col(self):
    self._run_test("n_object_missing_colon.json")
def test_fragment_n_object_missing_key(self):
    self._run_test("n_object_missing_key.json")
def test_fragment_n_object_missing_semicol(self):
    self._run_test("n_object_missing_semicolon.json")
def test_fragment_n_object_missing_value(self):
    self._run_test("n_object_missing_value.json")
def test_fragment_n_object_no_col(self):
    self._run_test("n_object_no-colon.json")
def test_fragment_n_object_non_string_key(self):
    self._run_test("n_object_non_string_key.json")
def test_fragment_n_object_non_string_key_but_huge_number_instead(self):
    self._run_test("n_object_non_string_key_but_huge_number_instead.json")
def test_fragment_n_object_repeated_null_null(self):
    self._run_test("n_object_repeated_null_null.json")
def test_fragment_n_object_several_trailing_comma(self):
    self._run_test("n_object_several_trailing_commas.json")
def test_fragment_n_object_single_quote(self):
    self._run_test("n_object_single_quote.json")
def test_fragment_n_object_trailing_comma(self):
    self._run_test("n_object_trailing_comma.json")
def test_fragment_n_object_trailing_comment(self):
    self._run_test("n_object_trailing_comment.json")
def test_fragment_n_object_trailing_comment_ope(self):
    self._run_test("n_object_trailing_comment_open.json")
def test_fragment_n_object_trailing_comment_slash_ope(self):
    self._run_test("n_object_trailing_comment_slash_open.json")
def test_fragment_n_object_trailing_comment_slash_open_incomplete(self):
    self._run_test("n_object_trailing_comment_slash_open_incomplete.json")
def test_fragment_n_object_two_commas_in_a_row(self):
    self._run_test("n_object_two_commas_in_a_row.json")
def test_fragment_n_object_unquoted_key(self):
    self._run_test("n_object_unquoted_key.json")
def test_fragment_n_object_unterminated_value(self):
    self._run_test("n_object_unterminated-value.json")
def test_fragment_n_object_with_single_string(self):
    self._run_test("n_object_with_single_string.json")
def test_fragment_n_object_with_trailing_garbage(self):
    self._run_test("n_object_with_trailing_garbage.json")
# A document containing only whitespace has no JSON value and must fail.
def test_fragment_n_single_space(self):
    self._run_test("n_single_space.json")
def test_fragment_n_string_1_surrogate_then_escape(self):
    self._run_test("n_string_1_surrogate_then_escape.json")
def test_fragment_n_string_1_surrogate_then_escape_u(self):
    self._run_test("n_string_1_surrogate_then_escape_u.json")
def test_fragment_n_string_1_surrogate_then_escape_u1(self):
    self._run_test("n_string_1_surrogate_then_escape_u1.json")
def test_fragment_n_string_1_surrogate_then_escape_u1x(self):
    self._run_test("n_string_1_surrogate_then_escape_u1x.json")
def test_fragment_n_string_accentuated_char_no_quote(self):
    self._run_test("n_string_accentuated_char_no_quotes.json")
def test_fragment_n_string_backslash_00(self):
    self._run_test("n_string_backslash_00.json")
def test_fragment_n_string_escape_x(self):
    self._run_test("n_string_escape_x.json")
def test_fragment_n_string_escaped_backslash_bad(self):
    self._run_test("n_string_escaped_backslash_bad.json")
def test_fragment_n_string_escaped_ctrl_char_tab(self):
    self._run_test("n_string_escaped_ctrl_char_tab.json")
def test_fragment_n_string_escaped_emoji(self):
    self._run_test("n_string_escaped_emoji.json")
def test_fragment_n_string_incomplete_escape(self):
    self._run_test("n_string_incomplete_escape.json")
def test_fragment_n_string_incomplete_escaped_character(self):
    self._run_test("n_string_incomplete_escaped_character.json")
def test_fragment_n_string_incomplete_surrogate(self):
    self._run_test("n_string_incomplete_surrogate.json")
def test_fragment_n_string_incomplete_surrogate_escape_invalid(self):
    self._run_test("n_string_incomplete_surrogate_escape_invalid.json")
def test_fragment_n_string_invalid_utf_8_in_escape(self):
    self._run_test("n_string_invalid-utf-8-in-escape.json")
def test_fragment_n_string_invalid_backslash_esc(self):
    self._run_test("n_string_invalid_backslash_esc.json")
def test_fragment_n_string_invalid_unicode_escape(self):
    self._run_test("n_string_invalid_unicode_escape.json")
def test_fragment_n_string_invalid_utf8_after_escape(self):
    self._run_test("n_string_invalid_utf8_after_escape.json")
def test_fragment_n_string_leading_uescaped_thinspace(self):
    self._run_test("n_string_leading_uescaped_thinspace.json")
def test_fragment_n_string_no_quotes_with_bad_escape(self):
    self._run_test("n_string_no_quotes_with_bad_escape.json")
def test_fragment_n_string_single_doublequote(self):
    self._run_test("n_string_single_doublequote.json")
def test_fragment_n_string_single_quote(self):
    self._run_test("n_string_single_quote.json")
def test_fragment_n_string_single_string_no_double_quote(self):
    self._run_test("n_string_single_string_no_double_quotes.json")
def test_fragment_n_string_start_escape_unclosed(self):
    self._run_test("n_string_start_escape_unclosed.json")
def test_fragment_n_string_unescaped_crtl_char(self):
    self._run_test("n_string_unescaped_crtl_char.json")
def test_fragment_n_string_unescaped_newline(self):
    self._run_test("n_string_unescaped_newline.json")
def test_fragment_n_string_unescaped_tab(self):
    self._run_test("n_string_unescaped_tab.json")
def test_fragment_n_string_unicode_CapitalU(self):
    self._run_test("n_string_unicode_CapitalU.json")
def test_fragment_n_string_with_trailing_garbage(self):
    self._run_test("n_string_with_trailing_garbage.json")
# JSONTestSuite "n_" (must-reject) fragments, part 3: structural errors —
# unbalanced brackets/braces, BOM/encoding problems, garbage tokens, and
# deep-nesting stress inputs. Note two fixtures are xz-compressed
# (".json.xz"); the _run_test harness is expected to handle decompression.
def test_fragment_n_structure_100000_opening_array(self):
    self._run_test("n_structure_100000_opening_arrays.json.xz")
def test_fragment_n_structure_U_2060_word_joined(self):
    self._run_test("n_structure_U+2060_word_joined.json")
def test_fragment_n_structure_UTF8_BOM_no_data(self):
    self._run_test("n_structure_UTF8_BOM_no_data.json")
def test_fragment_n_structure_angle_bracket_(self):
    self._run_test("n_structure_angle_bracket_..json")
def test_fragment_n_structure_angle_bracket_null(self):
    self._run_test("n_structure_angle_bracket_null.json")
def test_fragment_n_structure_array_trailing_garbage(self):
    self._run_test("n_structure_array_trailing_garbage.json")
def test_fragment_n_structure_array_with_extra_array_close(self):
    self._run_test("n_structure_array_with_extra_array_close.json")
def test_fragment_n_structure_array_with_unclosed_string(self):
    self._run_test("n_structure_array_with_unclosed_string.json")
def test_fragment_n_structure_ascii_unicode_identifier(self):
    self._run_test("n_structure_ascii-unicode-identifier.json")
def test_fragment_n_structure_capitalized_True(self):
    self._run_test("n_structure_capitalized_True.json")
def test_fragment_n_structure_close_unopened_array(self):
    self._run_test("n_structure_close_unopened_array.json")
def test_fragment_n_structure_comma_instead_of_closing_brace(self):
    self._run_test("n_structure_comma_instead_of_closing_brace.json")
def test_fragment_n_structure_double_array(self):
    self._run_test("n_structure_double_array.json")
def test_fragment_n_structure_end_array(self):
    self._run_test("n_structure_end_array.json")
def test_fragment_n_structure_incomplete_UTF8_BOM(self):
    self._run_test("n_structure_incomplete_UTF8_BOM.json")
def test_fragment_n_structure_lone_invalid_utf_8(self):
    self._run_test("n_structure_lone-invalid-utf-8.json")
def test_fragment_n_structure_lone_open_bracket(self):
    self._run_test("n_structure_lone-open-bracket.json")
def test_fragment_n_structure_no_data(self):
    self._run_test("n_structure_no_data.json")
def test_fragment_n_structure_null_byte_outside_string(self):
    self._run_test("n_structure_null-byte-outside-string.json")
def test_fragment_n_structure_number_with_trailing_garbage(self):
    self._run_test("n_structure_number_with_trailing_garbage.json")
def test_fragment_n_structure_object_followed_by_closing_object(self):
    self._run_test("n_structure_object_followed_by_closing_object.json")
def test_fragment_n_structure_object_unclosed_no_value(self):
    self._run_test("n_structure_object_unclosed_no_value.json")
def test_fragment_n_structure_object_with_comment(self):
    self._run_test("n_structure_object_with_comment.json")
def test_fragment_n_structure_object_with_trailing_garbage(self):
    self._run_test("n_structure_object_with_trailing_garbage.json")
def test_fragment_n_structure_open_array_apostrophe(self):
    self._run_test("n_structure_open_array_apostrophe.json")
def test_fragment_n_structure_open_array_comma(self):
    self._run_test("n_structure_open_array_comma.json")
def test_fragment_n_structure_open_array_object(self):
    self._run_test("n_structure_open_array_object.json.xz")
def test_fragment_n_structure_open_array_open_object(self):
    self._run_test("n_structure_open_array_open_object.json")
def test_fragment_n_structure_open_array_open_string(self):
    self._run_test("n_structure_open_array_open_string.json")
def test_fragment_n_structure_open_array_string(self):
    self._run_test("n_structure_open_array_string.json")
def test_fragment_n_structure_open_object(self):
    self._run_test("n_structure_open_object.json")
def test_fragment_n_structure_open_object_close_array(self):
    self._run_test("n_structure_open_object_close_array.json")
def test_fragment_n_structure_open_object_comma(self):
    self._run_test("n_structure_open_object_comma.json")
def test_fragment_n_structure_open_object_open_array(self):
    self._run_test("n_structure_open_object_open_array.json")
def test_fragment_n_structure_open_object_open_string(self):
    self._run_test("n_structure_open_object_open_string.json")
def test_fragment_n_structure_open_object_string_with_apostrophe(self):
    self._run_test("n_structure_open_object_string_with_apostrophes.json")
def test_fragment_n_structure_open_ope(self):
    self._run_test("n_structure_open_open.json")
def test_fragment_n_structure_single_eacute(self):
    self._run_test("n_structure_single_eacute.json")
def test_fragment_n_structure_single_star(self):
    self._run_test("n_structure_single_star.json")
def test_fragment_n_structure_trailing_(self):
    self._run_test("n_structure_trailing_#.json")
def test_fragment_n_structure_uescaped_LF_before_string(self):
    self._run_test("n_structure_uescaped_LF_before_string.json")
def test_fragment_n_structure_unclosed_array(self):
    self._run_test("n_structure_unclosed_array.json")
def test_fragment_n_structure_unclosed_array_partial_null(self):
    self._run_test("n_structure_unclosed_array_partial_null.json")
def test_fragment_n_structure_unclosed_array_unfinished_false(self):
    self._run_test("n_structure_unclosed_array_unfinished_false.json")
def test_fragment_n_structure_unclosed_array_unfinished_true(self):
    self._run_test("n_structure_unclosed_array_unfinished_true.json")
def test_fragment_n_structure_unclosed_object(self):
    self._run_test("n_structure_unclosed_object.json")
def test_fragment_n_structure_unicode_identifier(self):
    self._run_test("n_structure_unicode-identifier.json")
def test_fragment_n_structure_whitespace_U_2060_word_joiner(self):
    self._run_test("n_structure_whitespace_U+2060_word_joiner.json")
def test_fragment_n_structure_whitespace_formfeed(self):
    self._run_test("n_structure_whitespace_formfeed.json")
# JSONTestSuite "i_" (implementation-defined) fragments: parsers may either
# accept or reject these (huge/overflowing numbers, lone or mismatched
# surrogates, non-UTF-8 encodings, deep nesting); the harness pins down this
# implementation's chosen behavior so regressions are caught.
def test_fragment_i_number_double_huge_neg_exp(self):
    self._run_test("i_number_double_huge_neg_exp.json")
def test_fragment_i_number_huge_exp(self):
    self._run_test("i_number_huge_exp.json")
def test_fragment_i_number_neg_int_huge_exp(self):
    self._run_test("i_number_neg_int_huge_exp.json")
def test_fragment_i_number_pos_double_huge_exp(self):
    self._run_test("i_number_pos_double_huge_exp.json")
def test_fragment_i_number_real_neg_overflow(self):
    self._run_test("i_number_real_neg_overflow.json")
def test_fragment_i_number_real_pos_overflow(self):
    self._run_test("i_number_real_pos_overflow.json")
def test_fragment_i_number_real_underflow(self):
    self._run_test("i_number_real_underflow.json")
def test_fragment_i_number_too_big_neg_int(self):
    self._run_test("i_number_too_big_neg_int.json")
def test_fragment_i_number_too_big_pos_int(self):
    self._run_test("i_number_too_big_pos_int.json")
def test_fragment_i_number_very_big_negative_int(self):
    self._run_test("i_number_very_big_negative_int.json")
def test_fragment_i_object_key_lone_2nd_surrogate(self):
    self._run_test("i_object_key_lone_2nd_surrogate.json")
def test_fragment_i_string_1st_surrogate_but_2nd_missing(self):
    self._run_test("i_string_1st_surrogate_but_2nd_missing.json")
def test_fragment_i_string_1st_valid_surrogate_2nd_invalid(self):
    self._run_test("i_string_1st_valid_surrogate_2nd_invalid.json")
def test_fragment_i_string_UTF_16LE_with_BOM(self):
    self._run_test("i_string_UTF-16LE_with_BOM.json")
def test_fragment_i_string_UTF_8_invalid_sequence(self):
    self._run_test("i_string_UTF-8_invalid_sequence.json")
def test_fragment_i_string_UTF8_surrogate_U_D800(self):
    self._run_test("i_string_UTF8_surrogate_U+D800.json")
def test_fragment_i_string_incomplete_surrogate_and_escape_valid(self):
    self._run_test("i_string_incomplete_surrogate_and_escape_valid.json")
def test_fragment_i_string_incomplete_surrogate_pair(self):
    self._run_test("i_string_incomplete_surrogate_pair.json")
def test_fragment_i_string_incomplete_surrogates_escape_valid(self):
    self._run_test("i_string_incomplete_surrogates_escape_valid.json")
def test_fragment_i_string_invalid_lonely_surrogate(self):
    self._run_test("i_string_invalid_lonely_surrogate.json")
def test_fragment_i_string_invalid_surrogate(self):
    self._run_test("i_string_invalid_surrogate.json")
def test_fragment_i_string_invalid_utf_8(self):
    self._run_test("i_string_invalid_utf-8.json")
def test_fragment_i_string_inverted_surrogates_U_1D11E(self):
    self._run_test("i_string_inverted_surrogates_U+1D11E.json")
def test_fragment_i_string_iso_latin_1(self):
    self._run_test("i_string_iso_latin_1.json")
def test_fragment_i_string_lone_second_surrogate(self):
    self._run_test("i_string_lone_second_surrogate.json")
def test_fragment_i_string_lone_utf8_continuation_byte(self):
    self._run_test("i_string_lone_utf8_continuation_byte.json")
def test_fragment_i_string_not_in_unicode_range(self):
    self._run_test("i_string_not_in_unicode_range.json")
def test_fragment_i_string_overlong_sequence_2_byte(self):
    self._run_test("i_string_overlong_sequence_2_bytes.json")
def test_fragment_i_string_overlong_sequence_6_byte(self):
    self._run_test("i_string_overlong_sequence_6_bytes.json")
def test_fragment_i_string_overlong_sequence_6_bytes_null(self):
    self._run_test("i_string_overlong_sequence_6_bytes_null.json")
def test_fragment_i_string_truncated_utf_8(self):
    self._run_test("i_string_truncated-utf-8.json")
def test_fragment_i_string_utf16BE_no_BOM(self):
    self._run_test("i_string_utf16BE_no_BOM.json")
def test_fragment_i_string_utf16LE_no_BOM(self):
    self._run_test("i_string_utf16LE_no_BOM.json")
def test_fragment_i_structure_500_nested_array(self):
    self._run_test("i_structure_500_nested_arrays.json.xz")
def test_fragment_i_structure_UTF_8_BOM_empty_object(self):
    self._run_test("i_structure_UTF-8_BOM_empty_object.json")
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_numpy.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import pytest
import orjson
try:
import numpy
except ImportError:
numpy = None # type: ignore
def numpy_default(obj):
    """orjson ``default=`` fallback: serialize ndarrays via ``tolist()``.

    Used by tests that exercise arrays orjson cannot serialize natively
    (e.g. non-C-contiguous/Fortran-ordered arrays). Raising TypeError for
    anything else lets orjson surface its normal unsupported-type error.
    """
    if isinstance(obj, numpy.ndarray):
        return obj.tolist()
    # Include the offending type so the resulting JSONEncodeError is debuggable
    # (a bare TypeError gave no hint about what failed to serialize).
    raise TypeError(f"unsupported type: {type(obj).__name__}")
@pytest.mark.skipif(numpy is None, reason="numpy is not installed")
class TestNumpy:
# 1-D integer dtype coverage: each test serializes the dtype's min/max (or
# full-range) values with OPT_SERIALIZE_NUMPY and checks the exact JSON bytes.
# uintp/intp are platform-sized, so their expectations are built from iinfo
# rather than hard-coded.
def test_numpy_array_d1_uintp(self):
    low = numpy.iinfo(numpy.uintp).min
    high = numpy.iinfo(numpy.uintp).max
    assert orjson.dumps(
        numpy.array([low, high], numpy.uintp),
        option=orjson.OPT_SERIALIZE_NUMPY,
    ) == f"[{low},{high}]".encode("ascii")

def test_numpy_array_d1_intp(self):
    low = numpy.iinfo(numpy.intp).min
    high = numpy.iinfo(numpy.intp).max
    assert orjson.dumps(
        numpy.array([low, high], numpy.intp),
        option=orjson.OPT_SERIALIZE_NUMPY,
    ) == f"[{low},{high}]".encode("ascii")

def test_numpy_array_d1_i64(self):
    assert (
        orjson.dumps(
            numpy.array([-9223372036854775807, 9223372036854775807], numpy.int64),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[-9223372036854775807,9223372036854775807]"
    )

def test_numpy_array_d1_u64(self):
    assert (
        orjson.dumps(
            numpy.array([0, 18446744073709551615], numpy.uint64),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[0,18446744073709551615]"
    )

def test_numpy_array_d1_i8(self):
    assert (
        orjson.dumps(
            numpy.array([-128, 127], numpy.int8),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[-128,127]"
    )

def test_numpy_array_d1_u8(self):
    assert (
        orjson.dumps(
            numpy.array([0, 255], numpy.uint8),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[0,255]"
    )

def test_numpy_array_d1_i32(self):
    assert (
        orjson.dumps(
            numpy.array([-2147483647, 2147483647], numpy.int32),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[-2147483647,2147483647]"
    )

def test_numpy_array_d1_i16(self):
    assert (
        orjson.dumps(
            numpy.array([-32768, 32767], numpy.int16),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[-32768,32767]"
    )

def test_numpy_array_d1_u16(self):
    assert (
        orjson.dumps(
            numpy.array([0, 65535], numpy.uint16),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[0,65535]"
    )

def test_numpy_array_d1_u32(self):
    assert (
        orjson.dumps(
            numpy.array([0, 4294967295], numpy.uint32),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[0,4294967295]"
    )
# Float dtype coverage (f16/f32/f64): shortest-roundtrip formatting of finite
# values, plus the edge cases inf/-inf/nan (serialized as JSON null, since
# JSON has no representation for them) and signed zero.
def test_numpy_array_d1_f32(self):
    assert (
        orjson.dumps(
            numpy.array([1.0, 3.4028235e38], numpy.float32),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[1.0,3.4028235e38]"
    )

def test_numpy_array_d1_f16(self):
    assert (
        orjson.dumps(
            numpy.array([-1.0, 0.0009765625, 1.0, 65504.0], numpy.float16),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[-1.0,0.0009765625,1.0,65504.0]"
    )

def test_numpy_array_f16_roundtrip(self):
    # Serialized float16 values must survive a dumps -> loads -> float16 cycle
    # bit-for-bit (array_equal), covering subnormals and near-1.0 values.
    ref = [
        -1.0,
        -2.0,
        0.000000059604645,
        0.000060975552,
        0.00006103515625,
        0.0009765625,
        0.33325195,
        0.99951172,
        1.0,
        1.00097656,
        65504.0,
    ]
    obj = numpy.array(ref, numpy.float16)  # type: ignore
    serialized = orjson.dumps(
        obj,
        option=orjson.OPT_SERIALIZE_NUMPY,
    )
    deserialized = numpy.array(orjson.loads(serialized), numpy.float16)  # type: ignore
    assert numpy.array_equal(obj, deserialized)

def test_numpy_array_f16_edge(self):
    assert (
        orjson.dumps(
            numpy.array(
                [
                    numpy.inf,
                    -numpy.inf,
                    numpy.nan,
                    -0.0,
                    0.0,
                    numpy.pi,
                ],
                numpy.float16,
            ),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[null,null,null,-0.0,0.0,3.140625]"
    )

def test_numpy_array_f32_edge(self):
    assert (
        orjson.dumps(
            numpy.array(
                [
                    numpy.inf,
                    -numpy.inf,
                    numpy.nan,
                    -0.0,
                    0.0,
                    numpy.pi,
                ],
                numpy.float32,
            ),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[null,null,null,-0.0,0.0,3.1415927]"
    )

def test_numpy_array_f64_edge(self):
    assert (
        orjson.dumps(
            numpy.array(
                [
                    numpy.inf,
                    -numpy.inf,
                    numpy.nan,
                    -0.0,
                    0.0,
                    numpy.pi,
                ],
                numpy.float64,
            ),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[null,null,null,-0.0,0.0,3.141592653589793]"
    )

def test_numpy_array_d1_f64(self):
    assert (
        orjson.dumps(
            numpy.array([1.0, 1.7976931348623157e308], numpy.float64),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[1.0,1.7976931348623157e308]"
    )
def test_numpy_array_d1_bool(self):
    # numpy bool arrays serialize to JSON true/false literals.
    assert (
        orjson.dumps(
            numpy.array([True, False, False, True]),
            option=orjson.OPT_SERIALIZE_NUMPY,
        )
        == b"[true,false,false,true]"
    )
def test_numpy_array_d1_datetime64_years(self):
assert (
orjson.dumps(
numpy.array(
[
numpy.datetime64("1"),
numpy.datetime64("970"),
numpy.datetime64("1920"),
numpy.datetime64("1971"),
numpy.datetime64("2021"),
numpy.datetime64("2022"),
numpy.datetime64("2023"),
numpy.datetime64("9999"),
]
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'["0001-01-01T00:00:00","0970-01-01T00:00:00","1920-01-01T00:00:00","1971-01-01T00:00:00","2021-01-01T00:00:00","2022-01-01T00:00:00","2023-01-01T00:00:00","9999-01-01T00:00:00"]'
)
def test_numpy_array_d1_datetime64_months(self):
assert (
orjson.dumps(
numpy.array(
[
numpy.datetime64("2021-01"),
numpy.datetime64("2022-01"),
numpy.datetime64("2023-01"),
]
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'["2021-01-01T00:00:00","2022-01-01T00:00:00","2023-01-01T00:00:00"]'
)
def test_numpy_array_d1_datetime64_days(self):
assert (
orjson.dumps(
numpy.array(
[
numpy.datetime64("2021-01-01"),
numpy.datetime64("2021-01-01"),
numpy.datetime64("2021-01-01"),
]
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'["2021-01-01T00:00:00","2021-01-01T00:00:00","2021-01-01T00:00:00"]'
)
def test_numpy_array_d1_datetime64_hours(self):
assert (
orjson.dumps(
numpy.array(
[
numpy.datetime64("2021-01-01T00"),
numpy.datetime64("2021-01-01T01"),
numpy.datetime64("2021-01-01T02"),
]
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'["2021-01-01T00:00:00","2021-01-01T01:00:00","2021-01-01T02:00:00"]'
)
def test_numpy_array_d1_datetime64_minutes(self):
assert (
orjson.dumps(
numpy.array(
[
numpy.datetime64("2021-01-01T00:00"),
numpy.datetime64("2021-01-01T00:01"),
numpy.datetime64("2021-01-01T00:02"),
]
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'["2021-01-01T00:00:00","2021-01-01T00:01:00","2021-01-01T00:02:00"]'
)
def test_numpy_array_d1_datetime64_seconds(self):
assert (
orjson.dumps(
numpy.array(
[
numpy.datetime64("2021-01-01T00:00:00"),
numpy.datetime64("2021-01-01T00:00:01"),
numpy.datetime64("2021-01-01T00:00:02"),
]
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'["2021-01-01T00:00:00","2021-01-01T00:00:01","2021-01-01T00:00:02"]'
)
def test_numpy_array_d1_datetime64_milliseconds(self):
assert (
orjson.dumps(
numpy.array(
[
numpy.datetime64("2021-01-01T00:00:00"),
numpy.datetime64("2021-01-01T00:00:00.172"),
numpy.datetime64("2021-01-01T00:00:00.567"),
]
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'["2021-01-01T00:00:00","2021-01-01T00:00:00.172000","2021-01-01T00:00:00.567000"]'
)
def test_numpy_array_d1_datetime64_microseconds(self):
assert (
orjson.dumps(
numpy.array(
[
numpy.datetime64("2021-01-01T00:00:00"),
numpy.datetime64("2021-01-01T00:00:00.172"),
numpy.datetime64("2021-01-01T00:00:00.567891"),
]
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'["2021-01-01T00:00:00","2021-01-01T00:00:00.172000","2021-01-01T00:00:00.567891"]'
)
def test_numpy_array_d1_datetime64_nanoseconds(self):
assert (
orjson.dumps(
numpy.array(
[
numpy.datetime64("2021-01-01T00:00:00"),
numpy.datetime64("2021-01-01T00:00:00.172"),
numpy.datetime64("2021-01-01T00:00:00.567891234"),
]
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'["2021-01-01T00:00:00","2021-01-01T00:00:00.172000","2021-01-01T00:00:00.567891"]'
)
def test_numpy_array_d1_datetime64_picoseconds(self):
try:
orjson.dumps(
numpy.array(
[
numpy.datetime64("2021-01-01T00:00:00"),
numpy.datetime64("2021-01-01T00:00:00.172"),
numpy.datetime64("2021-01-01T00:00:00.567891234567"),
]
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
assert False
except TypeError as exc:
assert str(exc) == "unsupported numpy.datetime64 unit: picoseconds"
def test_numpy_array_d2_i64(self):
assert (
orjson.dumps(
numpy.array([[1, 2, 3], [4, 5, 6]], numpy.int64),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b"[[1,2,3],[4,5,6]]"
)
def test_numpy_array_d2_f64(self):
assert (
orjson.dumps(
numpy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], numpy.float64),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b"[[1.0,2.0,3.0],[4.0,5.0,6.0]]"
)
def test_numpy_array_d3_i8(self):
assert (
orjson.dumps(
numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], numpy.int8),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b"[[[1,2],[3,4]],[[5,6],[7,8]]]"
)
def test_numpy_array_d3_u8(self):
assert (
orjson.dumps(
numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], numpy.uint8),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b"[[[1,2],[3,4]],[[5,6],[7,8]]]"
)
def test_numpy_array_d3_i32(self):
assert (
orjson.dumps(
numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], numpy.int32),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b"[[[1,2],[3,4]],[[5,6],[7,8]]]"
)
def test_numpy_array_d3_i64(self):
assert (
orjson.dumps(
numpy.array([[[1, 2], [3, 4], [5, 6], [7, 8]]], numpy.int64),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b"[[[1,2],[3,4],[5,6],[7,8]]]"
)
def test_numpy_array_d3_f64(self):
assert (
orjson.dumps(
numpy.array(
[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], numpy.float64
),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b"[[[1.0,2.0],[3.0,4.0]],[[5.0,6.0],[7.0,8.0]]]"
)
def test_numpy_array_fortran(self):
array = numpy.array([[1, 2], [3, 4]], order="F")
assert array.flags["F_CONTIGUOUS"] is True
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY)
assert orjson.dumps(
array, default=numpy_default, option=orjson.OPT_SERIALIZE_NUMPY
) == orjson.dumps(array.tolist())
def test_numpy_array_non_contiguous_message(self):
array = numpy.array([[1, 2], [3, 4]], order="F")
assert array.flags["F_CONTIGUOUS"] is True
try:
orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY)
assert False
except TypeError as exc:
assert (
str(exc)
== "numpy array is not C contiguous; use ndarray.tolist() in default"
)
def test_numpy_array_unsupported_dtype(self):
array = numpy.array([[1, 2], [3, 4]], numpy.csingle) # type: ignore
with pytest.raises(orjson.JSONEncodeError) as cm:
orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY)
assert "unsupported datatype in numpy array" in str(cm)
def test_numpy_array_d1(self):
array = numpy.array([1])
assert (
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
)
== array.tolist()
)
def test_numpy_array_d2(self):
array = numpy.array([[1]])
assert (
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
)
== array.tolist()
)
def test_numpy_array_d3(self):
array = numpy.array([[[1]]])
assert (
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
)
== array.tolist()
)
def test_numpy_array_d4(self):
array = numpy.array([[[[1]]]])
assert (
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
)
== array.tolist()
)
def test_numpy_array_4_stride(self):
array = numpy.random.rand(4, 4, 4, 4)
assert (
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
)
== array.tolist()
)
def test_numpy_array_dimension_zero(self):
array = numpy.array(0)
assert array.ndim == 0
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY)
array = numpy.empty((0, 4, 2))
assert (
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
)
== array.tolist()
)
array = numpy.empty((4, 0, 2))
assert (
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
)
== array.tolist()
)
array = numpy.empty((2, 4, 0))
assert (
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
)
== array.tolist()
)
def test_numpy_array_dimension_max(self):
array = numpy.random.rand(
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
)
assert array.ndim == 32
assert (
orjson.loads(
orjson.dumps(
array,
option=orjson.OPT_SERIALIZE_NUMPY,
)
)
== array.tolist()
)
def test_numpy_scalar_int8(self):
assert orjson.dumps(numpy.int8(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0"
assert (
orjson.dumps(numpy.int8(127), option=orjson.OPT_SERIALIZE_NUMPY) == b"127"
)
assert (
orjson.dumps(numpy.int8(-128), option=orjson.OPT_SERIALIZE_NUMPY) == b"-128"
)
def test_numpy_scalar_int16(self):
assert orjson.dumps(numpy.int16(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0"
assert (
orjson.dumps(numpy.int16(32767), option=orjson.OPT_SERIALIZE_NUMPY)
== b"32767"
)
assert (
orjson.dumps(numpy.int16(-32768), option=orjson.OPT_SERIALIZE_NUMPY)
== b"-32768"
)
def test_numpy_scalar_int32(self):
assert orjson.dumps(numpy.int32(1), option=orjson.OPT_SERIALIZE_NUMPY) == b"1"
assert (
orjson.dumps(numpy.int32(2147483647), option=orjson.OPT_SERIALIZE_NUMPY)
== b"2147483647"
)
assert (
orjson.dumps(numpy.int32(-2147483648), option=orjson.OPT_SERIALIZE_NUMPY)
== b"-2147483648"
)
def test_numpy_scalar_int64(self):
assert (
orjson.dumps(
numpy.int64(-9223372036854775808), option=orjson.OPT_SERIALIZE_NUMPY
)
== b"-9223372036854775808"
)
assert (
orjson.dumps(
numpy.int64(9223372036854775807), option=orjson.OPT_SERIALIZE_NUMPY
)
== b"9223372036854775807"
)
def test_numpy_scalar_uint8(self):
assert orjson.dumps(numpy.uint8(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0"
assert (
orjson.dumps(numpy.uint8(255), option=orjson.OPT_SERIALIZE_NUMPY) == b"255"
)
def test_numpy_scalar_uint16(self):
assert orjson.dumps(numpy.uint16(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0"
assert (
orjson.dumps(numpy.uint16(65535), option=orjson.OPT_SERIALIZE_NUMPY)
== b"65535"
)
def test_numpy_scalar_uint32(self):
assert orjson.dumps(numpy.uint32(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0"
assert (
orjson.dumps(numpy.uint32(4294967295), option=orjson.OPT_SERIALIZE_NUMPY)
== b"4294967295"
)
def test_numpy_scalar_uint64(self):
assert orjson.dumps(numpy.uint64(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0"
assert (
orjson.dumps(
numpy.uint64(18446744073709551615), option=orjson.OPT_SERIALIZE_NUMPY
)
== b"18446744073709551615"
)
def test_numpy_scalar_float16(self):
assert (
orjson.dumps(numpy.float16(1.0), option=orjson.OPT_SERIALIZE_NUMPY)
== b"1.0"
)
def test_numpy_scalar_float32(self):
assert (
orjson.dumps(numpy.float32(1.0), option=orjson.OPT_SERIALIZE_NUMPY)
== b"1.0"
)
def test_numpy_scalar_float64(self):
assert (
orjson.dumps(numpy.float64(123.123), option=orjson.OPT_SERIALIZE_NUMPY)
== b"123.123"
)
def test_numpy_bool(self):
assert (
orjson.dumps(
{"a": numpy.bool_(True), "b": numpy.bool_(False)},
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'{"a":true,"b":false}'
)
def test_numpy_datetime_year(self):
assert (
orjson.dumps(numpy.datetime64("2021"), option=orjson.OPT_SERIALIZE_NUMPY)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_month(self):
assert (
orjson.dumps(numpy.datetime64("2021-01"), option=orjson.OPT_SERIALIZE_NUMPY)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_day(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01"), option=orjson.OPT_SERIALIZE_NUMPY
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_hour(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00"), option=orjson.OPT_SERIALIZE_NUMPY
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_minute(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00"), option=orjson.OPT_SERIALIZE_NUMPY
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_second(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00"),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_milli(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172"),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'"2021-01-01T00:00:00.172000"'
)
def test_numpy_datetime_micro(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172576"),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'"2021-01-01T00:00:00.172576"'
)
def test_numpy_datetime_nano(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172576789"),
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b'"2021-01-01T00:00:00.172576"'
)
def test_numpy_datetime_naive_utc_year(self):
assert (
orjson.dumps(
numpy.datetime64("2021"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC,
)
== b'"2021-01-01T00:00:00+00:00"'
)
def test_numpy_datetime_naive_utc_month(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC,
)
== b'"2021-01-01T00:00:00+00:00"'
)
def test_numpy_datetime_naive_utc_day(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC,
)
== b'"2021-01-01T00:00:00+00:00"'
)
def test_numpy_datetime_naive_utc_hour(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC,
)
== b'"2021-01-01T00:00:00+00:00"'
)
def test_numpy_datetime_naive_utc_minute(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC,
)
== b'"2021-01-01T00:00:00+00:00"'
)
def test_numpy_datetime_naive_utc_second(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC,
)
== b'"2021-01-01T00:00:00+00:00"'
)
def test_numpy_datetime_naive_utc_milli(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC,
)
== b'"2021-01-01T00:00:00.172000+00:00"'
)
def test_numpy_datetime_naive_utc_micro(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172576"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC,
)
== b'"2021-01-01T00:00:00.172576+00:00"'
)
def test_numpy_datetime_naive_utc_nano(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172576789"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC,
)
== b'"2021-01-01T00:00:00.172576+00:00"'
)
def test_numpy_datetime_naive_utc_utc_z_year(self):
assert (
orjson.dumps(
numpy.datetime64("2021"),
option=orjson.OPT_SERIALIZE_NUMPY
| orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z,
)
== b'"2021-01-01T00:00:00Z"'
)
def test_numpy_datetime_naive_utc_utc_z_month(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01"),
option=orjson.OPT_SERIALIZE_NUMPY
| orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z,
)
== b'"2021-01-01T00:00:00Z"'
)
def test_numpy_datetime_naive_utc_utc_z_day(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01"),
option=orjson.OPT_SERIALIZE_NUMPY
| orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z,
)
== b'"2021-01-01T00:00:00Z"'
)
def test_numpy_datetime_naive_utc_utc_z_hour(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00"),
option=orjson.OPT_SERIALIZE_NUMPY
| orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z,
)
== b'"2021-01-01T00:00:00Z"'
)
def test_numpy_datetime_naive_utc_utc_z_minute(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00"),
option=orjson.OPT_SERIALIZE_NUMPY
| orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z,
)
== b'"2021-01-01T00:00:00Z"'
)
def test_numpy_datetime_naive_utc_utc_z_second(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00"),
option=orjson.OPT_SERIALIZE_NUMPY
| orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z,
)
== b'"2021-01-01T00:00:00Z"'
)
def test_numpy_datetime_naive_utc_utc_z_milli(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172"),
option=orjson.OPT_SERIALIZE_NUMPY
| orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z,
)
== b'"2021-01-01T00:00:00.172000Z"'
)
def test_numpy_datetime_naive_utc_utc_z_micro(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172576"),
option=orjson.OPT_SERIALIZE_NUMPY
| orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z,
)
== b'"2021-01-01T00:00:00.172576Z"'
)
def test_numpy_datetime_naive_utc_utc_z_nano(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172576789"),
option=orjson.OPT_SERIALIZE_NUMPY
| orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z,
)
== b'"2021-01-01T00:00:00.172576Z"'
)
def test_numpy_datetime_omit_microseconds_year(self):
assert (
orjson.dumps(
numpy.datetime64("2021"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS,
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_omit_microseconds_month(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS,
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_omit_microseconds_day(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS,
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_omit_microseconds_hour(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS,
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_omit_microseconds_minute(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS,
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_omit_microseconds_second(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS,
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_omit_microseconds_milli(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS,
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_omit_microseconds_micro(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172576"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS,
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_omit_microseconds_nano(self):
assert (
orjson.dumps(
numpy.datetime64("2021-01-01T00:00:00.172576789"),
option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS,
)
== b'"2021-01-01T00:00:00"'
)
def test_numpy_datetime_nat(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(numpy.datetime64("NaT"), option=orjson.OPT_SERIALIZE_NUMPY)
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps([numpy.datetime64("NaT")], option=orjson.OPT_SERIALIZE_NUMPY)
def test_numpy_repeated(self):
data = numpy.array([[[1, 2], [3, 4], [5, 6], [7, 8]]], numpy.int64) # type: ignore
for _ in range(0, 3):
assert (
orjson.dumps(
data,
option=orjson.OPT_SERIALIZE_NUMPY,
)
== b"[[[1,2],[3,4],[5,6],[7,8]]]"
)
@pytest.mark.skipif(numpy is None, reason="numpy is not installed")
class TestNumpyEquivalence:
def _test(self, obj):
assert orjson.dumps(obj, option=orjson.OPT_SERIALIZE_NUMPY) == orjson.dumps(
obj.tolist()
)
def test_numpy_uint8(self):
self._test(numpy.array([0, 255], numpy.uint8))
def test_numpy_uint16(self):
self._test(numpy.array([0, 65535], numpy.uint16))
def test_numpy_uint32(self):
self._test(numpy.array([0, 4294967295], numpy.uint32))
def test_numpy_uint64(self):
self._test(numpy.array([0, 18446744073709551615], numpy.uint64))
def test_numpy_int8(self):
self._test(numpy.array([-128, 127], numpy.int8))
def test_numpy_int16(self):
self._test(numpy.array([-32768, 32767], numpy.int16))
def test_numpy_int32(self):
self._test(numpy.array([-2147483647, 2147483647], numpy.int32))
def test_numpy_int64(self):
self._test(
numpy.array([-9223372036854775807, 9223372036854775807], numpy.int64)
)
@pytest.mark.skip(reason="tolist() conversion results in 3.4028234663852886e38")
def test_numpy_float32(self):
self._test(
numpy.array(
[
-340282346638528859811704183484516925440.0000000000000000,
340282346638528859811704183484516925440.0000000000000000,
],
numpy.float32,
)
)
self._test(numpy.array([-3.4028235e38, 3.4028235e38], numpy.float32))
def test_numpy_float64(self):
self._test(
numpy.array(
[-1.7976931348623157e308, 1.7976931348623157e308], numpy.float64
)
)
@pytest.mark.skipif(numpy is None, reason="numpy is not installed")
class NumpyEndianness:
def test_numpy_array_dimension_zero(self):
wrong_endianness = ">" if sys.byteorder == "little" else "<"
array = numpy.array([0, 1, 0.4, 5.7], dtype=f"{wrong_endianness}f8")
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_canonical.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import orjson
class TestCanonicalTests:
def test_dumps_ctrl_escape(self):
"""
dumps() ctrl characters
"""
assert orjson.dumps("text\u0003\r\n") == b'"text\\u0003\\r\\n"'
def test_dumps_escape_quote_backslash(self):
"""
dumps() quote, backslash escape
"""
assert orjson.dumps(r'"\ test') == b'"\\"\\\\ test"'
def test_dumps_escape_line_separator(self):
"""
dumps() U+2028, U+2029 escape
"""
assert (
orjson.dumps({"spaces": "\u2028 \u2029"})
== b'{"spaces":"\xe2\x80\xa8 \xe2\x80\xa9"}'
)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/requirements.txt | arrow
faker
numpy;(platform_machine=="x86_64" or (platform_machine=="aarch64" and sys_platform == "linux")) and python_version<"3.13"
pendulum;sys_platform=="linux" and platform_machine=="x86_64" and python_version<"3.12"
time-machine < 2.15;sys_platform=="linux" and platform_machine=="x86_64" and python_version<"3.12"
psutil;(sys_platform=="linux" or sys_platform == "macos") and platform_machine=="x86_64" and python_version<"3.13"
pytest
pytz
typing_extensions;python_version<"3.8"
xxhash==1.4.3;sys_platform=="linux" and platform_machine=="x86_64" and python_version<"3.9" # creates non-compact ASCII for test_str_ascii
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_indent.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import datetime
import json
import orjson
from .util import read_fixture_obj
class TestIndentedOutput:
def test_equivalent(self):
"""
OPT_INDENT_2 is equivalent to indent=2
"""
obj = {"a": "b", "c": {"d": True}, "e": [1, 2]}
assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == json.dumps(
obj, indent=2
).encode("utf-8")
def test_sort(self):
obj = {"b": 1, "a": 2}
assert (
orjson.dumps(obj, option=orjson.OPT_INDENT_2 | orjson.OPT_SORT_KEYS)
== b'{\n "a": 2,\n "b": 1\n}'
)
def test_non_str(self):
obj = {1: 1, "a": 2}
assert (
orjson.dumps(obj, option=orjson.OPT_INDENT_2 | orjson.OPT_NON_STR_KEYS)
== b'{\n "1": 1,\n "a": 2\n}'
)
def test_options(self):
obj = {
1: 1,
"b": True,
"a": datetime.datetime(1970, 1, 1),
}
assert (
orjson.dumps(
obj,
option=orjson.OPT_INDENT_2
| orjson.OPT_SORT_KEYS
| orjson.OPT_NON_STR_KEYS
| orjson.OPT_NAIVE_UTC,
)
== b'{\n "1": 1,\n "a": "1970-01-01T00:00:00+00:00",\n "b": true\n}'
)
def test_empty(self):
obj = [{}, [[[]]], {"key": []}]
ref = b'[\n {},\n [\n [\n []\n ]\n ],\n {\n "key": []\n }\n]'
assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == ref
def test_twitter_pretty(self):
"""
twitter.json pretty
"""
obj = read_fixture_obj("twitter.json.xz")
assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == json.dumps(
obj, indent=2, ensure_ascii=False
).encode("utf-8")
def test_github_pretty(self):
"""
github.json pretty
"""
obj = read_fixture_obj("github.json.xz")
assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == json.dumps(
obj, indent=2, ensure_ascii=False
).encode("utf-8")
def test_canada_pretty(self):
"""
canada.json pretty
"""
obj = read_fixture_obj("canada.json.xz")
assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == json.dumps(
obj, indent=2, ensure_ascii=False
).encode("utf-8")
def test_citm_catalog_pretty(self):
"""
citm_catalog.json pretty
"""
obj = read_fixture_obj("citm_catalog.json.xz")
assert orjson.dumps(obj, option=orjson.OPT_INDENT_2) == json.dumps(
obj, indent=2, ensure_ascii=False
).encode("utf-8")
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_error.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import json
import pytest
import orjson
from .util import read_fixture_str
ASCII_TEST = b"""\
{
"a": "qwe",
"b": "qweqwe",
"c": "qweq",
"d: "qwe"
}
"""
MULTILINE_EMOJI = """[
"😊",
"a"
"""
class TestJsonDecodeError:
def _get_error_infos(self, json_decode_error_exc_info):
return {
k: v
for k, v in json_decode_error_exc_info.value.__dict__.items()
if k in ("pos", "lineno", "colno")
}
def _test(self, data, expected_err_infos):
with pytest.raises(json.decoder.JSONDecodeError) as json_exc_info:
json.loads(data)
with pytest.raises(json.decoder.JSONDecodeError) as orjson_exc_info:
orjson.loads(data)
assert (
self._get_error_infos(json_exc_info)
== self._get_error_infos(orjson_exc_info)
== expected_err_infos
)
def test_empty(self):
with pytest.raises(orjson.JSONDecodeError) as json_exc_info:
orjson.loads("")
assert str(json_exc_info.value).startswith(
"Input is a zero-length, empty document:"
)
def test_ascii(self):
self._test(
ASCII_TEST,
{"pos": 55, "lineno": 5, "colno": 8},
)
def test_latin1(self):
self._test(
"""["üýþÿ", "a" """,
{"pos": 13, "lineno": 1, "colno": 14},
)
def test_two_byte_str(self):
self._test(
"""["東京", "a" """,
{"pos": 11, "lineno": 1, "colno": 12},
)
def test_two_byte_bytes(self):
self._test(
b'["\xe6\x9d\xb1\xe4\xba\xac", "a" ',
{"pos": 11, "lineno": 1, "colno": 12},
)
def test_four_byte(self):
self._test(
MULTILINE_EMOJI,
{"pos": 19, "lineno": 4, "colno": 1},
)
def test_tab(self):
data = read_fixture_str("fail26.json", "jsonchecker")
with pytest.raises(json.decoder.JSONDecodeError) as json_exc_info:
json.loads(data)
assert self._get_error_infos(json_exc_info) == {
"pos": 5,
"lineno": 1,
"colno": 6,
}
with pytest.raises(json.decoder.JSONDecodeError) as json_exc_info:
orjson.loads(data)
assert self._get_error_infos(json_exc_info) == {
"pos": 6,
"lineno": 1,
"colno": 7,
}
class Custom:
pass
class CustomException(Exception):
pass
def default_typeerror(obj):
raise TypeError
def default_notimplementederror(obj):
raise NotImplementedError
def default_systemerror(obj):
raise SystemError
def default_importerror(obj):
import doesnotexist
assert doesnotexist
CUSTOM_ERROR_MESSAGE = "zxc"
def default_customerror(obj):
raise CustomException(CUSTOM_ERROR_MESSAGE)
class TestJsonEncodeError:
def test_dumps_arg(self):
with pytest.raises(orjson.JSONEncodeError) as exc_info:
orjson.dumps() # type: ignore
assert exc_info.type == orjson.JSONEncodeError
assert (
str(exc_info.value)
== "dumps() missing 1 required positional argument: 'obj'"
)
assert exc_info.value.__cause__ is None
def test_dumps_chain_none(self):
with pytest.raises(orjson.JSONEncodeError) as exc_info:
orjson.dumps(Custom())
assert exc_info.type == orjson.JSONEncodeError
assert str(exc_info.value) == "Type is not JSON serializable: Custom"
assert exc_info.value.__cause__ is None
def test_dumps_chain_u64(self):
with pytest.raises(orjson.JSONEncodeError) as exc_info:
orjson.dumps([18446744073709551615, Custom()])
assert exc_info.type == orjson.JSONEncodeError
assert exc_info.value.__cause__ is None
def test_dumps_chain_default_typeerror(self):
with pytest.raises(orjson.JSONEncodeError) as exc_info:
orjson.dumps(Custom(), default=default_typeerror)
assert exc_info.type == orjson.JSONEncodeError
assert isinstance(exc_info.value.__cause__, TypeError)
def test_dumps_chain_default_systemerror(self):
with pytest.raises(orjson.JSONEncodeError) as exc_info:
orjson.dumps(Custom(), default=default_systemerror)
assert exc_info.type == orjson.JSONEncodeError
assert isinstance(exc_info.value.__cause__, SystemError)
def test_dumps_chain_default_importerror(self):
with pytest.raises(orjson.JSONEncodeError) as exc_info:
orjson.dumps(Custom(), default=default_importerror)
assert exc_info.type == orjson.JSONEncodeError
assert isinstance(exc_info.value.__cause__, ImportError)
def test_dumps_chain_default_customerror(self):
with pytest.raises(orjson.JSONEncodeError) as exc_info:
orjson.dumps(Custom(), default=default_customerror)
assert exc_info.type == orjson.JSONEncodeError
assert isinstance(exc_info.value.__cause__, CustomException)
assert str(exc_info.value.__cause__) == CUSTOM_ERROR_MESSAGE
def test_dumps_normalize_exception(self):
with pytest.raises(orjson.JSONEncodeError) as exc_info:
orjson.dumps(10**60)
assert exc_info.type == orjson.JSONEncodeError
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_api.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import datetime
import inspect
import json
import re
import pytest
import orjson
SIMPLE_TYPES = (1, 1.0, -1, None, "str", True, False)
LOADS_RECURSION_LIMIT = 1024
def default(obj):
return str(obj)
class TestApi:
def test_loads_trailing(self):
"""
loads() handles trailing whitespace
"""
assert orjson.loads("{}\n\t ") == {}
def test_loads_trailing_invalid(self):
"""
loads() handles trailing invalid
"""
pytest.raises(orjson.JSONDecodeError, orjson.loads, "{}\n\t a")
def test_simple_json(self):
"""
dumps() equivalent to json on simple types
"""
for obj in SIMPLE_TYPES:
assert orjson.dumps(obj) == json.dumps(obj).encode("utf-8")
def test_simple_round_trip(self):
"""
dumps(), loads() round trip on simple types
"""
for obj in SIMPLE_TYPES:
assert orjson.loads(orjson.dumps(obj)) == obj
def test_loads_type(self):
"""
loads() invalid type
"""
for val in (1, 3.14, [], {}, None): # type: ignore
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_loads_recursion_partial(self):
"""
loads() recursion limit partial
"""
pytest.raises(orjson.JSONDecodeError, orjson.loads, "[" * (1024 * 1024))
def test_loads_recursion_valid_limit_array(self):
"""
loads() recursion limit at limit array
"""
n = LOADS_RECURSION_LIMIT + 1
value = b"[" * n + b"]" * n
pytest.raises(orjson.JSONDecodeError, orjson.loads, value)
def test_loads_recursion_valid_limit_object(self):
"""
loads() recursion limit at limit object
"""
n = LOADS_RECURSION_LIMIT
value = b'{"key":' * n + b'{"key":true}' + b"}" * n
pytest.raises(orjson.JSONDecodeError, orjson.loads, value)
def test_loads_recursion_valid_limit_mixed(self):
"""
loads() recursion limit at limit mixed
"""
n = LOADS_RECURSION_LIMIT
value = b"[" b'{"key":' * n + b'{"key":true}' + b"}" * n + b"]"
pytest.raises(orjson.JSONDecodeError, orjson.loads, value)
def test_loads_recursion_valid_excessive_array(self):
"""
loads() recursion limit excessively high value
"""
n = 10000000
value = b"[" * n + b"]" * n
pytest.raises(orjson.JSONDecodeError, orjson.loads, value)
def test_loads_recursion_valid_limit_array_pretty(self):
"""
loads() recursion limit at limit array pretty
"""
n = LOADS_RECURSION_LIMIT + 1
value = b"[\n " * n + b"]" * n
pytest.raises(orjson.JSONDecodeError, orjson.loads, value)
def test_loads_recursion_valid_limit_object_pretty(self):
"""
loads() recursion limit at limit object pretty
"""
n = LOADS_RECURSION_LIMIT
value = b'{\n "key":' * n + b'{"key":true}' + b"}" * n
pytest.raises(orjson.JSONDecodeError, orjson.loads, value)
def test_loads_recursion_valid_limit_mixed_pretty(self):
"""
loads() recursion limit at limit mixed pretty
"""
n = LOADS_RECURSION_LIMIT
value = b"[\n " b'{"key":' * n + b'{"key":true}' + b"}" * n + b"]"
pytest.raises(orjson.JSONDecodeError, orjson.loads, value)
def test_loads_recursion_valid_excessive_array_pretty(self):
"""
loads() recursion limit excessively high value pretty
"""
n = 10000000
value = b"[\n " * n + b"]" * n
pytest.raises(orjson.JSONDecodeError, orjson.loads, value)
def test_version(self):
"""
__version__
"""
assert re.match(r"^\d+\.\d+(\.\d+)?$", orjson.__version__)
def test_valueerror(self):
"""
orjson.JSONDecodeError is a subclass of ValueError
"""
pytest.raises(orjson.JSONDecodeError, orjson.loads, "{")
pytest.raises(ValueError, orjson.loads, "{")
def test_optional_none(self):
"""
dumps() option, default None
"""
assert orjson.dumps([], option=None) == b"[]"
assert orjson.dumps([], default=None) == b"[]"
assert orjson.dumps([], option=None, default=None) == b"[]"
assert orjson.dumps([], None, None) == b"[]"
def test_option_not_int(self):
"""
dumps() option not int or None
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(True, option=True)
def test_option_invalid_int(self):
"""
dumps() option invalid 64-bit number
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(True, option=9223372036854775809)
def test_option_range_low(self):
"""
dumps() option out of range low
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(True, option=-1)
def test_option_range_high(self):
"""
dumps() option out of range high
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(True, option=1 << 12)
def test_opts_multiple(self):
"""
dumps() multiple option
"""
assert (
orjson.dumps(
[1, datetime.datetime(2000, 1, 1, 2, 3, 4)],
option=orjson.OPT_STRICT_INTEGER | orjson.OPT_NAIVE_UTC,
)
== b'[1,"2000-01-01T02:03:04+00:00"]'
)
def test_default_positional(self):
"""
dumps() positional arg
"""
with pytest.raises(TypeError):
orjson.dumps(__obj={}) # type: ignore
with pytest.raises(TypeError):
orjson.dumps(zxc={}) # type: ignore
def test_default_unknown_kwarg(self):
"""
dumps() unknown kwarg
"""
with pytest.raises(TypeError):
orjson.dumps({}, zxc=default) # type: ignore
def test_default_empty_kwarg(self):
"""
dumps() empty kwarg
"""
assert orjson.dumps(None, **{}) == b"null"
def test_default_twice(self):
"""
dumps() default twice
"""
with pytest.raises(TypeError):
orjson.dumps({}, default, default=default) # type: ignore
def test_option_twice(self):
"""
dumps() option twice
"""
with pytest.raises(TypeError):
orjson.dumps({}, None, orjson.OPT_NAIVE_UTC, option=orjson.OPT_NAIVE_UTC) # type: ignore
def test_option_mixed(self):
"""
dumps() option one arg, one kwarg
"""
class Custom:
def __str__(self):
return "zxc"
assert (
orjson.dumps(
[Custom(), datetime.datetime(2000, 1, 1, 2, 3, 4)],
default,
option=orjson.OPT_NAIVE_UTC,
)
== b'["zxc","2000-01-01T02:03:04+00:00"]'
)
def test_dumps_signature(self):
"""
dumps() valid __text_signature__
"""
assert (
str(inspect.signature(orjson.dumps))
== "(obj, /, default=None, option=None)"
)
inspect.signature(orjson.dumps).bind("str")
inspect.signature(orjson.dumps).bind("str", default=default, option=1)
inspect.signature(orjson.dumps).bind("str", default=None, option=None)
def test_loads_signature(self):
"""
loads() valid __text_signature__
"""
assert str(inspect.signature(orjson.loads)), "(obj == /)"
inspect.signature(orjson.loads).bind("[]")
def test_dumps_module_str(self):
"""
orjson.dumps.__module__ is a str
"""
assert orjson.dumps.__module__ == "orjson"
def test_loads_module_str(self):
"""
orjson.loads.__module__ is a str
"""
assert orjson.loads.__module__ == "orjson"
def test_bytes_buffer(self):
"""
dumps() trigger buffer growing where length is greater than growth
"""
a = "a" * 900
b = "b" * 4096
c = "c" * 4096 * 4096
assert orjson.dumps([a, b, c]) == f'["{a}","{b}","{c}"]'.encode("utf-8")
def test_bytes_null_terminated(self):
"""
dumps() PyBytesObject buffer is null-terminated
"""
# would raise ValueError: invalid literal for int() with base 10: b'1596728892'
int(orjson.dumps(1596728892))
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_parsing.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import orjson
from .util import read_fixture_bytes
class TestJSONTestSuiteParsing:
def _run_fail_json(self, filename, exc=orjson.JSONDecodeError):
data = read_fixture_bytes(filename, "parsing")
with pytest.raises(exc):
orjson.loads(data)
with pytest.raises(exc):
orjson.loads(bytearray(data))
with pytest.raises(exc):
orjson.loads(memoryview(data))
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
pass
else:
with pytest.raises(exc):
orjson.loads(decoded)
def _run_pass_json(self, filename, match=""):
data = read_fixture_bytes(filename, "parsing")
orjson.loads(data)
orjson.loads(bytearray(data))
orjson.loads(memoryview(data))
orjson.loads(data.decode("utf-8"))
def test_y_array_arraysWithSpace(self):
"""
y_array_arraysWithSpaces.json
"""
self._run_pass_json("y_array_arraysWithSpaces.json")
def test_y_array_empty_string(self):
"""
y_array_empty-string.json
"""
self._run_pass_json("y_array_empty-string.json")
def test_y_array_empty(self):
"""
y_array_empty.json
"""
self._run_pass_json("y_array_empty.json")
def test_y_array_ending_with_newline(self):
"""
y_array_ending_with_newline.json
"""
self._run_pass_json("y_array_ending_with_newline.json")
def test_y_array_false(self):
"""
y_array_false.json
"""
self._run_pass_json("y_array_false.json")
def test_y_array_heterogeneou(self):
"""
y_array_heterogeneous.json
"""
self._run_pass_json("y_array_heterogeneous.json")
def test_y_array_null(self):
"""
y_array_null.json
"""
self._run_pass_json("y_array_null.json")
def test_y_array_with_1_and_newline(self):
"""
y_array_with_1_and_newline.json
"""
self._run_pass_json("y_array_with_1_and_newline.json")
def test_y_array_with_leading_space(self):
"""
y_array_with_leading_space.json
"""
self._run_pass_json("y_array_with_leading_space.json")
def test_y_array_with_several_null(self):
"""
y_array_with_several_null.json
"""
self._run_pass_json("y_array_with_several_null.json")
def test_y_array_with_trailing_space(self):
"""
y_array_with_trailing_space.json
"""
self._run_pass_json("y_array_with_trailing_space.json")
def test_y_number(self):
"""
y_number.json
"""
self._run_pass_json("y_number.json")
def test_y_number_0e_1(self):
"""
y_number_0e+1.json
"""
self._run_pass_json("y_number_0e+1.json")
def test_y_number_0e1(self):
"""
y_number_0e1.json
"""
self._run_pass_json("y_number_0e1.json")
def test_y_number_after_space(self):
"""
y_number_after_space.json
"""
self._run_pass_json("y_number_after_space.json")
def test_y_number_double_close_to_zer(self):
"""
y_number_double_close_to_zero.json
"""
self._run_pass_json("y_number_double_close_to_zero.json")
def test_y_number_int_with_exp(self):
"""
y_number_int_with_exp.json
"""
self._run_pass_json("y_number_int_with_exp.json")
def test_y_number_minus_zer(self):
"""
y_number_minus_zero.json
"""
self._run_pass_json("y_number_minus_zero.json")
def test_y_number_negative_int(self):
"""
y_number_negative_int.json
"""
self._run_pass_json("y_number_negative_int.json")
def test_y_number_negative_one(self):
"""
y_number_negative_one.json
"""
self._run_pass_json("y_number_negative_one.json")
def test_y_number_negative_zer(self):
"""
y_number_negative_zero.json
"""
self._run_pass_json("y_number_negative_zero.json")
def test_y_number_real_capital_e(self):
"""
y_number_real_capital_e.json
"""
self._run_pass_json("y_number_real_capital_e.json")
def test_y_number_real_capital_e_neg_exp(self):
"""
y_number_real_capital_e_neg_exp.json
"""
self._run_pass_json("y_number_real_capital_e_neg_exp.json")
def test_y_number_real_capital_e_pos_exp(self):
"""
y_number_real_capital_e_pos_exp.json
"""
self._run_pass_json("y_number_real_capital_e_pos_exp.json")
def test_y_number_real_exponent(self):
"""
y_number_real_exponent.json
"""
self._run_pass_json("y_number_real_exponent.json")
def test_y_number_real_fraction_exponent(self):
"""
y_number_real_fraction_exponent.json
"""
self._run_pass_json("y_number_real_fraction_exponent.json")
def test_y_number_real_neg_exp(self):
"""
y_number_real_neg_exp.json
"""
self._run_pass_json("y_number_real_neg_exp.json")
def test_y_number_real_pos_exponent(self):
"""
y_number_real_pos_exponent.json
"""
self._run_pass_json("y_number_real_pos_exponent.json")
def test_y_number_simple_int(self):
"""
y_number_simple_int.json
"""
self._run_pass_json("y_number_simple_int.json")
def test_y_number_simple_real(self):
"""
y_number_simple_real.json
"""
self._run_pass_json("y_number_simple_real.json")
def test_y_object(self):
"""
y_object.json
"""
self._run_pass_json("y_object.json")
def test_y_object_basic(self):
"""
y_object_basic.json
"""
self._run_pass_json("y_object_basic.json")
def test_y_object_duplicated_key(self):
"""
y_object_duplicated_key.json
"""
self._run_pass_json("y_object_duplicated_key.json")
def test_y_object_duplicated_key_and_value(self):
"""
y_object_duplicated_key_and_value.json
"""
self._run_pass_json("y_object_duplicated_key_and_value.json")
def test_y_object_empty(self):
"""
y_object_empty.json
"""
self._run_pass_json("y_object_empty.json")
def test_y_object_empty_key(self):
"""
y_object_empty_key.json
"""
self._run_pass_json("y_object_empty_key.json")
def test_y_object_escaped_null_in_key(self):
"""
y_object_escaped_null_in_key.json
"""
self._run_pass_json("y_object_escaped_null_in_key.json")
def test_y_object_extreme_number(self):
"""
y_object_extreme_numbers.json
"""
self._run_pass_json("y_object_extreme_numbers.json")
def test_y_object_long_string(self):
"""
y_object_long_strings.json
"""
self._run_pass_json("y_object_long_strings.json")
def test_y_object_simple(self):
"""
y_object_simple.json
"""
self._run_pass_json("y_object_simple.json")
def test_y_object_string_unicode(self):
"""
y_object_string_unicode.json
"""
self._run_pass_json("y_object_string_unicode.json")
def test_y_object_with_newline(self):
"""
y_object_with_newlines.json
"""
self._run_pass_json("y_object_with_newlines.json")
def test_y_string_1_2_3_bytes_UTF_8_sequence(self):
"""
y_string_1_2_3_bytes_UTF-8_sequences.json
"""
self._run_pass_json("y_string_1_2_3_bytes_UTF-8_sequences.json")
def test_y_string_accepted_surrogate_pair(self):
"""
y_string_accepted_surrogate_pair.json
"""
self._run_pass_json("y_string_accepted_surrogate_pair.json")
def test_y_string_accepted_surrogate_pairs(self):
"""
y_string_accepted_surrogate_pairs.json
"""
self._run_pass_json("y_string_accepted_surrogate_pairs.json")
def test_y_string_allowed_escape(self):
"""
y_string_allowed_escapes.json
"""
self._run_pass_json("y_string_allowed_escapes.json")
def test_y_string_backslash_and_u_escaped_zer(self):
"""
y_string_backslash_and_u_escaped_zero.json
"""
self._run_pass_json("y_string_backslash_and_u_escaped_zero.json")
def test_y_string_backslash_doublequote(self):
"""
y_string_backslash_doublequotes.json
"""
self._run_pass_json("y_string_backslash_doublequotes.json")
def test_y_string_comment(self):
"""
y_string_comments.json
"""
self._run_pass_json("y_string_comments.json")
def test_y_string_double_escape_a(self):
"""
y_string_double_escape_a.json
"""
self._run_pass_json("y_string_double_escape_a.json")
def test_y_string_double_escape_(self):
"""
y_string_double_escape_n.json
"""
self._run_pass_json("y_string_double_escape_n.json")
def test_y_string_escaped_control_character(self):
"""
y_string_escaped_control_character.json
"""
self._run_pass_json("y_string_escaped_control_character.json")
def test_y_string_escaped_noncharacter(self):
"""
y_string_escaped_noncharacter.json
"""
self._run_pass_json("y_string_escaped_noncharacter.json")
def test_y_string_in_array(self):
"""
y_string_in_array.json
"""
self._run_pass_json("y_string_in_array.json")
def test_y_string_in_array_with_leading_space(self):
"""
y_string_in_array_with_leading_space.json
"""
self._run_pass_json("y_string_in_array_with_leading_space.json")
def test_y_string_last_surrogates_1_and_2(self):
"""
y_string_last_surrogates_1_and_2.json
"""
self._run_pass_json("y_string_last_surrogates_1_and_2.json")
def test_y_string_nbsp_uescaped(self):
"""
y_string_nbsp_uescaped.json
"""
self._run_pass_json("y_string_nbsp_uescaped.json")
def test_y_string_nonCharacterInUTF_8_U_10FFFF(self):
"""
y_string_nonCharacterInUTF-8_U+10FFFF.json
"""
self._run_pass_json("y_string_nonCharacterInUTF-8_U+10FFFF.json")
def test_y_string_nonCharacterInUTF_8_U_FFFF(self):
"""
y_string_nonCharacterInUTF-8_U+FFFF.json
"""
self._run_pass_json("y_string_nonCharacterInUTF-8_U+FFFF.json")
def test_y_string_null_escape(self):
"""
y_string_null_escape.json
"""
self._run_pass_json("y_string_null_escape.json")
def test_y_string_one_byte_utf_8(self):
"""
y_string_one-byte-utf-8.json
"""
self._run_pass_json("y_string_one-byte-utf-8.json")
def test_y_string_pi(self):
"""
y_string_pi.json
"""
self._run_pass_json("y_string_pi.json")
def test_y_string_reservedCharacterInUTF_8_U_1BFFF(self):
"""
y_string_reservedCharacterInUTF-8_U+1BFFF.json
"""
self._run_pass_json("y_string_reservedCharacterInUTF-8_U+1BFFF.json")
def test_y_string_simple_ascii(self):
"""
y_string_simple_ascii.json
"""
self._run_pass_json("y_string_simple_ascii.json")
def test_y_string_space(self):
"""
y_string_space.json
"""
self._run_pass_json("y_string_space.json")
def test_y_string_surrogates_U_1D11E_MUSICAL_SYMBOL_G_CLEF(self):
"""
y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json
"""
self._run_pass_json("y_string_surrogates_U+1D11E_MUSICAL_SYMBOL_G_CLEF.json")
def test_y_string_three_byte_utf_8(self):
"""
y_string_three-byte-utf-8.json
"""
self._run_pass_json("y_string_three-byte-utf-8.json")
def test_y_string_two_byte_utf_8(self):
"""
y_string_two-byte-utf-8.json
"""
self._run_pass_json("y_string_two-byte-utf-8.json")
def test_y_string_u_2028_line_sep(self):
"""
y_string_u+2028_line_sep.json
"""
self._run_pass_json("y_string_u+2028_line_sep.json")
def test_y_string_u_2029_par_sep(self):
"""
y_string_u+2029_par_sep.json
"""
self._run_pass_json("y_string_u+2029_par_sep.json")
def test_y_string_uEscape(self):
"""
y_string_uEscape.json
"""
self._run_pass_json("y_string_uEscape.json")
def test_y_string_uescaped_newline(self):
"""
y_string_uescaped_newline.json
"""
self._run_pass_json("y_string_uescaped_newline.json")
def test_y_string_unescaped_char_delete(self):
"""
y_string_unescaped_char_delete.json
"""
self._run_pass_json("y_string_unescaped_char_delete.json")
def test_y_string_unicode(self):
"""
y_string_unicode.json
"""
self._run_pass_json("y_string_unicode.json")
def test_y_string_unicodeEscapedBackslash(self):
"""
y_string_unicodeEscapedBackslash.json
"""
self._run_pass_json("y_string_unicodeEscapedBackslash.json")
def test_y_string_unicode_2(self):
"""
y_string_unicode_2.json
"""
self._run_pass_json("y_string_unicode_2.json")
def test_y_string_unicode_U_10FFFE_nonchar(self):
"""
y_string_unicode_U+10FFFE_nonchar.json
"""
self._run_pass_json("y_string_unicode_U+10FFFE_nonchar.json")
def test_y_string_unicode_U_1FFFE_nonchar(self):
"""
y_string_unicode_U+1FFFE_nonchar.json
"""
self._run_pass_json("y_string_unicode_U+1FFFE_nonchar.json")
def test_y_string_unicode_U_200B_ZERO_WIDTH_SPACE(self):
"""
y_string_unicode_U+200B_ZERO_WIDTH_SPACE.json
"""
self._run_pass_json("y_string_unicode_U+200B_ZERO_WIDTH_SPACE.json")
def test_y_string_unicode_U_2064_invisible_plu(self):
"""
y_string_unicode_U+2064_invisible_plus.json
"""
self._run_pass_json("y_string_unicode_U+2064_invisible_plus.json")
def test_y_string_unicode_U_FDD0_nonchar(self):
"""
y_string_unicode_U+FDD0_nonchar.json
"""
self._run_pass_json("y_string_unicode_U+FDD0_nonchar.json")
def test_y_string_unicode_U_FFFE_nonchar(self):
"""
y_string_unicode_U+FFFE_nonchar.json
"""
self._run_pass_json("y_string_unicode_U+FFFE_nonchar.json")
def test_y_string_unicode_escaped_double_quote(self):
"""
y_string_unicode_escaped_double_quote.json
"""
self._run_pass_json("y_string_unicode_escaped_double_quote.json")
def test_y_string_utf8(self):
"""
y_string_utf8.json
"""
self._run_pass_json("y_string_utf8.json")
def test_y_string_with_del_character(self):
"""
y_string_with_del_character.json
"""
self._run_pass_json("y_string_with_del_character.json")
def test_y_structure_lonely_false(self):
"""
y_structure_lonely_false.json
"""
self._run_pass_json("y_structure_lonely_false.json")
def test_y_structure_lonely_int(self):
"""
y_structure_lonely_int.json
"""
self._run_pass_json("y_structure_lonely_int.json")
def test_y_structure_lonely_negative_real(self):
"""
y_structure_lonely_negative_real.json
"""
self._run_pass_json("y_structure_lonely_negative_real.json")
def test_y_structure_lonely_null(self):
"""
y_structure_lonely_null.json
"""
self._run_pass_json("y_structure_lonely_null.json")
def test_y_structure_lonely_string(self):
"""
y_structure_lonely_string.json
"""
self._run_pass_json("y_structure_lonely_string.json")
def test_y_structure_lonely_true(self):
"""
y_structure_lonely_true.json
"""
self._run_pass_json("y_structure_lonely_true.json")
def test_y_structure_string_empty(self):
"""
y_structure_string_empty.json
"""
self._run_pass_json("y_structure_string_empty.json")
def test_y_structure_trailing_newline(self):
"""
y_structure_trailing_newline.json
"""
self._run_pass_json("y_structure_trailing_newline.json")
def test_y_structure_true_in_array(self):
"""
y_structure_true_in_array.json
"""
self._run_pass_json("y_structure_true_in_array.json")
def test_y_structure_whitespace_array(self):
"""
y_structure_whitespace_array.json
"""
self._run_pass_json("y_structure_whitespace_array.json")
def test_n_array_1_true_without_comma(self):
"""
n_array_1_true_without_comma.json
"""
self._run_fail_json("n_array_1_true_without_comma.json")
def test_n_array_a_invalid_utf8(self):
"""
n_array_a_invalid_utf8.json
"""
self._run_fail_json("n_array_a_invalid_utf8.json")
def test_n_array_colon_instead_of_comma(self):
"""
n_array_colon_instead_of_comma.json
"""
self._run_fail_json("n_array_colon_instead_of_comma.json")
def test_n_array_comma_after_close(self):
"""
n_array_comma_after_close.json
"""
self._run_fail_json("n_array_comma_after_close.json")
def test_n_array_comma_and_number(self):
"""
n_array_comma_and_number.json
"""
self._run_fail_json("n_array_comma_and_number.json")
def test_n_array_double_comma(self):
"""
n_array_double_comma.json
"""
self._run_fail_json("n_array_double_comma.json")
def test_n_array_double_extra_comma(self):
"""
n_array_double_extra_comma.json
"""
self._run_fail_json("n_array_double_extra_comma.json")
def test_n_array_extra_close(self):
"""
n_array_extra_close.json
"""
self._run_fail_json("n_array_extra_close.json")
def test_n_array_extra_comma(self):
"""
n_array_extra_comma.json
"""
self._run_fail_json("n_array_extra_comma.json")
def test_n_array_incomplete(self):
"""
n_array_incomplete.json
"""
self._run_fail_json("n_array_incomplete.json")
def test_n_array_incomplete_invalid_value(self):
"""
n_array_incomplete_invalid_value.json
"""
self._run_fail_json("n_array_incomplete_invalid_value.json")
def test_n_array_inner_array_no_comma(self):
"""
n_array_inner_array_no_comma.json
"""
self._run_fail_json("n_array_inner_array_no_comma.json")
def test_n_array_invalid_utf8(self):
"""
n_array_invalid_utf8.json
"""
self._run_fail_json("n_array_invalid_utf8.json")
def test_n_array_items_separated_by_semicol(self):
"""
n_array_items_separated_by_semicolon.json
"""
self._run_fail_json("n_array_items_separated_by_semicolon.json")
def test_n_array_just_comma(self):
"""
n_array_just_comma.json
"""
self._run_fail_json("n_array_just_comma.json")
def test_n_array_just_minu(self):
"""
n_array_just_minus.json
"""
self._run_fail_json("n_array_just_minus.json")
def test_n_array_missing_value(self):
"""
n_array_missing_value.json
"""
self._run_fail_json("n_array_missing_value.json")
def test_n_array_newlines_unclosed(self):
"""
n_array_newlines_unclosed.json
"""
self._run_fail_json("n_array_newlines_unclosed.json")
def test_n_array_number_and_comma(self):
"""
n_array_number_and_comma.json
"""
self._run_fail_json("n_array_number_and_comma.json")
def test_n_array_number_and_several_comma(self):
"""
n_array_number_and_several_commas.json
"""
self._run_fail_json("n_array_number_and_several_commas.json")
def test_n_array_spaces_vertical_tab_formfeed(self):
"""
n_array_spaces_vertical_tab_formfeed.json
"""
self._run_fail_json("n_array_spaces_vertical_tab_formfeed.json")
def test_n_array_star_inside(self):
"""
n_array_star_inside.json
"""
self._run_fail_json("n_array_star_inside.json")
def test_n_array_unclosed(self):
"""
n_array_unclosed.json
"""
self._run_fail_json("n_array_unclosed.json")
def test_n_array_unclosed_trailing_comma(self):
"""
n_array_unclosed_trailing_comma.json
"""
self._run_fail_json("n_array_unclosed_trailing_comma.json")
def test_n_array_unclosed_with_new_line(self):
"""
n_array_unclosed_with_new_lines.json
"""
self._run_fail_json("n_array_unclosed_with_new_lines.json")
def test_n_array_unclosed_with_object_inside(self):
"""
n_array_unclosed_with_object_inside.json
"""
self._run_fail_json("n_array_unclosed_with_object_inside.json")
def test_n_incomplete_false(self):
"""
n_incomplete_false.json
"""
self._run_fail_json("n_incomplete_false.json")
def test_n_incomplete_null(self):
"""
n_incomplete_null.json
"""
self._run_fail_json("n_incomplete_null.json")
def test_n_incomplete_true(self):
"""
n_incomplete_true.json
"""
self._run_fail_json("n_incomplete_true.json")
def test_n_multidigit_number_then_00(self):
"""
n_multidigit_number_then_00.json
"""
self._run_fail_json("n_multidigit_number_then_00.json")
def test_n_number__(self):
"""
n_number_++.json
"""
self._run_fail_json("n_number_++.json")
def test_n_number_1(self):
"""
n_number_+1.json
"""
self._run_fail_json("n_number_+1.json")
def test_n_number_Inf(self):
"""
n_number_+Inf.json
"""
self._run_fail_json("n_number_+Inf.json")
def test_n_number_01(self):
"""
n_number_-01.json
"""
self._run_fail_json("n_number_-01.json")
def test_n_number_1_0(self):
"""
n_number_-1.0..json
"""
self._run_fail_json("n_number_-1.0..json")
def test_n_number_2(self):
"""
n_number_-2..json
"""
self._run_fail_json("n_number_-2..json")
def test_n_number_negative_NaN(self):
"""
n_number_-NaN.json
"""
self._run_fail_json("n_number_-NaN.json")
def test_n_number_negative_1(self):
"""
n_number_.-1.json
"""
self._run_fail_json("n_number_.-1.json")
def test_n_number_2e_3(self):
"""
n_number_.2e-3.json
"""
self._run_fail_json("n_number_.2e-3.json")
def test_n_number_0_1_2(self):
"""
n_number_0.1.2.json
"""
self._run_fail_json("n_number_0.1.2.json")
def test_n_number_0_3e_(self):
"""
n_number_0.3e+.json
"""
self._run_fail_json("n_number_0.3e+.json")
def test_n_number_0_3e(self):
"""
n_number_0.3e.json
"""
self._run_fail_json("n_number_0.3e.json")
def test_n_number_0_e1(self):
"""
n_number_0.e1.json
"""
self._run_fail_json("n_number_0.e1.json")
def test_n_number_0_capital_E_(self):
"""
n_number_0_capital_E+.json
"""
self._run_fail_json("n_number_0_capital_E+.json")
def test_n_number_0_capital_E(self):
"""
n_number_0_capital_E.json
"""
self._run_fail_json("n_number_0_capital_E.json")
def test_n_number_0e_(self):
"""
n_number_0e+.json
"""
self._run_fail_json("n_number_0e+.json")
def test_n_number_0e(self):
"""
n_number_0e.json
"""
self._run_fail_json("n_number_0e.json")
def test_n_number_1_0e_(self):
"""
n_number_1.0e+.json
"""
self._run_fail_json("n_number_1.0e+.json")
def test_n_number_1_0e_2(self):
"""
n_number_1.0e-.json
"""
self._run_fail_json("n_number_1.0e-.json")
def test_n_number_1_0e(self):
"""
n_number_1.0e.json
"""
self._run_fail_json("n_number_1.0e.json")
def test_n_number_1_000(self):
"""
n_number_1_000.json
"""
self._run_fail_json("n_number_1_000.json")
def test_n_number_1eE2(self):
"""
n_number_1eE2.json
"""
self._run_fail_json("n_number_1eE2.json")
def test_n_number_2_e_3(self):
"""
n_number_2.e+3.json
"""
self._run_fail_json("n_number_2.e+3.json")
def test_n_number_2_e_3_2(self):
"""
n_number_2.e-3.json
"""
self._run_fail_json("n_number_2.e-3.json")
def test_n_number_2_e3_3(self):
"""
n_number_2.e3.json
"""
self._run_fail_json("n_number_2.e3.json")
def test_n_number_9_e_(self):
"""
n_number_9.e+.json
"""
self._run_fail_json("n_number_9.e+.json")
def test_n_number_negative_Inf(self):
    """
    n_number_Inf.json
    """
    # NOTE(review): the method name says "negative_Inf" but the fixture is
    # the plain n_number_Inf.json (no minus sign) -- presumably an artifact
    # of mechanical name generation; -Infinity is covered separately by
    # test_n_number_minus_infinity.
    self._run_fail_json("n_number_Inf.json")
def test_n_number_NaN(self):
"""
n_number_NaN.json
"""
self._run_fail_json("n_number_NaN.json")
def test_n_number_U_FF11_fullwidth_digit_one(self):
"""
n_number_U+FF11_fullwidth_digit_one.json
"""
self._run_fail_json("n_number_U+FF11_fullwidth_digit_one.json")
def test_n_number_expressi(self):
"""
n_number_expression.json
"""
self._run_fail_json("n_number_expression.json")
def test_n_number_hex_1_digit(self):
"""
n_number_hex_1_digit.json
"""
self._run_fail_json("n_number_hex_1_digit.json")
def test_n_number_hex_2_digit(self):
"""
n_number_hex_2_digits.json
"""
self._run_fail_json("n_number_hex_2_digits.json")
def test_n_number_infinity(self):
"""
n_number_infinity.json
"""
self._run_fail_json("n_number_infinity.json")
def test_n_number_invalid_(self):
"""
n_number_invalid+-.json
"""
self._run_fail_json("n_number_invalid+-.json")
def test_n_number_invalid_negative_real(self):
"""
n_number_invalid-negative-real.json
"""
self._run_fail_json("n_number_invalid-negative-real.json")
def test_n_number_invalid_utf_8_in_bigger_int(self):
"""
n_number_invalid-utf-8-in-bigger-int.json
"""
self._run_fail_json("n_number_invalid-utf-8-in-bigger-int.json")
def test_n_number_invalid_utf_8_in_exponent(self):
"""
n_number_invalid-utf-8-in-exponent.json
"""
self._run_fail_json("n_number_invalid-utf-8-in-exponent.json")
def test_n_number_invalid_utf_8_in_int(self):
"""
n_number_invalid-utf-8-in-int.json
"""
self._run_fail_json("n_number_invalid-utf-8-in-int.json")
def test_n_number_minus_infinity(self):
"""
n_number_minus_infinity.json
"""
self._run_fail_json("n_number_minus_infinity.json")
def test_n_number_minus_sign_with_trailing_garbage(self):
"""
n_number_minus_sign_with_trailing_garbage.json
"""
self._run_fail_json("n_number_minus_sign_with_trailing_garbage.json")
def test_n_number_minus_space_1(self):
"""
n_number_minus_space_1.json
"""
self._run_fail_json("n_number_minus_space_1.json")
def test_n_number_neg_int_starting_with_zer(self):
"""
n_number_neg_int_starting_with_zero.json
"""
self._run_fail_json("n_number_neg_int_starting_with_zero.json")
def test_n_number_neg_real_without_int_part(self):
"""
n_number_neg_real_without_int_part.json
"""
self._run_fail_json("n_number_neg_real_without_int_part.json")
def test_n_number_neg_with_garbage_at_end(self):
"""
n_number_neg_with_garbage_at_end.json
"""
self._run_fail_json("n_number_neg_with_garbage_at_end.json")
def test_n_number_real_garbage_after_e(self):
"""
n_number_real_garbage_after_e.json
"""
self._run_fail_json("n_number_real_garbage_after_e.json")
def test_n_number_real_with_invalid_utf8_after_e(self):
"""
n_number_real_with_invalid_utf8_after_e.json
"""
self._run_fail_json("n_number_real_with_invalid_utf8_after_e.json")
def test_n_number_real_without_fractional_part(self):
"""
n_number_real_without_fractional_part.json
"""
self._run_fail_json("n_number_real_without_fractional_part.json")
def test_n_number_starting_with_dot(self):
"""
n_number_starting_with_dot.json
"""
self._run_fail_json("n_number_starting_with_dot.json")
def test_n_number_with_alpha(self):
"""
n_number_with_alpha.json
"""
self._run_fail_json("n_number_with_alpha.json")
def test_n_number_with_alpha_char(self):
"""
n_number_with_alpha_char.json
"""
self._run_fail_json("n_number_with_alpha_char.json")
def test_n_number_with_leading_zer(self):
"""
n_number_with_leading_zero.json
"""
self._run_fail_json("n_number_with_leading_zero.json")
def test_n_object_bad_value(self):
"""
n_object_bad_value.json
"""
self._run_fail_json("n_object_bad_value.json")
def test_n_object_bracket_key(self):
"""
n_object_bracket_key.json
"""
self._run_fail_json("n_object_bracket_key.json")
def test_n_object_comma_instead_of_col(self):
"""
n_object_comma_instead_of_colon.json
"""
self._run_fail_json("n_object_comma_instead_of_colon.json")
def test_n_object_double_col(self):
"""
n_object_double_colon.json
"""
self._run_fail_json("n_object_double_colon.json")
def test_n_object_emoji(self):
"""
n_object_emoji.json
"""
self._run_fail_json("n_object_emoji.json")
def test_n_object_garbage_at_end(self):
"""
n_object_garbage_at_end.json
"""
self._run_fail_json("n_object_garbage_at_end.json")
def test_n_object_key_with_single_quote(self):
"""
n_object_key_with_single_quotes.json
"""
self._run_fail_json("n_object_key_with_single_quotes.json")
def test_n_object_lone_continuation_byte_in_key_and_trailing_comma(self):
"""
n_object_lone_continuation_byte_in_key_and_trailing_comma.json
"""
self._run_fail_json(
"n_object_lone_continuation_byte_in_key_and_trailing_comma.json"
)
def test_n_object_missing_col(self):
"""
n_object_missing_colon.json
"""
self._run_fail_json("n_object_missing_colon.json")
def test_n_object_missing_key(self):
"""
n_object_missing_key.json
"""
self._run_fail_json("n_object_missing_key.json")
def test_n_object_missing_semicol(self):
"""
n_object_missing_semicolon.json
"""
self._run_fail_json("n_object_missing_semicolon.json")
def test_n_object_missing_value(self):
"""
n_object_missing_value.json
"""
self._run_fail_json("n_object_missing_value.json")
def test_n_object_no_col(self):
"""
n_object_no-colon.json
"""
self._run_fail_json("n_object_no-colon.json")
def test_n_object_non_string_key(self):
"""
n_object_non_string_key.json
"""
self._run_fail_json("n_object_non_string_key.json")
def test_n_object_non_string_key_but_huge_number_instead(self):
"""
n_object_non_string_key_but_huge_number_instead.json
"""
self._run_fail_json("n_object_non_string_key_but_huge_number_instead.json")
def test_n_object_repeated_null_null(self):
"""
n_object_repeated_null_null.json
"""
self._run_fail_json("n_object_repeated_null_null.json")
def test_n_object_several_trailing_comma(self):
"""
n_object_several_trailing_commas.json
"""
self._run_fail_json("n_object_several_trailing_commas.json")
def test_n_object_single_quote(self):
"""
n_object_single_quote.json
"""
self._run_fail_json("n_object_single_quote.json")
def test_n_object_trailing_comma(self):
"""
n_object_trailing_comma.json
"""
self._run_fail_json("n_object_trailing_comma.json")
def test_n_object_trailing_comment(self):
"""
n_object_trailing_comment.json
"""
self._run_fail_json("n_object_trailing_comment.json")
def test_n_object_trailing_comment_ope(self):
"""
n_object_trailing_comment_open.json
"""
self._run_fail_json("n_object_trailing_comment_open.json")
def test_n_object_trailing_comment_slash_ope(self):
"""
n_object_trailing_comment_slash_open.json
"""
self._run_fail_json("n_object_trailing_comment_slash_open.json")
def test_n_object_trailing_comment_slash_open_incomplete(self):
"""
n_object_trailing_comment_slash_open_incomplete.json
"""
self._run_fail_json("n_object_trailing_comment_slash_open_incomplete.json")
def test_n_object_two_commas_in_a_row(self):
"""
n_object_two_commas_in_a_row.json
"""
self._run_fail_json("n_object_two_commas_in_a_row.json")
def test_n_object_unquoted_key(self):
"""
n_object_unquoted_key.json
"""
self._run_fail_json("n_object_unquoted_key.json")
def test_n_object_unterminated_value(self):
"""
n_object_unterminated-value.json
"""
self._run_fail_json("n_object_unterminated-value.json")
def test_n_object_with_single_string(self):
"""
n_object_with_single_string.json
"""
self._run_fail_json("n_object_with_single_string.json")
def test_n_object_with_trailing_garbage(self):
"""
n_object_with_trailing_garbage.json
"""
self._run_fail_json("n_object_with_trailing_garbage.json")
def test_n_single_space(self):
"""
n_single_space.json
"""
self._run_fail_json("n_single_space.json")
def test_n_string_1_surrogate_then_escape(self):
"""
n_string_1_surrogate_then_escape.json
"""
self._run_fail_json("n_string_1_surrogate_then_escape.json")
def test_n_string_1_surrogate_then_escape_u(self):
"""
n_string_1_surrogate_then_escape_u.json
"""
self._run_fail_json("n_string_1_surrogate_then_escape_u.json")
def test_n_string_1_surrogate_then_escape_u1(self):
"""
n_string_1_surrogate_then_escape_u1.json
"""
self._run_fail_json("n_string_1_surrogate_then_escape_u1.json")
def test_n_string_1_surrogate_then_escape_u1x(self):
"""
n_string_1_surrogate_then_escape_u1x.json
"""
self._run_fail_json("n_string_1_surrogate_then_escape_u1x.json")
def test_n_string_accentuated_char_no_quote(self):
"""
n_string_accentuated_char_no_quotes.json
"""
self._run_fail_json("n_string_accentuated_char_no_quotes.json")
def test_n_string_backslash_00(self):
"""
n_string_backslash_00.json
"""
self._run_fail_json("n_string_backslash_00.json")
def test_n_string_escape_x(self):
"""
n_string_escape_x.json
"""
self._run_fail_json("n_string_escape_x.json")
def test_n_string_escaped_backslash_bad(self):
"""
n_string_escaped_backslash_bad.json
"""
self._run_fail_json("n_string_escaped_backslash_bad.json")
def test_n_string_escaped_ctrl_char_tab(self):
"""
n_string_escaped_ctrl_char_tab.json
"""
self._run_fail_json("n_string_escaped_ctrl_char_tab.json")
def test_n_string_escaped_emoji(self):
"""
n_string_escaped_emoji.json
"""
self._run_fail_json("n_string_escaped_emoji.json")
def test_n_string_incomplete_escape(self):
"""
n_string_incomplete_escape.json
"""
self._run_fail_json("n_string_incomplete_escape.json")
def test_n_string_incomplete_escaped_character(self):
"""
n_string_incomplete_escaped_character.json
"""
self._run_fail_json("n_string_incomplete_escaped_character.json")
def test_n_string_incomplete_surrogate(self):
"""
n_string_incomplete_surrogate.json
"""
self._run_fail_json("n_string_incomplete_surrogate.json")
def test_n_string_incomplete_surrogate_escape_invalid(self):
"""
n_string_incomplete_surrogate_escape_invalid.json
"""
self._run_fail_json("n_string_incomplete_surrogate_escape_invalid.json")
def test_n_string_invalid_utf_8_in_escape(self):
"""
n_string_invalid-utf-8-in-escape.json
"""
self._run_fail_json("n_string_invalid-utf-8-in-escape.json")
def test_n_string_invalid_backslash_esc(self):
"""
n_string_invalid_backslash_esc.json
"""
self._run_fail_json("n_string_invalid_backslash_esc.json")
def test_n_string_invalid_unicode_escape(self):
"""
n_string_invalid_unicode_escape.json
"""
self._run_fail_json("n_string_invalid_unicode_escape.json")
def test_n_string_invalid_utf8_after_escape(self):
"""
n_string_invalid_utf8_after_escape.json
"""
self._run_fail_json("n_string_invalid_utf8_after_escape.json")
def test_n_string_leading_uescaped_thinspace(self):
"""
n_string_leading_uescaped_thinspace.json
"""
self._run_fail_json("n_string_leading_uescaped_thinspace.json")
def test_n_string_no_quotes_with_bad_escape(self):
"""
n_string_no_quotes_with_bad_escape.json
"""
self._run_fail_json("n_string_no_quotes_with_bad_escape.json")
def test_n_string_single_doublequote(self):
"""
n_string_single_doublequote.json
"""
self._run_fail_json("n_string_single_doublequote.json")
def test_n_string_single_quote(self):
"""
n_string_single_quote.json
"""
self._run_fail_json("n_string_single_quote.json")
def test_n_string_single_string_no_double_quote(self):
"""
n_string_single_string_no_double_quotes.json
"""
self._run_fail_json("n_string_single_string_no_double_quotes.json")
def test_n_string_start_escape_unclosed(self):
"""
n_string_start_escape_unclosed.json
"""
self._run_fail_json("n_string_start_escape_unclosed.json")
def test_n_string_unescaped_crtl_char(self):
"""
n_string_unescaped_crtl_char.json
"""
self._run_fail_json("n_string_unescaped_crtl_char.json")
def test_n_string_unescaped_newline(self):
"""
n_string_unescaped_newline.json
"""
self._run_fail_json("n_string_unescaped_newline.json")
def test_n_string_unescaped_tab(self):
"""
n_string_unescaped_tab.json
"""
self._run_fail_json("n_string_unescaped_tab.json")
def test_n_string_unicode_CapitalU(self):
"""
n_string_unicode_CapitalU.json
"""
self._run_fail_json("n_string_unicode_CapitalU.json")
def test_n_string_with_trailing_garbage(self):
"""
n_string_with_trailing_garbage.json
"""
self._run_fail_json("n_string_with_trailing_garbage.json")
def test_n_structure_100000_opening_array(self):
    """
    n_structure_100000_opening_arrays.json
    """
    # NOTE(review): this is the only test that passes a ".json.xz" filename
    # -- the 100000-opening-brackets fixture is presumably stored
    # xz-compressed, with read_fixture_bytes handling decompression; confirm
    # against util.read_fixture_bytes.
    self._run_fail_json("n_structure_100000_opening_arrays.json.xz")
def test_n_structure_U_2060_word_joined(self):
"""
n_structure_U+2060_word_joined.json
"""
self._run_fail_json("n_structure_U+2060_word_joined.json")
def test_n_structure_UTF8_BOM_no_data(self):
"""
n_structure_UTF8_BOM_no_data.json
"""
self._run_fail_json("n_structure_UTF8_BOM_no_data.json")
def test_n_structure_angle_bracket_(self):
"""
n_structure_angle_bracket_..json
"""
self._run_fail_json("n_structure_angle_bracket_..json")
def test_n_structure_angle_bracket_null(self):
"""
n_structure_angle_bracket_null.json
"""
self._run_fail_json("n_structure_angle_bracket_null.json")
def test_n_structure_array_trailing_garbage(self):
"""
n_structure_array_trailing_garbage.json
"""
self._run_fail_json("n_structure_array_trailing_garbage.json")
def test_n_structure_array_with_extra_array_close(self):
"""
n_structure_array_with_extra_array_close.json
"""
self._run_fail_json("n_structure_array_with_extra_array_close.json")
def test_n_structure_array_with_unclosed_string(self):
"""
n_structure_array_with_unclosed_string.json
"""
self._run_fail_json("n_structure_array_with_unclosed_string.json")
def test_n_structure_ascii_unicode_identifier(self):
"""
n_structure_ascii-unicode-identifier.json
"""
self._run_fail_json("n_structure_ascii-unicode-identifier.json")
def test_n_structure_capitalized_True(self):
"""
n_structure_capitalized_True.json
"""
self._run_fail_json("n_structure_capitalized_True.json")
def test_n_structure_close_unopened_array(self):
"""
n_structure_close_unopened_array.json
"""
self._run_fail_json("n_structure_close_unopened_array.json")
def test_n_structure_comma_instead_of_closing_brace(self):
"""
n_structure_comma_instead_of_closing_brace.json
"""
self._run_fail_json("n_structure_comma_instead_of_closing_brace.json")
def test_n_structure_double_array(self):
"""
n_structure_double_array.json
"""
self._run_fail_json("n_structure_double_array.json")
def test_n_structure_end_array(self):
"""
n_structure_end_array.json
"""
self._run_fail_json("n_structure_end_array.json")
def test_n_structure_incomplete_UTF8_BOM(self):
"""
n_structure_incomplete_UTF8_BOM.json
"""
self._run_fail_json("n_structure_incomplete_UTF8_BOM.json")
def test_n_structure_lone_invalid_utf_8(self):
"""
n_structure_lone-invalid-utf-8.json
"""
self._run_fail_json("n_structure_lone-invalid-utf-8.json")
def test_n_structure_lone_open_bracket(self):
"""
n_structure_lone-open-bracket.json
"""
self._run_fail_json("n_structure_lone-open-bracket.json")
def test_n_structure_no_data(self):
"""
n_structure_no_data.json
"""
self._run_fail_json("n_structure_no_data.json")
def test_n_structure_null_byte_outside_string(self):
"""
n_structure_null-byte-outside-string.json
"""
self._run_fail_json("n_structure_null-byte-outside-string.json")
def test_n_structure_number_with_trailing_garbage(self):
"""
n_structure_number_with_trailing_garbage.json
"""
self._run_fail_json("n_structure_number_with_trailing_garbage.json")
def test_n_structure_object_followed_by_closing_object(self):
"""
n_structure_object_followed_by_closing_object.json
"""
self._run_fail_json("n_structure_object_followed_by_closing_object.json")
def test_n_structure_object_unclosed_no_value(self):
"""
n_structure_object_unclosed_no_value.json
"""
self._run_fail_json("n_structure_object_unclosed_no_value.json")
def test_n_structure_object_with_comment(self):
"""
n_structure_object_with_comment.json
"""
self._run_fail_json("n_structure_object_with_comment.json")
def test_n_structure_object_with_trailing_garbage(self):
"""
n_structure_object_with_trailing_garbage.json
"""
self._run_fail_json("n_structure_object_with_trailing_garbage.json")
def test_n_structure_open_array_apostrophe(self):
"""
n_structure_open_array_apostrophe.json
"""
self._run_fail_json("n_structure_open_array_apostrophe.json")
def test_n_structure_open_array_comma(self):
"""
n_structure_open_array_comma.json
"""
self._run_fail_json("n_structure_open_array_comma.json")
def test_n_structure_open_array_object(self):
"""
n_structure_open_array_object.json
"""
self._run_fail_json("n_structure_open_array_object.json.xz")
def test_n_structure_open_array_open_object(self):
"""
n_structure_open_array_open_object.json
"""
self._run_fail_json("n_structure_open_array_open_object.json")
def test_n_structure_open_array_open_string(self):
"""
n_structure_open_array_open_string.json
"""
self._run_fail_json("n_structure_open_array_open_string.json")
def test_n_structure_open_array_string(self):
"""
n_structure_open_array_string.json
"""
self._run_fail_json("n_structure_open_array_string.json")
def test_n_structure_open_object(self):
"""
n_structure_open_object.json
"""
self._run_fail_json("n_structure_open_object.json")
def test_n_structure_open_object_close_array(self):
"""
n_structure_open_object_close_array.json
"""
self._run_fail_json("n_structure_open_object_close_array.json")
def test_n_structure_open_object_comma(self):
"""
n_structure_open_object_comma.json
"""
self._run_fail_json("n_structure_open_object_comma.json")
def test_n_structure_open_object_open_array(self):
"""
n_structure_open_object_open_array.json
"""
self._run_fail_json("n_structure_open_object_open_array.json")
def test_n_structure_open_object_open_string(self):
"""
n_structure_open_object_open_string.json
"""
self._run_fail_json("n_structure_open_object_open_string.json")
def test_n_structure_open_object_string_with_apostrophe(self):
"""
n_structure_open_object_string_with_apostrophes.json
"""
self._run_fail_json("n_structure_open_object_string_with_apostrophes.json")
def test_n_structure_open_ope(self):
"""
n_structure_open_open.json
"""
self._run_fail_json("n_structure_open_open.json")
def test_n_structure_single_eacute(self):
"""
n_structure_single_eacute.json
"""
self._run_fail_json("n_structure_single_eacute.json")
def test_n_structure_single_star(self):
"""
n_structure_single_star.json
"""
self._run_fail_json("n_structure_single_star.json")
def test_n_structure_trailing_(self):
"""
n_structure_trailing_#.json
"""
self._run_fail_json("n_structure_trailing_#.json")
def test_n_structure_uescaped_LF_before_string(self):
"""
n_structure_uescaped_LF_before_string.json
"""
self._run_fail_json("n_structure_uescaped_LF_before_string.json")
def test_n_structure_unclosed_array(self):
"""
n_structure_unclosed_array.json
"""
self._run_fail_json("n_structure_unclosed_array.json")
def test_n_structure_unclosed_array_partial_null(self):
"""
n_structure_unclosed_array_partial_null.json
"""
self._run_fail_json("n_structure_unclosed_array_partial_null.json")
def test_n_structure_unclosed_array_unfinished_false(self):
"""
n_structure_unclosed_array_unfinished_false.json
"""
self._run_fail_json("n_structure_unclosed_array_unfinished_false.json")
def test_n_structure_unclosed_array_unfinished_true(self):
"""
n_structure_unclosed_array_unfinished_true.json
"""
self._run_fail_json("n_structure_unclosed_array_unfinished_true.json")
def test_n_structure_unclosed_object(self):
"""
n_structure_unclosed_object.json
"""
self._run_fail_json("n_structure_unclosed_object.json")
def test_n_structure_unicode_identifier(self):
"""
n_structure_unicode-identifier.json
"""
self._run_fail_json("n_structure_unicode-identifier.json")
def test_n_structure_whitespace_U_2060_word_joiner(self):
"""
n_structure_whitespace_U+2060_word_joiner.json
"""
self._run_fail_json("n_structure_whitespace_U+2060_word_joiner.json")
def test_n_structure_whitespace_formfeed(self):
"""
n_structure_whitespace_formfeed.json
"""
self._run_fail_json("n_structure_whitespace_formfeed.json")
def test_i_number_double_huge_neg_exp(self):
"""
i_number_double_huge_neg_exp.json
"""
self._run_pass_json("i_number_double_huge_neg_exp.json")
def test_i_number_huge_exp(self):
"""
i_number_huge_exp.json
"""
self._run_fail_json("i_number_huge_exp.json")
def test_i_number_neg_int_huge_exp(self):
"""
i_number_neg_int_huge_exp.json
"""
self._run_fail_json("i_number_neg_int_huge_exp.json")
def test_i_number_pos_double_huge_exp(self):
"""
i_number_pos_double_huge_exp.json
"""
self._run_fail_json("i_number_pos_double_huge_exp.json")
def test_i_number_real_neg_overflow(self):
"""
i_number_real_neg_overflow.json
"""
self._run_fail_json("i_number_real_neg_overflow.json")
def test_i_number_real_pos_overflow(self):
"""
i_number_real_pos_overflow.json
"""
self._run_fail_json("i_number_real_pos_overflow.json")
def test_i_number_real_underflow(self):
"""
i_number_real_underflow.json
"""
self._run_pass_json("i_number_real_underflow.json")
def test_i_number_too_big_neg_int(self):
"""
i_number_too_big_neg_int.json
"""
self._run_pass_json("i_number_too_big_neg_int.json")
def test_i_number_too_big_pos_int(self):
"""
i_number_too_big_pos_int.json
"""
self._run_pass_json("i_number_too_big_pos_int.json")
def test_i_number_very_big_negative_int(self):
"""
i_number_very_big_negative_int.json
"""
self._run_pass_json("i_number_very_big_negative_int.json")
def test_i_object_key_lone_2nd_surrogate(self):
"""
i_object_key_lone_2nd_surrogate.json
"""
self._run_fail_json("i_object_key_lone_2nd_surrogate.json")
def test_i_string_1st_surrogate_but_2nd_missing(self):
"""
i_string_1st_surrogate_but_2nd_missing.json
"""
self._run_fail_json("i_string_1st_surrogate_but_2nd_missing.json")
def test_i_string_1st_valid_surrogate_2nd_invalid(self):
"""
i_string_1st_valid_surrogate_2nd_invalid.json
"""
self._run_fail_json("i_string_1st_valid_surrogate_2nd_invalid.json")
def test_i_string_UTF_16LE_with_BOM(self):
"""
i_string_UTF-16LE_with_BOM.json
"""
self._run_fail_json("i_string_UTF-16LE_with_BOM.json")
def test_i_string_UTF_8_invalid_sequence(self):
"""
i_string_UTF-8_invalid_sequence.json
"""
self._run_fail_json("i_string_UTF-8_invalid_sequence.json")
def test_i_string_UTF8_surrogate_U_D800(self):
"""
i_string_UTF8_surrogate_U+D800.json
"""
self._run_fail_json("i_string_UTF8_surrogate_U+D800.json")
def test_i_string_incomplete_surrogate_and_escape_valid(self):
"""
i_string_incomplete_surrogate_and_escape_valid.json
"""
self._run_fail_json("i_string_incomplete_surrogate_and_escape_valid.json")
def test_i_string_incomplete_surrogate_pair(self):
"""
i_string_incomplete_surrogate_pair.json
"""
self._run_fail_json("i_string_incomplete_surrogate_pair.json")
def test_i_string_incomplete_surrogates_escape_valid(self):
"""
i_string_incomplete_surrogates_escape_valid.json
"""
self._run_fail_json("i_string_incomplete_surrogates_escape_valid.json")
def test_i_string_invalid_lonely_surrogate(self):
"""
i_string_invalid_lonely_surrogate.json
"""
self._run_fail_json("i_string_invalid_lonely_surrogate.json")
def test_i_string_invalid_surrogate(self):
"""
i_string_invalid_surrogate.json
"""
self._run_fail_json("i_string_invalid_surrogate.json")
def test_i_string_invalid_utf_8(self):
"""
i_string_invalid_utf-8.json
"""
self._run_fail_json("i_string_invalid_utf-8.json")
def test_i_string_inverted_surrogates_U_1D11E(self):
"""
i_string_inverted_surrogates_U+1D11E.json
"""
self._run_fail_json("i_string_inverted_surrogates_U+1D11E.json")
def test_i_string_iso_latin_1(self):
"""
i_string_iso_latin_1.json
"""
self._run_fail_json("i_string_iso_latin_1.json")
def test_i_string_lone_second_surrogate(self):
"""
i_string_lone_second_surrogate.json
"""
self._run_fail_json("i_string_lone_second_surrogate.json")
def test_i_string_lone_utf8_continuation_byte(self):
"""
i_string_lone_utf8_continuation_byte.json
"""
self._run_fail_json("i_string_lone_utf8_continuation_byte.json")
def test_i_string_not_in_unicode_range(self):
"""
i_string_not_in_unicode_range.json
"""
self._run_fail_json("i_string_not_in_unicode_range.json")
def test_i_string_overlong_sequence_2_byte(self):
"""
i_string_overlong_sequence_2_bytes.json
"""
self._run_fail_json("i_string_overlong_sequence_2_bytes.json")
def test_i_string_overlong_sequence_6_byte(self):
"""
i_string_overlong_sequence_6_bytes.json
"""
self._run_fail_json("i_string_overlong_sequence_6_bytes.json")
def test_i_string_overlong_sequence_6_bytes_null(self):
"""
i_string_overlong_sequence_6_bytes_null.json
"""
self._run_fail_json("i_string_overlong_sequence_6_bytes_null.json")
def test_i_string_truncated_utf_8(self):
"""
i_string_truncated-utf-8.json
"""
self._run_fail_json("i_string_truncated-utf-8.json")
def test_i_string_utf16BE_no_BOM(self):
"""
i_string_utf16BE_no_BOM.json
"""
self._run_fail_json("i_string_utf16BE_no_BOM.json")
def test_i_string_utf16LE_no_BOM(self):
"""
i_string_utf16LE_no_BOM.json
"""
self._run_fail_json("i_string_utf16LE_no_BOM.json")
def test_i_structure_500_nested_array(self):
"""
i_structure_500_nested_arrays.json
"""
try:
self._run_pass_json("i_structure_500_nested_arrays.json.xz")
except orjson.JSONDecodeError:
# fails on serde, passes on yyjson
pass
def test_i_structure_UTF_8_BOM_empty_object(self):
"""
i_structure_UTF-8_BOM_empty_object.json
"""
self._run_fail_json("i_structure_UTF-8_BOM_empty_object.json")
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_jsonchecker.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
Tests files from http://json.org/JSON_checker/
"""
import pytest
import orjson
from .util import read_fixture_str
PATTERN_1 = '["JSON Test Pattern pass1",{"object with 1 member":["array with 1 element"]},{},[],-42,true,false,null,{"integer":1234567890,"real":-9876.54321,"e":1.23456789e-13,"E":1.23456789e34,"":2.3456789012e76,"zero":0,"one":1,"space":" ","quote":"\\"","backslash":"\\\\","controls":"\\b\\f\\n\\r\\t","slash":"/ & /","alpha":"abcdefghijklmnopqrstuvwyz","ALPHA":"ABCDEFGHIJKLMNOPQRSTUVWYZ","digit":"0123456789","0123456789":"digit","special":"`1~!@#$%^&*()_+-={\':[,]}|;.</>?","hex":"ģ䕧覫췯ꯍ\uef4a","true":true,"false":false,"null":null,"array":[],"object":{},"address":"50 St. James Street","url":"http://www.JSON.org/","comment":"// /* <!-- --","# -- --> */":" "," s p a c e d ":[1,2,3,4,5,6,7],"compact":[1,2,3,4,5,6,7],"jsontext":"{\\"object with 1 member\\":[\\"array with 1 element\\"]}","quotes":"" \\" %22 0x22 034 "","/\\\\\\"쫾몾ꮘﳞ볚\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?":"A key can be any string"},0.5,98.6,99.44,1066,10.0,1.0,0.1,1.0,2.0,2.0,"rosebud"]'.encode()
class TestJsonChecker:
def _run_fail_json(self, filename, exc=orjson.JSONDecodeError):
data = read_fixture_str(filename, "jsonchecker")
pytest.raises(exc, orjson.loads, data)
def _run_pass_json(self, filename, match=""):
data = read_fixture_str(filename, "jsonchecker")
assert orjson.dumps(orjson.loads(data)) == match
def test_fail01(self):
"""
fail01.json
"""
self._run_pass_json(
"fail01.json",
b'"A JSON payload should be an object or array, not a string."',
)
def test_fail02(self):
"""
fail02.json
"""
self._run_fail_json("fail02.json", orjson.JSONDecodeError) # EOF
def test_fail03(self):
"""
fail03.json
"""
self._run_fail_json("fail03.json")
def test_fail04(self):
"""
fail04.json
"""
self._run_fail_json("fail04.json")
def test_fail05(self):
"""
fail05.json
"""
self._run_fail_json("fail05.json")
def test_fail06(self):
"""
fail06.json
"""
self._run_fail_json("fail06.json")
def test_fail07(self):
"""
fail07.json
"""
self._run_fail_json("fail07.json")
def test_fail08(self):
"""
fail08.json
"""
self._run_fail_json("fail08.json")
def test_fail09(self):
"""
fail09.json
"""
self._run_fail_json("fail09.json")
def test_fail10(self):
"""
fail10.json
"""
self._run_fail_json("fail10.json")
def test_fail11(self):
"""
fail11.json
"""
self._run_fail_json("fail11.json")
def test_fail12(self):
"""
fail12.json
"""
self._run_fail_json("fail12.json")
def test_fail13(self):
"""
fail13.json
"""
self._run_fail_json("fail13.json")
def test_fail14(self):
"""
fail14.json
"""
self._run_fail_json("fail14.json")
def test_fail15(self):
"""
fail15.json
"""
self._run_fail_json("fail15.json")
def test_fail16(self):
"""
fail16.json
"""
self._run_fail_json("fail16.json")
def test_fail17(self):
"""
fail17.json
"""
self._run_fail_json("fail17.json")
def test_fail18(self):
"""
fail18.json
"""
self._run_pass_json(
"fail18.json", b'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]'
)
def test_fail19(self):
"""
fail19.json
"""
self._run_fail_json("fail19.json")
def test_fail20(self):
"""
fail20.json
"""
self._run_fail_json("fail20.json")
def test_fail21(self):
"""
fail21.json
"""
self._run_fail_json("fail21.json")
def test_fail22(self):
"""
fail22.json
"""
self._run_fail_json("fail22.json")
def test_fail23(self):
"""
fail23.json
"""
self._run_fail_json("fail23.json")
def test_fail24(self):
"""
fail24.json
"""
self._run_fail_json("fail24.json")
def test_fail25(self):
"""
fail25.json
"""
self._run_fail_json("fail25.json")
def test_fail26(self):
"""
fail26.json
"""
self._run_fail_json("fail26.json")
def test_fail27(self):
"""
fail27.json
"""
self._run_fail_json("fail27.json")
def test_fail28(self):
"""
fail28.json
"""
self._run_fail_json("fail28.json")
def test_fail29(self):
"""
fail29.json
"""
self._run_fail_json("fail29.json")
def test_fail30(self):
"""
fail30.json
"""
self._run_fail_json("fail30.json")
def test_fail31(self):
"""
fail31.json
"""
self._run_fail_json("fail31.json")
def test_fail32(self):
"""
fail32.json
"""
self._run_fail_json("fail32.json", orjson.JSONDecodeError) # EOF
def test_fail33(self):
"""
fail33.json
"""
self._run_fail_json("fail33.json")
def test_pass01(self):
"""
pass01.json
"""
self._run_pass_json("pass01.json", PATTERN_1)
def test_pass02(self):
"""
pass02.json
"""
self._run_pass_json(
"pass02.json", b'[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]'
)
def test_pass03(self):
"""
pass03.json
"""
self._run_pass_json(
"pass03.json",
b'{"JSON Test Pattern pass3":{"The outermost value":"must be '
b'an object or array.","In this test":"It is an object."}}',
)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_dict.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import orjson
class TestDict:
def test_dict(self):
"""
dict
"""
obj = {"key": "value"}
ref = '{"key":"value"}'
assert orjson.dumps(obj) == ref.encode("utf-8")
assert orjson.loads(ref) == obj
def test_dict_duplicate_loads(self):
assert orjson.loads(b'{"1":true,"1":false}') == {"1": False}
def test_dict_empty(self):
obj = [{"key": [{}] * 4096}] * 4096 # type:ignore
assert orjson.loads(orjson.dumps(obj)) == obj
def test_dict_large_dict(self):
"""
dict with >512 keys
"""
obj = {"key_%s" % idx: [{}, {"a": [{}, {}, {}]}, {}] for idx in range(513)} # type: ignore
assert len(obj) == 513
assert orjson.loads(orjson.dumps(obj)) == obj
def test_dict_large_4096(self):
"""
dict with >4096 keys
"""
obj = {"key_%s" % idx: "value_%s" % idx for idx in range(4097)}
assert len(obj) == 4097
assert orjson.loads(orjson.dumps(obj)) == obj
def test_dict_large_65536(self):
"""
dict with >65536 keys
"""
obj = {"key_%s" % idx: "value_%s" % idx for idx in range(65537)}
assert len(obj) == 65537
assert orjson.loads(orjson.dumps(obj)) == obj
def test_dict_large_keys(self):
"""
dict with keys too large to cache
"""
obj = {
"keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey": "value"
}
ref = '{"keeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeey":"value"}'
assert orjson.dumps(obj) == ref.encode("utf-8")
assert orjson.loads(ref) == obj
def test_dict_unicode(self):
"""
dict unicode keys
"""
obj = {"🐈": "value"}
ref = b'{"\xf0\x9f\x90\x88":"value"}'
assert orjson.dumps(obj) == ref
assert orjson.loads(ref) == obj
assert orjson.loads(ref)["🐈"] == "value"
def test_dict_invalid_key_dumps(self):
"""
dict invalid key dumps()
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps({1: "value"})
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps({b"key": "value"})
def test_dict_invalid_key_loads(self):
"""
dict invalid key loads()
"""
with pytest.raises(orjson.JSONDecodeError):
orjson.loads('{1:"value"}')
with pytest.raises(orjson.JSONDecodeError):
orjson.loads('{{"a":true}:true}')
def test_dict_similar_keys(self):
"""
loads() similar keys
This was a regression in 3.4.2 caused by using
the implementation in wy instead of wyhash.
"""
assert orjson.loads(
'{"cf_status_firefox67": "---", "cf_status_firefox57": "verified"}'
) == {"cf_status_firefox57": "verified", "cf_status_firefox67": "---"}
def test_dict_pop_replace_first(self):
"Test pop and replace a first key in a dict with other keys."
data = {"id": "any", "other": "any"}
data.pop("id")
assert orjson.dumps(data) == b'{"other":"any"}'
data["id"] = "new"
assert orjson.dumps(data) == b'{"other":"any","id":"new"}'
def test_dict_pop_replace_last(self):
"Test pop and replace a last key in a dict with other keys."
data = {"other": "any", "id": "any"}
data.pop("id")
assert orjson.dumps(data) == b'{"other":"any"}'
data["id"] = "new"
assert orjson.dumps(data) == b'{"other":"any","id":"new"}'
def test_dict_pop(self):
"Test pop and replace a key in a dict with no other keys."
data = {"id": "any"}
data.pop("id")
assert orjson.dumps(data) == b"{}"
data["id"] = "new"
assert orjson.dumps(data) == b'{"id":"new"}'
def test_in_place(self):
"Mutate dict in-place"
data = {"id": "any", "static": "msg"}
data["id"] = "new"
assert orjson.dumps(data) == b'{"id":"new","static":"msg"}'
def test_dict_0xff(self):
"dk_size <= 0xff"
data = {str(idx): idx for idx in range(0, 0xFF)}
data.pop("112")
data["112"] = 1
data["113"] = 2
assert orjson.loads(orjson.dumps(data)) == data
def test_dict_0xff_repeated(self):
"dk_size <= 0xff repeated"
for _ in range(0, 100):
data = {str(idx): idx for idx in range(0, 0xFF)}
data.pop("112")
data["112"] = 1
data["113"] = 2
assert orjson.loads(orjson.dumps(data)) == data
def test_dict_0xffff(self):
"dk_size <= 0xffff"
data = {str(idx): idx for idx in range(0, 0xFFFF)}
data.pop("112")
data["112"] = 1
data["113"] = 2
assert orjson.loads(orjson.dumps(data)) == data
def test_dict_0xffff_repeated(self):
"dk_size <= 0xffff repeated"
for _ in range(0, 100):
data = {str(idx): idx for idx in range(0, 0xFFFF)}
data.pop("112")
data["112"] = 1
data["113"] = 2
assert orjson.loads(orjson.dumps(data)) == data
def test_dict_dict(self):
class C:
def __init__(self):
self.a = 0
self.b = 1
assert orjson.dumps(C().__dict__) == b'{"a":0,"b":1}'
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_typeddict.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import orjson
try:
from typing import TypedDict # type: ignore
except ImportError:
from typing_extensions import TypedDict
class TestTypedDict:
def test_typeddict(self):
"""
dumps() TypedDict
"""
class TypedDict1(TypedDict):
a: str
b: int
obj = TypedDict1(a="a", b=1)
assert orjson.dumps(obj) == b'{"a":"a","b":1}'
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_append_newline.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import orjson
from .util import read_fixture_obj
class TestAppendNewline:
def test_dumps_newline(self):
"""
dumps() OPT_APPEND_NEWLINE
"""
assert orjson.dumps([], option=orjson.OPT_APPEND_NEWLINE) == b"[]\n"
def test_twitter_newline(self):
"""
loads(),dumps() twitter.json OPT_APPEND_NEWLINE
"""
val = read_fixture_obj("twitter.json.xz")
assert orjson.loads(orjson.dumps(val, option=orjson.OPT_APPEND_NEWLINE)) == val
def test_canada(self):
"""
loads(), dumps() canada.json OPT_APPEND_NEWLINE
"""
val = read_fixture_obj("canada.json.xz")
assert orjson.loads(orjson.dumps(val, option=orjson.OPT_APPEND_NEWLINE)) == val
def test_citm_catalog_newline(self):
"""
loads(), dumps() citm_catalog.json OPT_APPEND_NEWLINE
"""
val = read_fixture_obj("citm_catalog.json.xz")
assert orjson.loads(orjson.dumps(val, option=orjson.OPT_APPEND_NEWLINE)) == val
def test_github_newline(self):
"""
loads(), dumps() github.json OPT_APPEND_NEWLINE
"""
val = read_fixture_obj("github.json.xz")
assert orjson.loads(orjson.dumps(val, option=orjson.OPT_APPEND_NEWLINE)) == val
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_reentrant.py | import orjson
class C:
c: "C"
def __del__(self):
orjson.loads('"' + "a" * 10000 + '"')
def test_reentrant():
c = C()
c.c = c
del c
orjson.loads("[" + "[]," * 1000 + "[]]")
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_enum.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import datetime
import enum
import pytest
import orjson
class StrEnum(str, enum.Enum):
AAA = "aaa"
class IntEnum(int, enum.Enum):
ONE = 1
class IntEnumEnum(enum.IntEnum):
ONE = 1
class IntFlagEnum(enum.IntFlag):
ONE = 1
class FlagEnum(enum.Flag):
ONE = 1
class AutoEnum(enum.auto):
A = "a"
class FloatEnum(float, enum.Enum):
ONE = 1.1
class Custom:
def __init__(self, val):
self.val = val
def default(obj):
if isinstance(obj, Custom):
return obj.val
raise TypeError
class UnspecifiedEnum(enum.Enum):
A = "a"
B = 1
C = FloatEnum.ONE
D = {"d": IntEnum.ONE}
E = Custom("c")
F = datetime.datetime(1970, 1, 1)
class TestEnum:
def test_cannot_subclass(self):
"""
enum.Enum cannot be subclassed
obj->ob_type->ob_base will always be enum.EnumMeta
"""
with pytest.raises(TypeError):
class Subclass(StrEnum): # type: ignore
B = "b"
def test_arbitrary_enum(self):
assert orjson.dumps(UnspecifiedEnum.A) == b'"a"'
assert orjson.dumps(UnspecifiedEnum.B) == b"1"
assert orjson.dumps(UnspecifiedEnum.C) == b"1.1"
assert orjson.dumps(UnspecifiedEnum.D) == b'{"d":1}'
def test_custom_enum(self):
assert orjson.dumps(UnspecifiedEnum.E, default=default) == b'"c"'
def test_enum_options(self):
assert (
orjson.dumps(UnspecifiedEnum.F, option=orjson.OPT_NAIVE_UTC)
== b'"1970-01-01T00:00:00+00:00"'
)
def test_int_enum(self):
assert orjson.dumps(IntEnum.ONE) == b"1"
def test_intenum_enum(self):
assert orjson.dumps(IntEnumEnum.ONE) == b"1"
def test_intflag_enum(self):
assert orjson.dumps(IntFlagEnum.ONE) == b"1"
def test_flag_enum(self):
assert orjson.dumps(FlagEnum.ONE) == b"1"
def test_auto_enum(self):
assert orjson.dumps(AutoEnum.A) == b'"a"'
def test_float_enum(self):
assert orjson.dumps(FloatEnum.ONE) == b"1.1"
def test_str_enum(self):
assert orjson.dumps(StrEnum.AAA) == b'"aaa"'
def test_bool_enum(self):
with pytest.raises(TypeError):
class BoolEnum(bool, enum.Enum): # type: ignore
TRUE = True
def test_non_str_keys_enum(self):
assert (
orjson.dumps({StrEnum.AAA: 1}, option=orjson.OPT_NON_STR_KEYS)
== b'{"aaa":1}'
)
assert (
orjson.dumps({IntEnum.ONE: 1}, option=orjson.OPT_NON_STR_KEYS) == b'{"1":1}'
)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_transform.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import orjson
from .util import read_fixture_bytes
def _read_file(filename):
return read_fixture_bytes(filename, "transform").strip(b"\n").strip(b"\r")
class TestJSONTestSuiteTransform:
def _pass_transform(self, filename, reference=None):
data = _read_file(filename)
assert orjson.dumps(orjson.loads(data)) == (reference or data)
def _fail_transform(self, filename):
data = _read_file(filename)
with pytest.raises(orjson.JSONDecodeError):
orjson.loads(data)
def test_number_1(self):
"""
number_1.0.json
"""
self._pass_transform("number_1.0.json")
def test_number_1e6(self):
"""
number_1e6.json
"""
self._pass_transform("number_1e6.json", b"[1000000.0]")
def test_number_1e_999(self):
"""
number_1e-999.json
"""
self._pass_transform("number_1e-999.json", b"[0.0]")
def test_number_10000000000000000999(self):
"""
number_10000000000000000999.json
"""
# cannot serialize due to range
assert orjson.loads(_read_file("number_10000000000000000999.json")) == [
10000000000000000999
]
def test_number_1000000000000000(self):
"""
number_1000000000000000.json
"""
self._pass_transform("number_1000000000000000.json")
def test_object_key_nfc_nfd(self):
"""
object_key_nfc_nfd.json
"""
self._pass_transform("object_key_nfc_nfd.json")
def test_object_key_nfd_nfc(self):
"""
object_key_nfd_nfc.json
"""
self._pass_transform("object_key_nfd_nfc.json")
def test_object_same_key_different_values(self):
"""
object_same_key_different_values.json
"""
self._pass_transform("object_same_key_different_values.json", b'{"a":2}')
def test_object_same_key_same_value(self):
"""
object_same_key_same_value.json
"""
self._pass_transform("object_same_key_same_value.json", b'{"a":1}')
def test_object_same_key_unclear_values(self):
"""
object_same_key_unclear_values.json
"""
data = _read_file("object_same_key_unclear_values.json")
# varies by backend
assert data in (b'{"a":-0.0}', b'{"a":0, "a":-0}')
def test_string_1_escaped_invalid_codepoint(self):
"""
string_1_escaped_invalid_codepoint.json
"""
self._fail_transform("string_1_escaped_invalid_codepoint.json")
def test_string_1_invalid_codepoint(self):
"""
string_1_invalid_codepoint.json
"""
self._fail_transform("string_1_invalid_codepoint.json")
def test_string_2_escaped_invalid_codepoints(self):
"""
string_2_escaped_invalid_codepoints.json
"""
self._fail_transform("string_2_escaped_invalid_codepoints.json")
def test_string_2_invalid_codepoints(self):
"""
string_2_invalid_codepoints.json
"""
self._fail_transform("string_2_invalid_codepoints.json")
def test_string_3_escaped_invalid_codepoints(self):
"""
string_3_escaped_invalid_codepoints.json
"""
self._fail_transform("string_3_escaped_invalid_codepoints.json")
def test_string_3_invalid_codepoints(self):
"""
string_3_invalid_codepoints.json
"""
self._fail_transform("string_3_invalid_codepoints.json")
def test_string_with_escaped_NULL(self):
"""
string_with_escaped_NULL.json
"""
self._pass_transform("string_with_escaped_NULL.json")
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_ujson.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import json
import pytest
import orjson
class TestUltraJSON:
def test_doubleLongIssue(self):
sut = {"a": -4342969734183514}
encoded = orjson.dumps(sut)
decoded = orjson.loads(encoded)
assert sut == decoded
encoded = orjson.dumps(sut)
decoded = orjson.loads(encoded)
assert sut == decoded
def test_doubleLongDecimalIssue(self):
sut = {"a": -12345678901234.56789012}
encoded = orjson.dumps(sut)
decoded = orjson.loads(encoded)
assert sut == decoded
encoded = orjson.dumps(sut)
decoded = orjson.loads(encoded)
assert sut == decoded
def test_encodeDecodeLongDecimal(self):
sut = {"a": -528656961.4399388}
encoded = orjson.dumps(sut)
orjson.loads(encoded)
def test_decimalDecodeTest(self):
sut = {"a": 4.56}
encoded = orjson.dumps(sut)
decoded = orjson.loads(encoded)
pytest.approx(sut["a"], decoded["a"])
def test_encodeDictWithUnicodeKeys(self):
val = {
"key1": "value1",
"key1": "value1",
"key1": "value1",
"key1": "value1",
"key1": "value1",
"key1": "value1",
}
orjson.dumps(val)
val = {
"بن": "value1",
"بن": "value1",
"بن": "value1",
"بن": "value1",
"بن": "value1",
"بن": "value1",
"بن": "value1",
}
orjson.dumps(val)
def test_encodeArrayOfNestedArrays(self):
val = [[[[]]]] * 20 # type: ignore
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert val == orjson.loads(output)
def test_encodeArrayOfDoubles(self):
val = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert val == orjson.loads(output)
def test_encodeStringConversion2(self):
val = "A string \\ / \b \f \n \r \t"
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == b'"A string \\\\ / \\b \\f \\n \\r \\t"'
assert val == orjson.loads(output)
def test_decodeUnicodeConversion(self):
pass
def test_encodeUnicodeConversion1(self):
val = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def test_encodeControlEscaping(self):
val = "\x19"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert val == dec
assert enc == orjson.dumps(val)
def test_encodeUnicodeConversion2(self):
val = "\xe6\x97\xa5\xd1\x88"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def test_encodeUnicodeSurrogatePair(self):
val = "\xf0\x90\x8d\x86"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def test_encodeUnicode4BytesUTF8(self):
val = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def test_encodeUnicode4BytesUTF8Highest(self):
val = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def testEncodeUnicodeBMP(self):
s = "\U0001f42e\U0001f42e\U0001f42d\U0001f42d" # 🐮🐮🐭🐭
orjson.dumps(s)
json.dumps(s)
assert json.loads(json.dumps(s)) == s
assert orjson.loads(orjson.dumps(s)) == s
def testEncodeSymbols(self):
s = "\u273f\u2661\u273f" # ✿♡✿
encoded = orjson.dumps(s)
encoded_json = json.dumps(s)
decoded = orjson.loads(encoded)
assert s == decoded
encoded = orjson.dumps(s)
# json outputs an unicode object
encoded_json = json.dumps(s, ensure_ascii=False)
assert encoded == encoded_json.encode("utf-8")
decoded = orjson.loads(encoded)
assert s == decoded
def test_encodeArrayInArray(self):
val = [[[[]]]] # type: ignore
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeIntConversion(self):
val = 31337
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeIntNegConversion(self):
val = -31337
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeLongNegConversion(self):
val = -9223372036854775808
output = orjson.dumps(val)
orjson.loads(output)
orjson.loads(output)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeListConversion(self):
val = [1, 2, 3, 4]
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert val == orjson.loads(output)
def test_encodeDictConversion(self):
val = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert val == orjson.loads(output)
assert val == orjson.loads(output)
def test_encodeNoneConversion(self):
val = None
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeTrueConversion(self):
val = True
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeFalseConversion(self):
val = False
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
def test_encodeToUTF8(self):
val = b"\xe6\x97\xa5\xd1\x88".decode("utf-8")
enc = orjson.dumps(val)
dec = orjson.loads(enc)
assert enc == orjson.dumps(val)
assert dec == orjson.loads(enc)
def test_decodeFromUnicode(self):
val = '{"obj": 31337}'
dec1 = orjson.loads(val)
dec2 = orjson.loads(str(val))
assert dec1 == dec2
def test_decodeJibberish(self):
val = "fdsa sda v9sa fdsa"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenArrayStart(self):
val = "["
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenObjectStart(self):
val = "{"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenArrayEnd(self):
val = "]"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenObjectEnd(self):
val = "}"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeObjectDepthTooBig(self):
val = "{" * (1024 * 1024)
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeStringUnterminated(self):
val = '"TESTING'
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeStringUntermEscapeSequence(self):
val = '"TESTING\\"'
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeStringBadEscape(self):
val = '"TESTING\\"'
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeTrueBroken(self):
val = "tru"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeFalseBroken(self):
val = "fa"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeNullBroken(self):
val = "n"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenDictKeyTypeLeakTest(self):
val = '{{1337:""}}'
for _ in range(1000):
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenDictLeakTest(self):
val = '{{"key":"}'
for _ in range(1000):
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeBrokenListLeakTest(self):
val = "[[[true"
for _ in range(1000):
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeDictWithNoKey(self):
val = "{{{{31337}}}}"
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeDictWithNoColonOrValue(self):
val = '{{{{"key"}}}}'
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeDictWithNoValue(self):
val = '{{{{"key":}}}}'
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_decodeNumericIntPos(self):
val = "31337"
assert 31337 == orjson.loads(val)
def test_decodeNumericIntNeg(self):
assert -31337 == orjson.loads("-31337")
def test_encodeNullCharacter(self):
val = "31337 \x00 1337"
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
val = "\x00"
output = orjson.dumps(val)
assert val == orjson.loads(output)
assert output == orjson.dumps(val)
assert val == orjson.loads(output)
assert b'" \\u0000\\r\\n "' == orjson.dumps(" \u0000\r\n ")
def test_decodeNullCharacter(self):
val = '"31337 \\u0000 31337"'
assert orjson.loads(val) == json.loads(val)
def test_decodeEscape(self):
base = "\u00e5".encode()
quote = b'"'
val = quote + base + quote
assert json.loads(val) == orjson.loads(val)
def test_decodeBigEscape(self):
for _ in range(10):
base = "\u00e5".encode()
quote = b'"'
val = quote + (base * 1024 * 1024 * 2) + quote
assert json.loads(val) == orjson.loads(val)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_non_str_keys.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import dataclasses
import datetime
import uuid
import pytest
import orjson
try:
import pytz
except ImportError:
pytz = None # type: ignore
try:
import numpy
except ImportError:
numpy = None # type: ignore
class SubStr(str):
pass
class TestNonStrKeyTests:
def test_dict_keys_duplicate(self):
"""
OPT_NON_STR_KEYS serializes duplicate keys
"""
assert (
orjson.dumps({"1": True, 1: False}, option=orjson.OPT_NON_STR_KEYS)
== b'{"1":true,"1":false}'
)
def test_dict_keys_int(self):
assert (
orjson.dumps({1: True, 2: False}, option=orjson.OPT_NON_STR_KEYS)
== b'{"1":true,"2":false}'
)
def test_dict_keys_substr(self):
assert (
orjson.dumps({SubStr("aaa"): True}, option=orjson.OPT_NON_STR_KEYS)
== b'{"aaa":true}'
)
def test_dict_keys_substr_passthrough(self):
"""
OPT_PASSTHROUGH_SUBCLASS does not affect OPT_NON_STR_KEYS
"""
assert (
orjson.dumps(
{SubStr("aaa"): True},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_PASSTHROUGH_SUBCLASS,
)
== b'{"aaa":true}'
)
def test_dict_keys_substr_invalid(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps({SubStr("\ud800"): True}, option=orjson.OPT_NON_STR_KEYS)
def test_dict_keys_strict(self):
"""
OPT_NON_STR_KEYS does not respect OPT_STRICT_INTEGER
"""
assert (
orjson.dumps(
{9223372036854775807: True},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER,
)
== b'{"9223372036854775807":true}'
)
def test_dict_keys_int_range_valid_i64(self):
"""
OPT_NON_STR_KEYS has a i64 range for int, valid
"""
assert (
orjson.dumps(
{9223372036854775807: True},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER,
)
== b'{"9223372036854775807":true}'
)
assert (
orjson.dumps(
{-9223372036854775807: True},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER,
)
== b'{"-9223372036854775807":true}'
)
assert (
orjson.dumps(
{9223372036854775809: True},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER,
)
== b'{"9223372036854775809":true}'
)
def test_dict_keys_int_range_valid_u64(self):
"""
OPT_NON_STR_KEYS has a u64 range for int, valid
"""
assert (
orjson.dumps(
{0: True},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER,
)
== b'{"0":true}'
)
assert (
orjson.dumps(
{18446744073709551615: True},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER,
)
== b'{"18446744073709551615":true}'
)
def test_dict_keys_int_range_invalid(self):
"""
OPT_NON_STR_KEYS has a range of i64::MIN to u64::MAX
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps({-9223372036854775809: True}, option=orjson.OPT_NON_STR_KEYS)
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps({18446744073709551616: True}, option=orjson.OPT_NON_STR_KEYS)
def test_dict_keys_float(self):
assert (
orjson.dumps({1.1: True, 2.2: False}, option=orjson.OPT_NON_STR_KEYS)
== b'{"1.1":true,"2.2":false}'
)
def test_dict_keys_inf(self):
assert (
orjson.dumps({float("Infinity"): True}, option=orjson.OPT_NON_STR_KEYS)
== b'{"null":true}'
)
assert (
orjson.dumps({float("-Infinity"): True}, option=orjson.OPT_NON_STR_KEYS)
== b'{"null":true}'
)
def test_dict_keys_nan(self):
assert (
orjson.dumps({float("NaN"): True}, option=orjson.OPT_NON_STR_KEYS)
== b'{"null":true}'
)
def test_dict_keys_bool(self):
assert (
orjson.dumps({True: True, False: False}, option=orjson.OPT_NON_STR_KEYS)
== b'{"true":true,"false":false}'
)
def test_dict_keys_datetime(self):
assert (
orjson.dumps(
{datetime.datetime(2000, 1, 1, 2, 3, 4, 123): True},
option=orjson.OPT_NON_STR_KEYS,
)
== b'{"2000-01-01T02:03:04.000123":true}'
)
def test_dict_keys_datetime_opt(self):
assert (
orjson.dumps(
{datetime.datetime(2000, 1, 1, 2, 3, 4, 123): True},
option=orjson.OPT_NON_STR_KEYS
| orjson.OPT_OMIT_MICROSECONDS
| orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z,
)
== b'{"2000-01-01T02:03:04Z":true}'
)
def test_dict_keys_datetime_passthrough(self):
"""
OPT_PASSTHROUGH_DATETIME does not affect OPT_NON_STR_KEYS
"""
assert (
orjson.dumps(
{datetime.datetime(2000, 1, 1, 2, 3, 4, 123): True},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_PASSTHROUGH_DATETIME,
)
== b'{"2000-01-01T02:03:04.000123":true}'
)
def test_dict_keys_uuid(self):
"""
OPT_NON_STR_KEYS always serializes UUID as keys
"""
assert (
orjson.dumps(
{uuid.UUID("7202d115-7ff3-4c81-a7c1-2a1f067b1ece"): True},
option=orjson.OPT_NON_STR_KEYS,
)
== b'{"7202d115-7ff3-4c81-a7c1-2a1f067b1ece":true}'
)
def test_dict_keys_date(self):
assert (
orjson.dumps(
{datetime.date(1970, 1, 1): True}, option=orjson.OPT_NON_STR_KEYS
)
== b'{"1970-01-01":true}'
)
def test_dict_keys_time(self):
assert (
orjson.dumps(
{datetime.time(12, 15, 59, 111): True},
option=orjson.OPT_NON_STR_KEYS,
)
== b'{"12:15:59.000111":true}'
)
def test_dict_non_str_and_sort_keys(self):
assert (
orjson.dumps(
{
"other": 1,
datetime.date(1970, 1, 5): 2,
datetime.date(1970, 1, 3): 3,
},
option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS,
)
== b'{"1970-01-03":3,"1970-01-05":2,"other":1}'
)
@pytest.mark.skipif(pytz is None, reason="pytz optional")
def test_dict_keys_time_err(self):
"""
OPT_NON_STR_KEYS propagates errors in types
"""
val = datetime.time(12, 15, 59, 111, tzinfo=pytz.timezone("Asia/Shanghai"))
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps({val: True}, option=orjson.OPT_NON_STR_KEYS)
def test_dict_keys_str(self):
assert (
orjson.dumps({"1": True}, option=orjson.OPT_NON_STR_KEYS) == b'{"1":true}'
)
def test_dict_keys_type(self):
class Obj:
a: str
val = Obj()
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps({val: True}, option=orjson.OPT_NON_STR_KEYS)
@pytest.mark.skipif(numpy is None, reason="numpy is not installed")
def test_dict_keys_array(self):
with pytest.raises(TypeError):
{numpy.array([1, 2]): True}
def test_dict_keys_dataclass(self):
@dataclasses.dataclass
class Dataclass:
a: str
with pytest.raises(TypeError):
{Dataclass("a"): True}
def test_dict_keys_dataclass_hash(self):
@dataclasses.dataclass
class Dataclass:
a: str
def __hash__(self):
return 1
obj = {Dataclass("a"): True}
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj, option=orjson.OPT_NON_STR_KEYS)
def test_dict_keys_list(self):
with pytest.raises(TypeError):
{[]: True}
def test_dict_keys_dict(self):
with pytest.raises(TypeError):
{{}: True}
def test_dict_keys_tuple(self):
obj = {(): True}
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj, option=orjson.OPT_NON_STR_KEYS)
def test_dict_keys_unknown(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps({frozenset(): True}, option=orjson.OPT_NON_STR_KEYS)
def test_dict_keys_no_str_call(self):
class Obj:
a: str
def __str__(self):
return "Obj"
val = Obj()
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps({val: True}, option=orjson.OPT_NON_STR_KEYS)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_roundtrip.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import orjson
from .util import read_fixture_str
class TestJsonChecker:
def _run_roundtrip_json(self, filename):
data = read_fixture_str(filename, "roundtrip")
assert orjson.dumps(orjson.loads(data)) == data.encode("utf-8")
def test_roundtrip001(self):
"""
roundtrip001.json
"""
self._run_roundtrip_json("roundtrip01.json")
def test_roundtrip002(self):
"""
roundtrip002.json
"""
self._run_roundtrip_json("roundtrip02.json")
def test_roundtrip003(self):
"""
roundtrip003.json
"""
self._run_roundtrip_json("roundtrip03.json")
def test_roundtrip004(self):
"""
roundtrip004.json
"""
self._run_roundtrip_json("roundtrip04.json")
def test_roundtrip005(self):
"""
roundtrip005.json
"""
self._run_roundtrip_json("roundtrip05.json")
def test_roundtrip006(self):
"""
roundtrip006.json
"""
self._run_roundtrip_json("roundtrip06.json")
def test_roundtrip007(self):
"""
roundtrip007.json
"""
self._run_roundtrip_json("roundtrip07.json")
def test_roundtrip008(self):
"""
roundtrip008.json
"""
self._run_roundtrip_json("roundtrip08.json")
def test_roundtrip009(self):
"""
roundtrip009.json
"""
self._run_roundtrip_json("roundtrip09.json")
def test_roundtrip010(self):
"""
roundtrip010.json
"""
self._run_roundtrip_json("roundtrip10.json")
def test_roundtrip011(self):
"""
roundtrip011.json
"""
self._run_roundtrip_json("roundtrip11.json")
def test_roundtrip012(self):
"""
roundtrip012.json
"""
self._run_roundtrip_json("roundtrip12.json")
def test_roundtrip013(self):
"""
roundtrip013.json
"""
self._run_roundtrip_json("roundtrip13.json")
def test_roundtrip014(self):
"""
roundtrip014.json
"""
self._run_roundtrip_json("roundtrip14.json")
def test_roundtrip015(self):
"""
roundtrip015.json
"""
self._run_roundtrip_json("roundtrip15.json")
def test_roundtrip016(self):
"""
roundtrip016.json
"""
self._run_roundtrip_json("roundtrip16.json")
def test_roundtrip017(self):
"""
roundtrip017.json
"""
self._run_roundtrip_json("roundtrip17.json")
def test_roundtrip018(self):
"""
roundtrip018.json
"""
self._run_roundtrip_json("roundtrip18.json")
def test_roundtrip019(self):
"""
roundtrip019.json
"""
self._run_roundtrip_json("roundtrip19.json")
def test_roundtrip020(self):
"""
roundtrip020.json
"""
self._run_roundtrip_json("roundtrip20.json")
def test_roundtrip021(self):
"""
roundtrip021.json
"""
self._run_roundtrip_json("roundtrip21.json")
def test_roundtrip022(self):
"""
roundtrip022.json
"""
self._run_roundtrip_json("roundtrip22.json")
def test_roundtrip023(self):
"""
roundtrip023.json
"""
self._run_roundtrip_json("roundtrip23.json")
def test_roundtrip024(self):
"""
roundtrip024.json
"""
self._run_roundtrip_json("roundtrip24.json")
def test_roundtrip025(self):
"""
roundtrip025.json
"""
self._run_roundtrip_json("roundtrip25.json")
def test_roundtrip026(self):
"""
roundtrip026.json
"""
self._run_roundtrip_json("roundtrip26.json")
def test_roundtrip027(self):
"""
roundtrip027.json
"""
self._run_roundtrip_json("roundtrip27.json")
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_default.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import datetime
import sys
import uuid
import pytest
import orjson
try:
import numpy
except ImportError:
numpy = None # type: ignore
class Custom:
def __init__(self):
self.name = uuid.uuid4().hex
def __str__(self):
return f"{self.__class__.__name__}({self.name})"
class Recursive:
def __init__(self, cur):
self.cur = cur
def default_recursive(obj):
if obj.cur != 0:
obj.cur -= 1
return obj
return obj.cur
def default_raises(obj):
raise TypeError
class TestType:
def test_default_not_callable(self):
"""
dumps() default not callable
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(Custom(), default=NotImplementedError)
ran = False
try:
orjson.dumps(Custom(), default=NotImplementedError)
except Exception as err:
assert isinstance(err, orjson.JSONEncodeError)
assert str(err) == "default serializer exceeds recursion limit"
ran = True
assert ran
def test_default_func(self):
"""
dumps() default function
"""
ref = Custom()
def default(obj):
return str(obj)
assert orjson.dumps(ref, default=default) == b'"%s"' % str(ref).encode("utf-8")
def test_default_func_none(self):
"""
dumps() default function None ok
"""
assert orjson.dumps(Custom(), default=lambda x: None) == b"null"
def test_default_func_empty(self):
"""
dumps() default function no explicit return
"""
ref = Custom()
def default(obj):
if isinstance(obj, set):
return list(obj)
assert orjson.dumps(ref, default=default) == b"null"
assert orjson.dumps({ref}, default=default) == b"[null]"
def test_default_func_exc(self):
"""
dumps() default function raises exception
"""
def default(obj):
raise NotImplementedError
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(Custom(), default=default)
ran = False
try:
orjson.dumps(Custom(), default=default)
except Exception as err:
assert isinstance(err, orjson.JSONEncodeError)
assert str(err) == "Type is not JSON serializable: Custom"
ran = True
assert ran
def test_default_exception_type(self):
"""
dumps() TypeError in default() raises orjson.JSONEncodeError
"""
ref = Custom()
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(ref, default=default_raises)
def test_default_vectorcall_str(self):
"""
dumps() default function vectorcall str
"""
class SubStr(str):
pass
obj = SubStr("saasa")
ref = b'"%s"' % str(obj).encode("utf-8")
assert (
orjson.dumps(obj, option=orjson.OPT_PASSTHROUGH_SUBCLASS, default=str)
== ref
)
def test_default_vectorcall_list(self):
"""
dumps() default function vectorcall list
"""
obj = {1, 2}
ref = b"[1,2]"
assert orjson.dumps(obj, default=list) == ref
def test_default_func_nested_str(self):
"""
dumps() default function nested str
"""
ref = Custom()
def default(obj):
return str(obj)
assert orjson.dumps({"a": ref}, default=default) == b'{"a":"%s"}' % str(
ref
).encode("utf-8")
def test_default_func_list(self):
"""
dumps() default function nested list
"""
ref = Custom()
def default(obj):
if isinstance(obj, Custom):
return [str(obj)]
assert orjson.dumps({"a": ref}, default=default) == b'{"a":["%s"]}' % str(
ref
).encode("utf-8")
def test_default_func_nested_list(self):
"""
dumps() default function list
"""
ref = Custom()
def default(obj):
return str(obj)
assert orjson.dumps([ref] * 100, default=default) == b"[%s]" % b",".join(
b'"%s"' % str(ref).encode("utf-8") for _ in range(100)
)
def test_default_func_bytes(self):
"""
dumps() default function errors on non-str
"""
ref = Custom()
def default(obj):
return bytes(obj)
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(ref, default=default)
ran = False
try:
orjson.dumps(ref, default=default)
except Exception as err:
assert isinstance(err, orjson.JSONEncodeError)
assert str(err) == "Type is not JSON serializable: Custom"
ran = True
assert ran
def test_default_func_invalid_str(self):
"""
dumps() default function errors on invalid str
"""
ref = Custom()
def default(obj):
return "\ud800"
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(ref, default=default)
def test_default_lambda_ok(self):
"""
dumps() default lambda
"""
ref = Custom()
assert orjson.dumps(ref, default=lambda x: str(x)) == b'"%s"' % str(ref).encode(
"utf-8"
)
def test_default_callable_ok(self):
"""
dumps() default callable
"""
class CustomSerializer:
def __init__(self):
self._cache = {}
def __call__(self, obj):
if obj not in self._cache:
self._cache[obj] = str(obj)
return self._cache[obj]
ref_obj = Custom()
ref_bytes = b'"%s"' % str(ref_obj).encode("utf-8")
for obj in [ref_obj] * 100:
assert orjson.dumps(obj, default=CustomSerializer()) == ref_bytes
def test_default_recursion(self):
"""
dumps() default recursion limit
"""
assert orjson.dumps(Recursive(254), default=default_recursive) == b"0"
def test_default_recursion_reset(self):
"""
dumps() default recursion limit reset
"""
assert (
orjson.dumps(
[Recursive(254), {"a": "b"}, Recursive(254), Recursive(254)],
default=default_recursive,
)
== b'[0,{"a":"b"},0,0]'
)
def test_default_recursion_infinite(self):
"""
dumps() default infinite recursion
"""
ref = Custom()
def default(obj):
return obj
refcount = sys.getrefcount(ref)
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(ref, default=default)
assert sys.getrefcount(ref) == refcount
def test_reference_cleanup_default_custom_pass(self):
ref = Custom()
def default(obj):
if isinstance(ref, Custom):
return str(ref)
raise TypeError
refcount = sys.getrefcount(ref)
orjson.dumps(ref, default=default)
assert sys.getrefcount(ref) == refcount
def test_reference_cleanup_default_custom_error(self):
"""
references to encoded objects are cleaned up
"""
ref = Custom()
def default(obj):
raise TypeError
refcount = sys.getrefcount(ref)
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(ref, default=default)
assert sys.getrefcount(ref) == refcount
def test_reference_cleanup_default_subclass(self):
ref = datetime.datetime(1970, 1, 1, 0, 0, 0)
def default(obj):
if isinstance(ref, datetime.datetime):
return repr(ref)
raise TypeError
refcount = sys.getrefcount(ref)
orjson.dumps(ref, option=orjson.OPT_PASSTHROUGH_DATETIME, default=default)
assert sys.getrefcount(ref) == refcount
def test_reference_cleanup_default_subclass_lambda(self):
ref = uuid.uuid4()
refcount = sys.getrefcount(ref)
orjson.dumps(
ref, option=orjson.OPT_PASSTHROUGH_DATETIME, default=lambda val: str(val)
)
assert sys.getrefcount(ref) == refcount
@pytest.mark.skipif(numpy is None, reason="numpy is not installed")
def test_default_numpy(self):
ref = numpy.array([""] * 100)
refcount = sys.getrefcount(ref)
orjson.dumps(
ref, option=orjson.OPT_SERIALIZE_NUMPY, default=lambda val: val.tolist()
)
assert sys.getrefcount(ref) == refcount
def test_default_set(self):
"""
dumps() default function with set
"""
def default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
assert orjson.dumps({1, 2}, default=default) == b"[1,2]"
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_subclass.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import json
import pytest
import orjson
class SubStr(str):
pass
class SubInt(int):
pass
class SubDict(dict):
pass
class SubList(list):
pass
class SubFloat(float):
pass
class SubTuple(tuple):
pass
class TestSubclass:
def test_subclass_str(self):
assert orjson.dumps(SubStr("zxc")) == b'"zxc"'
def test_subclass_str_invalid(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(SubStr("\ud800"))
def test_subclass_int(self):
assert orjson.dumps(SubInt(1)) == b"1"
def test_subclass_int_64(self):
for val in (9223372036854775807, -9223372036854775807):
assert orjson.dumps(SubInt(val)) == str(val).encode("utf-8")
def test_subclass_int_53(self):
for val in (9007199254740992, -9007199254740992):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(SubInt(val), option=orjson.OPT_STRICT_INTEGER)
def test_subclass_dict(self):
assert orjson.dumps(SubDict({"a": "b"})) == b'{"a":"b"}'
def test_subclass_list(self):
assert orjson.dumps(SubList(["a", "b"])) == b'["a","b"]'
ref = [True] * 512
assert orjson.loads(orjson.dumps(SubList(ref))) == ref
def test_subclass_float(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(SubFloat(1.1))
assert json.dumps(SubFloat(1.1)) == "1.1"
def test_subclass_tuple(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(SubTuple((1, 2)))
assert json.dumps(SubTuple((1, 2))) == "[1, 2]"
def test_namedtuple(self):
Point = collections.namedtuple("Point", ["x", "y"])
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(Point(1, 2))
def test_subclass_circular_dict(self):
obj = SubDict({})
obj["obj"] = obj
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj)
def test_subclass_circular_list(self):
obj = SubList([])
obj.append(obj)
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj)
def test_subclass_circular_nested(self):
obj = SubDict({})
obj["list"] = SubList([{"obj": obj}])
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj)
class TestSubclassPassthrough:
def test_subclass_str(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(SubStr("zxc"), option=orjson.OPT_PASSTHROUGH_SUBCLASS)
def test_subclass_int(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(SubInt(1), option=orjson.OPT_PASSTHROUGH_SUBCLASS)
def test_subclass_dict(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(SubDict({"a": "b"}), option=orjson.OPT_PASSTHROUGH_SUBCLASS)
def test_subclass_list(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(SubList(["a", "b"]), option=orjson.OPT_PASSTHROUGH_SUBCLASS)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_type.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import io
import sys
import pytest
try:
import xxhash
except ImportError:
xxhash = None
import orjson
class TestType:
def test_fragment(self):
"""
orjson.JSONDecodeError on fragments
"""
for val in ("n", "{", "[", "t"):
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_invalid(self):
"""
orjson.JSONDecodeError on invalid
"""
for val in ('{"age", 44}', "[31337,]", "[,31337]", "[]]", "[,]"):
pytest.raises(orjson.JSONDecodeError, orjson.loads, val)
def test_str(self):
"""
str
"""
for obj, ref in (("blah", b'"blah"'), ("東京", b'"\xe6\x9d\xb1\xe4\xba\xac"')):
assert orjson.dumps(obj) == ref
assert orjson.loads(ref) == obj
def test_str_latin1(self):
"""
str latin1
"""
assert orjson.loads(orjson.dumps("üýþÿ")) == "üýþÿ"
def test_str_long(self):
"""
str long
"""
for obj in ("aaaa" * 1024, "üýþÿ" * 1024, "好" * 1024, "�" * 1024):
assert orjson.loads(orjson.dumps(obj)) == obj
def test_str_2mib(self):
ref = '🐈🐈🐈🐈🐈"üýa0s9999🐈🐈🐈🐈🐈9\0999\\9999' * 1024 * 50
assert orjson.loads(orjson.dumps(ref)) == ref
def test_str_very_long(self):
"""
str long enough to trigger overflow in bytecount
"""
for obj in ("aaaa" * 20000, "üýþÿ" * 20000, "好" * 20000, "�" * 20000):
assert orjson.loads(orjson.dumps(obj)) == obj
def test_str_replacement(self):
"""
str roundtrip �
"""
assert orjson.dumps("�") == b'"\xef\xbf\xbd"'
assert orjson.loads(b'"\xef\xbf\xbd"') == "�"
def test_str_trailing_4_byte(self):
ref = "うぞ〜😏🙌"
assert orjson.loads(orjson.dumps(ref)) == ref
def test_str_ascii_control(self):
"""
worst case format_escaped_str_with_escapes() allocation
"""
ref = "\x01\x1f" * 1024 * 16
assert orjson.loads(orjson.dumps(ref)) == ref
assert orjson.loads(orjson.dumps(ref, option=orjson.OPT_INDENT_2)) == ref
def test_str_escape_quote_0(self):
assert orjson.dumps('"aaaaaaabb') == b'"\\"aaaaaaabb"'
def test_str_escape_quote_1(self):
assert orjson.dumps('a"aaaaaabb') == b'"a\\"aaaaaabb"'
def test_str_escape_quote_2(self):
assert orjson.dumps('aa"aaaaabb') == b'"aa\\"aaaaabb"'
def test_str_escape_quote_3(self):
assert orjson.dumps('aaa"aaaabb') == b'"aaa\\"aaaabb"'
def test_str_escape_quote_4(self):
assert orjson.dumps('aaaa"aaabb') == b'"aaaa\\"aaabb"'
def test_str_escape_quote_5(self):
assert orjson.dumps('aaaaa"aabb') == b'"aaaaa\\"aabb"'
def test_str_escape_quote_6(self):
assert orjson.dumps('aaaaaa"abb') == b'"aaaaaa\\"abb"'
def test_str_escape_quote_7(self):
assert orjson.dumps('aaaaaaa"bb') == b'"aaaaaaa\\"bb"'
def test_str_escape_quote_8(self):
assert orjson.dumps('aaaaaaaab"') == b'"aaaaaaaab\\""'
def test_str_escape_quote_multi(self):
assert (
orjson.dumps('aa"aaaaabbbbbbbbbbbbbbbbbbbb"bb')
== b'"aa\\"aaaaabbbbbbbbbbbbbbbbbbbb\\"bb"'
)
def test_str_escape_quote_buffer(self):
orjson.dumps(['"' * 4096] * 1024)
def test_str_escape_backslash_0(self):
assert orjson.dumps("\\aaaaaaabb") == b'"\\\\aaaaaaabb"'
def test_str_escape_backslash_1(self):
assert orjson.dumps("a\\aaaaaabb") == b'"a\\\\aaaaaabb"'
def test_str_escape_backslash_2(self):
assert orjson.dumps("aa\\aaaaabb") == b'"aa\\\\aaaaabb"'
def test_str_escape_backslash_3(self):
assert orjson.dumps("aaa\\aaaabb") == b'"aaa\\\\aaaabb"'
def test_str_escape_backslash_4(self):
assert orjson.dumps("aaaa\\aaabb") == b'"aaaa\\\\aaabb"'
def test_str_escape_backslash_5(self):
assert orjson.dumps("aaaaa\\aabb") == b'"aaaaa\\\\aabb"'
def test_str_escape_backslash_6(self):
assert orjson.dumps("aaaaaa\\abb") == b'"aaaaaa\\\\abb"'
def test_str_escape_backslash_7(self):
assert orjson.dumps("aaaaaaa\\bb") == b'"aaaaaaa\\\\bb"'
def test_str_escape_backslash_8(self):
assert orjson.dumps("aaaaaaaab\\") == b'"aaaaaaaab\\\\"'
def test_str_escape_backslash_multi(self):
assert (
orjson.dumps("aa\\aaaaabbbbbbbbbbbbbbbbbbbb\\bb")
== b'"aa\\\\aaaaabbbbbbbbbbbbbbbbbbbb\\\\bb"'
)
def test_str_escape_backslash_buffer(self):
orjson.dumps(["\\" * 4096] * 1024)
def test_str_escape_x32_0(self):
assert orjson.dumps("\taaaaaaabb") == b'"\\taaaaaaabb"'
def test_str_escape_x32_1(self):
assert orjson.dumps("a\taaaaaabb") == b'"a\\taaaaaabb"'
def test_str_escape_x32_2(self):
assert orjson.dumps("aa\taaaaabb") == b'"aa\\taaaaabb"'
def test_str_escape_x32_3(self):
assert orjson.dumps("aaa\taaaabb") == b'"aaa\\taaaabb"'
def test_str_escape_x32_4(self):
assert orjson.dumps("aaaa\taaabb") == b'"aaaa\\taaabb"'
def test_str_escape_x32_5(self):
assert orjson.dumps("aaaaa\taabb") == b'"aaaaa\\taabb"'
def test_str_escape_x32_6(self):
assert orjson.dumps("aaaaaa\tabb") == b'"aaaaaa\\tabb"'
def test_str_escape_x32_7(self):
assert orjson.dumps("aaaaaaa\tbb") == b'"aaaaaaa\\tbb"'
def test_str_escape_x32_8(self):
assert orjson.dumps("aaaaaaaab\t") == b'"aaaaaaaab\\t"'
def test_str_escape_x32_multi(self):
assert (
orjson.dumps("aa\taaaaabbbbbbbbbbbbbbbbbbbb\tbb")
== b'"aa\\taaaaabbbbbbbbbbbbbbbbbbbb\\tbb"'
)
def test_str_escape_x32_buffer(self):
orjson.dumps(["\t" * 4096] * 1024)
def test_str_emoji(self):
ref = "®️"
assert orjson.loads(orjson.dumps(ref)) == ref
def test_str_emoji_escape(self):
ref = '/"®️/"'
assert orjson.loads(orjson.dumps(ref)) == ref
def test_very_long_list(self):
orjson.dumps([[]] * 1024 * 16)
def test_very_long_list_pretty(self):
orjson.dumps([[]] * 1024 * 16, option=orjson.OPT_INDENT_2)
def test_very_long_dict(self):
orjson.dumps([{}] * 1024 * 16)
def test_very_long_dict_pretty(self):
orjson.dumps([{}] * 1024 * 16, option=orjson.OPT_INDENT_2)
def test_very_long_str_empty(self):
orjson.dumps([""] * 1024 * 16)
def test_very_long_str_empty_pretty(self):
orjson.dumps([""] * 1024 * 16, option=orjson.OPT_INDENT_2)
def test_very_long_str_not_empty(self):
orjson.dumps(["a"] * 1024 * 16)
def test_very_long_str_not_empty_pretty(self):
orjson.dumps(["a"] * 1024 * 16, option=orjson.OPT_INDENT_2)
def test_very_long_bool(self):
orjson.dumps([True] * 1024 * 16)
def test_very_long_bool_pretty(self):
orjson.dumps([True] * 1024 * 16, option=orjson.OPT_INDENT_2)
def test_very_long_int(self):
orjson.dumps([(2**64) - 1] * 1024 * 16)
def test_very_long_int_pretty(self):
orjson.dumps([(2**64) - 1] * 1024 * 16, option=orjson.OPT_INDENT_2)
def test_very_long_float(self):
orjson.dumps([sys.float_info.max] * 1024 * 16)
def test_very_long_float_pretty(self):
orjson.dumps([sys.float_info.max] * 1024 * 16, option=orjson.OPT_INDENT_2)
def test_str_surrogates_loads(self):
"""
str unicode surrogates loads()
"""
pytest.raises(orjson.JSONDecodeError, orjson.loads, '"\ud800"')
pytest.raises(orjson.JSONDecodeError, orjson.loads, '"\ud83d\ude80"')
pytest.raises(orjson.JSONDecodeError, orjson.loads, '"\udcff"')
pytest.raises(
orjson.JSONDecodeError, orjson.loads, b'"\xed\xa0\xbd\xed\xba\x80"'
) # \ud83d\ude80
def test_str_surrogates_dumps(self):
"""
str unicode surrogates dumps()
"""
pytest.raises(orjson.JSONEncodeError, orjson.dumps, "\ud800")
pytest.raises(orjson.JSONEncodeError, orjson.dumps, "\ud83d\ude80")
pytest.raises(orjson.JSONEncodeError, orjson.dumps, "\udcff")
pytest.raises(orjson.JSONEncodeError, orjson.dumps, {"\ud83d\ude80": None})
pytest.raises(
orjson.JSONEncodeError, orjson.dumps, b"\xed\xa0\xbd\xed\xba\x80"
) # \ud83d\ude80
@pytest.mark.skipif(
xxhash is None, reason="xxhash install broken on win, python3.9, Azure"
)
def test_str_ascii(self):
"""
str is ASCII but not compact
"""
digest = xxhash.xxh32_hexdigest("12345")
for _ in range(2):
assert orjson.dumps(digest) == b'"b30d56b4"'
def test_bytes_dumps(self):
"""
bytes dumps not supported
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps([b"a"])
def test_bytes_loads(self):
"""
bytes loads
"""
assert orjson.loads(b"[]") == []
def test_bytearray_loads(self):
"""
bytearray loads
"""
arr = bytearray()
arr.extend(b"[]")
assert orjson.loads(arr) == []
def test_memoryview_loads(self):
"""
memoryview loads
"""
arr = bytearray()
arr.extend(b"[]")
assert orjson.loads(memoryview(arr)) == []
    def test_bytesio_loads(self):
        """
        io.BytesIO getbuffer() loads
        """
        # getbuffer() returns a memoryview over the BytesIO's internal buffer,
        # exercising the buffer-protocol input path of loads().
        arr = io.BytesIO(b"[]")
        assert orjson.loads(arr.getbuffer()) == []
def test_bool(self):
"""
bool
"""
for obj, ref in ((True, "true"), (False, "false")):
assert orjson.dumps(obj) == ref.encode("utf-8")
assert orjson.loads(ref) == obj
def test_bool_true_array(self):
"""
bool true array
"""
obj = [True] * 256
ref = ("[" + ("true," * 255) + "true]").encode("utf-8")
assert orjson.dumps(obj) == ref
assert orjson.loads(ref) == obj
def test_bool_false_array(self):
"""
bool false array
"""
obj = [False] * 256
ref = ("[" + ("false," * 255) + "false]").encode("utf-8")
assert orjson.dumps(obj) == ref
assert orjson.loads(ref) == obj
def test_none(self):
"""
null
"""
obj = None
ref = "null"
assert orjson.dumps(obj) == ref.encode("utf-8")
assert orjson.loads(ref) == obj
def test_int(self):
"""
int compact and non-compact
"""
obj = [-5000, -1000, -10, -5, -2, -1, 0, 1, 2, 5, 10, 1000, 50000]
ref = b"[-5000,-1000,-10,-5,-2,-1,0,1,2,5,10,1000,50000]"
assert orjson.dumps(obj) == ref
assert orjson.loads(ref) == obj
def test_null_array(self):
"""
null array
"""
obj = [None] * 256
ref = ("[" + ("null," * 255) + "null]").encode("utf-8")
assert orjson.dumps(obj) == ref
assert orjson.loads(ref) == obj
def test_nan_dumps(self):
"""
NaN serializes to null
"""
assert orjson.dumps(float("NaN")) == b"null"
def test_nan_loads(self):
"""
NaN is not valid JSON
"""
with pytest.raises(orjson.JSONDecodeError):
orjson.loads("[NaN]")
with pytest.raises(orjson.JSONDecodeError):
orjson.loads("[nan]")
def test_infinity_dumps(self):
"""
Infinity serializes to null
"""
assert orjson.dumps(float("Infinity")) == b"null"
def test_infinity_loads(self):
"""
Infinity, -Infinity is not valid JSON
"""
with pytest.raises(orjson.JSONDecodeError):
orjson.loads("[infinity]")
with pytest.raises(orjson.JSONDecodeError):
orjson.loads("[Infinity]")
with pytest.raises(orjson.JSONDecodeError):
orjson.loads("[-Infinity]")
with pytest.raises(orjson.JSONDecodeError):
orjson.loads("[-infinity]")
def test_int_53(self):
"""
int 53-bit
"""
for val in (9007199254740991, -9007199254740991):
assert orjson.loads(str(val)) == val
assert orjson.dumps(val, option=orjson.OPT_STRICT_INTEGER) == str(
val
).encode("utf-8")
def test_int_53_exc(self):
"""
int 53-bit exception on 64-bit
"""
for val in (9007199254740992, -9007199254740992):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(val, option=orjson.OPT_STRICT_INTEGER)
def test_int_53_exc_usize(self):
"""
int 53-bit exception on 64-bit usize
"""
for val in (9223372036854775808, 18446744073709551615):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(val, option=orjson.OPT_STRICT_INTEGER)
def test_int_53_exc_128(self):
"""
int 53-bit exception on 128-bit
"""
val = 2**65
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(val, option=orjson.OPT_STRICT_INTEGER)
def test_int_64(self):
"""
int 64-bit
"""
for val in (9223372036854775807, -9223372036854775807):
assert orjson.loads(str(val)) == val
assert orjson.dumps(val) == str(val).encode("utf-8")
def test_uint_64(self):
"""
uint 64-bit
"""
for val in (0, 9223372036854775808, 18446744073709551615):
assert orjson.loads(str(val)) == val
assert orjson.dumps(val) == str(val).encode("utf-8")
def test_int_128(self):
"""
int 128-bit
"""
for val in (18446744073709551616, -9223372036854775809):
pytest.raises(orjson.JSONEncodeError, orjson.dumps, val)
def test_float(self):
"""
float
"""
assert -1.1234567893 == orjson.loads("-1.1234567893")
assert -1.234567893 == orjson.loads("-1.234567893")
assert -1.34567893 == orjson.loads("-1.34567893")
assert -1.4567893 == orjson.loads("-1.4567893")
assert -1.567893 == orjson.loads("-1.567893")
assert -1.67893 == orjson.loads("-1.67893")
assert -1.7893 == orjson.loads("-1.7893")
assert -1.893 == orjson.loads("-1.893")
assert -1.3 == orjson.loads("-1.3")
assert 1.1234567893 == orjson.loads("1.1234567893")
assert 1.234567893 == orjson.loads("1.234567893")
assert 1.34567893 == orjson.loads("1.34567893")
assert 1.4567893 == orjson.loads("1.4567893")
assert 1.567893 == orjson.loads("1.567893")
assert 1.67893 == orjson.loads("1.67893")
assert 1.7893 == orjson.loads("1.7893")
assert 1.893 == orjson.loads("1.893")
assert 1.3 == orjson.loads("1.3")
def test_float_precision_loads(self):
"""
float precision loads()
"""
assert orjson.loads("31.245270191439438") == 31.245270191439438
assert orjson.loads("-31.245270191439438") == -31.245270191439438
assert orjson.loads("121.48791951161945") == 121.48791951161945
assert orjson.loads("-121.48791951161945") == -121.48791951161945
assert orjson.loads("100.78399658203125") == 100.78399658203125
assert orjson.loads("-100.78399658203125") == -100.78399658203125
def test_float_precision_dumps(self):
"""
float precision dumps()
"""
assert orjson.dumps(31.245270191439438) == b"31.245270191439438"
assert orjson.dumps(-31.245270191439438) == b"-31.245270191439438"
assert orjson.dumps(121.48791951161945) == b"121.48791951161945"
assert orjson.dumps(-121.48791951161945) == b"-121.48791951161945"
assert orjson.dumps(100.78399658203125) == b"100.78399658203125"
assert orjson.dumps(-100.78399658203125) == b"-100.78399658203125"
def test_float_edge(self):
"""
float edge cases
"""
assert orjson.dumps(0.8701) == b"0.8701"
assert orjson.loads("0.8701") == 0.8701
assert (
orjson.loads("0.0000000000000000000000000000000000000000000000000123e50")
== 1.23
)
assert orjson.loads("0.4e5") == 40000.0
assert orjson.loads("0.00e-00") == 0.0
assert orjson.loads("0.4e-001") == 0.04
assert orjson.loads("0.123456789e-12") == 1.23456789e-13
assert orjson.loads("1.234567890E+34") == 1.23456789e34
assert orjson.loads("23456789012E66") == 2.3456789012e76
def test_float_notation(self):
"""
float notation
"""
for val in ("1.337E40", "1.337e+40", "1337e40", "1.337E-4"):
obj = orjson.loads(val)
assert obj == float(val)
assert orjson.dumps(val) == ('"%s"' % val).encode("utf-8")
def test_list(self):
"""
list
"""
obj = ["a", "😊", True, {"b": 1.1}, 2]
ref = '["a","😊",true,{"b":1.1},2]'
assert orjson.dumps(obj) == ref.encode("utf-8")
assert orjson.loads(ref) == obj
def test_tuple(self):
"""
tuple
"""
obj = ("a", "😊", True, {"b": 1.1}, 2)
ref = '["a","😊",true,{"b":1.1},2]'
assert orjson.dumps(obj) == ref.encode("utf-8")
assert orjson.loads(ref) == list(obj)
def test_object(self):
"""
object() dumps()
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(object())
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_fixture.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import pytest
import orjson
from .util import read_fixture_bytes, read_fixture_str
class TestFixture:
    """Round-trips real-world JSON fixtures (stored xz-compressed) through
    loads()/dumps() to check structural fidelity on large documents."""
    def test_twitter(self):
        """
        loads(),dumps() twitter.json
        """
        val = read_fixture_str("twitter.json.xz")
        read = orjson.loads(val)
        assert orjson.loads(orjson.dumps(read)) == read
    def test_canada(self):
        """
        loads(), dumps() canada.json
        """
        val = read_fixture_str("canada.json.xz")
        read = orjson.loads(val)
        assert orjson.loads(orjson.dumps(read)) == read
    def test_citm_catalog(self):
        """
        loads(), dumps() citm_catalog.json
        """
        val = read_fixture_str("citm_catalog.json.xz")
        read = orjson.loads(val)
        assert orjson.loads(orjson.dumps(read)) == read
    def test_github(self):
        """
        loads(), dumps() github.json
        """
        val = read_fixture_str("github.json.xz")
        read = orjson.loads(val)
        assert orjson.loads(orjson.dumps(read)) == read
    def test_blns(self):
        """
        loads() blns.json JSONDecodeError
        https://github.com/minimaxir/big-list-of-naughty-strings
        """
        val = read_fixture_bytes("blns.txt.xz")
        for line in val.split(b"\n"):
            if line and not line.startswith(b"#"):
                with pytest.raises(orjson.JSONDecodeError):
                    # NOTE(review): the loop variable `line` is unused here --
                    # every iteration re-parses the whole fixture `val`, which
                    # always fails to decode because it contains raw newlines.
                    # If per-line parsing was intended, substituting `line`
                    # would make many plain-text lines valid JSON strings and
                    # defeat the expected-error assertion -- confirm intent
                    # upstream before changing.
                    _ = orjson.loads(b'"' + val + b'"')
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_uuid.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import uuid
import pytest
import orjson
class TestUUID:
def test_uuid_immutable(self):
"""
UUID objects are immutable
"""
val = uuid.uuid4()
with pytest.raises(TypeError):
val.int = 1 # type: ignore
with pytest.raises(TypeError):
val.int = None # type: ignore
def test_uuid_int(self):
"""
UUID.int is a 128-bit integer
"""
val = uuid.UUID("7202d115-7ff3-4c81-a7c1-2a1f067b1ece")
assert isinstance(val.int, int)
assert val.int >= 2**64
assert val.int < 2**128
assert val.int, 151546616840194781678008611711208857294
def test_uuid_overflow(self):
"""
UUID.int can't trigger errors in _PyLong_AsByteArray
"""
with pytest.raises(ValueError):
uuid.UUID(int=2**128)
with pytest.raises(ValueError):
uuid.UUID(int=-1)
def test_uuid_subclass(self):
"""
UUID subclasses are not serialized
"""
class AUUID(uuid.UUID):
pass
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(AUUID("{12345678-1234-5678-1234-567812345678}"))
def test_serializes_withopt(self):
"""
dumps() accepts deprecated OPT_SERIALIZE_UUID
"""
assert (
orjson.dumps(
uuid.UUID("7202d115-7ff3-4c81-a7c1-2a1f067b1ece"),
option=orjson.OPT_SERIALIZE_UUID,
)
== b'"7202d115-7ff3-4c81-a7c1-2a1f067b1ece"'
)
def test_nil_uuid(self):
assert (
orjson.dumps(uuid.UUID("00000000-0000-0000-0000-000000000000"))
== b'"00000000-0000-0000-0000-000000000000"'
)
def test_all_ways_to_create_uuid_behave_equivalently(self):
# Note that according to the docstring for the uuid.UUID class, all the
# forms below are equivalent -- they end up with the same value for
# `self.int`, which is all that really matters
uuids = [
uuid.UUID("{12345678-1234-5678-1234-567812345678}"),
uuid.UUID("12345678123456781234567812345678"),
uuid.UUID("urn:uuid:12345678-1234-5678-1234-567812345678"),
uuid.UUID(bytes=b"\x12\x34\x56\x78" * 4),
uuid.UUID(
bytes_le=b"\x78\x56\x34\x12\x34\x12\x78\x56"
+ b"\x12\x34\x56\x78\x12\x34\x56\x78"
),
uuid.UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678)),
uuid.UUID(int=0x12345678123456781234567812345678),
]
result = orjson.dumps(uuids)
canonical_uuids = ['"%s"' % str(u) for u in uuids]
serialized = ("[%s]" % ",".join(canonical_uuids)).encode("utf8")
assert result == serialized
def test_serializes_correctly_with_leading_zeroes(self):
instance = uuid.UUID(int=0x00345678123456781234567812345678)
assert orjson.dumps(instance) == ('"%s"' % str(instance)).encode("utf8")
def test_all_uuid_creation_functions_create_serializable_uuids(self):
uuids = (
uuid.uuid1(),
uuid.uuid3(uuid.NAMESPACE_DNS, "python.org"),
uuid.uuid4(),
uuid.uuid5(uuid.NAMESPACE_DNS, "python.org"),
)
for val in uuids:
assert orjson.dumps(val) == f'"{val}"'.encode("utf-8")
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_datetime.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import datetime
import pytest
import orjson
try:
import zoneinfo
_ = zoneinfo.ZoneInfo("Europe/Amsterdam")
except Exception: # ImportError,ZoneInfoNotFoundError
zoneinfo = None # type: ignore
try:
import pytz
except ImportError:
pytz = None # type: ignore
try:
import pendulum
except ImportError:
pendulum = None # type: ignore
try:
from dateutil import tz
except ImportError:
tz = None # type: ignore
AMSTERDAM_1937_DATETIMES = (
b'["1937-01-01T12:00:27.000087+00:20"]', # tzinfo<2022b and an example in RFC 3339
b'["1937-01-01T12:00:27.000087+00:00"]', # tzinfo>=2022b
)
AMSTERDAM_1937_DATETIMES_WITH_Z = (
b'["1937-01-01T12:00:27.000087+00:20"]',
b'["1937-01-01T12:00:27.000087Z"]',
)
class TestDatetime:
def test_datetime_naive(self):
"""
datetime.datetime naive prints without offset
"""
assert (
orjson.dumps([datetime.datetime(2000, 1, 1, 2, 3, 4, 123)])
== b'["2000-01-01T02:03:04.000123"]'
)
def test_datetime_naive_utc(self):
"""
datetime.datetime naive with opt assumes UTC
"""
assert (
orjson.dumps(
[datetime.datetime(2000, 1, 1, 2, 3, 4, 123)],
option=orjson.OPT_NAIVE_UTC,
)
== b'["2000-01-01T02:03:04.000123+00:00"]'
)
def test_datetime_min(self):
"""
datetime.datetime min range
"""
assert (
orjson.dumps(
[datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0, 0)],
option=orjson.OPT_NAIVE_UTC,
)
== b'["0001-01-01T00:00:00+00:00"]'
)
def test_datetime_max(self):
"""
datetime.datetime max range
"""
assert (
orjson.dumps(
[datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 50, 999999)],
option=orjson.OPT_NAIVE_UTC,
)
== b'["9999-12-31T23:59:50.999999+00:00"]'
)
def test_datetime_three_digits(self):
"""
datetime.datetime three digit year
"""
assert (
orjson.dumps(
[datetime.datetime(312, 1, 1)],
option=orjson.OPT_NAIVE_UTC,
)
== b'["0312-01-01T00:00:00+00:00"]'
)
def test_datetime_two_digits(self):
"""
datetime.datetime two digit year
"""
assert (
orjson.dumps(
[datetime.datetime(46, 1, 1)],
option=orjson.OPT_NAIVE_UTC,
)
== b'["0046-01-01T00:00:00+00:00"]'
)
@pytest.mark.skipif(tz is None, reason="dateutil optional")
def test_datetime_tz_assume(self):
"""
datetime.datetime tz with assume UTC uses tz
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018, 1, 1, 2, 3, 4, 0, tzinfo=tz.gettz("Asia/Shanghai")
)
],
option=orjson.OPT_NAIVE_UTC,
)
== b'["2018-01-01T02:03:04+08:00"]'
)
def test_datetime_timezone_utc(self):
"""
datetime.datetime.utc
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018, 6, 1, 2, 3, 4, 0, tzinfo=datetime.timezone.utc
)
]
)
== b'["2018-06-01T02:03:04+00:00"]'
)
@pytest.mark.skipif(pytz is None, reason="pytz optional")
def test_datetime_pytz_utc(self):
"""
pytz.UTC
"""
assert (
orjson.dumps([datetime.datetime(2018, 6, 1, 2, 3, 4, 0, tzinfo=pytz.UTC)])
== b'["2018-06-01T02:03:04+00:00"]'
)
@pytest.mark.skipif(zoneinfo is None, reason="zoneinfo not available")
def test_datetime_zoneinfo_utc(self):
"""
zoneinfo.ZoneInfo("UTC")
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018, 6, 1, 2, 3, 4, 0, tzinfo=zoneinfo.ZoneInfo("UTC")
)
]
)
== b'["2018-06-01T02:03:04+00:00"]'
)
@pytest.mark.skipif(zoneinfo is None, reason="zoneinfo not available")
def test_datetime_zoneinfo_positive(self):
assert (
orjson.dumps(
[
datetime.datetime(
2018,
1,
1,
2,
3,
4,
0,
tzinfo=zoneinfo.ZoneInfo("Asia/Shanghai"),
)
]
)
== b'["2018-01-01T02:03:04+08:00"]'
)
@pytest.mark.skipif(zoneinfo is None, reason="zoneinfo not available")
def test_datetime_zoneinfo_negative(self):
assert (
orjson.dumps(
[
datetime.datetime(
2018,
6,
1,
2,
3,
4,
0,
tzinfo=zoneinfo.ZoneInfo("America/New_York"),
)
]
)
== b'["2018-06-01T02:03:04-04:00"]'
)
@pytest.mark.skipif(pendulum is None, reason="pendulum not installed")
def test_datetime_pendulum_utc(self):
"""
datetime.datetime UTC
"""
assert (
orjson.dumps(
[datetime.datetime(2018, 6, 1, 2, 3, 4, 0, tzinfo=pendulum.UTC)]
)
== b'["2018-06-01T02:03:04+00:00"]'
)
@pytest.mark.skipif(tz is None, reason="dateutil optional")
def test_datetime_arrow_positive(self):
"""
datetime.datetime positive UTC
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018, 1, 1, 2, 3, 4, 0, tzinfo=tz.gettz("Asia/Shanghai")
)
]
)
== b'["2018-01-01T02:03:04+08:00"]'
)
@pytest.mark.skipif(pytz is None, reason="pytz optional")
def test_datetime_pytz_positive(self):
"""
datetime.datetime positive UTC
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018, 1, 1, 2, 3, 4, 0, tzinfo=pytz.timezone("Asia/Shanghai")
)
]
)
== b'["2018-01-01T02:03:04+08:00"]'
)
@pytest.mark.skipif(pendulum is None, reason="pendulum not installed")
def test_datetime_pendulum_positive(self):
"""
datetime.datetime positive UTC
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018,
1,
1,
2,
3,
4,
0,
tzinfo=pendulum.timezone("Asia/Shanghai"), # type: ignore
)
]
)
== b'["2018-01-01T02:03:04+08:00"]'
)
@pytest.mark.skipif(pytz is None, reason="pytz optional")
def test_datetime_pytz_negative_dst(self):
"""
datetime.datetime negative UTC DST
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018, 6, 1, 2, 3, 4, 0, tzinfo=pytz.timezone("America/New_York")
)
]
)
== b'["2018-06-01T02:03:04-04:00"]'
)
@pytest.mark.skipif(pendulum is None, reason="pendulum not installed")
def test_datetime_pendulum_negative_dst(self):
"""
datetime.datetime negative UTC DST
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018,
6,
1,
2,
3,
4,
0,
tzinfo=pendulum.timezone("America/New_York"), # type: ignore
)
]
)
== b'["2018-06-01T02:03:04-04:00"]'
)
@pytest.mark.skipif(zoneinfo is None, reason="zoneinfo not available")
def test_datetime_zoneinfo_negative_non_dst(self):
"""
datetime.datetime negative UTC non-DST
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018,
12,
1,
2,
3,
4,
0,
tzinfo=zoneinfo.ZoneInfo("America/New_York"),
)
]
)
== b'["2018-12-01T02:03:04-05:00"]'
)
@pytest.mark.skipif(pytz is None, reason="pytz optional")
def test_datetime_pytz_negative_non_dst(self):
"""
datetime.datetime negative UTC non-DST
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018,
12,
1,
2,
3,
4,
0,
tzinfo=pytz.timezone("America/New_York"),
)
]
)
== b'["2018-12-01T02:03:04-05:00"]'
)
@pytest.mark.skipif(pendulum is None, reason="pendulum not installed")
def test_datetime_pendulum_negative_non_dst(self):
"""
datetime.datetime negative UTC non-DST
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018,
12,
1,
2,
3,
4,
0,
tzinfo=pendulum.timezone("America/New_York"), # type: ignore
)
]
)
== b'["2018-12-01T02:03:04-05:00"]'
)
@pytest.mark.skipif(zoneinfo is None, reason="zoneinfo not available")
def test_datetime_zoneinfo_partial_hour(self):
"""
datetime.datetime UTC offset partial hour
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018,
12,
1,
2,
3,
4,
0,
tzinfo=zoneinfo.ZoneInfo("Australia/Adelaide"),
)
]
)
== b'["2018-12-01T02:03:04+10:30"]'
)
@pytest.mark.skipif(pytz is None, reason="pytz optional")
def test_datetime_pytz_partial_hour(self):
"""
datetime.datetime UTC offset partial hour
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018,
12,
1,
2,
3,
4,
0,
tzinfo=pytz.timezone("Australia/Adelaide"),
)
]
)
== b'["2018-12-01T02:03:04+10:30"]'
)
@pytest.mark.skipif(pendulum is None, reason="pendulum not installed")
def test_datetime_pendulum_partial_hour(self):
"""
datetime.datetime UTC offset partial hour
"""
assert (
orjson.dumps(
[
datetime.datetime(
2018,
12,
1,
2,
3,
4,
0,
tzinfo=pendulum.timezone("Australia/Adelaide"), # type: ignore
)
]
)
== b'["2018-12-01T02:03:04+10:30"]'
)
@pytest.mark.skipif(pendulum is None, reason="pendulum not installed")
def test_datetime_partial_second_pendulum_supported(self):
"""
datetime.datetime UTC offset round seconds
https://tools.ietf.org/html/rfc3339#section-5.8
"""
assert (
orjson.dumps(
[
datetime.datetime(
1937,
1,
1,
12,
0,
27,
87,
tzinfo=pendulum.timezone("Europe/Amsterdam"), # type: ignore
)
]
)
in AMSTERDAM_1937_DATETIMES
)
@pytest.mark.skipif(zoneinfo is None, reason="zoneinfo not available")
def test_datetime_partial_second_zoneinfo(self):
"""
datetime.datetime UTC offset round seconds
https://tools.ietf.org/html/rfc3339#section-5.8
"""
assert (
orjson.dumps(
[
datetime.datetime(
1937,
1,
1,
12,
0,
27,
87,
tzinfo=zoneinfo.ZoneInfo("Europe/Amsterdam"),
)
]
)
in AMSTERDAM_1937_DATETIMES
)
@pytest.mark.skipif(pytz is None, reason="pytz optional")
def test_datetime_partial_second_pytz(self):
"""
datetime.datetime UTC offset round seconds
https://tools.ietf.org/html/rfc3339#section-5.8
"""
assert (
orjson.dumps(
[
datetime.datetime(
1937,
1,
1,
12,
0,
27,
87,
tzinfo=pytz.timezone("Europe/Amsterdam"),
)
]
)
in AMSTERDAM_1937_DATETIMES
)
@pytest.mark.skipif(tz is None, reason="dateutil optional")
def test_datetime_partial_second_dateutil(self):
"""
datetime.datetime UTC offset round seconds
https://tools.ietf.org/html/rfc3339#section-5.8
"""
assert (
orjson.dumps(
[
datetime.datetime(
1937, 1, 1, 12, 0, 27, 87, tzinfo=tz.gettz("Europe/Amsterdam")
)
]
)
in AMSTERDAM_1937_DATETIMES
)
def test_datetime_microsecond_max(self):
"""
datetime.datetime microsecond max
"""
assert (
orjson.dumps(datetime.datetime(2000, 1, 1, 0, 0, 0, 999999))
== b'"2000-01-01T00:00:00.999999"'
)
def test_datetime_microsecond_min(self):
"""
datetime.datetime microsecond min
"""
assert (
orjson.dumps(datetime.datetime(2000, 1, 1, 0, 0, 0, 1))
== b'"2000-01-01T00:00:00.000001"'
)
def test_datetime_omit_microseconds(self):
"""
datetime.datetime OPT_OMIT_MICROSECONDS
"""
assert (
orjson.dumps(
[datetime.datetime(2000, 1, 1, 2, 3, 4, 123)],
option=orjson.OPT_OMIT_MICROSECONDS,
)
== b'["2000-01-01T02:03:04"]'
)
def test_datetime_omit_microseconds_naive(self):
"""
datetime.datetime naive OPT_OMIT_MICROSECONDS
"""
assert (
orjson.dumps(
[datetime.datetime(2000, 1, 1, 2, 3, 4, 123)],
option=orjson.OPT_NAIVE_UTC | orjson.OPT_OMIT_MICROSECONDS,
)
== b'["2000-01-01T02:03:04+00:00"]'
)
def test_time_omit_microseconds(self):
"""
datetime.time OPT_OMIT_MICROSECONDS
"""
assert (
orjson.dumps(
[datetime.time(2, 3, 4, 123)], option=orjson.OPT_OMIT_MICROSECONDS
)
== b'["02:03:04"]'
)
def test_datetime_utc_z_naive_omit(self):
"""
datetime.datetime naive OPT_UTC_Z
"""
assert (
orjson.dumps(
[datetime.datetime(2000, 1, 1, 2, 3, 4, 123)],
option=orjson.OPT_NAIVE_UTC
| orjson.OPT_UTC_Z
| orjson.OPT_OMIT_MICROSECONDS,
)
== b'["2000-01-01T02:03:04Z"]'
)
def test_datetime_utc_z_naive(self):
"""
datetime.datetime naive OPT_UTC_Z
"""
assert (
orjson.dumps(
[datetime.datetime(2000, 1, 1, 2, 3, 4, 123)],
option=orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z,
)
== b'["2000-01-01T02:03:04.000123Z"]'
)
def test_datetime_utc_z_without_tz(self):
"""
datetime.datetime naive OPT_UTC_Z
"""
assert (
orjson.dumps(
[datetime.datetime(2000, 1, 1, 2, 3, 4, 123)], option=orjson.OPT_UTC_Z
)
== b'["2000-01-01T02:03:04.000123"]'
)
@pytest.mark.skipif(tz is None, reason="dateutil optional")
def test_datetime_utc_z_with_tz(self):
"""
datetime.datetime naive OPT_UTC_Z
"""
assert (
orjson.dumps(
[
datetime.datetime(
2000, 1, 1, 0, 0, 0, 1, tzinfo=datetime.timezone.utc
)
],
option=orjson.OPT_UTC_Z,
)
== b'["2000-01-01T00:00:00.000001Z"]'
)
assert (
orjson.dumps(
[
datetime.datetime(
1937, 1, 1, 12, 0, 27, 87, tzinfo=tz.gettz("Europe/Amsterdam")
)
],
option=orjson.OPT_UTC_Z,
)
in AMSTERDAM_1937_DATETIMES_WITH_Z
)
@pytest.mark.skipif(pendulum is None, reason="pendulum not installed")
def test_datetime_roundtrip(self):
"""
datetime.datetime parsed by pendulum
"""
obj = datetime.datetime(2000, 1, 1, 0, 0, 0, 1, tzinfo=datetime.timezone.utc)
serialized = orjson.dumps(obj).decode("utf-8").replace('"', "")
parsed = pendulum.parse(serialized)
for attr in ("year", "month", "day", "hour", "minute", "second", "microsecond"):
assert getattr(obj, attr) == getattr(parsed, attr)
class TestDate:
def test_date(self):
"""
datetime.date
"""
assert orjson.dumps([datetime.date(2000, 1, 13)]) == b'["2000-01-13"]'
def test_date_min(self):
"""
datetime.date MINYEAR
"""
assert (
orjson.dumps([datetime.date(datetime.MINYEAR, 1, 1)]) == b'["0001-01-01"]'
)
def test_date_max(self):
"""
datetime.date MAXYEAR
"""
assert (
orjson.dumps([datetime.date(datetime.MAXYEAR, 12, 31)]) == b'["9999-12-31"]'
)
def test_date_three_digits(self):
"""
datetime.date three digit year
"""
assert (
orjson.dumps(
[datetime.date(312, 1, 1)],
)
== b'["0312-01-01"]'
)
def test_date_two_digits(self):
"""
datetime.date two digit year
"""
assert (
orjson.dumps(
[datetime.date(46, 1, 1)],
)
== b'["0046-01-01"]'
)
class TestTime:
def test_time(self):
"""
datetime.time
"""
assert orjson.dumps([datetime.time(12, 15, 59, 111)]) == b'["12:15:59.000111"]'
assert orjson.dumps([datetime.time(12, 15, 59)]) == b'["12:15:59"]'
@pytest.mark.skipif(zoneinfo is None, reason="zoneinfo not available")
def test_time_tz(self):
"""
datetime.time with tzinfo error
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(
[
datetime.time(
12, 15, 59, 111, tzinfo=zoneinfo.ZoneInfo("Asia/Shanghai")
)
]
)
def test_time_microsecond_max(self):
"""
datetime.time microsecond max
"""
assert orjson.dumps(datetime.time(0, 0, 0, 999999)) == b'"00:00:00.999999"'
def test_time_microsecond_min(self):
"""
datetime.time microsecond min
"""
assert orjson.dumps(datetime.time(0, 0, 0, 1)) == b'"00:00:00.000001"'
class TestDateclassPassthrough:
def test_passthrough_datetime(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(
datetime.datetime(1970, 1, 1), option=orjson.OPT_PASSTHROUGH_DATETIME
)
def test_passthrough_date(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(
datetime.date(1970, 1, 1), option=orjson.OPT_PASSTHROUGH_DATETIME
)
def test_passthrough_time(self):
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(
datetime.time(12, 0, 0), option=orjson.OPT_PASSTHROUGH_DATETIME
)
def test_passthrough_datetime_default(self):
def default(obj):
return obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
assert (
orjson.dumps(
datetime.datetime(1970, 1, 1),
option=orjson.OPT_PASSTHROUGH_DATETIME,
default=default,
)
== b'"Thu, 01 Jan 1970 00:00:00 GMT"'
)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_dataclass.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import abc
import uuid
from dataclasses import InitVar, asdict, dataclass, field
from enum import Enum
from typing import ClassVar, Dict, Optional
import pytest
import orjson
class AnEnum(Enum):
ONE = 1
TWO = 2
@dataclass
class EmptyDataclass:
pass
@dataclass
class EmptyDataclassSlots:
__slots__ = ()
@dataclass
class Dataclass1:
name: str
number: int
sub: Optional["Dataclass1"]
@dataclass
class Dataclass2:
name: Optional[str] = field(default="?")
@dataclass
class Dataclass3:
a: str
b: int
c: dict
d: bool
e: float
f: list
g: tuple
@dataclass
class Dataclass4:
a: str = field()
b: int = field(metadata={"unrelated": False})
c: float = 1.1
@dataclass
class Datasubclass(Dataclass1):
additional: bool
@dataclass
class Slotsdataclass:
__slots__ = ("a", "b", "_c", "d")
a: str
b: int
_c: str
d: InitVar[str]
cls_var: ClassVar[str] = "cls"
@dataclass
class Defaultdataclass:
a: uuid.UUID
b: AnEnum
@dataclass
class UnsortedDataclass:
c: int
b: int
a: int
d: Optional[Dict]
@dataclass
class InitDataclass:
a: InitVar[str]
b: InitVar[str]
cls_var: ClassVar[str] = "cls"
ab: str = ""
def __post_init__(self, a: str, b: str):
self._other = 1
self.ab = f"{a} {b}"
class AbstractBase(abc.ABC):
@abc.abstractmethod
def key(self):
raise NotImplementedError
@dataclass(frozen=True)
class ConcreteAbc(AbstractBase):
__slots__ = ("attr",)
attr: float
def key(self):
return "dkjf"
class TestDataclass:
def test_dataclass(self):
"""
dumps() dataclass
"""
obj = Dataclass1("a", 1, None)
assert orjson.dumps(obj) == b'{"name":"a","number":1,"sub":null}'
def test_dataclass_recursive(self):
"""
dumps() dataclass recursive
"""
obj = Dataclass1("a", 1, Dataclass1("b", 2, None))
assert (
orjson.dumps(obj)
== b'{"name":"a","number":1,"sub":{"name":"b","number":2,"sub":null}}'
)
def test_dataclass_circular(self):
"""
dumps() dataclass circular
"""
obj1 = Dataclass1("a", 1, None)
obj2 = Dataclass1("b", 2, obj1)
obj1.sub = obj2
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj1)
def test_dataclass_empty(self):
"""
dumps() no attributes
"""
assert orjson.dumps(EmptyDataclass()) == b"{}"
def test_dataclass_empty_slots(self):
"""
dumps() no attributes slots
"""
assert orjson.dumps(EmptyDataclassSlots()) == b"{}"
def test_dataclass_default_arg(self):
"""
dumps() dataclass default arg
"""
obj = Dataclass2()
assert orjson.dumps(obj) == b'{"name":"?"}'
def test_dataclass_types(self):
"""
dumps() dataclass types
"""
obj = Dataclass3("a", 1, {"a": "b"}, True, 1.1, [1, 2], (3, 4))
assert (
orjson.dumps(obj)
== b'{"a":"a","b":1,"c":{"a":"b"},"d":true,"e":1.1,"f":[1,2],"g":[3,4]}'
)
def test_dataclass_metadata(self):
"""
dumps() dataclass metadata
"""
obj = Dataclass4("a", 1, 2.1)
assert orjson.dumps(obj) == b'{"a":"a","b":1,"c":2.1}'
def test_dataclass_classvar(self):
"""
dumps() dataclass class variable
"""
obj = Dataclass4("a", 1)
assert orjson.dumps(obj) == b'{"a":"a","b":1,"c":1.1}'
def test_dataclass_subclass(self):
"""
dumps() dataclass subclass
"""
obj = Datasubclass("a", 1, None, False)
assert (
orjson.dumps(obj)
== b'{"name":"a","number":1,"sub":null,"additional":false}'
)
def test_dataclass_slots(self):
"""
dumps() dataclass with __slots__ does not include under attributes, InitVar, or ClassVar
"""
obj = Slotsdataclass("a", 1, "c", "d")
assert "__dict__" not in dir(obj)
assert orjson.dumps(obj) == b'{"a":"a","b":1}'
def test_dataclass_default(self):
"""
dumps() dataclass with default
"""
def default(__obj):
if isinstance(__obj, uuid.UUID):
return str(__obj)
elif isinstance(__obj, Enum):
return __obj.value
obj = Defaultdataclass(
uuid.UUID("808989c0-00d5-48a8-b5c4-c804bf9032f2"), AnEnum.ONE
)
assert (
orjson.dumps(obj, default=default)
== b'{"a":"808989c0-00d5-48a8-b5c4-c804bf9032f2","b":1}'
)
def test_dataclass_sort(self):
"""
OPT_SORT_KEYS has no effect on dataclasses
"""
obj = UnsortedDataclass(1, 2, 3, None)
assert (
orjson.dumps(obj, option=orjson.OPT_SORT_KEYS)
== b'{"c":1,"b":2,"a":3,"d":null}'
)
def test_dataclass_sort_sub(self):
"""
dataclass fast path does not prevent OPT_SORT_KEYS from cascading
"""
obj = UnsortedDataclass(1, 2, 3, {"f": 2, "e": 1})
assert (
orjson.dumps(obj, option=orjson.OPT_SORT_KEYS)
== b'{"c":1,"b":2,"a":3,"d":{"e":1,"f":2}}'
)
def test_dataclass_under(self):
"""
dumps() does not include under attributes, InitVar, or ClassVar
"""
obj = InitDataclass("zxc", "vbn")
assert orjson.dumps(obj) == b'{"ab":"zxc vbn"}'
def test_dataclass_option(self):
"""
dumps() accepts deprecated OPT_SERIALIZE_DATACLASS
"""
obj = Dataclass1("a", 1, None)
assert (
orjson.dumps(obj, option=orjson.OPT_SERIALIZE_DATACLASS)
== b'{"name":"a","number":1,"sub":null}'
)
class TestDataclassPassthrough:
def test_dataclass_passthrough_raise(self):
"""
dumps() dataclass passes to default with OPT_PASSTHROUGH_DATACLASS
"""
obj = Dataclass1("a", 1, None)
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(obj, option=orjson.OPT_PASSTHROUGH_DATACLASS)
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(
InitDataclass("zxc", "vbn"), option=orjson.OPT_PASSTHROUGH_DATACLASS
)
def test_dataclass_passthrough_default(self):
"""
dumps() dataclass passes to default with OPT_PASSTHROUGH_DATACLASS
"""
obj = Dataclass1("a", 1, None)
assert (
orjson.dumps(obj, option=orjson.OPT_PASSTHROUGH_DATACLASS, default=asdict)
== b'{"name":"a","number":1,"sub":null}'
)
def default(obj):
if isinstance(obj, Dataclass1):
return {"name": obj.name, "number": obj.number}
raise TypeError
assert (
orjson.dumps(obj, option=orjson.OPT_PASSTHROUGH_DATACLASS, default=default)
== b'{"name":"a","number":1}'
)
class TestAbstractDataclass:
def test_dataclass_abc(self):
obj = ConcreteAbc(1.0)
assert orjson.dumps(obj) == b'{"attr":1.0}'
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/test/test_memory.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import dataclasses
import datetime
import gc
import random
from typing import List
try:
import pytz
except ImportError:
pytz = None # type: ignore
try:
import psutil
except ImportError:
psutil = None # type: ignore
import pytest
import orjson
try:
import numpy
except ImportError:
numpy = None # type: ignore
try:
import pandas
except ImportError:
pandas = None # type: ignore
FIXTURE = '{"a":[81891289, 8919812.190129012], "b": false, "c": null, "d": "東京"}'
def default(obj):
return str(obj)
@dataclasses.dataclass
class Member:
id: int
active: bool
@dataclasses.dataclass
class Object:
id: int
updated_at: datetime.datetime
name: str
members: List[Member]
DATACLASS_FIXTURE = [
Object(
i,
datetime.datetime.now(datetime.timezone.utc)
+ datetime.timedelta(seconds=random.randint(0, 10000)),
str(i) * 3,
[Member(j, True) for j in range(0, 10)],
)
for i in range(100000, 101000)
]
MAX_INCREASE = 4194304 # 4MiB
class Unsupported:
pass
class TestMemory:
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
def test_memory_loads(self):
"""
loads() memory leak
"""
proc = psutil.Process()
gc.collect()
val = orjson.loads(FIXTURE)
assert val
mem = proc.memory_info().rss
for _ in range(10000):
val = orjson.loads(FIXTURE)
assert val
gc.collect()
assert proc.memory_info().rss <= mem + MAX_INCREASE
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
def test_memory_loads_memoryview(self):
"""
loads() memory leak using memoryview
"""
proc = psutil.Process()
gc.collect()
fixture = FIXTURE.encode("utf-8")
val = orjson.loads(fixture)
assert val
mem = proc.memory_info().rss
for _ in range(10000):
val = orjson.loads(memoryview(fixture))
assert val
gc.collect()
assert proc.memory_info().rss <= mem + MAX_INCREASE
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
def test_memory_dumps(self):
    """
    dumps() memory leak
    """
    proc = psutil.Process()
    gc.collect()
    fixture = orjson.loads(FIXTURE)
    # Warm-up call so one-time allocations are excluded from the baseline.
    val = orjson.dumps(fixture)
    assert val
    mem = proc.memory_info().rss
    for _ in range(10000):
        val = orjson.dumps(fixture)
        assert val
    gc.collect()
    # A per-iteration leak of the fixture would far exceed MAX_INCREASE (4MiB).
    # (The original repeated this assertion twice; once is sufficient.)
    assert proc.memory_info().rss <= mem + MAX_INCREASE
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
def test_memory_loads_exc(self):
"""
loads() memory leak exception without a GC pause
"""
proc = psutil.Process()
gc.disable()
mem = proc.memory_info().rss
n = 10000
i = 0
for _ in range(n):
try:
orjson.loads("")
except orjson.JSONDecodeError:
i += 1
assert n == i
assert proc.memory_info().rss <= mem + MAX_INCREASE
gc.enable()
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
def test_memory_dumps_exc(self):
"""
dumps() memory leak exception without a GC pause
"""
proc = psutil.Process()
gc.disable()
data = Unsupported()
mem = proc.memory_info().rss
n = 10000
i = 0
for _ in range(n):
try:
orjson.dumps(data)
except orjson.JSONEncodeError:
i += 1
assert n == i
assert proc.memory_info().rss <= mem + MAX_INCREASE
gc.enable()
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
def test_memory_dumps_default(self):
    """
    dumps() default memory leak
    """
    proc = psutil.Process()
    gc.collect()
    fixture = orjson.loads(FIXTURE)

    # A type orjson cannot serialize natively, forcing the `default` path.
    class Custom:
        def __init__(self, name):
            self.name = name

        def __str__(self):
            return f"{self.__class__.__name__}({self.name})"

    fixture["custom"] = Custom("orjson")
    # Warm-up call; assert it succeeded (sibling tests assert this too —
    # the original silently discarded the first result).
    val = orjson.dumps(fixture, default=default)
    assert val
    mem = proc.memory_info().rss
    for _ in range(10000):
        val = orjson.dumps(fixture, default=default)
        assert val
    gc.collect()
    assert proc.memory_info().rss <= mem + MAX_INCREASE
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
def test_memory_dumps_dataclass(self):
    """
    dumps() dataclass memory leak
    """
    proc = psutil.Process()
    gc.collect()
    # Warm-up serialization of the 1000-object dataclass fixture.
    val = orjson.dumps(DATACLASS_FIXTURE)
    assert val
    mem = proc.memory_info().rss
    for _ in range(100):
        val = orjson.dumps(DATACLASS_FIXTURE)
        # Original duplicated this assert; one occurrence suffices.
        assert val
    gc.collect()
    assert proc.memory_info().rss <= mem + MAX_INCREASE
@pytest.mark.skipif(
    psutil is None or pytz is None,
    reason="psutil or pytz not installed",
)
def test_memory_dumps_pytz_tzinfo(self):
    """
    dumps() pytz tzinfo memory leak
    """
    proc = psutil.Process()
    gc.collect()
    dt = datetime.datetime.now()
    # Warm-up: serializing a pytz-localized datetime exercises the tzinfo path.
    val = orjson.dumps(pytz.UTC.localize(dt))
    assert val
    mem = proc.memory_info().rss
    for _ in range(50000):
        val = orjson.dumps(pytz.UTC.localize(dt))
        # Original duplicated this assert; one occurrence suffices.
        assert val
    gc.collect()
    assert proc.memory_info().rss <= mem + MAX_INCREASE
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
def test_memory_loads_keys(self):
"""
loads() memory leak with number of keys causing cache eviction
"""
proc = psutil.Process()
gc.collect()
fixture = {"key_%s" % idx: "value" for idx in range(1024)}
assert len(fixture) == 1024
val = orjson.dumps(fixture)
loaded = orjson.loads(val)
assert loaded
mem = proc.memory_info().rss
for _ in range(100):
loaded = orjson.loads(val)
assert loaded
gc.collect()
assert proc.memory_info().rss <= mem + MAX_INCREASE
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
@pytest.mark.skipif(numpy is None, reason="numpy is not installed")
def test_memory_dumps_numpy(self):
    """
    dumps() numpy memory leak
    """
    proc = psutil.Process()
    gc.collect()
    fixture = numpy.random.rand(4, 4, 4)
    # Warm-up serialization through the native numpy path.
    val = orjson.dumps(fixture, option=orjson.OPT_SERIALIZE_NUMPY)
    assert val
    mem = proc.memory_info().rss
    for _ in range(100):
        val = orjson.dumps(fixture, option=orjson.OPT_SERIALIZE_NUMPY)
        # Original duplicated this assert; one occurrence suffices.
        assert val
    gc.collect()
    assert proc.memory_info().rss <= mem + MAX_INCREASE
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
@pytest.mark.skipif(pandas is None, reason="pandas is not installed")
def test_memory_dumps_pandas(self):
    """
    dumps() pandas memory leak
    """
    proc = psutil.Process()
    gc.collect()
    # (Removed a stray `numpy.random.rand(4, 4, 4)` whose result was discarded.)
    df = pandas.Series(numpy.random.rand(4, 4, 4).tolist())
    val = df.map(orjson.dumps)
    assert not val.empty
    mem = proc.memory_info().rss
    for _ in range(100):
        val = df.map(orjson.dumps)
        assert not val.empty
    gc.collect()
    assert proc.memory_info().rss <= mem + MAX_INCREASE
@pytest.mark.skipif(psutil is None, reason="psutil not installed")
def test_memory_dumps_fragment(self):
"""
dumps() Fragment memory leak
"""
proc = psutil.Process()
gc.collect()
orjson.dumps(orjson.Fragment(str(0)))
mem = proc.memory_info().rss
for i in range(10000):
orjson.dumps(orjson.Fragment(str(i)))
gc.collect()
assert proc.memory_info().rss <= mem + MAX_INCREASE
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/ci/azure-pipelines.yml | variables:
toolchain: nightly-2024-09-25
jobs:
- job: win_python313_amd64
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.13.0\x64\python.exe
rustup: https://win.rustup.rs/x86_64
target: x86_64-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.13.0'
addToPath: true
architecture: 'x64'
- checkout: self
- template: ./azure-win.yml
- job: win_python312_amd64
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.12.2\x64\python.exe
rustup: https://win.rustup.rs/x86_64
target: x86_64-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.12.2'
addToPath: true
architecture: 'x64'
- checkout: self
- template: ./azure-win.yml
- job: win_python311_amd64
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.11.4\x64\python.exe
rustup: https://win.rustup.rs/x86_64
target: x86_64-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.11.4'
addToPath: true
architecture: 'x64'
- checkout: self
- template: ./azure-win.yml
- job: win_python310_amd64
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.10.8\x64\python.exe
rustup: https://win.rustup.rs/x86_64
target: x86_64-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.10.8'
addToPath: true
architecture: 'x64'
- checkout: self
- template: ./azure-win.yml
- job: win_python39_amd64
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.9.13\x64\python.exe
rustup: https://win.rustup.rs/x86_64
target: x86_64-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.9.13'
addToPath: true
architecture: 'x64'
- checkout: self
- template: ./azure-win.yml
- job: win_python38_amd64
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.8.10\x64\python.exe
rustup: https://win.rustup.rs/x86_64
target: x86_64-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.8.10'
addToPath: true
architecture: 'x64'
- checkout: self
- template: ./azure-win.yml
- job: win_python313_x86
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.13.0\x86\python.exe
rustup: https://win.rustup.rs/x86
target: i686-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.13.0'
addToPath: true
architecture: 'x86'
- checkout: self
- template: ./azure-win.yml
- job: win_python312_x86
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.12.2\x86\python.exe
rustup: https://win.rustup.rs/x86
target: i686-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.12.2'
addToPath: true
architecture: 'x86'
- checkout: self
- template: ./azure-win.yml
- job: win_python311_x86
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.11.4\x86\python.exe
rustup: https://win.rustup.rs/x86
target: i686-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.11.4'
addToPath: true
architecture: 'x86'
- checkout: self
- template: ./azure-win.yml
- job: win_python310_x86
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.10.8\x86\python.exe
rustup: https://win.rustup.rs/x86
target: i686-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.10.8'
addToPath: true
architecture: 'x86'
- checkout: self
- template: ./azure-win.yml
- job: win_python39_x86
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.9.13\x86\python.exe
rustup: https://win.rustup.rs/x86
target: i686-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.9.13'
addToPath: true
architecture: 'x86'
- checkout: self
- template: ./azure-win.yml
- job: win_python38_x86
pool:
vmImage: windows-2022
variables:
interpreter: C:\hostedtoolcache\windows\Python\3.8.10\x86\python.exe
rustup: https://win.rustup.rs/x86
target: i686-pc-windows-msvc
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.8.10'
addToPath: true
architecture: 'x86'
- checkout: self
- template: ./azure-win.yml
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/ci/config.toml | [unstable]
build-std = ["core", "std", "alloc", "proc_macro", "panic_abort"]
build-std-features = ["panic_immediate_abort"]
[target.x86_64-apple-darwin]
linker = "clang"
rustflags = ["-C", "target-cpu=x86-64-v2", "-Z", "tune-cpu=generic"]
[target.aarch64-apple-darwin]
linker = "clang"
rustflags = ["-C", "target-cpu=apple-m1"]
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/ci/deploy | #!/usr/bin/env bash
set -eou pipefail
if [ -z ${DRONE_TAG+x} ]; then
tag=$(git name-rev --tags --name-only $(git rev-parse HEAD))
else
tag="$DRONE_TAG"
fi
echo "$tag"
if [[ "$tag" == "undefined" ]]; then
echo "not on a tag"
exit 0
fi
maturin upload --skip-existing "$1"
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/ci/sdist.toml | [source.crates-io]
replace-with = "vendored-sources"
[source.vendored-sources]
directory = "include/cargo"
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/ci/azure-win.yml | parameters:
interpreter: ''
rustup: ''
target: ''
toolchain: ''
steps:
- script: |
curl $(rustup) -o rustup-init.exe
rustup-init.exe -y --default-host $(target) --default-toolchain $(toolchain)-$(target) --profile minimal
set PATH=%PATH%;%USERPROFILE%\.cargo\bin
rustup default $(toolchain)-$(target)
rustup component add rust-src
mkdir .cargo
cp ci/config.toml .cargo/config.toml
echo "##vso[task.setvariable variable=PATH;]%PATH%;%USERPROFILE%\.cargo\bin"
displayName: rustup
- script: python.exe -m pip install --upgrade pip "maturin>=1,<2" wheel
displayName: build dependencies
- script: python.exe -m pip install -r test\requirements.txt -r integration\requirements.txt
displayName: test dependencies
- script: maturin.exe build --release --features=no-panic,unstable-simd,yyjson --strip --interpreter $(interpreter) --target $(target)
displayName: build
env:
CFLAGS: "-Os -flto"
LDFLAGS: "-Wl,--as-needed"
RUSTFLAGS: "-C lto=fat -Z mir-opt-level=4 -D warnings"
CARGO_UNSTABLE_SPARSE_REGISTRY: "true"
UNSAFE_PYO3_SKIP_VERSION_CHECK: "1"
- script: python.exe -m pip install orjson --no-index --find-links=D:\a\1\s\target\wheels
displayName: install
- script: python.exe -m pytest -s -rxX -v test
env:
PYTHONMALLOC: "debug"
displayName: pytest
- script: python.exe integration\thread
displayName: thread
- script: python.exe integration\init
displayName: init
- bash: ./ci/deploy /d/a/1/s/target/wheels/*.whl
displayName: deploy
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/pydataclass | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import dataclasses
import io
import json
import os
from timeit import timeit
from typing import List
import rapidjson
import simplejson
import ujson
from tabulate import tabulate
import orjson
os.sched_setaffinity(os.getpid(), {0, 1})
@dataclasses.dataclass
class Member:
id: int
active: bool
@dataclasses.dataclass
class Object:
id: int
name: str
members: List[Member]
objects_as_dataclass = [
Object(i, str(i) * 3, [Member(j, True) for j in range(0, 10)])
for i in range(100000, 102000)
]
objects_as_dict = [dataclasses.asdict(each) for each in objects_as_dataclass]
output_in_kib = len(orjson.dumps(objects_as_dict)) / 1024
print(f"{output_in_kib:,.0f}KiB output (orjson)")
def default(__obj):
if dataclasses.is_dataclass(__obj):
return dataclasses.asdict(__obj)
headers = ("Library", "dict (ms)", "dataclass (ms)", "vs. orjson")
LIBRARIES = ("orjson", "ujson", "rapidjson", "simplejson", "json")
ITERATIONS = 100
def per_iter_latency(val):
if val is None:
return None
return (val * 1000) / ITERATIONS
table = []
for lib_name in LIBRARIES:
if lib_name == "json":
as_dict = timeit(
lambda: json.dumps(objects_as_dict).encode("utf-8"),
number=ITERATIONS,
)
as_dataclass = timeit(
lambda: json.dumps(objects_as_dataclass, default=default).encode("utf-8"),
number=ITERATIONS,
)
elif lib_name == "simplejson":
as_dict = timeit(
lambda: simplejson.dumps(objects_as_dict).encode("utf-8"),
number=ITERATIONS,
)
as_dataclass = timeit(
lambda: simplejson.dumps(objects_as_dataclass, default=default).encode(
"utf-8"
),
number=ITERATIONS,
)
elif lib_name == "ujson":
as_dict = timeit(
lambda: ujson.dumps(objects_as_dict).encode("utf-8"),
number=ITERATIONS,
)
as_dataclass = None
elif lib_name == "rapidjson":
as_dict = timeit(
lambda: rapidjson.dumps(objects_as_dict).encode("utf-8"),
number=ITERATIONS,
)
as_dataclass = timeit(
lambda: rapidjson.dumps(objects_as_dataclass, default=default).encode(
"utf-8"
),
number=ITERATIONS,
)
elif lib_name == "orjson":
as_dict = timeit(lambda: orjson.dumps(objects_as_dict), number=ITERATIONS)
as_dataclass = timeit(
lambda: orjson.dumps(
objects_as_dataclass, None, orjson.OPT_SERIALIZE_DATACLASS
),
number=ITERATIONS,
)
orjson_as_dataclass = per_iter_latency(as_dataclass)
else:
raise NotImplementedError
as_dict = per_iter_latency(as_dict)
as_dataclass = per_iter_latency(as_dataclass)
if lib_name == "orjson":
compared_to_orjson = 1
elif as_dataclass:
compared_to_orjson = int(as_dataclass / orjson_as_dataclass)
else:
compared_to_orjson = None
table.append(
(
lib_name,
f"{as_dict:,.2f}" if as_dict else "",
f"{as_dataclass:,.2f}" if as_dataclass else "",
f"{compared_to_orjson:d}" if compared_to_orjson else "",
)
)
buf = io.StringIO()
buf.write(tabulate(table, headers, tablefmt="github"))
buf.write("\n")
print(buf.getvalue())
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/profile | #!/bin/sh -e
# usage: ./profile data/citm_catalog.json.xz loads
perf record -g --delay 250 ./bench/run_func "$@"
perf report --percent-limit 0.1
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/pyindent | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import io
import json
import lzma
import os
import sys
from pathlib import Path
from timeit import timeit
import rapidjson
import simplejson
import ujson
from tabulate import tabulate
import orjson
os.sched_setaffinity(os.getpid(), {0, 1})
dirname = os.path.join(os.path.dirname(__file__), "..", "data")
def read_fixture_obj(filename):
# Load a benchmark fixture from ../data, transparently decompressing
# .xz files, and return it parsed with orjson.
path = Path(dirname, filename)
if path.suffix == ".xz":
contents = lzma.decompress(path.read_bytes())
else:
contents = path.read_bytes()
return orjson.loads(contents)
# argv[0] is the script path, so a fixture argument exists only when
# len(sys.argv) >= 2 (the original `>= 1` was always true and would
# IndexError on argv[1] when no argument was given).
filename = sys.argv[1] if len(sys.argv) >= 2 else ""
# NOTE(review): the dumped source read `f"(unknown).json.xz"`, which ignores
# the argument entirely; this restores the evident `{filename}` template.
data = read_fixture_obj(f"{filename}.json.xz")
headers = ("Library", "compact (ms)", "pretty (ms)", "vs. orjson")
LIBRARIES = ("orjson", "ujson", "rapidjson", "simplejson", "json")
output_in_kib_compact = len(orjson.dumps(data)) / 1024
output_in_kib_pretty = len(orjson.dumps(data, option=orjson.OPT_INDENT_2)) / 1024
# minimum 2s runtime for orjson compact
ITERATIONS = int(2 / (timeit(lambda: orjson.dumps(data), number=20) / 20))
print(
f"{output_in_kib_compact:,.0f}KiB compact, {output_in_kib_pretty:,.0f}KiB pretty, {ITERATIONS} iterations"
)
def per_iter_latency(val):
if val is None:
return None
return (val * 1000) / ITERATIONS
def test_correctness(serialized):
return orjson.loads(serialized) == data
table = []
for lib_name in LIBRARIES:
print(f"{lib_name}...")
if lib_name == "json":
time_compact = timeit(
lambda: json.dumps(data).encode("utf-8"),
number=ITERATIONS,
)
time_pretty = timeit(
lambda: json.dumps(data, indent=2).encode("utf-8"),
number=ITERATIONS,
)
correct = test_correctness(json.dumps(data, indent=2).encode("utf-8"))
elif lib_name == "simplejson":
time_compact = timeit(
lambda: simplejson.dumps(data).encode("utf-8"),
number=ITERATIONS,
)
time_pretty = timeit(
lambda: simplejson.dumps(data, indent=2).encode("utf-8"),
number=ITERATIONS,
)
correct = test_correctness(simplejson.dumps(data, indent=2).encode("utf-8"))
elif lib_name == "ujson":
time_compact = timeit(
lambda: ujson.dumps(data).encode("utf-8"),
number=ITERATIONS,
)
time_pretty = timeit(
lambda: ujson.dumps(data, indent=2).encode("utf-8"),
number=ITERATIONS,
)
correct = test_correctness(ujson.dumps(data, indent=2).encode("utf-8"))
elif lib_name == "rapidjson":
time_compact = timeit(lambda: rapidjson.dumps(data), number=ITERATIONS)
time_pretty = timeit(lambda: rapidjson.dumps(data, indent=2), number=ITERATIONS)
correct = test_correctness(rapidjson.dumps(data, indent=2))
elif lib_name == "orjson":
time_compact = timeit(lambda: orjson.dumps(data), number=ITERATIONS)
time_pretty = timeit(
lambda: orjson.dumps(data, None, orjson.OPT_INDENT_2),
number=ITERATIONS,
)
correct = test_correctness(orjson.dumps(data, None, orjson.OPT_INDENT_2))
orjson_time_pretty = per_iter_latency(time_pretty)
else:
raise NotImplementedError
time_compact = per_iter_latency(time_compact)
if not correct:
time_pretty = None
else:
time_pretty = per_iter_latency(time_pretty)
if lib_name == "orjson":
compared_to_orjson = 1
elif time_pretty:
compared_to_orjson = time_pretty / orjson_time_pretty
else:
compared_to_orjson = None
table.append(
(
lib_name,
f"{time_compact:,.2f}" if time_compact else "",
f"{time_pretty:,.2f}" if time_pretty else "",
f"{compared_to_orjson:,.1f}" if compared_to_orjson else "",
)
)
buf = io.StringIO()
buf.write(tabulate(table, headers, tablefmt="github"))
buf.write("\n")
print(buf.getvalue())
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/pybench | #!/usr/bin/env bash
set -eou pipefail
pytest \
--verbose \
--benchmark-min-time=1 \
--benchmark-max-time=5 \
--benchmark-disable-gc \
--benchmark-autosave \
--benchmark-save-data \
--random-order \
"bench/benchmark_$1.py"
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/lint | #!/usr/bin/env bash
set -eou pipefail
to_lint="./bench/*.py ./pysrc/orjson/__init__.pyi ./test/*.py script/pydataclass script/pymem
script/pysort script/pynumpy script/pynonstr script/pycorrectness script/graph integration/init
integration/wsgi.py integration/typestubs.py integration/thread"
ruff check ${to_lint} --fix
ruff format ${to_lint}
mypy --ignore-missing-imports --check-untyped-defs ./bench/*.py ./pysrc/orjson/__init__.pyi ./test/*.py
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/pysort | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import io
import json
import lzma
import os
from pathlib import Path
from timeit import timeit
import rapidjson
import simplejson
import ujson
from tabulate import tabulate
import orjson
os.sched_setaffinity(os.getpid(), {0, 1})
dirname = os.path.join(os.path.dirname(__file__), "..", "data")
def read_fixture_obj(filename):
path = Path(dirname, filename)
if path.suffix == ".xz":
contents = lzma.decompress(path.read_bytes())
else:
contents = path.read_bytes()
return orjson.loads(contents)
data = read_fixture_obj("twitter.json.xz")
headers = ("Library", "unsorted (ms)", "sorted (ms)", "vs. orjson")
LIBRARIES = ("orjson", "ujson", "rapidjson", "simplejson", "json")
ITERATIONS = 500
def per_iter_latency(val):
if val is None:
return None
return (val * 1000) / ITERATIONS
table = []
for lib_name in LIBRARIES:
if lib_name == "json":
time_unsorted = timeit(
lambda: json.dumps(data).encode("utf-8"),
number=ITERATIONS,
)
time_sorted = timeit(
lambda: json.dumps(data, sort_keys=True).encode("utf-8"),
number=ITERATIONS,
)
elif lib_name == "simplejson":
time_unsorted = timeit(
lambda: simplejson.dumps(data).encode("utf-8"),
number=ITERATIONS,
)
time_sorted = timeit(
lambda: simplejson.dumps(data, sort_keys=True).encode("utf-8"),
number=ITERATIONS,
)
elif lib_name == "ujson":
time_unsorted = timeit(
lambda: ujson.dumps(data).encode("utf-8"),
number=ITERATIONS,
)
time_sorted = timeit(
lambda: ujson.dumps(data, sort_keys=True).encode("utf-8"),
number=ITERATIONS,
)
elif lib_name == "rapidjson":
time_unsorted = timeit(
lambda: rapidjson.dumps(data).encode("utf-8"),
number=ITERATIONS,
)
time_sorted = timeit(
lambda: rapidjson.dumps(data, sort_keys=True).encode("utf-8"),
number=ITERATIONS,
)
elif lib_name == "orjson":
time_unsorted = timeit(lambda: orjson.dumps(data), number=ITERATIONS)
time_sorted = timeit(
lambda: orjson.dumps(data, None, orjson.OPT_SORT_KEYS),
number=ITERATIONS,
)
orjson_time_sorted = per_iter_latency(time_sorted)
else:
raise NotImplementedError
time_unsorted = per_iter_latency(time_unsorted)
time_sorted = per_iter_latency(time_sorted)
# Relative latency vs. orjson for the sorted case. Guard on time_sorted —
# the value actually divided — not time_unsorted (the original guard could
# divide None when a library benchmarked unsorted but not sorted output).
if lib_name == "orjson":
    compared_to_orjson = 1
elif time_sorted:
    compared_to_orjson = time_sorted / orjson_time_sorted
else:
    compared_to_orjson = None
table.append(
(
lib_name,
f"{time_unsorted:,.2f}" if time_unsorted else "",
f"{time_sorted:,.2f}" if time_sorted else "",
f"{compared_to_orjson:,.1f}" if compared_to_orjson else "",
)
)
buf = io.StringIO()
buf.write(tabulate(table, headers, tablefmt="github"))
buf.write("\n")
print(buf.getvalue())
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/pynumpy | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import gc
import io
import json
import os
import sys
import time
from timeit import timeit
import numpy
import psutil
import rapidjson
import simplejson
from memory_profiler import memory_usage
from tabulate import tabulate
import orjson
os.sched_setaffinity(os.getpid(), {0, 1})
# argv[0] is the script path; a kind argument is present only when argv has
# at least two entries (the original `>= 1` check was always true and would
# IndexError on argv[1] when run with no argument, so the usage message
# below could never be reached).
kind = sys.argv[1] if len(sys.argv) >= 2 else ""
if kind == "float16":
dtype = numpy.float16
array = numpy.random.random(size=(50000, 100)).astype(dtype)
elif kind == "float32":
dtype = numpy.float32
array = numpy.random.random(size=(50000, 100)).astype(dtype)
elif kind == "float64":
dtype = numpy.float64
array = numpy.random.random(size=(50000, 100))
assert array.dtype == numpy.float64
elif kind == "bool":
dtype = numpy.bool_
array = numpy.random.choice((True, False), size=(100000, 200))
elif kind == "int8":
dtype = numpy.int8
array = numpy.random.randint(((2**7) - 1), size=(100000, 100), dtype=dtype)
elif kind == "int16":
dtype = numpy.int16
array = numpy.random.randint(((2**15) - 1), size=(100000, 100), dtype=dtype)
elif kind == "int32":
dtype = numpy.int32
array = numpy.random.randint(((2**31) - 1), size=(100000, 100), dtype=dtype)
elif kind == "uint8":
dtype = numpy.uint8
array = numpy.random.randint(((2**8) - 1), size=(100000, 100), dtype=dtype)
elif kind == "uint16":
dtype = numpy.uint16
array = numpy.random.randint(((2**16) - 1), size=(100000, 100), dtype=dtype)
elif kind == "uint32":
dtype = numpy.uint32
array = numpy.random.randint(((2**31) - 1), size=(100000, 100), dtype=dtype)
else:
print(
"usage: pynumpy (bool|int16|int32|float16|float32|float64|int8|uint8|uint16|uint32)"
)
sys.exit(1)
proc = psutil.Process()
def default(__obj):
if isinstance(__obj, numpy.ndarray):
return __obj.tolist()
raise TypeError
headers = ("Library", "Latency (ms)", "RSS diff (MiB)", "vs. orjson")
LIBRARIES = ("orjson", "ujson", "rapidjson", "simplejson", "json")
ITERATIONS = 10
def orjson_dumps():
return orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY)
ujson_dumps = None
def rapidjson_dumps():
return rapidjson.dumps(array, default=default).encode("utf-8")
def simplejson_dumps():
return simplejson.dumps(array, default=default).encode("utf-8")
def json_dumps():
return json.dumps(array, default=default).encode("utf-8")
output_in_mib = len(orjson_dumps()) / 1024 / 1024
print(f"{output_in_mib:,.1f}MiB {kind} output (orjson)")
gc.collect()
mem_before = proc.memory_full_info().rss / 1024 / 1024
def per_iter_latency(val):
if val is None:
return None
return (val * 1000) / ITERATIONS
def test_correctness(func):
return numpy.array_equal(array, numpy.array(orjson.loads(func()), dtype=dtype))
table = []
for lib_name in LIBRARIES:
gc.collect()
print(f"{lib_name}...")
func = locals()[f"{lib_name}_dumps"]
if func is None:
total_latency = None
latency = None
mem = None
correct = False
else:
total_latency = timeit(
func,
number=ITERATIONS,
)
latency = per_iter_latency(total_latency)
time.sleep(1)
mem = max(memory_usage((func,), interval=0.001, timeout=latency * 2))
correct = test_correctness(func)
if lib_name == "orjson":
compared_to_orjson = 1
orjson_latency = latency
elif latency:
compared_to_orjson = latency / orjson_latency
else:
compared_to_orjson = None
if not correct:
latency = None
mem = 0
mem_diff = mem - mem_before
table.append(
(
lib_name,
f"{latency:,.0f}" if latency else "",
f"{mem_diff:,.0f}" if mem else "",
f"{compared_to_orjson:,.1f}" if (latency and compared_to_orjson) else "",
)
)
buf = io.StringIO()
buf.write(tabulate(table, headers, tablefmt="github"))
buf.write("\n")
print(buf.getvalue())
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/valgrind | #!/usr/bin/env bash
set -eou pipefail
valgrind pytest -v --ignore=test/test_memory.py test
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/vendor-yyjson | #!/usr/bin/env bash
set -eou pipefail
yyjson_version="b21c02904188add942d3c7cd4885422e4335f115"
curl -Ls -o include/yyjson/yyjson.c "https://raw.githubusercontent.com/ibireme/yyjson/${yyjson_version}/src/yyjson.c"
curl -Ls -o include/yyjson/yyjson.h "https://raw.githubusercontent.com/ibireme/yyjson/${yyjson_version}/src/yyjson.h"
sed -i 's/yyjson_api_inline void yyjson_doc_free/yyjson_api void yyjson_doc_free/g' include/yyjson/yyjson.h
sed -i 's/(flg & (YYJSON_READ_NUMBER_AS_RAW | YYJSON_READ_BIGNUM_AS_RAW)) != 0/false/g' include/yyjson/yyjson.c
sed -i 's/if (pre)/if (false)/g' include/yyjson/yyjson.c
sed -i 's/!false/true/g' include/yyjson/yyjson.c
sed -i 's/ && true//g' include/yyjson/yyjson.c
sed -i 's/true && //g' include/yyjson/yyjson.c
sed -i 's/unlikely(false)/false/g' include/yyjson/yyjson.c
sed -i 's/YYJSON_TYPE_STR | YYJSON_SUBTYPE_NOESC/YYJSON_TYPE_STR/g' include/yyjson/yyjson.c
sed -i 's/unlikely(pos == src)/false/g' include/yyjson/yyjson.c
sed -i 's/ yyjson_read_err dummy_err;//g' include/yyjson/yyjson.c
sed -i 's/ if (!err) err = &dummy_err;//g' include/yyjson/yyjson.c
sed -i 's/likely(!alc_ptr)/!alc_ptr/g' include/yyjson/yyjson.c
sed -i 's/unlikely(read_flag_eq(flg, YYJSON_READ_##_flag))/false/g' include/yyjson/yyjson.c
sed -i 's/has_read_flag(ALLOW_INF_AND_NAN)/false/g' include/yyjson/yyjson.c
sed -i 's/has_read_flag(ALLOW_COMMENTS)/false/g' include/yyjson/yyjson.c
sed -i 's/has_read_flag(BIGNUM_AS_RAW)/false/g' include/yyjson/yyjson.c
sed -i 's/if (pre && \*pre)/if (false)/g' include/yyjson/yyjson.c
sed -i 's/(pre && !false)/(false)/g' include/yyjson/yyjson.c
git apply include/yyjson-recursion-limit.patch
git apply include/yyjson-reduce-unused.patch
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/develop | #!/bin/sh -e
rm -f target/wheels/*
export UNSAFE_PYO3_BUILD_FREE_THREADED=1
export UNSAFE_PYO3_SKIP_VERSION_CHECK=1
export CC="${CC:-clang}"
export LD="${LD:-lld}"
export TARGET="${TARGET:-x86_64-unknown-linux-gnu}"
export CARGO_TARGET_DIR="${CARGO_TARGET_DIR:-target}"
echo "CC: ${CC}, LD: ${LD}, LD_LIBRARY_PATH: ${LD_LIBRARY_PATH}"
export CFLAGS="-Os -fstrict-aliasing -fno-plt -flto=full -emit-llvm"
export LDFLAGS="-fuse-ld=${LD} -Wl,-plugin-opt=also-emit-llvm -Wl,--as-needed -Wl,-zrelro,-znow"
export RUSTFLAGS="-C linker=${CC} -C link-arg=-fuse-ld=${LD} -C linker-plugin-lto -C lto=fat -C link-arg=-Wl,-zrelro,-znow -Z mir-opt-level=4 -Z threads=8"
rm -f ${CARGO_TARGET_DIR}/wheels/*.whl
maturin build --target="${TARGET}" "$@"
uv pip install --link-mode=copy ${CARGO_TARGET_DIR}/wheels/*.whl
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/install-fedora | #!/usr/bin/env bash
set -eou pipefail
# export PYTHON=python3.11
# export PYTHON_PACKAGE=python3.11
# export RUST_TOOLCHAIN=nightly-2024-09-25
# export TARGET=x86_64-unknown-linux-gnu
# export VENV=.venv
# export CARGO_TARGET_DIR=/tmp/orjson
export VENV="${VENV:-.venv}"
export CARGO_TARGET_DIR="${CARGO_TARGET_DIR:-target}"
rm /etc/yum.repos.d/fedora-cisco-openh264.repo || true
dnf install --setopt=install_weak_deps=false -y rustup clang lld "${PYTHON_PACKAGE}"
rustup-init --default-toolchain "${RUST_TOOLCHAIN}-${TARGET}" --profile minimal --component rust-src -y
source "${HOME}/.cargo/env"
mkdir -p .cargo
cp ci/config.toml .cargo/config.toml
cargo fetch --target="${TARGET}" &
curl -LsSf https://astral.sh/uv/install.sh | sh
rm -rf "${VENV}"
uv venv --python "${PYTHON}" "${VENV}"
source "${VENV}/bin/activate"
uv pip install --upgrade "maturin>=1,<2" -r test/requirements.txt -r integration/requirements.txt
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/generate-yyjson | #!/usr/bin/env bash
set -eou pipefail
_repo="$(dirname "$(dirname "${BASH_SOURCE[0]}")")"
bindgen \
"${_repo}/include/yyjson/yyjson.h" \
--size_t-is-usize \
--disable-header-comment \
--no-derive-copy \
--no-derive-debug \
--no-doc-comments \
--no-layout-tests \
--allowlist-function=yyjson_alc_pool_init \
--allowlist-function=yyjson_doc_free \
--allowlist-function=yyjson_read_opts \
--allowlist-type=yyjson_alc \
--allowlist-type=yyjson_doc \
--allowlist-type=yyjson_read_code \
--allowlist-type=yyjson_read_err \
--allowlist-type=yyjson_val \
--allowlist-var=YYJSON_READ_NOFLAG \
--allowlist-var=YYJSON_READ_SUCCESS \
> "${_repo}/src/ffi/yyjson.rs"
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/pynonstr | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import datetime
import io
import json
import os
import random
from time import mktime
from timeit import timeit
import rapidjson
import simplejson
import ujson
from tabulate import tabulate
import orjson
os.sched_setaffinity(os.getpid(), {0, 1})
data_as_obj = []
for year in range(1920, 2020):
start = datetime.date(year, 1, 1)
array = [
(int(mktime((start + datetime.timedelta(days=i)).timetuple())), i + 1)
for i in range(0, 365)
]
array.append(("other", 0))
random.shuffle(array)
data_as_obj.append(dict(array))
data_as_str = orjson.loads(orjson.dumps(data_as_obj, option=orjson.OPT_NON_STR_KEYS))
headers = ("Library", "str keys (ms)", "int keys (ms)", "int keys sorted (ms)")
LIBRARIES = ("orjson", "ujson", "rapidjson", "simplejson", "json")
ITERATIONS = 500
output_in_kib = len(orjson.dumps(data_as_str)) / 1024
print(f"{output_in_kib:,.0f}KiB output (orjson)")
def per_iter_latency(val):
if val is None:
return None
return (val * 1000) / ITERATIONS
def test_correctness(serialized):
return orjson.loads(serialized) == data_as_str
table = []
for lib_name in LIBRARIES:
print(f"{lib_name}...")
if lib_name == "json":
time_as_str = timeit(
lambda: json.dumps(data_as_str).encode("utf-8"),
number=ITERATIONS,
)
time_as_obj = timeit(
lambda: json.dumps(data_as_obj).encode("utf-8"),
number=ITERATIONS,
)
time_as_obj_sorted = (
None # TypeError: '<' not supported between instances of 'str' and 'int'
)
correct = False
elif lib_name == "simplejson":
time_as_str = timeit(
lambda: simplejson.dumps(data_as_str).encode("utf-8"),
number=ITERATIONS,
)
time_as_obj = timeit(
lambda: simplejson.dumps(data_as_obj).encode("utf-8"),
number=ITERATIONS,
)
time_as_obj_sorted = timeit(
lambda: simplejson.dumps(data_as_obj, sort_keys=True).encode("utf-8"),
number=ITERATIONS,
)
correct = test_correctness(
simplejson.dumps(data_as_obj, sort_keys=True).encode("utf-8")
)
elif lib_name == "ujson":
time_as_str = timeit(
lambda: ujson.dumps(data_as_str).encode("utf-8"),
number=ITERATIONS,
)
time_as_obj = timeit(
lambda: ujson.dumps(data_as_obj).encode("utf-8"),
number=ITERATIONS,
)
time_as_obj_sorted = None # segfault
correct = False
elif lib_name == "rapidjson":
time_as_str = timeit(
lambda: rapidjson.dumps(data_as_str).encode("utf-8"),
number=ITERATIONS,
)
time_as_obj = None
time_as_obj_sorted = None
correct = False
elif lib_name == "orjson":
time_as_str = timeit(
lambda: orjson.dumps(data_as_str, None, orjson.OPT_NON_STR_KEYS),
number=ITERATIONS,
)
time_as_obj = timeit(
lambda: orjson.dumps(data_as_obj, None, orjson.OPT_NON_STR_KEYS),
number=ITERATIONS,
)
time_as_obj_sorted = timeit(
lambda: orjson.dumps(
data_as_obj, None, orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS
),
number=ITERATIONS,
)
correct = test_correctness(
orjson.dumps(
data_as_obj, None, orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS
)
)
else:
raise NotImplementedError
time_as_str = per_iter_latency(time_as_str)
time_as_obj = per_iter_latency(time_as_obj)
if not correct:
time_as_obj_sorted = None
else:
time_as_obj_sorted = per_iter_latency(time_as_obj_sorted)
table.append(
(
lib_name,
f"{time_as_str:,.2f}" if time_as_str else "",
f"{time_as_obj:,.2f}" if time_as_obj else "",
f"{time_as_obj_sorted:,.2f}" if time_as_obj_sorted else "",
)
)
buf = io.StringIO()
buf.write(tabulate(table, headers, tablefmt="github"))
buf.write("\n")
print(buf.getvalue())
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/pycorrectness | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import io
import json
import lzma
import os
from pathlib import Path
import rapidjson
import simplejson
import ujson
from tabulate import tabulate
import orjson
dirname = os.path.join(os.path.dirname(__file__), "..", "data")
LIBRARIES = ["orjson", "ujson", "rapidjson", "simplejson", "json"]
LIBRARY_FUNC_MAP = {
"orjson": orjson.loads,
"ujson": ujson.loads,
"rapidjson": rapidjson.loads,
"simplejson": simplejson.loads,
"json": json.loads,
}
def read_fixture_bytes(filename, subdir=None):
if subdir is None:
parts = (dirname, filename)
else:
parts = (dirname, subdir, filename)
path = Path(*parts)
if path.suffix == ".xz":
contents = lzma.decompress(path.read_bytes())
else:
contents = path.read_bytes()
return contents
PARSING = {
filename: read_fixture_bytes(filename, "parsing")
for filename in os.listdir("data/parsing")
}
JSONCHECKER = {
filename: read_fixture_bytes(filename, "jsonchecker")
for filename in os.listdir("data/jsonchecker")
}
RESULTS = collections.defaultdict(dict)
def test_passed(libname, fixture):
passed = []
loads = LIBRARY_FUNC_MAP[libname]
try:
passed.append(loads(fixture) == orjson.loads(fixture))
passed.append(
loads(fixture.decode("utf-8")) == orjson.loads(fixture.decode("utf-8"))
)
except Exception:
passed.append(False)
return all(passed)
def test_failed(libname, fixture):
rejected_as_bytes = False
loads = LIBRARY_FUNC_MAP[libname]
try:
loads(fixture)
except Exception:
rejected_as_bytes = True
rejected_as_str = False
try:
loads(fixture.decode("utf-8"))
except Exception:
rejected_as_str = True
return rejected_as_bytes and rejected_as_str
MISTAKEN_PASSES = {key: 0 for key in LIBRARIES}
MISTAKEN_FAILS = {key: 0 for key in LIBRARIES}
PASS_WHITELIST = ("fail01.json", "fail18.json")
def should_pass(filename):
return (
filename.startswith("y_")
or filename.startswith("pass")
or filename in PASS_WHITELIST
)
def should_fail(filename):
return (
filename.startswith("n_")
or filename.startswith("i_string")
or filename.startswith("i_object")
or filename.startswith("fail")
) and filename not in PASS_WHITELIST
for libname in LIBRARIES:
for fixture_set in (PARSING, JSONCHECKER):
for filename, fixture in fixture_set.items():
if should_pass(filename):
res = test_passed(libname, fixture)
RESULTS[filename][libname] = res
if not res:
MISTAKEN_PASSES[libname] += 1
elif should_fail(filename):
res = test_failed(libname, fixture)
RESULTS[filename][libname] = res
if not res:
MISTAKEN_FAILS[libname] += 1
elif filename.startswith("i_"):
continue
else:
raise NotImplementedError
FILENAMES = sorted(list(PARSING.keys()) + list(JSONCHECKER.keys()))
tab_results = []
for filename in FILENAMES:
entry = [
filename,
]
for libname in LIBRARIES:
try:
entry.append("ok" if RESULTS[filename][libname] else "fail")
except KeyError:
continue
tab_results.append(entry)
buf = io.StringIO()
buf.write(tabulate(tab_results, ["Fixture"] + LIBRARIES, tablefmt="github"))
buf.write("\n")
print(buf.getvalue())
failure_results = [
[libname, MISTAKEN_FAILS[libname], MISTAKEN_PASSES[libname]]
for libname in LIBRARIES
]
buf = io.StringIO()
buf.write(
tabulate(
failure_results,
[
"Library",
"Invalid JSON documents not rejected",
"Valid JSON documents not deserialized",
],
tablefmt="github",
)
)
buf.write("\n")
print(buf.getvalue())
num_results = len([each for each in tab_results if len(each) > 1])
print(f"{num_results} documents tested")
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/pymem | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import io
import subprocess
from tabulate import tabulate
buf = io.StringIO()
headers = ("Library", "import, read() RSS (MiB)", "loads() increase in RSS (MiB)")
LIBRARIES = ("orjson", "ujson", "rapidjson", "simplejson", "json")
FIXTURES = ("canada.json", "citm_catalog.json", "github.json", "twitter.json")
for fixture in sorted(FIXTURES, reverse=True):
table = []
buf.write("\n" + "#### " + fixture + "\n\n")
for lib_name in LIBRARIES:
proc = subprocess.Popen(
("bench/run_mem", f"data/{fixture}.xz", lib_name), stdout=subprocess.PIPE
)
output = proc.stdout.readline().decode("utf-8").strip().split(",")
mem_base = int(output[0]) / 1024 / 1024
mem_diff = int(output[1]) / 1024 / 1024
correct = bool(int(output[2]))
if correct:
table.append((lib_name, f"{mem_base:,.1f}", f"{mem_diff:,.1f}"))
else:
table.append((lib_name, "", ""))
buf.write(tabulate(table, headers, tablefmt="github") + "\n")
print(buf.getvalue())
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/pytest | #!/bin/sh -e
PYTHONMALLOC="debug" pytest -s test
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/graph | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import io
import math
import os
import pandas as pd
import seaborn
from matplotlib import pyplot
from tabulate import tabulate
import orjson
LIBRARIES = ("orjson", "ujson", "rapidjson", "simplejson", "json")
def aggregate():
benchmarks_dir = os.path.join(".benchmarks", os.listdir(".benchmarks")[0])
res = collections.defaultdict(dict)
for filename in os.listdir(benchmarks_dir):
with open(os.path.join(benchmarks_dir, filename), "r") as fileh:
data = orjson.loads(fileh.read())
for each in data["benchmarks"]:
res[each["group"]][each["extra_info"]["lib"]] = {
"data": [val * 1000 for val in each["stats"]["data"]],
"median": each["stats"]["median"] * 1000,
"ops": each["stats"]["ops"],
"correct": each["extra_info"]["correct"],
}
return res
def tab(obj):
buf = io.StringIO()
headers = (
"Library",
"Median latency (milliseconds)",
"Operations per second",
"Relative (latency)",
)
seaborn.set(rc={"figure.facecolor": (0, 0, 0, 0)})
seaborn.set_style("darkgrid")
barplot_data = []
for group, val in sorted(obj.items(), reverse=True):
buf.write("\n" + "#### " + group + "\n\n")
table = []
for lib in LIBRARIES:
correct = val[lib]["correct"]
table.append(
[
lib,
val[lib]["median"] if correct else None,
int(val[lib]["ops"]) if correct else None,
0,
]
)
barplot_data.append(
{
"operation": "deserialization"
if "deserialization" in group
else "serialization",
"group": group.strip("serialization")
.strip("deserialization")
.strip(),
"library": lib,
"latency": val[lib]["median"],
"operations": int(val[lib]["ops"]) if correct else None,
}
)
orjson_baseline = table[0][1]
for each in table:
each[3] = (
"%.1f" % (each[1] / orjson_baseline)
if isinstance(each[1], float)
else None
)
if group.startswith("github"):
each[1] = "%.2f" % each[1] if isinstance(each[1], float) else None
else:
each[1] = "%.1f" % each[1] if isinstance(each[1], float) else None
buf.write(tabulate(table, headers, tablefmt="github") + "\n")
for operation in ("deserialization", "serialization"):
per_op_data = list(
(each for each in barplot_data if each["operation"] == operation)
)
if not per_op_data:
continue
max_y = 0
json_baseline = {}
for each in per_op_data:
if each["group"] == "witter.json":
each["group"] = "twitter.json"
if each["library"] == "json":
json_baseline[each["group"]] = each["operations"]
for each in per_op_data:
relative = each["operations"] / json_baseline[each["group"]]
each["relative"] = relative
max_y = max(max_y, relative)
p = pd.DataFrame.from_dict(per_op_data)
p.groupby("group")
graph = seaborn.barplot(
p,
x="group",
y="relative",
orient="x",
hue="library",
errorbar="sd",
legend="brief",
)
graph.set_xlabel("Document")
graph.set_ylabel("Operations/second relative to stdlib json")
pyplot.title(operation)
# ensure Y range
max_y = int(math.ceil(max_y))
if max_y > 10 and max_y % 2 > 0:
max_y = max_y + 1
pyplot.gca().set_yticks(
list(
{1, max_y}.union(
set(int(y) for y in pyplot.gca().get_yticks() if int(y) <= max_y)
)
)
)
# print Y as percent
pyplot.gca().set_yticklabels([f"{x}x" for x in pyplot.gca().get_yticks()])
# reference for stdlib
pyplot.axhline(y=1, color="#999", linestyle="dashed")
pyplot.savefig(fname=f"doc/{operation}", dpi=300)
pyplot.close()
print(buf.getvalue())
tab(aggregate())
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/script/pybench-empty | #!/usr/bin/env bash
set -eou pipefail
pytest \
--verbose \
--benchmark-min-time=1 \
--benchmark-max-time=5 \
--benchmark-disable-gc \
--benchmark-autosave \
--benchmark-save-data \
--random-order \
-k orjson \
"bench/benchmark_empty.py"
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/integration/typestubs.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
import orjson
orjson.JSONDecodeError(msg="the_msg", doc="the_doc", pos=1)
orjson.dumps(orjson.Fragment(b"{}"))
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/integration/init | #!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import multiprocessing.pool
import sys
import orjson
NUM_PROC = 16
TEST_MESSAGE = "parallel import of orjson running..."
class Custom:
pass
def default(_):
return None
def func(_):
orjson.dumps(Custom(), option=orjson.OPT_SERIALIZE_NUMPY, default=default)
orjson.loads(b'{"a":1,"b":2,"c":3}')
def main():
sys.stdout.write(TEST_MESSAGE)
sys.stdout.flush()
with multiprocessing.pool.ThreadPool(processes=NUM_PROC) as pool:
pool.map(func, (i for i in range(0, NUM_PROC)))
sys.stdout.write(f"\r{TEST_MESSAGE} ok\n")
sys.stdout.flush()
if __name__ == "__main__":
main()
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/integration/requirements.txt | flask;sys_platform!="win"
gunicorn;sys_platform!="win"
httpx==0.24.1;sys_platform!="win"
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/integration/wsgi.py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
from datetime import datetime, timezone
from uuid import uuid4
from flask import Flask
import orjson
app = Flask(__name__)
NOW = datetime.now(timezone.utc)
@app.route("/")
def root():
data = {
"uuid": uuid4(),
"updated_at": NOW,
"data": [1, 2.2, None, True, False, orjson.Fragment(b"{}")],
}
payload = orjson.dumps(
data, option=orjson.OPT_NAIVE_UTC | orjson.OPT_OMIT_MICROSECONDS
)
return app.response_class(
response=payload,
status=200,
mimetype="application/json; charset=utf-8",
)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/integration/run | #!/usr/bin/env bash
set -eou pipefail
_dir="$(dirname "${BASH_SOURCE[0]}")"
to_run="${@:-thread http init}"
export PYTHONMALLOC="debug"
if [[ $to_run == *"thread"* ]]; then
"${_dir}"/thread
fi
if [[ $to_run == *"http"* ]]; then
"${_dir}"/http --daemon
sleep 2
"${_dir}"/client 8001
set +e
pkill -f 'wsgi:app' # pkill not present on all CI envs
set -e
fi
if [[ $to_run == *"typestubs"* ]]; then
python "${_dir}"/typestubs.py
mypy "${_dir}"/typestubs.py
fi
if [[ $to_run == *"init"* ]]; then
"${_dir}"/init
fi
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/integration/http | #!/usr/bin/env bash
set -e
_dir="$(dirname "${BASH_SOURCE[0]}")"
PYTHONPATH=${_dir} gunicorn --preload --bind localhost:8001 --workers 2 "$@" wsgi:app
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/integration/thread | #!/usr/bin/env python3
import sys
import traceback
from concurrent.futures import ThreadPoolExecutor
from operator import itemgetter
from threading import get_ident
import orjson
DATA = sorted(
[
{
"id": i,
"name": "90as90ji0123ioj2390as90as90",
"body": "哈哈89asu89as😊89as9as90jas-😋0apjzxiojzx89hq23n",
"score": 901290129.1,
"bool": True,
"int": 9832,
"none": None,
}
for i in range(10)
],
key=itemgetter("id"),
)
STATUS = 0
TEST_MESSAGE = "thread test running..."
sys.stdout.write(TEST_MESSAGE)
sys.stdout.flush()
def test_func(n):
try:
assert sorted(orjson.loads(orjson.dumps(DATA)), key=itemgetter("id")) == DATA
except Exception:
traceback.print_exc()
print("thread %s: %s dumps, loads ERROR" % (get_ident(), n))
with ThreadPoolExecutor(max_workers=4) as executor:
executor.map(test_func, range(50000), chunksize=1000)
executor.shutdown(wait=True)
if STATUS == 0:
sys.stdout.write(f"\r{TEST_MESSAGE} ok\n")
else:
sys.stdout.write(f"\r{TEST_MESSAGE} error\n")
sys.exit(STATUS)
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson | lc_public_repos/langsmith-sdk/vendor/orjson/integration/client | #!/usr/bin/env python3
import asyncio
import sys
import time
import httpx
port = sys.argv[1]
url = f"http://127.0.0.1:{port}"
timeout = httpx.Timeout(5.0)
client = httpx.AsyncClient(timeout=timeout)
stop_time = time.time() + 5
TEST_MESSAGE = "http test running..."
async def main():
sys.stdout.write(TEST_MESSAGE)
sys.stdout.flush()
count = 0
while time.time() < stop_time:
res = await client.get(url)
count += 1
sys.stdout.write(f"\r{TEST_MESSAGE} ok, {count} requests made\n")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.run(main())
loop.close()
|
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail13.json | {"Numbers cannot have leading zeroes": 013} |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail28.json | ["line\
break"] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail26.json | ["tab\ character\ in\ string\ "] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail27.json | ["line
break"] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail31.json | [0e+-1] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail18.json | [[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail08.json | ["Extra close"]] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail19.json | {"Missing colon" null} |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail25.json | [" tab character in string "] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail15.json | ["Illegal backslash escape: \x15"] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail30.json | [0e+] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail32.json | {"Comma instead if closing brace": true, |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail10.json | {"Extra value after close": true} "misplaced quoted value" |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail14.json | {"Numbers cannot be hex": 0x14} |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail07.json | ["Comma after the close"], |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail01.json | "A JSON payload should be an object or array, not a string." |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail09.json | {"Extra comma": true,} |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail23.json | ["Bad value", truth] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail17.json | ["Illegal backslash escape: \017"] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail22.json | ["Colon instead of comma": false] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/pass02.json | [[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail29.json | [0e] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail04.json | ["extra comma",] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/pass01.json | [
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E66,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"0123456789": "digit",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "" \u0022 %22 0x22 034 "",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066,
1e1,
0.1e1,
1e-1,
1e00,2e+00,2e-00
,"rosebud"] |
0 | lc_public_repos/langsmith-sdk/vendor/orjson/data | lc_public_repos/langsmith-sdk/vendor/orjson/data/jsonchecker/fail16.json | [\naked] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.