language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | wandb__wandb | wandb/sdk/data_types/saved_model.py | {
"start": 10981,
"end": 13616
} | class ____(_SavedModel[SavedModelObjType]):
_dep_py_files: list[str] | None = None
_dep_py_files_path: str | None = None
def __init__(
self,
obj_or_path: SavedModelObjType | str | pathlib.Path,
dep_py_files: list[str] | None = None,
):
super().__init__(obj_or_path)
if self.__class__ == _PicklingSavedModel:
raise TypeError(
"Cannot instantiate abstract _PicklingSavedModel class - please use SavedModel.init(...) instead."
)
if dep_py_files is not None and len(dep_py_files) > 0:
self._dep_py_files = dep_py_files
self._dep_py_files_path = os.path.abspath(
os.path.join(MEDIA_TMP.name, runid.generate_id())
)
os.makedirs(self._dep_py_files_path, exist_ok=True)
for extra_file in self._dep_py_files:
if os.path.isfile(extra_file):
shutil.copy(extra_file, self._dep_py_files_path)
elif os.path.isdir(extra_file):
shutil.copytree(
extra_file,
os.path.join(
self._dep_py_files_path, os.path.basename(extra_file)
),
)
else:
raise ValueError(f"Invalid dependency file: {extra_file}")
@classmethod
def from_json(cls, json_obj: dict, source_artifact: Artifact) -> Self:
backup_path = [p for p in sys.path]
if (
"dep_py_files_path" in json_obj
and json_obj["dep_py_files_path"] is not None
):
dl_path = _load_dir_from_artifact(
source_artifact, json_obj["dep_py_files_path"]
)
assert dl_path is not None
sys.path.append(dl_path)
inst = super().from_json(json_obj, source_artifact) # type: ignore
sys.path = backup_path
return inst # type: ignore
def to_json(self, run_or_artifact: wandb.Run | Artifact) -> dict:
json_obj = super().to_json(run_or_artifact)
assert isinstance(run_or_artifact, wandb.Artifact)
if self._dep_py_files_path is not None:
json_obj["dep_py_files_path"] = _add_deterministic_dir_to_artifact(
run_or_artifact,
self._dep_py_files_path,
os.path.join(".wb_data", "extra_files"),
)
return json_obj
def _get_torch() -> ModuleType:
return cast(
ModuleType,
util.get_module("torch", "ModelAdapter requires `torch`"),
)
| _PicklingSavedModel |
python | scrapy__scrapy | tests/mockserver/http_resources.py | {
"start": 7522,
"end": 7881
} | class ____(resource.Resource):
def render(self, request):
from twisted.internet import reactor
def response():
request.setHeader(b"Content-Length", b"20")
request.write(b"partial")
close_connection(request)
reactor.callLater(0, response)
return server.NOT_DONE_YET
| BrokenDownloadResource |
python | langchain-ai__langchain | libs/core/tests/unit_tests/runnables/test_runnable.py | {
"start": 6680,
"end": 121947
} | class ____(BaseRetriever):
@override
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
return [Document(page_content="foo"), Document(page_content="bar")]
@override
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> list[Document]:
return [Document(page_content="foo"), Document(page_content="bar")]
@pytest.mark.skipif(
PYDANTIC_VERSION_AT_LEAST_210,
reason=(
"Only test with most recent version of pydantic. "
"Pydantic introduced small fixes to generated JSONSchema on minor versions."
),
)
def test_schemas(snapshot: SnapshotAssertion) -> None:
fake = FakeRunnable() # str -> int
assert fake.get_input_jsonschema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert fake.get_output_jsonschema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
assert fake.get_config_jsonschema(include=["tags", "metadata", "run_name"]) == {
"properties": {
"metadata": {
"default": None,
"title": "Metadata",
"type": "object",
},
"run_name": {"default": None, "title": "Run Name", "type": "string"},
"tags": {
"default": None,
"items": {"type": "string"},
"title": "Tags",
"type": "array",
},
},
"title": "FakeRunnableConfig",
"type": "object",
}
fake_bound = FakeRunnable().bind(a="b") # str -> int
assert fake_bound.get_input_jsonschema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert fake_bound.get_output_jsonschema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
fake_w_fallbacks = FakeRunnable().with_fallbacks((fake,)) # str -> int
assert fake_w_fallbacks.get_input_jsonschema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert fake_w_fallbacks.get_output_jsonschema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
def typed_lambda_impl(x: str) -> int:
return len(x)
typed_lambda = RunnableLambda(typed_lambda_impl) # str -> int
assert typed_lambda.get_input_jsonschema() == {
"title": "typed_lambda_impl_input",
"type": "string",
}
assert typed_lambda.get_output_jsonschema() == {
"title": "typed_lambda_impl_output",
"type": "integer",
}
async def typed_async_lambda_impl(x: str) -> int:
return len(x)
typed_async_lambda: Runnable = RunnableLambda(typed_async_lambda_impl) # str -> int
assert typed_async_lambda.get_input_jsonschema() == {
"title": "typed_async_lambda_impl_input",
"type": "string",
}
assert typed_async_lambda.get_output_jsonschema() == {
"title": "typed_async_lambda_impl_output",
"type": "integer",
}
fake_ret = FakeRetriever() # str -> list[Document]
assert fake_ret.get_input_jsonschema() == {
"title": "FakeRetrieverInput",
"type": "string",
}
assert _normalize_schema(fake_ret.get_output_jsonschema()) == {
"$defs": {
"Document": {
"description": "Class for storing a piece of text and "
"associated metadata.\n"
"\n"
"!!! note\n"
" `Document` is for **retrieval workflows**, not chat I/O. For "
"sending text\n"
" to an LLM in a conversation, use message types from "
"`langchain.messages`.\n"
"\n"
"Example:\n"
" ```python\n"
" from langchain_core.documents import Document\n"
"\n"
" document = Document(\n"
' page_content="Hello, world!", '
'metadata={"source": "https://example.com"}\n'
" )\n"
" ```",
"properties": {
"id": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"default": None,
"title": "Id",
},
"metadata": {"title": "Metadata", "type": "object"},
"page_content": {"title": "Page Content", "type": "string"},
"type": {
"const": "Document",
"default": "Document",
"title": "Type",
},
},
"required": ["page_content"],
"title": "Document",
"type": "object",
}
},
"items": {"$ref": "#/$defs/Document"},
"title": "FakeRetrieverOutput",
"type": "array",
}
fake_llm = FakeListLLM(responses=["a"]) # str -> list[list[str]]
assert _schema(fake_llm.input_schema) == snapshot(name="fake_llm_input_schema")
assert _schema(fake_llm.output_schema) == {
"title": "FakeListLLMOutput",
"type": "string",
}
fake_chat = FakeListChatModel(responses=["a"]) # str -> list[list[str]]
assert _schema(fake_chat.input_schema) == snapshot(name="fake_chat_input_schema")
assert _schema(fake_chat.output_schema) == snapshot(name="fake_chat_output_schema")
chat_prompt = ChatPromptTemplate.from_messages(
[
MessagesPlaceholder(variable_name="history"),
("human", "Hello, how are you?"),
]
)
assert _normalize_schema(chat_prompt.get_input_jsonschema()) == snapshot(
name="chat_prompt_input_schema"
)
assert _normalize_schema(chat_prompt.get_output_jsonschema()) == snapshot(
name="chat_prompt_output_schema"
)
prompt = PromptTemplate.from_template("Hello, {name}!")
assert prompt.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
"required": ["name"],
}
assert _schema(prompt.output_schema) == snapshot(name="prompt_output_schema")
prompt_mapper = PromptTemplate.from_template("Hello, {name}!").map()
assert _normalize_schema(prompt_mapper.get_input_jsonschema()) == {
"$defs": {
"PromptInput": {
"properties": {"name": {"title": "Name", "type": "string"}},
"required": ["name"],
"title": "PromptInput",
"type": "object",
}
},
"default": None,
"items": {"$ref": "#/$defs/PromptInput"},
"title": "RunnableEach<PromptTemplate>Input",
"type": "array",
}
assert _schema(prompt_mapper.output_schema) == snapshot(
name="prompt_mapper_output_schema"
)
list_parser = CommaSeparatedListOutputParser()
assert _schema(list_parser.input_schema) == snapshot(
name="list_parser_input_schema"
)
assert _schema(list_parser.output_schema) == {
"title": "CommaSeparatedListOutputParserOutput",
"type": "array",
"items": {"type": "string"},
}
seq = prompt | fake_llm | list_parser
assert seq.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
"required": ["name"],
}
assert seq.get_output_jsonschema() == {
"type": "array",
"items": {"type": "string"},
"title": "CommaSeparatedListOutputParserOutput",
}
router: Runnable = RouterRunnable({})
assert _schema(router.input_schema) == {
"$ref": "#/definitions/RouterInput",
"definitions": {
"RouterInput": {
"description": "Router input.",
"properties": {
"input": {"title": "Input"},
"key": {"title": "Key", "type": "string"},
},
"required": ["key", "input"],
"title": "RouterInput",
"type": "object",
}
},
"title": "RouterRunnableInput",
}
assert router.get_output_jsonschema() == {"title": "RouterRunnableOutput"}
seq_w_map: Runnable = (
prompt
| fake_llm
| {
"original": RunnablePassthrough(input_type=str),
"as_list": list_parser,
"length": typed_lambda_impl,
}
)
assert seq_w_map.get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
"required": ["name"],
}
assert seq_w_map.get_output_jsonschema() == {
"title": "RunnableParallel<original,as_list,length>Output",
"type": "object",
"properties": {
"original": {"title": "Original", "type": "string"},
"length": {"title": "Length", "type": "integer"},
"as_list": {
"title": "As List",
"type": "array",
"items": {"type": "string"},
},
},
"required": ["original", "as_list", "length"],
}
# Add a test for schema of runnable assign
def foo(x: int) -> int:
return x
foo_ = RunnableLambda(foo)
assert foo_.assign(bar=lambda _: "foo").get_output_schema().model_json_schema() == {
"properties": {"bar": {"title": "Bar"}, "root": {"title": "Root"}},
"required": ["root", "bar"],
"title": "RunnableAssignOutput",
"type": "object",
}
def test_passthrough_assign_schema() -> None:
retriever = FakeRetriever() # str -> list[Document]
prompt = PromptTemplate.from_template("{context} {question}")
fake_llm = FakeListLLM(responses=["a"]) # str -> list[list[str]]
seq_w_assign: Runnable = (
RunnablePassthrough.assign(context=itemgetter("question") | retriever)
| prompt
| fake_llm
)
assert seq_w_assign.get_input_jsonschema() == {
"properties": {"question": {"title": "Question", "type": "string"}},
"title": "RunnableSequenceInput",
"type": "object",
"required": ["question"],
}
assert seq_w_assign.get_output_jsonschema() == {
"title": "FakeListLLMOutput",
"type": "string",
}
invalid_seq_w_assign: Runnable = (
RunnablePassthrough.assign(context=itemgetter("question") | retriever)
| fake_llm
)
# fallback to RunnableAssign.input_schema if next runnable doesn't have
# expected dict input_schema
assert invalid_seq_w_assign.get_input_jsonschema() == {
"properties": {"question": {"title": "Question"}},
"title": "RunnableParallel<context>Input",
"type": "object",
"required": ["question"],
}
def test_lambda_schemas(snapshot: SnapshotAssertion) -> None:
first_lambda = lambda x: x["hello"] # noqa: E731
assert RunnableLambda(first_lambda).get_input_jsonschema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {"hello": {"title": "Hello"}},
"required": ["hello"],
}
second_lambda = lambda x, y: (x["hello"], x["bye"], y["bah"]) # noqa: E731
assert RunnableLambda(second_lambda).get_input_jsonschema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {"hello": {"title": "Hello"}, "bye": {"title": "Bye"}},
"required": ["bye", "hello"],
}
def get_value(value): # type: ignore[no-untyped-def] # noqa: ANN001,ANN202
return value["variable_name"]
assert RunnableLambda(get_value).get_input_jsonschema() == {
"title": "get_value_input",
"type": "object",
"properties": {"variable_name": {"title": "Variable Name"}},
"required": ["variable_name"],
}
async def aget_value(value): # type: ignore[no-untyped-def] # noqa: ANN001,ANN202
return (value["variable_name"], value.get("another"))
assert RunnableLambda(aget_value).get_input_jsonschema() == {
"title": "aget_value_input",
"type": "object",
"properties": {
"another": {"title": "Another"},
"variable_name": {"title": "Variable Name"},
},
"required": ["another", "variable_name"],
}
async def aget_values(value): # type: ignore[no-untyped-def] # noqa: ANN001,ANN202
return {
"hello": value["variable_name"],
"bye": value["variable_name"],
"byebye": value["yo"],
}
assert RunnableLambda(aget_values).get_input_jsonschema() == {
"title": "aget_values_input",
"type": "object",
"properties": {
"variable_name": {"title": "Variable Name"},
"yo": {"title": "Yo"},
},
"required": ["variable_name", "yo"],
}
class InputType(TypedDict):
variable_name: str
yo: int
class OutputType(TypedDict):
hello: str
bye: str
byebye: int
async def aget_values_typed(value: InputType) -> OutputType:
return {
"hello": value["variable_name"],
"bye": value["variable_name"],
"byebye": value["yo"],
}
assert _normalize_schema(
RunnableLambda(
aget_values_typed # type: ignore[arg-type]
).get_input_jsonschema()
) == _normalize_schema(
{
"$defs": {
"InputType": {
"properties": {
"variable_name": {
"title": "Variable Name",
"type": "string",
},
"yo": {"title": "Yo", "type": "integer"},
},
"required": ["variable_name", "yo"],
"title": "InputType",
"type": "object",
}
},
"allOf": [{"$ref": "#/$defs/InputType"}],
"title": "aget_values_typed_input",
}
)
if PYDANTIC_VERSION_AT_LEAST_29:
assert _normalize_schema(
RunnableLambda(aget_values_typed).get_output_jsonschema() # type: ignore[arg-type]
) == snapshot(name="schema8")
def test_with_types_with_type_generics() -> None:
"""Verify that with_types works if we use things like list[int]."""
def foo(x: int) -> None:
"""Add one to the input."""
raise NotImplementedError
# Try specifying some
RunnableLambda(foo).with_types(
output_type=list[int], # type: ignore[arg-type]
input_type=list[int], # type: ignore[arg-type]
)
RunnableLambda(foo).with_types(
output_type=Sequence[int], # type: ignore[arg-type]
input_type=Sequence[int], # type: ignore[arg-type]
)
def test_schema_with_itemgetter() -> None:
"""Test runnable with itemgetter."""
foo: Runnable = RunnableLambda(itemgetter("hello"))
assert _schema(foo.input_schema) == {
"properties": {"hello": {"title": "Hello"}},
"required": ["hello"],
"title": "RunnableLambdaInput",
"type": "object",
}
prompt = ChatPromptTemplate.from_template("what is {language}?")
chain: Runnable = {"language": itemgetter("language")} | prompt
assert _schema(chain.input_schema) == {
"properties": {"language": {"title": "Language"}},
"required": ["language"],
"title": "RunnableParallel<language>Input",
"type": "object",
}
def test_schema_complex_seq() -> None:
prompt1 = ChatPromptTemplate.from_template("what is the city {person} is from?")
prompt2 = ChatPromptTemplate.from_template(
"what country is the city {city} in? respond in {language}"
)
model = FakeListChatModel(responses=[""])
chain1: Runnable = RunnableSequence(
prompt1, model, StrOutputParser(), name="city_chain"
)
assert chain1.name == "city_chain"
chain2: Runnable = (
{"city": chain1, "language": itemgetter("language")}
| prompt2
| model
| StrOutputParser()
)
assert chain2.get_input_jsonschema() == {
"title": "RunnableParallel<city,language>Input",
"type": "object",
"properties": {
"person": {"title": "Person", "type": "string"},
"language": {"title": "Language"},
},
"required": ["person", "language"],
}
assert chain2.get_output_jsonschema() == {
"title": "StrOutputParserOutput",
"type": "string",
}
assert chain2.with_types(input_type=str).get_input_jsonschema() == {
"title": "RunnableSequenceInput",
"type": "string",
}
assert chain2.with_types(input_type=int).get_output_jsonschema() == {
"title": "StrOutputParserOutput",
"type": "string",
}
class InputType(BaseModel):
person: str
assert chain2.with_types(input_type=InputType).get_input_jsonschema() == {
"title": "InputType",
"type": "object",
"properties": {"person": {"title": "Person", "type": "string"}},
"required": ["person"],
}
def test_configurable_fields(snapshot: SnapshotAssertion) -> None:
fake_llm = FakeListLLM(responses=["a"]) # str -> list[list[str]]
assert fake_llm.invoke("...") == "a"
fake_llm_configurable = fake_llm.configurable_fields(
responses=ConfigurableField(
id="llm_responses",
name="LLM Responses",
description="A list of fake responses for this LLM",
)
)
assert fake_llm_configurable.invoke("...") == "a"
if PYDANTIC_VERSION_AT_LEAST_29:
assert _normalize_schema(
fake_llm_configurable.get_config_jsonschema()
) == snapshot(name="schema2")
fake_llm_configured = fake_llm_configurable.with_config(
configurable={"llm_responses": ["b"]}
)
assert fake_llm_configured.invoke("...") == "b"
prompt = PromptTemplate.from_template("Hello, {name}!")
assert prompt.invoke({"name": "John"}) == StringPromptValue(text="Hello, John!")
prompt_configurable = prompt.configurable_fields(
template=ConfigurableField(
id="prompt_template",
name="Prompt Template",
description="The prompt template for this chain",
)
)
assert prompt_configurable.invoke({"name": "John"}) == StringPromptValue(
text="Hello, John!"
)
if PYDANTIC_VERSION_AT_LEAST_29:
assert _normalize_schema(
prompt_configurable.get_config_jsonschema()
) == snapshot(name="schema3")
prompt_configured = prompt_configurable.with_config(
configurable={"prompt_template": "Hello, {name}! {name}!"}
)
assert prompt_configured.invoke({"name": "John"}) == StringPromptValue(
text="Hello, John! John!"
)
assert prompt_configurable.with_config(
configurable={"prompt_template": "Hello {name} in {lang}"}
).get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"lang": {"title": "Lang", "type": "string"},
"name": {"title": "Name", "type": "string"},
},
"required": ["lang", "name"],
}
chain_configurable = prompt_configurable | fake_llm_configurable | StrOutputParser()
assert chain_configurable.invoke({"name": "John"}) == "a"
if PYDANTIC_VERSION_AT_LEAST_29:
assert _normalize_schema(
chain_configurable.get_config_jsonschema()
) == snapshot(name="schema4")
assert (
chain_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name} {lang}!",
"llm_responses": ["c"],
}
).invoke({"name": "John", "lang": "en"})
== "c"
)
assert chain_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name} {lang}!",
"llm_responses": ["c"],
}
).get_input_jsonschema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"lang": {"title": "Lang", "type": "string"},
"name": {"title": "Name", "type": "string"},
},
"required": ["lang", "name"],
}
chain_with_map_configurable: Runnable = prompt_configurable | {
"llm1": fake_llm_configurable | StrOutputParser(),
"llm2": fake_llm_configurable | StrOutputParser(),
"llm3": fake_llm.configurable_fields(
responses=ConfigurableField("other_responses")
)
| StrOutputParser(),
}
assert chain_with_map_configurable.invoke({"name": "John"}) == {
"llm1": "a",
"llm2": "a",
"llm3": "a",
}
if PYDANTIC_VERSION_AT_LEAST_29:
assert _normalize_schema(
chain_with_map_configurable.get_config_jsonschema()
) == snapshot(name="schema5")
assert chain_with_map_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name}!",
"llm_responses": ["c"],
"other_responses": ["d"],
}
).invoke({"name": "John"}) == {"llm1": "c", "llm2": "c", "llm3": "d"}
def test_configurable_alts_factory() -> None:
fake_llm = FakeListLLM(responses=["a"]).configurable_alternatives(
ConfigurableField(id="llm", name="LLM"),
chat=partial(FakeListLLM, responses=["b"]),
)
assert fake_llm.invoke("...") == "a"
assert fake_llm.with_config(configurable={"llm": "chat"}).invoke("...") == "b"
def test_configurable_fields_prefix_keys(snapshot: SnapshotAssertion) -> None:
fake_chat = FakeListChatModel(responses=["b"]).configurable_fields(
responses=ConfigurableFieldMultiOption(
id="responses",
name="Chat Responses",
options={
"hello": "A good morning to you!",
"bye": "See you later!",
"helpful": "How can I help you?",
},
default=["hello", "bye"],
),
# (sleep is a configurable field in FakeListChatModel)
sleep=ConfigurableField(
id="chat_sleep",
is_shared=True,
),
)
fake_llm = (
FakeListLLM(responses=["a"])
.configurable_fields(
responses=ConfigurableField(
id="responses",
name="LLM Responses",
description="A list of fake responses for this LLM",
)
)
.configurable_alternatives(
ConfigurableField(id="llm", name="LLM"),
chat=fake_chat | StrOutputParser(),
prefix_keys=True,
)
)
prompt = PromptTemplate.from_template("Hello, {name}!").configurable_fields(
template=ConfigurableFieldSingleOption(
id="prompt_template",
name="Prompt Template",
description="The prompt template for this chain",
options={
"hello": "Hello, {name}!",
"good_morning": "A very good morning to you, {name}!",
},
default="hello",
)
)
chain = prompt | fake_llm
if PYDANTIC_VERSION_AT_LEAST_29:
assert _normalize_schema(_schema(chain.config_schema())) == snapshot(
name="schema6"
)
def test_configurable_fields_example(snapshot: SnapshotAssertion) -> None:
fake_chat = FakeListChatModel(responses=["b"]).configurable_fields(
responses=ConfigurableFieldMultiOption(
id="chat_responses",
name="Chat Responses",
options={
"hello": "A good morning to you!",
"bye": "See you later!",
"helpful": "How can I help you?",
},
default=["hello", "bye"],
)
)
fake_llm = (
FakeListLLM(responses=["a"])
.configurable_fields(
responses=ConfigurableField(
id="llm_responses",
name="LLM Responses",
description="A list of fake responses for this LLM",
)
)
.configurable_alternatives(
ConfigurableField(id="llm", name="LLM"),
chat=fake_chat | StrOutputParser(),
)
)
prompt = PromptTemplate.from_template("Hello, {name}!").configurable_fields(
template=ConfigurableFieldSingleOption(
id="prompt_template",
name="Prompt Template",
description="The prompt template for this chain",
options={
"hello": "Hello, {name}!",
"good_morning": "A very good morning to you, {name}!",
},
default="hello",
)
)
# deduplication of configurable fields
chain_configurable = prompt | fake_llm | (lambda x: {"name": x}) | prompt | fake_llm
assert chain_configurable.invoke({"name": "John"}) == "a"
if PYDANTIC_VERSION_AT_LEAST_29:
assert _normalize_schema(
chain_configurable.get_config_jsonschema()
) == snapshot(name="schema7")
assert (
chain_configurable.with_config(configurable={"llm": "chat"}).invoke(
{"name": "John"}
)
== "A good morning to you!"
)
assert (
chain_configurable.with_config(
configurable={"llm": "chat", "chat_responses": ["helpful"]}
).invoke({"name": "John"})
== "How can I help you?"
)
def test_passthrough_tap(mocker: MockerFixture) -> None:
fake = FakeRunnable()
mock = mocker.Mock()
seq: Runnable = RunnablePassthrough(mock) | fake | RunnablePassthrough(mock)
assert seq.invoke("hello", my_kwarg="value") == 5
assert mock.call_args_list == [
mocker.call("hello", my_kwarg="value"),
mocker.call(5),
]
mock.reset_mock()
assert seq.batch(["hello", "byebye"], my_kwarg="value") == [5, 6]
assert len(mock.call_args_list) == 4
for call in [
mocker.call("hello", my_kwarg="value"),
mocker.call("byebye", my_kwarg="value"),
mocker.call(5),
mocker.call(6),
]:
assert call in mock.call_args_list
mock.reset_mock()
assert seq.batch(["hello", "byebye"], my_kwarg="value", return_exceptions=True) == [
5,
6,
]
assert len(mock.call_args_list) == 4
for call in [
mocker.call("hello", my_kwarg="value"),
mocker.call("byebye", my_kwarg="value"),
mocker.call(5),
mocker.call(6),
]:
assert call in mock.call_args_list
mock.reset_mock()
assert sorted(
a
for a in seq.batch_as_completed(
["hello", "byebye"], my_kwarg="value", return_exceptions=True
)
) == [
(0, 5),
(1, 6),
]
assert len(mock.call_args_list) == 4
for call in [
mocker.call("hello", my_kwarg="value"),
mocker.call("byebye", my_kwarg="value"),
mocker.call(5),
mocker.call(6),
]:
assert call in mock.call_args_list
mock.reset_mock()
assert list(
seq.stream("hello", {"metadata": {"key": "value"}}, my_kwarg="value")
) == [5]
assert mock.call_args_list == [
mocker.call("hello", my_kwarg="value"),
mocker.call(5),
]
mock.reset_mock()
async def test_passthrough_tap_async(mocker: MockerFixture) -> None:
fake = FakeRunnable()
mock = mocker.Mock()
seq: Runnable = RunnablePassthrough(mock) | fake | RunnablePassthrough(mock)
assert await seq.ainvoke("hello", my_kwarg="value") == 5
assert mock.call_args_list == [
mocker.call("hello", my_kwarg="value"),
mocker.call(5),
]
mock.reset_mock()
assert await seq.abatch(["hello", "byebye"], my_kwarg="value") == [5, 6]
assert len(mock.call_args_list) == 4
for call in [
mocker.call("hello", my_kwarg="value"),
mocker.call("byebye", my_kwarg="value"),
mocker.call(5),
mocker.call(6),
]:
assert call in mock.call_args_list
mock.reset_mock()
assert await seq.abatch(
["hello", "byebye"], my_kwarg="value", return_exceptions=True
) == [
5,
6,
]
assert len(mock.call_args_list) == 4
for call in [
mocker.call("hello", my_kwarg="value"),
mocker.call("byebye", my_kwarg="value"),
mocker.call(5),
mocker.call(6),
]:
assert call in mock.call_args_list
mock.reset_mock()
assert sorted(
[
a
async for a in seq.abatch_as_completed(
["hello", "byebye"], my_kwarg="value", return_exceptions=True
)
]
) == [
(0, 5),
(1, 6),
]
assert len(mock.call_args_list) == 4
for call in [
mocker.call("hello", my_kwarg="value"),
mocker.call("byebye", my_kwarg="value"),
mocker.call(5),
mocker.call(6),
]:
assert call in mock.call_args_list
mock.reset_mock()
assert [
part
async for part in seq.astream(
"hello", {"metadata": {"key": "value"}}, my_kwarg="value"
)
] == [5]
assert mock.call_args_list == [
mocker.call("hello", my_kwarg="value"),
mocker.call(5),
]
async def test_with_config_metadata_passthrough(mocker: MockerFixture) -> None:
fake = FakeRunnableSerializable()
spy = mocker.spy(fake.__class__, "invoke")
fakew = fake.configurable_fields(hello=ConfigurableField(id="hello", name="Hello"))
assert (
fakew.with_config(tags=["a-tag"]).invoke(
"hello",
{
"configurable": {"hello": "there", "__secret_key": "nahnah"},
"metadata": {"bye": "now"},
},
)
== 5
)
assert spy.call_args_list[0].args[1:] == (
"hello",
{
"tags": ["a-tag"],
"callbacks": None,
"recursion_limit": 25,
"configurable": {"hello": "there", "__secret_key": "nahnah"},
"metadata": {"hello": "there", "bye": "now"},
},
)
spy.reset_mock()
def test_with_config(mocker: MockerFixture) -> None:
fake = FakeRunnable()
spy = mocker.spy(fake, "invoke")
assert fake.with_config(tags=["a-tag"]).invoke("hello") == 5
assert spy.call_args_list == [
mocker.call(
"hello",
{"tags": ["a-tag"], "metadata": {}, "configurable": {}},
),
]
spy.reset_mock()
fake_1: Runnable = RunnablePassthrough()
fake_2: Runnable = RunnablePassthrough()
spy_seq_step = mocker.spy(fake_1.__class__, "invoke")
sequence = fake_1.with_config(tags=["a-tag"]) | fake_2.with_config(
tags=["b-tag"], max_concurrency=5
)
assert sequence.invoke("hello") == "hello"
assert len(spy_seq_step.call_args_list) == 2
for i, call in enumerate(spy_seq_step.call_args_list):
assert call.args[1] == "hello"
if i == 0:
assert call.args[2].get("tags") == ["a-tag"]
assert call.args[2].get("max_concurrency") is None
else:
assert call.args[2].get("tags") == ["b-tag"]
assert call.args[2].get("max_concurrency") == 5
mocker.stop(spy_seq_step)
assert [
*fake.with_config(tags=["a-tag"]).stream(
"hello", {"metadata": {"key": "value"}}
)
] == [5]
assert spy.call_args_list == [
mocker.call(
"hello",
{"tags": ["a-tag"], "metadata": {"key": "value"}, "configurable": {}},
),
]
spy.reset_mock()
assert fake.with_config(recursion_limit=5).batch(
["hello", "wooorld"], [{"tags": ["a-tag"]}, {"metadata": {"key": "value"}}]
) == [5, 7]
assert len(spy.call_args_list) == 2
for i, call in enumerate(
sorted(spy.call_args_list, key=lambda x: 0 if x.args[0] == "hello" else 1)
):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
if i == 0:
assert call.args[1].get("recursion_limit") == 5
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {}
else:
assert call.args[1].get("recursion_limit") == 5
assert call.args[1].get("tags") == []
assert call.args[1].get("metadata") == {"key": "value"}
spy.reset_mock()
assert sorted(
c
for c in fake.with_config(recursion_limit=5).batch_as_completed(
["hello", "wooorld"],
[{"tags": ["a-tag"]}, {"metadata": {"key": "value"}}],
)
) == [(0, 5), (1, 7)]
assert len(spy.call_args_list) == 2
for i, call in enumerate(
sorted(spy.call_args_list, key=lambda x: 0 if x.args[0] == "hello" else 1)
):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
if i == 0:
assert call.args[1].get("recursion_limit") == 5
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {}
else:
assert call.args[1].get("recursion_limit") == 5
assert call.args[1].get("tags") == []
assert call.args[1].get("metadata") == {"key": "value"}
spy.reset_mock()
assert fake.with_config(metadata={"a": "b"}).batch(
["hello", "wooorld"], {"tags": ["a-tag"]}
) == [5, 7]
assert len(spy.call_args_list) == 2
for i, call in enumerate(spy.call_args_list):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {"a": "b"}
spy.reset_mock()
assert sorted(
c for c in fake.batch_as_completed(["hello", "wooorld"], {"tags": ["a-tag"]})
) == [(0, 5), (1, 7)]
assert len(spy.call_args_list) == 2
for i, call in enumerate(spy.call_args_list):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
assert call.args[1].get("tags") == ["a-tag"]
async def test_with_config_async(mocker: MockerFixture) -> None:
fake = FakeRunnable()
spy = mocker.spy(fake, "invoke")
handler = ConsoleCallbackHandler()
assert (
await fake.with_config(metadata={"a": "b"}).ainvoke(
"hello", config={"callbacks": [handler]}
)
== 5
)
assert spy.call_args_list == [
mocker.call(
"hello",
{
"callbacks": [handler],
"metadata": {"a": "b"},
"configurable": {},
"tags": [],
},
),
]
spy.reset_mock()
assert [
part async for part in fake.with_config(metadata={"a": "b"}).astream("hello")
] == [5]
assert spy.call_args_list == [
mocker.call("hello", {"metadata": {"a": "b"}, "tags": [], "configurable": {}}),
]
spy.reset_mock()
assert await fake.with_config(recursion_limit=5, tags=["c"]).abatch(
["hello", "wooorld"], {"metadata": {"key": "value"}}
) == [
5,
7,
]
assert sorted(spy.call_args_list) == [
mocker.call(
"hello",
{
"metadata": {"key": "value"},
"tags": ["c"],
"callbacks": None,
"recursion_limit": 5,
"configurable": {},
},
),
mocker.call(
"wooorld",
{
"metadata": {"key": "value"},
"tags": ["c"],
"callbacks": None,
"recursion_limit": 5,
"configurable": {},
},
),
]
spy.reset_mock()
assert sorted(
[
c
async for c in fake.with_config(
recursion_limit=5, tags=["c"]
).abatch_as_completed(["hello", "wooorld"], {"metadata": {"key": "value"}})
]
) == [
(0, 5),
(1, 7),
]
assert len(spy.call_args_list) == 2
first_call = next(call for call in spy.call_args_list if call.args[0] == "hello")
assert first_call == mocker.call(
"hello",
{
"metadata": {"key": "value"},
"tags": ["c"],
"callbacks": None,
"recursion_limit": 5,
"configurable": {},
},
)
second_call = next(call for call in spy.call_args_list if call.args[0] == "wooorld")
assert second_call == mocker.call(
"wooorld",
{
"metadata": {"key": "value"},
"tags": ["c"],
"callbacks": None,
"recursion_limit": 5,
"configurable": {},
},
)
def test_default_method_implementations(mocker: MockerFixture) -> None:
    """Sync default methods (``stream``/``batch``) delegate to ``invoke``.

    ``stream`` must forward its config unchanged, and ``batch`` must pair each
    input with its own config (list form) or the shared config (single form).
    """
    fake = FakeRunnable()
    spy = mocker.spy(fake, "invoke")
    assert fake.invoke("hello", {"tags": ["a-tag"]}) == 5
    assert spy.call_args_list == [
        mocker.call("hello", {"tags": ["a-tag"]}),
    ]
    spy.reset_mock()
    # stream yields the single invoke result and passes the config through.
    assert [*fake.stream("hello", {"metadata": {"key": "value"}})] == [5]
    assert spy.call_args_list == [
        mocker.call("hello", {"metadata": {"key": "value"}}),
    ]
    spy.reset_mock()
    # batch with per-input configs: each input keeps its own config.
    assert fake.batch(
        ["hello", "wooorld"], [{"tags": ["a-tag"]}, {"metadata": {"key": "value"}}]
    ) == [5, 7]
    assert len(spy.call_args_list) == 2
    # Expected (tags, metadata) per input. Checking the key set also verifies
    # each input was seen exactly once -- the previous if/else would have
    # passed even if "hello" had been invoked twice, and it contained a
    # tautological `assert call_arg == "hello"` inside the matching branch.
    expected = {
        "hello": (["a-tag"], {}),
        "wooorld": ([], {"key": "value"}),
    }
    assert {call.args[0] for call in spy.call_args_list} == set(expected)
    for call in spy.call_args_list:
        tags, metadata = expected[call.args[0]]
        assert call.args[1].get("tags") == tags
        assert call.args[1].get("metadata") == metadata
    spy.reset_mock()
    # batch with a single shared config: every input gets the same config.
    assert fake.batch(["hello", "wooorld"], {"tags": ["a-tag"]}) == [5, 7]
    assert len(spy.call_args_list) == 2
    assert {call.args[0] for call in spy.call_args_list} == {"hello", "wooorld"}
    for call in spy.call_args_list:
        assert call.args[1].get("tags") == ["a-tag"]
        assert call.args[1].get("metadata") == {}
async def test_default_method_implementations_async(mocker: MockerFixture) -> None:
    """Async default methods (ainvoke/astream/abatch) delegate to sync ``invoke``."""
    runnable = FakeRunnable()
    invoke_spy = mocker.spy(runnable, "invoke")
    # ainvoke forwards exactly the config it was given.
    result = await runnable.ainvoke("hello", config={"callbacks": []})
    assert result == 5
    assert invoke_spy.call_args_list == [mocker.call("hello", {"callbacks": []})]
    invoke_spy.reset_mock()
    # astream yields the single invoke result; no config was supplied.
    chunks = [part async for part in runnable.astream("hello")]
    assert chunks == [5]
    assert invoke_spy.call_args_list == [mocker.call("hello", None)]
    invoke_spy.reset_mock()
    # abatch fans out one invoke per input, each with a fully-populated config.
    outputs = await runnable.abatch(
        ["hello", "wooorld"], {"metadata": {"key": "value"}}
    )
    assert outputs == [5, 7]
    assert {recorded.args[0] for recorded in invoke_spy.call_args_list} == {
        "hello",
        "wooorld",
    }
    expected_config = {
        "metadata": {"key": "value"},
        "tags": [],
        "callbacks": None,
        "recursion_limit": 25,
        "configurable": {},
    }
    for recorded in invoke_spy.call_args_list:
        assert recorded.args[1] == expected_config
def test_prompt() -> None:
    """ChatPromptTemplate invoke/batch/stream all yield formatted ChatPromptValues."""
    template = ChatPromptTemplate.from_messages(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessagePromptTemplate.from_template("{question}"),
        ]
    )
    name_value = ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    color_value = ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your favorite color?"),
        ]
    )
    assert template.invoke({"question": "What is your name?"}) == name_value
    batched = template.batch(
        [
            {"question": "What is your name?"},
            {"question": "What is your favorite color?"},
        ]
    )
    assert batched == [name_value, color_value]
    # Streaming a prompt emits a single, fully-formed value.
    assert list(template.stream({"question": "What is your name?"})) == [name_value]
async def test_prompt_async() -> None:
    """Async prompt execution: ainvoke/abatch/astream and ``astream_log`` output.

    Also checks that ``astream_log(diff=False)`` accumulates the same state as
    applying the diff patches, and that logging still works when nested inside
    ``atrace_as_chain_group``.
    """
    prompt = ChatPromptTemplate.from_messages(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessagePromptTemplate.from_template("{question}"),
        ]
    )
    expected = ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    assert await prompt.ainvoke({"question": "What is your name?"}) == expected
    assert await prompt.abatch(
        [
            {"question": "What is your name?"},
            {"question": "What is your favorite color?"},
        ]
    ) == [
        expected,
        ChatPromptValue(
            messages=[
                SystemMessage(content="You are a nice assistant."),
                HumanMessage(content="What is your favorite color?"),
            ]
        ),
    ]
    assert [
        part async for part in prompt.astream({"question": "What is your name?"})
    ] == [expected]
    stream_log = [
        part async for part in prompt.astream_log({"question": "What is your name?"})
    ]
    # First patch replaces the whole (empty) run state; its run id is random,
    # so the fields are asserted individually instead of comparing the patch.
    assert len(stream_log[0].ops) == 1
    assert stream_log[0].ops[0]["op"] == "replace"
    assert stream_log[0].ops[0]["path"] == ""
    assert stream_log[0].ops[0]["value"]["logs"] == {}
    assert stream_log[0].ops[0]["value"]["final_output"] is None
    assert stream_log[0].ops[0]["value"]["streamed_output"] == []
    assert isinstance(stream_log[0].ops[0]["value"]["id"], str)
    assert stream_log[1:] == [
        RunLogPatch(
            {"op": "add", "path": "/streamed_output/-", "value": expected},
            {
                "op": "replace",
                "path": "/final_output",
                "value": ChatPromptValue(
                    messages=[
                        SystemMessage(content="You are a nice assistant."),
                        HumanMessage(content="What is your name?"),
                    ]
                ),
            },
        ),
    ]
    stream_log_state = [
        part
        async for part in prompt.astream_log(
            {"question": "What is your name?"}, diff=False
        )
    ]
    # remove random id
    stream_log[0].ops[0]["value"]["id"] = "00000000-0000-0000-0000-000000000000"
    stream_log_state[-1].ops[0]["value"]["id"] = "00000000-0000-0000-0000-000000000000"
    stream_log_state[-1].state["id"] = "00000000-0000-0000-0000-000000000000"
    # assert output with diff=False matches output with diff=True
    assert stream_log_state[-1].ops == [op for chunk in stream_log for op in chunk.ops]
    assert stream_log_state[-1] == RunLog(
        *[op for chunk in stream_log for op in chunk.ops],
        state={
            "final_output": ChatPromptValue(
                messages=[
                    SystemMessage(content="You are a nice assistant."),
                    HumanMessage(content="What is your name?"),
                ]
            ),
            "id": "00000000-0000-0000-0000-000000000000",
            "logs": {},
            "streamed_output": [
                ChatPromptValue(
                    messages=[
                        SystemMessage(content="You are a nice assistant."),
                        HumanMessage(content="What is your name?"),
                    ]
                )
            ],
            "type": "prompt",
            "name": "ChatPromptTemplate",
        },
    )
    # nested inside trace_with_chain_group
    async with atrace_as_chain_group("a_group") as manager:
        stream_log_nested = [
            part
            async for part in prompt.astream_log(
                {"question": "What is your name?"}, config={"callbacks": manager}
            )
        ]
    assert len(stream_log_nested[0].ops) == 1
    assert stream_log_nested[0].ops[0]["op"] == "replace"
    assert stream_log_nested[0].ops[0]["path"] == ""
    assert stream_log_nested[0].ops[0]["value"]["logs"] == {}
    assert stream_log_nested[0].ops[0]["value"]["final_output"] is None
    assert stream_log_nested[0].ops[0]["value"]["streamed_output"] == []
    assert isinstance(stream_log_nested[0].ops[0]["value"]["id"], str)
    assert stream_log_nested[1:] == [
        RunLogPatch(
            {"op": "add", "path": "/streamed_output/-", "value": expected},
            {
                "op": "replace",
                "path": "/final_output",
                "value": ChatPromptValue(
                    messages=[
                        SystemMessage(content="You are a nice assistant."),
                        HumanMessage(content="What is your name?"),
                    ]
                ),
            },
        ),
    ]
def test_prompt_template_params() -> None:
    """Extra input keys are ignored; missing ones raise ``KeyError``."""
    template = ChatPromptTemplate.from_template(
        "Respond to the following question: {question}"
    )
    # "topic" is not referenced by the template and is silently dropped.
    rendered = template.invoke({"question": "test", "topic": "test"})
    assert rendered == ChatPromptValue(
        messages=[HumanMessage(content="Respond to the following question: test")]
    )
    with pytest.raises(KeyError):
        template.invoke({})
def test_with_listeners(mocker: MockerFixture) -> None:
    """on_start/on_end listeners fire once per invocation, chain groups included."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    chat = FakeListChatModel(responses=["foo"])
    chain: Runnable = prompt | chat
    on_start = mocker.Mock()
    on_end = mocker.Mock()
    listened = chain.with_listeners(on_start=on_start, on_end=on_end)
    listened.invoke({"question": "Who are you?"})
    assert on_start.call_count == 1
    assert on_start.call_args[0][0].name == "RunnableSequence"
    assert on_end.call_count == 1
    on_start.reset_mock()
    on_end.reset_mock()
    # Listeners still fire exactly once when running inside a chain group.
    with trace_as_chain_group("hello") as manager:
        listened.invoke({"question": "Who are you?"}, {"callbacks": manager})
    assert on_start.call_count == 1
    assert on_start.call_args[0][0].name == "RunnableSequence"
    assert on_end.call_count == 1
async def test_with_listeners_async(mocker: MockerFixture) -> None:
    """Async variant: listeners fire once per ainvoke, chain groups included."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    chat = FakeListChatModel(responses=["foo"])
    chain: Runnable = prompt | chat
    on_start = mocker.Mock()
    on_end = mocker.Mock()
    listened = chain.with_listeners(on_start=on_start, on_end=on_end)
    await listened.ainvoke({"question": "Who are you?"})
    assert on_start.call_count == 1
    assert on_start.call_args[0][0].name == "RunnableSequence"
    assert on_end.call_count == 1
    on_start.reset_mock()
    on_end.reset_mock()
    # Listeners still fire exactly once inside an async chain group.
    async with atrace_as_chain_group("hello") as manager:
        await listened.ainvoke({"question": "Who are you?"}, {"callbacks": manager})
    assert on_start.call_count == 1
    assert on_start.call_args[0][0].name == "RunnableSequence"
    assert on_end.call_count == 1
def test_with_listener_propagation(mocker: MockerFixture) -> None:
    """Listeners survive further wrapping (retry/types/config/bind/listeners)."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    chat = FakeListChatModel(responses=["foo"])
    chain: Runnable = prompt | chat
    mock_start = mocker.Mock()
    mock_end = mocker.Mock()
    chain_with_listeners = chain.with_listeners(on_start=mock_start, on_end=mock_end)
    # Each wrapper is a pure, side-effect-free view over the listened chain;
    # invoking any of them must trigger the original listeners exactly once.
    wrapped_variants = [
        chain_with_listeners.with_retry(),
        chain_with_listeners.with_types(output_type=str),
        chain_with_listeners.with_config({"tags": ["foo"]}),
        chain_with_listeners.bind(stop=["foo"]),
    ]
    for wrapped in wrapped_variants:
        wrapped.invoke({"question": "Who are you?"})
        assert mock_start.call_count == 1
        assert mock_start.call_args[0][0].name == "RunnableSequence"
        assert mock_end.call_count == 1
        mock_start.reset_mock()
        mock_end.reset_mock()
    # Adding a second listener layer fires alongside the original one.
    mock_start_inner = mocker.Mock()
    mock_end_inner = mocker.Mock()
    chain_with_listeners.with_listeners(
        on_start=mock_start_inner, on_end=mock_end_inner
    ).invoke({"question": "Who are you?"})
    assert mock_start.call_count == 1
    assert mock_start.call_args[0][0].name == "RunnableSequence"
    assert mock_end.call_count == 1
    assert mock_start_inner.call_count == 1
    assert mock_start_inner.call_args[0][0].name == "RunnableSequence"
    assert mock_end_inner.call_count == 1
@freeze_time("2023-01-01")
@pytest.mark.usefixtures("deterministic_uuids")
def test_prompt_with_chat_model(
    mocker: MockerFixture,
    snapshot: SnapshotAssertion,
) -> None:
    """``prompt | chat`` sequence: structure, invoke, batch, and stream behavior.

    Spies on the underlying class methods to check what each step receives,
    and compares tracer runs / serialized form against stored snapshots.
    """
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    chat = FakeListChatModel(responses=["foo"])
    chain: Runnable = prompt | chat
    assert repr(chain) == snapshot
    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == []
    assert chain.last == chat
    assert dumps(chain, pretty=True) == snapshot
    # Test invoke
    prompt_spy = mocker.spy(prompt.__class__, "invoke")
    chat_spy = mocker.spy(chat.__class__, "invoke")
    tracer = FakeTracer()
    assert chain.invoke(
        {"question": "What is your name?"}, {"callbacks": [tracer]}
    ) == _any_id_ai_message(content="foo")
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert chat_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    assert tracer.runs == snapshot
    mocker.stop(prompt_spy)
    mocker.stop(chat_spy)
    # Test batch
    prompt_spy = mocker.spy(prompt.__class__, "batch")
    chat_spy = mocker.spy(chat.__class__, "batch")
    tracer = FakeTracer()
    assert chain.batch(
        [
            {"question": "What is your name?"},
            {"question": "What is your favorite color?"},
        ],
        {"callbacks": [tracer]},
    ) == [
        _any_id_ai_message(content="foo"),
        _any_id_ai_message(content="foo"),
    ]
    assert prompt_spy.call_args.args[1] == [
        {"question": "What is your name?"},
        {"question": "What is your favorite color?"},
    ]
    assert chat_spy.call_args.args[1] == [
        ChatPromptValue(
            messages=[
                SystemMessage(content="You are a nice assistant."),
                HumanMessage(content="What is your name?"),
            ]
        ),
        ChatPromptValue(
            messages=[
                SystemMessage(content="You are a nice assistant."),
                HumanMessage(content="What is your favorite color?"),
            ]
        ),
    ]
    assert (
        len(
            [
                r
                for r in tracer.runs
                if r.parent_run_id is None and len(r.child_runs) == 2
            ]
        )
        == 2
    ), "Each of 2 outer runs contains exactly two inner runs (1 prompt, 1 chat)"
    mocker.stop(prompt_spy)
    mocker.stop(chat_spy)
    # Test stream
    prompt_spy = mocker.spy(prompt.__class__, "invoke")
    chat_spy = mocker.spy(chat.__class__, "stream")
    tracer = FakeTracer()
    assert [
        *chain.stream({"question": "What is your name?"}, {"callbacks": [tracer]})
    ] == [
        _any_id_ai_message_chunk(content="f"),
        _any_id_ai_message_chunk(content="o"),
        _any_id_ai_message_chunk(content="o", chunk_position="last"),
    ]
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert chat_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
@freeze_time("2023-01-01")
@pytest.mark.usefixtures("deterministic_uuids")
async def test_prompt_with_chat_model_async(
    mocker: MockerFixture,
    snapshot: SnapshotAssertion,
) -> None:
    """Async mirror of ``test_prompt_with_chat_model``: ainvoke/abatch/astream."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    chat = FakeListChatModel(responses=["foo"])
    chain: Runnable = prompt | chat
    assert repr(chain) == snapshot
    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == []
    assert chain.last == chat
    assert dumps(chain, pretty=True) == snapshot
    # Test invoke
    prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
    chat_spy = mocker.spy(chat.__class__, "ainvoke")
    tracer = FakeTracer()
    assert await chain.ainvoke(
        {"question": "What is your name?"}, {"callbacks": [tracer]}
    ) == _any_id_ai_message(content="foo")
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert chat_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    assert tracer.runs == snapshot
    mocker.stop(prompt_spy)
    mocker.stop(chat_spy)
    # Test batch
    prompt_spy = mocker.spy(prompt.__class__, "abatch")
    chat_spy = mocker.spy(chat.__class__, "abatch")
    tracer = FakeTracer()
    assert await chain.abatch(
        [
            {"question": "What is your name?"},
            {"question": "What is your favorite color?"},
        ],
        {"callbacks": [tracer]},
    ) == [
        _any_id_ai_message(content="foo"),
        _any_id_ai_message(content="foo"),
    ]
    assert prompt_spy.call_args.args[1] == [
        {"question": "What is your name?"},
        {"question": "What is your favorite color?"},
    ]
    assert chat_spy.call_args.args[1] == [
        ChatPromptValue(
            messages=[
                SystemMessage(content="You are a nice assistant."),
                HumanMessage(content="What is your name?"),
            ]
        ),
        ChatPromptValue(
            messages=[
                SystemMessage(content="You are a nice assistant."),
                HumanMessage(content="What is your favorite color?"),
            ]
        ),
    ]
    assert (
        len(
            [
                r
                for r in tracer.runs
                if r.parent_run_id is None and len(r.child_runs) == 2
            ]
        )
        == 2
    ), "Each of 2 outer runs contains exactly two inner runs (1 prompt, 1 chat)"
    mocker.stop(prompt_spy)
    mocker.stop(chat_spy)
    # Test stream
    prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
    chat_spy = mocker.spy(chat.__class__, "astream")
    tracer = FakeTracer()
    assert [
        a
        async for a in chain.astream(
            {"question": "What is your name?"}, {"callbacks": [tracer]}
        )
    ] == [
        _any_id_ai_message_chunk(content="f"),
        _any_id_ai_message_chunk(content="o"),
        _any_id_ai_message_chunk(content="o", chunk_position="last"),
    ]
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert chat_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
@pytest.mark.skipif(
    condition=sys.version_info[1] == 13,
    reason=(
        "temporary, py3.13 exposes some invalid assumptions about order of batch async "
        "executions."
    ),
)
@freeze_time("2023-01-01")
async def test_prompt_with_llm(
    mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
    """``prompt | llm`` sequence: ainvoke/abatch/astream plus a full astream_log.

    FakeListLLM cycles through ["foo", "bar"], which is why batch results come
    back as ["bar", "foo"] and the stream yields "bar". freeze_time pins the
    timestamps asserted in the expected RunLogPatch list.
    """
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    llm = FakeListLLM(responses=["foo", "bar"])
    chain: Runnable = prompt | llm
    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == []
    assert chain.last == llm
    assert dumps(chain, pretty=True) == snapshot
    # Test invoke
    prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
    llm_spy = mocker.spy(llm.__class__, "ainvoke")
    tracer = FakeTracer()
    assert (
        await chain.ainvoke({"question": "What is your name?"}, {"callbacks": [tracer]})
        == "foo"
    )
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert llm_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    assert tracer.runs == snapshot
    mocker.stop(prompt_spy)
    mocker.stop(llm_spy)
    # Test batch
    prompt_spy = mocker.spy(prompt.__class__, "abatch")
    llm_spy = mocker.spy(llm.__class__, "abatch")
    tracer = FakeTracer()
    assert await chain.abatch(
        [
            {"question": "What is your name?"},
            {"question": "What is your favorite color?"},
        ],
        {"callbacks": [tracer]},
    ) == ["bar", "foo"]
    assert prompt_spy.call_args.args[1] == [
        {"question": "What is your name?"},
        {"question": "What is your favorite color?"},
    ]
    assert llm_spy.call_args.args[1] == [
        ChatPromptValue(
            messages=[
                SystemMessage(content="You are a nice assistant."),
                HumanMessage(content="What is your name?"),
            ]
        ),
        ChatPromptValue(
            messages=[
                SystemMessage(content="You are a nice assistant."),
                HumanMessage(content="What is your favorite color?"),
            ]
        ),
    ]
    assert tracer.runs == snapshot
    mocker.stop(prompt_spy)
    mocker.stop(llm_spy)
    # Test stream
    prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
    llm_spy = mocker.spy(llm.__class__, "astream")
    tracer = FakeTracer()
    assert [
        token
        async for token in chain.astream(
            {"question": "What is your name?"}, {"callbacks": [tracer]}
        )
    ] == ["bar"]
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert llm_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    prompt_spy.reset_mock()
    llm_spy.reset_mock()
    stream_log = [
        part async for part in chain.astream_log({"question": "What is your name?"})
    ]
    # Remove IDs from logs
    for part in stream_log:
        for op in part.ops:
            if (
                isinstance(op["value"], dict)
                and "id" in op["value"]
                and not isinstance(op["value"]["id"], list)  # serialized lc id
            ):
                del op["value"]["id"]
    expected = [
        RunLogPatch(
            {
                "op": "replace",
                "path": "",
                "value": {
                    "logs": {},
                    "final_output": None,
                    "streamed_output": [],
                    "name": "RunnableSequence",
                    "type": "chain",
                },
            }
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/ChatPromptTemplate",
                "value": {
                    "end_time": None,
                    "final_output": None,
                    "metadata": {},
                    "name": "ChatPromptTemplate",
                    "start_time": "2023-01-01T00:00:00.000+00:00",
                    "streamed_output": [],
                    "streamed_output_str": [],
                    "tags": ["seq:step:1"],
                    "type": "prompt",
                },
            }
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/ChatPromptTemplate/final_output",
                "value": ChatPromptValue(
                    messages=[
                        SystemMessage(content="You are a nice assistant."),
                        HumanMessage(content="What is your name?"),
                    ]
                ),
            },
            {
                "op": "add",
                "path": "/logs/ChatPromptTemplate/end_time",
                "value": "2023-01-01T00:00:00.000+00:00",
            },
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/FakeListLLM",
                "value": {
                    "end_time": None,
                    "final_output": None,
                    "metadata": {"ls_model_type": "llm", "ls_provider": "fakelist"},
                    "name": "FakeListLLM",
                    "start_time": "2023-01-01T00:00:00.000+00:00",
                    "streamed_output": [],
                    "streamed_output_str": [],
                    "tags": ["seq:step:2"],
                    "type": "llm",
                },
            }
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/FakeListLLM/final_output",
                "value": {
                    "generations": [
                        [{"generation_info": None, "text": "foo", "type": "Generation"}]
                    ],
                    "llm_output": None,
                    "run": None,
                    "type": "LLMResult",
                },
            },
            {
                "op": "add",
                "path": "/logs/FakeListLLM/end_time",
                "value": "2023-01-01T00:00:00.000+00:00",
            },
        ),
        RunLogPatch(
            {"op": "add", "path": "/streamed_output/-", "value": "foo"},
            {"op": "replace", "path": "/final_output", "value": "foo"},
        ),
    ]
    assert stream_log == expected
@freeze_time("2023-01-01")
async def test_prompt_with_llm_parser(
    mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
    """``prompt | llm | parser`` sequence, including per-token astream_log output.

    FakeStreamingListLLM cycles through its two responses, so the order of
    batch/stream results alternates between the animal and vegetable lists.
    The parser emits one single-item list per comma-separated token.
    """
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    llm = FakeStreamingListLLM(responses=["bear, dog, cat", "tomato, lettuce, onion"])
    parser = CommaSeparatedListOutputParser()
    chain: Runnable = prompt | llm | parser
    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == [llm]
    assert chain.last == parser
    assert dumps(chain, pretty=True) == snapshot
    # Test invoke
    prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
    llm_spy = mocker.spy(llm.__class__, "ainvoke")
    parser_spy = mocker.spy(parser.__class__, "ainvoke")
    tracer = FakeTracer()
    assert await chain.ainvoke(
        {"question": "What is your name?"}, {"callbacks": [tracer]}
    ) == ["bear", "dog", "cat"]
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert llm_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    assert parser_spy.call_args.args[1] == "bear, dog, cat"
    assert tracer.runs == snapshot
    mocker.stop(prompt_spy)
    mocker.stop(llm_spy)
    mocker.stop(parser_spy)
    # Test batch
    prompt_spy = mocker.spy(prompt.__class__, "abatch")
    llm_spy = mocker.spy(llm.__class__, "abatch")
    parser_spy = mocker.spy(parser.__class__, "abatch")
    tracer = FakeTracer()
    assert await chain.abatch(
        [
            {"question": "What is your name?"},
            {"question": "What is your favorite color?"},
        ],
        {"callbacks": [tracer]},
    ) == [["tomato", "lettuce", "onion"], ["bear", "dog", "cat"]]
    assert prompt_spy.call_args.args[1] == [
        {"question": "What is your name?"},
        {"question": "What is your favorite color?"},
    ]
    assert llm_spy.call_args.args[1] == [
        ChatPromptValue(
            messages=[
                SystemMessage(content="You are a nice assistant."),
                HumanMessage(content="What is your name?"),
            ]
        ),
        ChatPromptValue(
            messages=[
                SystemMessage(content="You are a nice assistant."),
                HumanMessage(content="What is your favorite color?"),
            ]
        ),
    ]
    assert parser_spy.call_args.args[1] == [
        "tomato, lettuce, onion",
        "bear, dog, cat",
    ]
    assert len(tracer.runs) == 2
    assert all(
        run.name == "RunnableSequence"
        and run.run_type == "chain"
        and len(run.child_runs) == 3
        for run in tracer.runs
    )
    mocker.stop(prompt_spy)
    mocker.stop(llm_spy)
    mocker.stop(parser_spy)
    # Test stream
    prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
    llm_spy = mocker.spy(llm.__class__, "astream")
    tracer = FakeTracer()
    assert [
        token
        async for token in chain.astream(
            {"question": "What is your name?"}, {"callbacks": [tracer]}
        )
    ] == [["tomato"], ["lettuce"], ["onion"]]
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert llm_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    prompt_spy.reset_mock()
    llm_spy.reset_mock()
    stream_log = [
        part async for part in chain.astream_log({"question": "What is your name?"})
    ]
    # Remove IDs from logs
    for part in stream_log:
        for op in part.ops:
            if (
                isinstance(op["value"], dict)
                and "id" in op["value"]
                and not isinstance(op["value"]["id"], list)  # serialized lc id
            ):
                del op["value"]["id"]
    expected = [
        RunLogPatch(
            {
                "op": "replace",
                "path": "",
                "value": {
                    "logs": {},
                    "final_output": None,
                    "streamed_output": [],
                    "name": "RunnableSequence",
                    "type": "chain",
                },
            }
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/ChatPromptTemplate",
                "value": {
                    "end_time": None,
                    "final_output": None,
                    "metadata": {},
                    "name": "ChatPromptTemplate",
                    "start_time": "2023-01-01T00:00:00.000+00:00",
                    "streamed_output": [],
                    "streamed_output_str": [],
                    "tags": ["seq:step:1"],
                    "type": "prompt",
                },
            }
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/ChatPromptTemplate/final_output",
                "value": ChatPromptValue(
                    messages=[
                        SystemMessage(content="You are a nice assistant."),
                        HumanMessage(content="What is your name?"),
                    ]
                ),
            },
            {
                "op": "add",
                "path": "/logs/ChatPromptTemplate/end_time",
                "value": "2023-01-01T00:00:00.000+00:00",
            },
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/FakeStreamingListLLM",
                "value": {
                    "end_time": None,
                    "final_output": None,
                    "metadata": {
                        "ls_model_type": "llm",
                        "ls_provider": "fakestreaminglist",
                    },
                    "name": "FakeStreamingListLLM",
                    "start_time": "2023-01-01T00:00:00.000+00:00",
                    "streamed_output": [],
                    "streamed_output_str": [],
                    "tags": ["seq:step:2"],
                    "type": "llm",
                },
            }
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/FakeStreamingListLLM/final_output",
                "value": {
                    "generations": [
                        [
                            {
                                "generation_info": None,
                                "text": "bear, dog, cat",
                                "type": "Generation",
                            }
                        ]
                    ],
                    "llm_output": None,
                    "run": None,
                    "type": "LLMResult",
                },
            },
            {
                "op": "add",
                "path": "/logs/FakeStreamingListLLM/end_time",
                "value": "2023-01-01T00:00:00.000+00:00",
            },
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/CommaSeparatedListOutputParser",
                "value": {
                    "end_time": None,
                    "final_output": None,
                    "metadata": {},
                    "name": "CommaSeparatedListOutputParser",
                    "start_time": "2023-01-01T00:00:00.000+00:00",
                    "streamed_output": [],
                    "streamed_output_str": [],
                    "tags": ["seq:step:3"],
                    "type": "parser",
                },
            }
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/CommaSeparatedListOutputParser/streamed_output/-",
                "value": ["bear"],
            }
        ),
        RunLogPatch(
            {"op": "add", "path": "/streamed_output/-", "value": ["bear"]},
            {"op": "replace", "path": "/final_output", "value": ["bear"]},
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/CommaSeparatedListOutputParser/streamed_output/-",
                "value": ["dog"],
            }
        ),
        RunLogPatch(
            {"op": "add", "path": "/streamed_output/-", "value": ["dog"]},
            {"op": "add", "path": "/final_output/1", "value": "dog"},
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/CommaSeparatedListOutputParser/streamed_output/-",
                "value": ["cat"],
            }
        ),
        RunLogPatch(
            {"op": "add", "path": "/streamed_output/-", "value": ["cat"]},
            {"op": "add", "path": "/final_output/2", "value": "cat"},
        ),
        RunLogPatch(
            {
                "op": "add",
                "path": "/logs/CommaSeparatedListOutputParser/final_output",
                "value": {"output": ["bear", "dog", "cat"]},
            },
            {
                "op": "add",
                "path": "/logs/CommaSeparatedListOutputParser/end_time",
                "value": "2023-01-01T00:00:00.000+00:00",
            },
        ),
    ]
    assert stream_log == expected
@freeze_time("2023-01-01")
async def test_stream_log_retriever() -> None:
    """``astream_log`` records a log entry for every sub-runnable in the graph."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{documents}"
        + "{question}"
    )
    llm = FakeListLLM(responses=["foo", "bar"])
    chain: Runnable = (
        {"documents": FakeRetriever(), "question": itemgetter("question")}
        | prompt
        | {"one": llm, "two": llm}
    )
    stream_log = [
        part async for part in chain.astream_log({"question": "What is your name?"})
    ]
    # Remove IDs from logs
    for part in stream_log:
        for op in part.ops:
            if (
                isinstance(op["value"], dict)
                and "id" in op["value"]
                and not isinstance(op["value"]["id"], list)  # serialized lc id
            ):
                del op["value"]["id"]
    # Folding the patches together yields one log entry per sub-runnable,
    # with duplicate names disambiguated by a ":<n>" suffix (FakeListLLM:2).
    assert sorted(cast("RunLog", add(stream_log)).state["logs"]) == [
        "ChatPromptTemplate",
        "FakeListLLM",
        "FakeListLLM:2",
        "FakeRetriever",
        "RunnableLambda",
        "RunnableParallel<documents,question>",
        "RunnableParallel<one,two>",
    ]
@freeze_time("2023-01-01")
async def test_stream_log_lists() -> None:
    """Streamed AddableDict list outputs are patched element-by-element.

    The first chunk replaces final_output wholesale; each subsequent chunk
    appends to `/final_output/alist/<i>` so the folded RunLog accumulates the
    full list.
    """
    async def list_producer(_: AsyncIterator[Any]) -> AsyncIterator[AddableDict]:
        # Emits four single-element list chunks that are addable into one list.
        for i in range(4):
            yield AddableDict(alist=[str(i)])
    chain: Runnable = RunnableGenerator(list_producer)
    stream_log = [
        part async for part in chain.astream_log({"question": "What is your name?"})
    ]
    # Remove IDs from logs
    for part in stream_log:
        for op in part.ops:
            if (
                isinstance(op["value"], dict)
                and "id" in op["value"]
                and not isinstance(op["value"]["id"], list)  # serialized lc id
            ):
                del op["value"]["id"]
    assert stream_log == [
        RunLogPatch(
            {
                "op": "replace",
                "path": "",
                "value": {
                    "final_output": None,
                    "logs": {},
                    "streamed_output": [],
                    "name": "list_producer",
                    "type": "chain",
                },
            }
        ),
        RunLogPatch(
            {"op": "add", "path": "/streamed_output/-", "value": {"alist": ["0"]}},
            {"op": "replace", "path": "/final_output", "value": {"alist": ["0"]}},
        ),
        RunLogPatch(
            {"op": "add", "path": "/streamed_output/-", "value": {"alist": ["1"]}},
            {"op": "add", "path": "/final_output/alist/1", "value": "1"},
        ),
        RunLogPatch(
            {"op": "add", "path": "/streamed_output/-", "value": {"alist": ["2"]}},
            {"op": "add", "path": "/final_output/alist/2", "value": "2"},
        ),
        RunLogPatch(
            {"op": "add", "path": "/streamed_output/-", "value": {"alist": ["3"]}},
            {"op": "add", "path": "/final_output/alist/3", "value": "3"},
        ),
    ]
    state = add(stream_log)
    assert isinstance(state, RunLog)
    assert state.state == {
        "final_output": {"alist": ["0", "1", "2", "3"]},
        "logs": {},
        "name": "list_producer",
        "streamed_output": [
            {"alist": ["0"]},
            {"alist": ["1"]},
            {"alist": ["2"]},
            {"alist": ["3"]},
        ],
        "type": "chain",
    }
@freeze_time("2023-01-01")
async def test_prompt_with_llm_and_async_lambda(
    mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
    """An async function piped onto a chain is wrapped as a RunnableLambda."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    llm = FakeListLLM(responses=["foo", "bar"])
    async def passthrough(value: Any) -> Any:
        # Identity step: proves the coroutine is coerced into the sequence.
        return value
    chain = prompt | llm | passthrough
    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == [llm]
    assert chain.last == RunnableLambda(func=passthrough)
    assert dumps(chain, pretty=True) == snapshot
    # Test invoke
    prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
    llm_spy = mocker.spy(llm.__class__, "ainvoke")
    tracer = FakeTracer()
    assert (
        await chain.ainvoke({"question": "What is your name?"}, {"callbacks": [tracer]})
        == "foo"
    )
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert llm_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    assert tracer.runs == snapshot
    mocker.stop(prompt_spy)
    mocker.stop(llm_spy)
@freeze_time("2023-01-01")
@pytest.mark.usefixtures("deterministic_uuids")
def test_prompt_with_chat_model_and_parser(
    mocker: MockerFixture,
    snapshot: SnapshotAssertion,
) -> None:
    """``prompt | chat | parser`` three-step sequence: structure and invoke."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    chat = FakeListChatModel(responses=["foo, bar"])
    parser = CommaSeparatedListOutputParser()
    chain = prompt | chat | parser
    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == [chat]
    assert chain.last == parser
    assert dumps(chain, pretty=True) == snapshot
    # Test invoke
    prompt_spy = mocker.spy(prompt.__class__, "invoke")
    chat_spy = mocker.spy(chat.__class__, "invoke")
    parser_spy = mocker.spy(parser.__class__, "invoke")
    tracer = FakeTracer()
    assert chain.invoke(
        {"question": "What is your name?"}, {"callbacks": [tracer]}
    ) == ["foo", "bar"]
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert chat_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    # The parser receives the chat model's raw AIMessage.
    assert parser_spy.call_args.args[1] == _any_id_ai_message(content="foo, bar")
    assert tracer.runs == snapshot
@freeze_time("2023-01-01")
@pytest.mark.usefixtures("deterministic_uuids")
def test_combining_sequences(
    snapshot: SnapshotAssertion,
) -> None:
    """Piping two RunnableSequences flattens them into one combined sequence."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    chat = FakeListChatModel(responses=["foo, bar"])
    parser = CommaSeparatedListOutputParser()
    chain = prompt | chat | parser
    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == [chat]
    assert chain.last == parser
    assert dumps(chain, pretty=True) == snapshot
    prompt2 = (
        SystemMessagePromptTemplate.from_template("You are a nicer assistant.")
        + "{question}"
    )
    chat2 = FakeListChatModel(responses=["baz, qux"])
    parser2 = CommaSeparatedListOutputParser()
    input_formatter: RunnableLambda[list[str], dict[str, Any]] = RunnableLambda(
        lambda x: {"question": x[0] + x[1]}
    )
    chain2 = cast("RunnableSequence", input_formatter | prompt2 | chat2 | parser2)
    # Fixed copy-paste bug: previously re-checked `chain` here; `chain2` is the
    # sequence under test.
    assert isinstance(chain2, RunnableSequence)
    assert chain2.first == input_formatter
    assert chain2.middle == [prompt2, chat2]
    assert chain2.last == parser2
    assert dumps(chain2, pretty=True) == snapshot
    # Combining flattens both sequences into a single level of steps.
    combined_chain = cast("RunnableSequence", chain | chain2)
    assert combined_chain.first == prompt
    assert combined_chain.middle == [
        chat,
        parser,
        input_formatter,
        prompt2,
        chat2,
    ]
    assert combined_chain.last == parser2
    assert dumps(combined_chain, pretty=True) == snapshot
    # Test invoke
    tracer = FakeTracer()
    assert combined_chain.invoke(
        {"question": "What is your name?"}, {"callbacks": [tracer]}
    ) == ["baz", "qux"]
    assert tracer.runs == snapshot
@freeze_time("2023-01-01")
def test_seq_dict_prompt_llm(
    mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
    """A leading dict step is coerced into a RunnableParallel feeding the prompt."""
    passthrough = mocker.Mock(side_effect=lambda x: x)

    retriever = FakeRetriever()
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + """Context:
{documents}

Question:
{question}"""
    )
    chat = FakeListChatModel(responses=["foo, bar"])
    parser = CommaSeparatedListOutputParser()

    chain: Runnable = (
        {
            "question": RunnablePassthrough[str]() | passthrough,
            "documents": passthrough | retriever,
            "just_to_test_lambda": passthrough,
        }
        | prompt
        | chat
        | parser
    )
    assert repr(chain) == snapshot
    assert isinstance(chain, RunnableSequence)
    # The leading dict is coerced into a RunnableParallel.
    assert isinstance(chain.first, RunnableParallel)
    assert chain.middle == [prompt, chat]
    assert chain.last == parser
    assert dumps(chain, pretty=True) == snapshot

    # Test invoke
    prompt_spy = mocker.spy(prompt.__class__, "invoke")
    chat_spy = mocker.spy(chat.__class__, "invoke")
    parser_spy = mocker.spy(parser.__class__, "invoke")
    tracer = FakeTracer()
    assert chain.invoke("What is your name?", {"callbacks": [tracer]}) == [
        "foo",
        "bar",
    ]
    # The prompt sees the fan-out produced by the parallel step.
    assert prompt_spy.call_args.args[1] == {
        "documents": [Document(page_content="foo"), Document(page_content="bar")],
        "question": "What is your name?",
        "just_to_test_lambda": "What is your name?",
    }
    assert chat_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(
                content="You are a nice assistant.",
                additional_kwargs={},
                response_metadata={},
            ),
            HumanMessage(
                content="Context:\n"
                "[Document(metadata={}, page_content='foo'), "
                "Document(metadata={}, page_content='bar')]\n"
                "\n"
                "Question:\n"
                "What is your name?",
                additional_kwargs={},
                response_metadata={},
            ),
        ]
    )
    assert parser_spy.call_args.args[1] == _any_id_ai_message(content="foo, bar")
    assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
    parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
    assert len(parent_run.child_runs) == 4
    map_run = parent_run.child_runs[0]
    assert map_run.name == "RunnableParallel<question,documents,just_to_test_lambda>"
    assert len(map_run.child_runs) == 3
@freeze_time("2023-01-01")
def test_seq_prompt_dict(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None:
    """A trailing dict step fans the prompt output out to two models in parallel."""
    passthrough = mocker.Mock(side_effect=lambda x: x)

    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    chat = FakeListChatModel(responses=["i'm a chatbot"])
    llm = FakeListLLM(responses=["i'm a textbot"])

    chain = (
        prompt
        | passthrough
        | {
            "chat": chat,
            "llm": llm,
        }
    )

    assert repr(chain) == snapshot
    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == [RunnableLambda(passthrough)]
    # The trailing dict is coerced into a RunnableParallel.
    assert isinstance(chain.last, RunnableParallel)
    assert dumps(chain, pretty=True) == snapshot

    # Test invoke
    prompt_spy = mocker.spy(prompt.__class__, "invoke")
    chat_spy = mocker.spy(chat.__class__, "invoke")
    llm_spy = mocker.spy(llm.__class__, "invoke")
    tracer = FakeTracer()
    assert chain.invoke(
        {"question": "What is your name?"}, {"callbacks": [tracer]}
    ) == {
        "chat": _any_id_ai_message(content="i'm a chatbot"),
        "llm": "i'm a textbot",
    }
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    # Both branches receive the same prompt value.
    assert chat_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    assert llm_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
    parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
    assert len(parent_run.child_runs) == 3
    map_run = parent_run.child_runs[2]
    assert map_run.name == "RunnableParallel<chat,llm>"
    assert len(map_run.child_runs) == 2
@freeze_time("2023-01-01")
def test_router_runnable(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None:
    """RouterRunnable dispatches to the chain named by the `key` field."""
    chain1: Runnable = ChatPromptTemplate.from_template(
        "You are a math genius. Answer the question: {question}"
    ) | FakeListLLM(responses=["4"])
    chain2: Runnable = ChatPromptTemplate.from_template(
        "You are an english major. Answer the question: {question}"
    ) | FakeListLLM(responses=["2"])
    router: Runnable = RouterRunnable({"math": chain1, "english": chain2})
    chain: Runnable = {
        "key": lambda x: x["key"],
        "input": {"question": lambda x: x["question"]},
    } | router
    assert dumps(chain, pretty=True) == snapshot

    result = chain.invoke({"key": "math", "question": "2 + 2"})
    assert result == "4"

    result2 = chain.batch(
        [
            {"key": "math", "question": "2 + 2"},
            {"key": "english", "question": "2 + 2"},
        ]
    )
    assert result2 == ["4", "2"]

    # Test invoke
    router_spy = mocker.spy(router.__class__, "invoke")
    tracer = FakeTracer()
    assert (
        chain.invoke({"key": "math", "question": "2 + 2"}, {"callbacks": [tracer]})
        == "4"
    )
    # The router receives the already-mapped {key, input} payload.
    assert router_spy.call_args.args[1] == {
        "key": "math",
        "input": {"question": "2 + 2"},
    }
    assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
    parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
    assert len(parent_run.child_runs) == 2
    router_run = parent_run.child_runs[1]
    assert router_run.name == "RunnableSequence"  # TODO: should be RunnableRouter
    assert len(router_run.child_runs) == 2
async def test_router_runnable_async() -> None:
    """Async RouterRunnable dispatches to the chain selected by the `key` field."""
    math_branch: Runnable = ChatPromptTemplate.from_template(
        "You are a math genius. Answer the question: {question}"
    ) | FakeListLLM(responses=["4"])
    english_branch: Runnable = ChatPromptTemplate.from_template(
        "You are an english major. Answer the question: {question}"
    ) | FakeListLLM(responses=["2"])
    router: Runnable = RouterRunnable(
        {"math": math_branch, "english": english_branch}
    )
    chain: Runnable = {
        "key": lambda x: x["key"],
        "input": {"question": lambda x: x["question"]},
    } | router

    # Single async invocation routes to the math branch.
    assert await chain.ainvoke({"key": "math", "question": "2 + 2"}) == "4"

    # Batched async invocation routes each element independently.
    batch_inputs = [
        {"key": "math", "question": "2 + 2"},
        {"key": "english", "question": "2 + 2"},
    ]
    assert await chain.abatch(batch_inputs) == ["4", "2"]
@freeze_time("2023-01-01")
def test_higher_order_lambda_runnable(
    mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
    """A lambda that returns a Runnable is traced and invoked as a dynamic sub-chain."""
    math_chain: Runnable = ChatPromptTemplate.from_template(
        "You are a math genius. Answer the question: {question}"
    ) | FakeListLLM(responses=["4"])
    english_chain: Runnable = ChatPromptTemplate.from_template(
        "You are an english major. Answer the question: {question}"
    ) | FakeListLLM(responses=["2"])
    input_map: Runnable = RunnableParallel(
        key=lambda x: x["key"],
        input={"question": lambda x: x["question"]},
    )

    def router(params: dict[str, Any]) -> Runnable:
        # Select the sub-chain based on the routing key.
        if params["key"] == "math":
            return itemgetter("input") | math_chain
        if params["key"] == "english":
            return itemgetter("input") | english_chain
        msg = f"Unknown key: {params['key']}"
        raise ValueError(msg)

    chain: Runnable = input_map | router
    assert dumps(chain, pretty=True) == snapshot

    result = chain.invoke({"key": "math", "question": "2 + 2"})
    assert result == "4"

    result2 = chain.batch(
        [
            {"key": "math", "question": "2 + 2"},
            {"key": "english", "question": "2 + 2"},
        ]
    )
    assert result2 == ["4", "2"]

    # Test invoke
    math_spy = mocker.spy(math_chain.__class__, "invoke")
    tracer = FakeTracer()
    assert (
        chain.invoke({"key": "math", "question": "2 + 2"}, {"callbacks": [tracer]})
        == "4"
    )
    assert math_spy.call_args.args[1] == {
        "key": "math",
        "input": {"question": "2 + 2"},
    }
    assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
    parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
    assert len(parent_run.child_runs) == 2
    router_run = parent_run.child_runs[1]
    # The lambda's own name is used for its run.
    assert router_run.name == "router"
    assert len(router_run.child_runs) == 1
    math_run = router_run.child_runs[0]
    assert math_run.name == "RunnableSequence"
    assert len(math_run.child_runs) == 3
async def test_higher_order_lambda_runnable_async(mocker: MockerFixture) -> None:
    """Async variant: both sync and async router lambdas act as dynamic sub-chains."""
    math_chain: Runnable = ChatPromptTemplate.from_template(
        "You are a math genius. Answer the question: {question}"
    ) | FakeListLLM(responses=["4"])
    english_chain: Runnable = ChatPromptTemplate.from_template(
        "You are an english major. Answer the question: {question}"
    ) | FakeListLLM(responses=["2"])
    input_map: Runnable = RunnableParallel(
        key=lambda x: x["key"],
        input={"question": lambda x: x["question"]},
    )

    def router(value: dict[str, Any]) -> Runnable:
        # Select the sub-chain based on the routing key.
        if value["key"] == "math":
            return itemgetter("input") | math_chain
        if value["key"] == "english":
            return itemgetter("input") | english_chain
        msg = f"Unknown key: {value['key']}"
        raise ValueError(msg)

    chain: Runnable = input_map | router
    result = await chain.ainvoke({"key": "math", "question": "2 + 2"})
    assert result == "4"

    result2 = await chain.abatch(
        [
            {"key": "math", "question": "2 + 2"},
            {"key": "english", "question": "2 + 2"},
        ]
    )
    assert result2 == ["4", "2"]

    # Test ainvoke
    async def arouter(params: dict[str, Any]) -> Runnable:
        # Async router with the same dispatch logic.
        if params["key"] == "math":
            return itemgetter("input") | math_chain
        if params["key"] == "english":
            return itemgetter("input") | english_chain
        msg = f"Unknown key: {params['key']}"
        raise ValueError(msg)

    achain: Runnable = input_map | arouter
    math_spy = mocker.spy(math_chain.__class__, "ainvoke")
    tracer = FakeTracer()
    assert (
        await achain.ainvoke(
            {"key": "math", "question": "2 + 2"}, {"callbacks": [tracer]}
        )
        == "4"
    )
    assert math_spy.call_args.args[1] == {
        "key": "math",
        "input": {"question": "2 + 2"},
    }
    assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
    parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
    assert len(parent_run.child_runs) == 2
    router_run = parent_run.child_runs[1]
    # The async lambda's own name is used for its run.
    assert router_run.name == "arouter"
    assert len(router_run.child_runs) == 1
    math_run = router_run.child_runs[0]
    assert math_run.name == "RunnableSequence"
    assert len(math_run.child_runs) == 3
@freeze_time("2023-01-01")
def test_seq_prompt_map(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None:
    """A trailing map with a bound chat model, an LLM, and a passthrough branch."""
    passthrough = mocker.Mock(side_effect=lambda x: x)

    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    chat = FakeListChatModel(responses=["i'm a chatbot"])
    llm = FakeListLLM(responses=["i'm a textbot"])

    chain = (
        prompt
        | passthrough
        | {
            "chat": chat.bind(stop=["Thought:"]),
            "llm": llm,
            "passthrough": passthrough,
        }
    )

    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == [RunnableLambda(passthrough)]
    assert isinstance(chain.last, RunnableParallel)
    # Serialization snapshot is only checked on pydantic >= 2.10 (per the flag).
    if PYDANTIC_VERSION_AT_LEAST_210:
        assert dumps(chain, pretty=True) == snapshot

    # Test invoke
    prompt_spy = mocker.spy(prompt.__class__, "invoke")
    chat_spy = mocker.spy(chat.__class__, "invoke")
    llm_spy = mocker.spy(llm.__class__, "invoke")
    tracer = FakeTracer()
    assert chain.invoke(
        {"question": "What is your name?"}, {"callbacks": [tracer]}
    ) == {
        "chat": _any_id_ai_message(content="i'm a chatbot"),
        "llm": "i'm a textbot",
        # The passthrough branch echoes the prompt value unchanged.
        "passthrough": ChatPromptValue(
            messages=[
                SystemMessage(content="You are a nice assistant."),
                HumanMessage(content="What is your name?"),
            ]
        ),
    }
    assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
    assert chat_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    assert llm_spy.call_args.args[1] == ChatPromptValue(
        messages=[
            SystemMessage(content="You are a nice assistant."),
            HumanMessage(content="What is your name?"),
        ]
    )
    assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
    parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
    assert len(parent_run.child_runs) == 3
    map_run = parent_run.child_runs[2]
    assert map_run.name == "RunnableParallel<chat,llm,passthrough>"
    assert len(map_run.child_runs) == 3
def test_map_stream() -> None:
    """Streaming through a map yields one single-key chunk at a time; pick/assign
    reshape the streamed output."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )

    chat_res = "i'm a chatbot"
    # sleep to better simulate a real stream
    chat = FakeListChatModel(responses=[chat_res], sleep=0.01)

    llm_res = "i'm a textbot"
    # sleep to better simulate a real stream
    llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)

    chain: Runnable = prompt | {
        "chat": chat.bind(stop=["Thought:"]),
        "llm": llm,
        "passthrough": RunnablePassthrough(),
    }

    stream = chain.stream({"question": "What is your name?"})

    # Accumulate chunks while also folding them into a single final value.
    final_value = None
    streamed_chunks = []
    for chunk in stream:
        streamed_chunks.append(chunk)
        if final_value is None:
            final_value = chunk
        else:
            final_value += chunk

    # Branch order is nondeterministic, so the first chunk may come from any branch.
    assert streamed_chunks[0] in [
        {"passthrough": prompt.invoke({"question": "What is your name?"})},
        {"llm": "i"},
        {"chat": _any_id_ai_message_chunk(content="i")},
    ]
    assert len(streamed_chunks) == len(chat_res) + len(llm_res) + 1
    assert all(len(c.keys()) == 1 for c in streamed_chunks)
    assert final_value is not None
    assert final_value.get("chat").content == "i'm a chatbot"
    assert final_value.get("llm") == "i'm a textbot"
    assert final_value.get("passthrough") == prompt.invoke(
        {"question": "What is your name?"}
    )

    chain_pick_one = chain.pick("llm")
    assert chain_pick_one.get_output_jsonschema() == {
        "title": "RunnableSequenceOutput",
        "type": "string",
    }

    stream = chain_pick_one.stream({"question": "What is your name?"})

    final_value = None
    streamed_chunks = []
    for chunk in stream:
        streamed_chunks.append(chunk)
        if final_value is None:
            final_value = chunk
        else:
            final_value += chunk

    assert streamed_chunks[0] == "i"
    assert len(streamed_chunks) == len(llm_res)

    chain_pick_two = chain.assign(hello=RunnablePick("llm").pipe(llm)).pick(
        [
            "llm",
            "hello",
        ]
    )
    assert chain_pick_two.get_output_jsonschema() == {
        "title": "RunnableSequenceOutput",
        "type": "object",
        "properties": {
            "hello": {"title": "Hello", "type": "string"},
            "llm": {"title": "Llm", "type": "string"},
        },
        "required": ["llm", "hello"],
    }

    stream = chain_pick_two.stream({"question": "What is your name?"})

    final_value = None
    streamed_chunks = []
    for chunk in stream:
        streamed_chunks.append(chunk)
        if final_value is None:
            final_value = chunk
        else:
            final_value += chunk

    # Bug fix: the original follow-up guard used `x == a or {...}`, whose right
    # operand is a non-empty dict (always truthy), so it could never raise.
    # A single membership test expresses the intended check.
    expected_first_chunks = [
        {"llm": "i"},
        {"chat": _any_id_ai_message_chunk(content="i")},
    ]
    if streamed_chunks[0] not in expected_first_chunks:
        msg = f"Got an unexpected chunk: {streamed_chunks[0]}"
        raise AssertionError(msg)
    assert len(streamed_chunks) == len(llm_res) + len(chat_res)
def test_map_stream_iterator_input() -> None:
    """Streaming a map whose input is itself a stream from an upstream LLM."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )

    chat_res = "i'm a chatbot"
    # sleep to better simulate a real stream
    chat = FakeListChatModel(responses=[chat_res], sleep=0.01)

    llm_res = "i'm a textbot"
    # sleep to better simulate a real stream
    llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)

    chain: Runnable = (
        prompt
        | llm
        | {
            "chat": chat.bind(stop=["Thought:"]),
            "llm": llm,
            "passthrough": RunnablePassthrough(),
        }
    )

    stream = chain.stream({"question": "What is your name?"})

    # Accumulate chunks while also folding them into a single final value.
    final_value = None
    streamed_chunks = []
    for chunk in stream:
        streamed_chunks.append(chunk)
        if final_value is None:
            final_value = chunk
        else:
            final_value += chunk

    assert streamed_chunks[0] in [
        {"passthrough": "i"},
        {"llm": "i"},
        {"chat": _any_id_ai_message_chunk(content="i")},
    ]
    # Chunk count covers the chat branch plus the llm output in two branches
    # (the "llm" branch and the passthrough of the upstream llm stream).
    assert len(streamed_chunks) == len(chat_res) + len(llm_res) + len(llm_res)
    assert all(len(c.keys()) == 1 for c in streamed_chunks)
    assert final_value is not None
    assert final_value.get("chat").content == "i'm a chatbot"
    assert final_value.get("llm") == "i'm a textbot"
    assert final_value.get("passthrough") == "i'm a textbot"
async def test_map_astream() -> None:
    """Async map streaming, plus astream_log state accumulation and name filters."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )

    chat_res = "i'm a chatbot"
    # sleep to better simulate a real stream
    chat = FakeListChatModel(responses=[chat_res], sleep=0.01)

    llm_res = "i'm a textbot"
    # sleep to better simulate a real stream
    llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)

    chain: Runnable = prompt | {
        "chat": chat.bind(stop=["Thought:"]),
        "llm": llm,
        "passthrough": RunnablePassthrough(),
    }

    stream = chain.astream({"question": "What is your name?"})

    # Accumulate chunks while also folding them into a single final value.
    final_value = None
    streamed_chunks = []
    async for chunk in stream:
        streamed_chunks.append(chunk)
        if final_value is None:
            final_value = chunk
        else:
            final_value += chunk

    assert streamed_chunks[0] in [
        {"passthrough": prompt.invoke({"question": "What is your name?"})},
        {"llm": "i"},
        {"chat": _any_id_ai_message_chunk(content="i")},
    ]
    assert len(streamed_chunks) == len(chat_res) + len(llm_res) + 1
    assert all(len(c.keys()) == 1 for c in streamed_chunks)
    assert final_value is not None
    assert final_value.get("chat").content == "i'm a chatbot"
    # Replace the generated message id with an AnyStr matcher so the later
    # final_output comparison is id-insensitive.
    final_value["chat"].id = AnyStr()
    assert final_value.get("llm") == "i'm a textbot"
    assert final_value.get("passthrough") == prompt.invoke(
        {"question": "What is your name?"}
    )

    # Test astream_log state accumulation
    final_state = None
    streamed_ops = []
    async for chunk in chain.astream_log({"question": "What is your name?"}):
        streamed_ops.extend(chunk.ops)
        if final_state is None:
            final_state = chunk
        else:
            final_state += chunk
    final_state = cast("RunLog", final_state)

    assert final_state.state["final_output"] == final_value
    assert len(final_state.state["streamed_output"]) == len(streamed_chunks)
    assert isinstance(final_state.state["id"], str)
    assert len(final_state.ops) == len(streamed_ops)
    assert len(final_state.state["logs"]) == 5
    assert (
        final_state.state["logs"]["ChatPromptTemplate"]["name"] == "ChatPromptTemplate"
    )
    assert final_state.state["logs"]["ChatPromptTemplate"][
        "final_output"
    ] == prompt.invoke({"question": "What is your name?"})
    assert (
        final_state.state["logs"]["RunnableParallel<chat,llm,passthrough>"]["name"]
        == "RunnableParallel<chat,llm,passthrough>"
    )
    assert sorted(final_state.state["logs"]) == [
        "ChatPromptTemplate",
        "FakeListChatModel",
        "FakeStreamingListLLM",
        "RunnableParallel<chat,llm,passthrough>",
        "RunnablePassthrough",
    ]

    # Test astream_log with include filters
    final_state = None
    async for chunk in chain.astream_log(
        {"question": "What is your name?"}, include_names=["FakeListChatModel"]
    ):
        if final_state is None:
            final_state = chunk
        else:
            final_state += chunk
    final_state = cast("RunLog", final_state)

    assert final_state.state["final_output"] == final_value
    assert len(final_state.state["streamed_output"]) == len(streamed_chunks)
    # Only the included run appears in the logs.
    assert len(final_state.state["logs"]) == 1
    assert final_state.state["logs"]["FakeListChatModel"]["name"] == "FakeListChatModel"

    # Test astream_log with exclude filters
    final_state = None
    async for chunk in chain.astream_log(
        {"question": "What is your name?"}, exclude_names=["FakeListChatModel"]
    ):
        if final_state is None:
            final_state = chunk
        else:
            final_state += chunk
    final_state = cast("RunLog", final_state)

    assert final_state.state["final_output"] == final_value
    assert len(final_state.state["streamed_output"]) == len(streamed_chunks)
    # The excluded run is dropped; the other four remain.
    assert len(final_state.state["logs"]) == 4
    assert (
        final_state.state["logs"]["ChatPromptTemplate"]["name"] == "ChatPromptTemplate"
    )
    assert final_state.state["logs"]["ChatPromptTemplate"]["final_output"] == (
        prompt.invoke({"question": "What is your name?"})
    )
    assert (
        final_state.state["logs"]["RunnableParallel<chat,llm,passthrough>"]["name"]
        == "RunnableParallel<chat,llm,passthrough>"
    )
    assert sorted(final_state.state["logs"]) == [
        "ChatPromptTemplate",
        "FakeStreamingListLLM",
        "RunnableParallel<chat,llm,passthrough>",
        "RunnablePassthrough",
    ]
async def test_map_astream_iterator_input() -> None:
    """Async streaming of a map driven by an upstream LLM stream."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )

    chat_res = "i'm a chatbot"
    # sleep to better simulate a real stream
    chat = FakeListChatModel(responses=[chat_res], sleep=0.01)

    llm_res = "i'm a textbot"
    # sleep to better simulate a real stream
    llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)

    chain: Runnable = (
        prompt
        | llm
        | {
            "chat": chat.bind(stop=["Thought:"]),
            "llm": llm,
            "passthrough": RunnablePassthrough(),
        }
    )

    stream = chain.astream({"question": "What is your name?"})

    # Accumulate chunks while also folding them into a single final value.
    final_value = None
    streamed_chunks = []
    async for chunk in stream:
        streamed_chunks.append(chunk)
        if final_value is None:
            final_value = chunk
        else:
            final_value += chunk

    # Consistency fix: compare chat chunks with _any_id_ai_message_chunk, as
    # the sync variant does — a bare AIMessageChunk may not compare equal
    # because message ids are generated.
    assert streamed_chunks[0] in [
        {"passthrough": "i"},
        {"llm": "i"},
        {"chat": _any_id_ai_message_chunk(content="i")},
    ]
    assert len(streamed_chunks) == len(chat_res) + len(llm_res) + len(llm_res)
    assert all(len(c.keys()) == 1 for c in streamed_chunks)
    assert final_value is not None
    assert final_value.get("chat").content == "i'm a chatbot"
    assert final_value.get("llm") == "i'm a textbot"
    assert final_value.get("passthrough") == llm_res

    # A RunnableMap round-trips through serialization.
    simple_map = RunnableMap(passthrough=RunnablePassthrough())
    assert loads(dumps(simple_map)) == simple_map
def test_with_config_with_config() -> None:
    """Chained with_config calls serialize the same as one merged config."""
    llm = FakeListLLM(responses=["i'm a textbot"])
    chained = llm.with_config({"metadata": {"a": "b"}}).with_config(tags=["a-tag"])
    merged = llm.with_config({"metadata": {"a": "b"}, "tags": ["a-tag"]})
    assert dumpd(chained) == dumpd(merged)
def test_metadata_is_merged() -> None:
    """Metadata defined in with_config and at invocation time is merged."""
    foo = RunnableLambda(lambda x: x).with_config({"metadata": {"my_key": "my_value"}})
    expected_metadata = {
        "my_key": "my_value",
        "my_other_key": "my_other_value",
    }
    with collect_runs() as cb:
        foo.invoke("hi", {"metadata": {"my_other_key": "my_other_value"}})
        run = cb.traced_runs[0]
    assert run.extra is not None
    assert run.extra["metadata"] == expected_metadata
def test_tags_are_appended() -> None:
    """Tags from with_config are concatenated with those in invocation."""
    tagged = RunnableLambda(lambda x: x).with_config({"tags": ["my_key"]})
    with collect_runs() as cb:
        tagged.invoke("hi", {"tags": ["invoked_key"]})
        run = cb.traced_runs[0]
    assert isinstance(run.tags, list)
    # Compare order-insensitively: both tag sources must be present.
    assert sorted(run.tags) == sorted(["my_key", "invoked_key"])
def test_bind_bind() -> None:
    """Chained .bind calls accumulate kwargs, later calls overriding earlier ones."""
    llm = FakeListLLM(responses=["i'm a textbot"])
    twice_bound = llm.bind(stop=["Thought:"], one="two").bind(
        stop=["Observation:"], hello="world"
    )
    once_bound = llm.bind(stop=["Observation:"], one="two", hello="world")
    assert dumpd(twice_bound) == dumpd(once_bound)
def test_bind_with_lambda() -> None:
    """Kwargs bound on a RunnableLambda are forwarded to the wrapped function."""

    def add_n(_: Any, **kwargs: Any) -> int:
        return 3 + kwargs.get("n", 0)

    bound = RunnableLambda(add_n).bind(n=1)
    assert bound.invoke({}) == 4
    assert list(bound.stream({})) == [4]
async def test_bind_with_lambda_async() -> None:
    """Kwargs bound on a RunnableLambda are forwarded on the async paths too."""

    def add_n(_: Any, **kwargs: Any) -> int:
        return 3 + kwargs.get("n", 0)

    bound = RunnableLambda(add_n).bind(n=1)
    assert await bound.ainvoke({}) == 4
    assert [item async for item in bound.astream({})] == [4]
def test_deep_stream() -> None:
    """Streaming propagates through every step of a nested sequence."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    llm = FakeStreamingListLLM(responses=["foo-lish"])

    chain = prompt | llm | StrOutputParser()

    stream = chain.stream({"question": "What up"})

    chunks = list(stream)
    assert len(chunks) == len("foo-lish")
    assert "".join(chunks) == "foo-lish"

    # Idiom fix: collect the stream with list() directly instead of a manual
    # append loop, matching the style used above.
    chunks = list((chain | RunnablePassthrough()).stream({"question": "What up"}))
    assert len(chunks) == len("foo-lish")
    assert "".join(chunks) == "foo-lish"
def test_deep_stream_assign() -> None:
    """Streaming through .assign() first passes through input chunks, then streams
    the newly assigned keys."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    llm = FakeStreamingListLLM(responses=["foo-lish"])

    chain: Runnable = prompt | llm | {"str": StrOutputParser()}

    stream = chain.stream({"question": "What up"})

    chunks = list(stream)
    assert len(chunks) == len("foo-lish")
    assert add(chunks) == {"str": "foo-lish"}

    chain_with_assign = chain.assign(hello=itemgetter("str") | llm)

    assert chain_with_assign.get_input_jsonschema() == {
        "title": "PromptInput",
        "type": "object",
        "properties": {"question": {"title": "Question", "type": "string"}},
        "required": ["question"],
    }
    assert chain_with_assign.get_output_jsonschema() == {
        "title": "RunnableSequenceOutput",
        "type": "object",
        "properties": {
            "str": {"title": "Str", "type": "string"},
            "hello": {"title": "Hello", "type": "string"},
        },
        "required": ["str", "hello"],
    }

    # Idiom fix: collect the stream with list() instead of a manual append loop.
    chunks = list(chain_with_assign.stream({"question": "What up"}))
    assert len(chunks) == len("foo-lish") * 2
    assert chunks == [
        # first stream passthrough input chunks
        {"str": "f"},
        {"str": "o"},
        {"str": "o"},
        {"str": "-"},
        {"str": "l"},
        {"str": "i"},
        {"str": "s"},
        {"str": "h"},
        # then stream assign output chunks
        {"hello": "f"},
        {"hello": "o"},
        {"hello": "o"},
        {"hello": "-"},
        {"hello": "l"},
        {"hello": "i"},
        {"hello": "s"},
        {"hello": "h"},
    ]
    assert add(chunks) == {"str": "foo-lish", "hello": "foo-lish"}
    assert chain_with_assign.invoke({"question": "What up"}) == {
        "str": "foo-lish",
        "hello": "foo-lish",
    }

    chain_with_assign_shadow = chain.assign(
        str=lambda _: "shadow",
        hello=itemgetter("str") | llm,
    )

    assert chain_with_assign_shadow.get_input_jsonschema() == {
        "title": "PromptInput",
        "type": "object",
        "properties": {"question": {"title": "Question", "type": "string"}},
        "required": ["question"],
    }
    # The shadowed "str" key loses its string type in the schema.
    assert chain_with_assign_shadow.get_output_jsonschema() == {
        "title": "RunnableSequenceOutput",
        "type": "object",
        "properties": {
            "str": {"title": "Str"},
            "hello": {"title": "Hello", "type": "string"},
        },
        "required": ["str", "hello"],
    }

    chunks = list(chain_with_assign_shadow.stream({"question": "What up"}))
    assert len(chunks) == len("foo-lish") + 1
    assert add(chunks) == {"str": "shadow", "hello": "foo-lish"}
    assert chain_with_assign_shadow.invoke({"question": "What up"}) == {
        "str": "shadow",
        "hello": "foo-lish",
    }
async def test_deep_astream() -> None:
    """Async streaming propagates through every step of a nested sequence."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    llm = FakeStreamingListLLM(responses=["foo-lish"])

    chain = prompt | llm | StrOutputParser()

    stream = chain.astream({"question": "What up"})

    chunks = [chunk async for chunk in stream]
    assert len(chunks) == len("foo-lish")
    assert "".join(chunks) == "foo-lish"

    # Idiom fix: async comprehension instead of a manual append loop,
    # matching the style used above.
    chunks = [
        chunk
        async for chunk in (chain | RunnablePassthrough()).astream(
            {"question": "What up"}
        )
    ]
    assert len(chunks) == len("foo-lish")
    assert "".join(chunks) == "foo-lish"
async def test_deep_astream_assign() -> None:
    """Async variant: streaming through .assign() first passes through input
    chunks, then streams the newly assigned keys."""
    prompt = (
        SystemMessagePromptTemplate.from_template("You are a nice assistant.")
        + "{question}"
    )
    llm = FakeStreamingListLLM(responses=["foo-lish"])

    chain: Runnable = prompt | llm | {"str": StrOutputParser()}

    stream = chain.astream({"question": "What up"})

    chunks = [chunk async for chunk in stream]
    assert len(chunks) == len("foo-lish")
    assert add(chunks) == {"str": "foo-lish"}

    chain_with_assign = chain.assign(
        hello=itemgetter("str") | llm,
    )

    assert chain_with_assign.get_input_jsonschema() == {
        "title": "PromptInput",
        "type": "object",
        "properties": {"question": {"title": "Question", "type": "string"}},
        "required": ["question"],
    }
    assert chain_with_assign.get_output_jsonschema() == {
        "title": "RunnableSequenceOutput",
        "type": "object",
        "properties": {
            "str": {"title": "Str", "type": "string"},
            "hello": {"title": "Hello", "type": "string"},
        },
        "required": ["str", "hello"],
    }

    # Idiom fix: async comprehension instead of a manual append loop.
    chunks = [
        chunk async for chunk in chain_with_assign.astream({"question": "What up"})
    ]
    assert len(chunks) == len("foo-lish") * 2
    assert chunks == [
        # first stream passthrough input chunks
        {"str": "f"},
        {"str": "o"},
        {"str": "o"},
        {"str": "-"},
        {"str": "l"},
        {"str": "i"},
        {"str": "s"},
        {"str": "h"},
        # then stream assign output chunks
        {"hello": "f"},
        {"hello": "o"},
        {"hello": "o"},
        {"hello": "-"},
        {"hello": "l"},
        {"hello": "i"},
        {"hello": "s"},
        {"hello": "h"},
    ]
    assert add(chunks) == {"str": "foo-lish", "hello": "foo-lish"}
    assert await chain_with_assign.ainvoke({"question": "What up"}) == {
        "str": "foo-lish",
        "hello": "foo-lish",
    }

    chain_with_assign_shadow = chain | RunnablePassthrough.assign(
        str=lambda _: "shadow",
        hello=itemgetter("str") | llm,
    )

    assert chain_with_assign_shadow.get_input_jsonschema() == {
        "title": "PromptInput",
        "type": "object",
        "properties": {"question": {"title": "Question", "type": "string"}},
        "required": ["question"],
    }
    # The shadowed "str" key loses its string type in the schema.
    assert chain_with_assign_shadow.get_output_jsonschema() == {
        "title": "RunnableSequenceOutput",
        "type": "object",
        "properties": {
            "str": {"title": "Str"},
            "hello": {"title": "Hello", "type": "string"},
        },
        "required": ["str", "hello"],
    }

    chunks = [
        chunk
        async for chunk in chain_with_assign_shadow.astream({"question": "What up"})
    ]
    assert len(chunks) == len("foo-lish") + 1
    assert add(chunks) == {"str": "shadow", "hello": "foo-lish"}
    assert await chain_with_assign_shadow.ainvoke({"question": "What up"}) == {
        "str": "shadow",
        "hello": "foo-lish",
    }
def test_runnable_sequence_transform() -> None:
    """RunnableSequence.transform streams an input iterator through the chain."""
    llm = FakeStreamingListLLM(responses=["foo-lish"])
    chain: Runnable = llm | StrOutputParser()

    pieces = list(chain.transform(llm.stream("Hi there!")))
    assert len(pieces) == len("foo-lish")
    assert "".join(pieces) == "foo-lish"
async def test_runnable_sequence_atransform() -> None:
    """RunnableSequence.atransform streams an async input iterator through the chain."""
    llm = FakeStreamingListLLM(responses=["foo-lish"])
    chain: Runnable = llm | StrOutputParser()

    pieces = [piece async for piece in chain.atransform(llm.astream("Hi there!"))]
    assert len(pieces) == len("foo-lish")
    assert "".join(pieces) == "foo-lish"
| FakeRetriever |
python | keras-team__keras | keras/src/utils/tracking.py | {
"start": 4589,
"end": 6861
class ____(list):
    """A list that keeps an optional tracker in sync with its contents.

    Every mutation calls ``tracker.track``/``tracker.untrack`` on the affected
    values (when a tracker is set). Also implements the flatten/unflatten
    protocols expected by optree/dmtree and torch pytree utilities.
    """

    def __init__(self, values=None, tracker=None):
        # tracker may be None, in which case this behaves like a plain list.
        self.tracker = tracker
        if tracker and values:
            values = [tracker.track(v) for v in values]
        super().__init__(values or [])

    def append(self, value):
        if self.tracker:
            self.tracker.track(value)
        super().append(value)

    def insert(self, index, value):
        if self.tracker:
            self.tracker.track(value)
        super().insert(index, value)

    def extend(self, values):
        if self.tracker:
            values = [self.tracker.track(v) for v in values]
        super().extend(values)

    def remove(self, value):
        if self.tracker:
            self.tracker.untrack(value)
        try:
            super().remove(value)
        except ValueError:
            # Fall back to identity-based removal for values whose equality
            # comparison does not behave like a plain bool.
            python_utils.remove_by_id(self, value)

    def pop(self, index=-1):
        # Bug fix: the original duplicated `return super().pop(index)` in both
        # branches of an if/else; only the untrack call is conditional.
        if self.tracker:
            self.tracker.untrack(self[index])
        return super().pop(index)

    def clear(self):
        if self.tracker:
            for value in self:
                self.tracker.untrack(value)
        super().clear()

    def __delitem__(self, index):
        value = self[index]  # Get value before removing
        super().__delitem__(index)
        if self.tracker:
            self.tracker.untrack(value)

    def tree_flatten(self):
        # For optree / dmtree
        return (self, None)

    @classmethod
    def tree_unflatten(cls, metadata, children):
        # For optree / dmtree
        return cls(children)

    def torchtree_flatten(self):
        # For torchtree
        # Returns (values, metadata)
        return (self, None)

    @classmethod
    def torchtree_unflatten(cls, children, metadata):
        # For torchtree
        # Requires (children, metadata)
        return cls(children)

    def torchtree_flatten_with_keys(self):
        # For torchtree
        # Returns (children, metadata)
        from torch.utils import _pytree as torch_tree

        values, context = self.torchtree_flatten()
        return [
            (torch_tree.SequenceKey(i), v) for i, v in enumerate(values)
        ], context
@tree.register_tree_node_class
| TrackedList |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_tutorial02.py | {
"start": 315,
"end": 1927
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("tutorial02.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file(self):
"""Example spreadsheet used in the tutorial 2."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Add a bold format to use to highlight cells.
bold = workbook.add_format({"bold": True})
# Add a number format for cells with money.
money_format = workbook.add_format({"num_format": "\\$#,##0"})
# Write some data headers.
worksheet.write("A1", "Item", bold)
worksheet.write("B1", "Cost", bold)
# Some data we want to write to the worksheet.
expenses = (
["Rent", 1000],
["Gas", 100],
["Food", 300],
["Gym", 50],
)
# Start from the first cell below the headers.
row = 1
col = 0
# Iterate over the data and write it out row by row.
for item, cost in expenses:
worksheet.write(row, col, item)
worksheet.write(row, col + 1, cost, money_format)
row += 1
# Write a total using a formula.
worksheet.write(row, 0, "Total", bold)
worksheet.write(row, 1, "=SUM(B2:B5)", money_format, 1450)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | sqlalchemy__sqlalchemy | test/orm/test_cascade.py | {
"start": 23341,
"end": 25963
} | class ____(fixtures.MappedTest):
run_inserts = None
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30)),
)
Table(
"orders",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", Integer, ForeignKey("users.id")),
Column("description", String(30)),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Order(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
User, Order, orders, users = (
cls.classes.User,
cls.classes.Order,
cls.tables.orders,
cls.tables.users,
)
cls.mapper_registry.map_imperatively(
User,
users,
properties=dict(
orders=relationship(
cls.mapper_registry.map_imperatively(Order, orders),
cascade="all",
)
),
)
def test_cascade_delete_noorphans(self):
User, Order, orders, users = (
self.classes.User,
self.classes.Order,
self.tables.orders,
self.tables.users,
)
with fixture_session() as sess:
u = User(
name="jack",
orders=[
Order(description="someorder"),
Order(description="someotherorder"),
],
)
sess.add(u)
sess.flush()
eq_(
sess.execute(
select(func.count("*")).select_from(users)
).scalar(),
1,
)
eq_(
sess.execute(
select(func.count("*")).select_from(orders)
).scalar(),
2,
)
del u.orders[0]
sess.delete(u)
sess.flush()
eq_(
sess.execute(
select(func.count("*")).select_from(users)
).scalar(),
0,
)
eq_(
sess.execute(
select(func.count("*")).select_from(orders)
).scalar(),
1,
)
| O2MCascadeDeleteNoOrphanTest |
python | ray-project__ray | ci/ray_ci/bisect/validator.py | {
"start": 48,
"end": 267
} | class ____(abc.ABC):
@abc.abstractmethod
def run(self, test: Test, revision: str) -> bool:
"""
Validate whether the test is passing or failing on the given revision
"""
pass
| Validator |
python | fluentpython__example-code-2e | 14-inheritance/diamond2.py | {
"start": 1614,
"end": 1720
} | class ____(A, U):
def ping(self):
print(f'{self}.ping() in LeafAU')
super().ping()
| LeafAU |
python | PrefectHQ__prefect | src/prefect/client/orchestration/_blocks_schemas/client.py | {
"start": 3386,
"end": 6424
} | class ____(BaseAsyncClient):
async def create_block_schema(
self, block_schema: "BlockSchemaCreate"
) -> "BlockSchema":
"""
Create a block schema in the Prefect API.
"""
try:
response = await self.request(
"POST",
"/block_schemas/",
json=block_schema.model_dump(
mode="json",
exclude_unset=True,
exclude={"id", "block_type", "checksum"},
),
)
except HTTPStatusError as e:
if e.response.status_code == 409:
raise ObjectAlreadyExists(http_exc=e) from e
else:
raise
from prefect.client.schemas.objects import BlockSchema
return BlockSchema.model_validate(response.json())
async def read_block_schema_by_checksum(
self, checksum: str, version: str | None = None
) -> "BlockSchema":
"""
Look up a block schema checksum
"""
try:
response = await self.request(
"GET",
"/block_schemas/checksum/{checksum}",
path_params={"checksum": checksum},
**({"params": {"version": version}} if version else {}),
)
except HTTPStatusError as e:
if e.response.status_code == 404:
raise ObjectNotFound(http_exc=e) from e
else:
raise
from prefect.client.schemas.objects import BlockSchema
return BlockSchema.model_validate(response.json())
async def read_block_schemas(self) -> "list[BlockSchema]":
"""
Read all block schemas
Raises:
httpx.RequestError: if a valid block schema was not found
Returns:
A BlockSchema.
"""
response = await self.request("POST", "/block_schemas/filter", json={})
from prefect.client.schemas.objects import BlockSchema
return BlockSchema.model_validate_list(response.json())
async def get_most_recent_block_schema_for_block_type(
self,
block_type_id: "UUID",
) -> "BlockSchema | None":
"""
Fetches the most recent block schema for a specified block type ID.
Args:
block_type_id: The ID of the block type.
Raises:
httpx.RequestError: If the request fails for any reason.
Returns:
The most recent block schema or None.
"""
try:
response = await self.request(
"POST",
"/block_schemas/filter",
json={
"block_schemas": {"block_type_id": {"any_": [str(block_type_id)]}},
"limit": 1,
},
)
except HTTPStatusError:
raise
from prefect.client.schemas.objects import BlockSchema
return next(iter(BlockSchema.model_validate_list(response.json())), None)
| BlocksSchemaAsyncClient |
python | wandb__wandb | wandb/vendor/pygments/lexers/configs.py | {
"start": 26920,
"end": 28266
} | class ____(RegexLexer):
"""
Lexer for `pacman.conf
<https://www.archlinux.org/pacman/pacman.conf.5.html>`_.
Actually, IniLexer works almost fine for this format,
but it yield error token. It is because pacman.conf has
a form without assignment like:
UseSyslog
Color
TotalDownload
CheckSpace
VerbosePkgLists
These are flags to switch on.
.. versionadded:: 2.1
"""
name = 'PacmanConf'
aliases = ['pacmanconf']
filenames = ['pacman.conf']
mimetypes = []
tokens = {
'root': [
# comment
(r'#.*$', Comment.Single),
# section header
(r'^\s*\[.*?\]\s*$', Keyword),
# variable definitions
# (Leading space is allowed...)
(r'(\w+)(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
# flags to on
(r'^(\s*)(\w+)(\s*)$',
bygroups(Text, Name.Attribute, Text)),
# built-in special values
(words((
'$repo', # repository
'$arch', # architecture
'%o', # outfile
'%u', # url
), suffix=r'\b'),
Name.Variable),
# fallback
(r'.', Text),
],
}
| PacmanConfLexer |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py | {
"start": 14515,
"end": 16109
} | class ____(test.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(
("none_none", None, None, None),
("none_true", None, True, True),
("true_none", True, None, True),
("true_true", True, True, True),
("none_false", None, False, False),
("false_none", False, None, False),
("false_false", False, False, False),
)
def test_computes_an_or_if_non_contradicting(self, operator_hint_value,
provided_hint_value,
expected_result):
self.assertEqual(
expected_result,
linear_operator_util.use_operator_or_provided_hint_unless_contradicting(
operator=DummyOperatorWithHint(my_hint=operator_hint_value),
hint_attr_name="my_hint",
provided_hint_value=provided_hint_value,
message="should not be needed here"))
@parameterized.named_parameters(
("true_false", True, False),
("false_true", False, True),
)
def test_raises_if_contradicting(self, operator_hint_value,
provided_hint_value):
with self.assertRaisesRegex(ValueError, "my error message"):
linear_operator_util.use_operator_or_provided_hint_unless_contradicting(
operator=DummyOperatorWithHint(my_hint=operator_hint_value),
hint_attr_name="my_hint",
provided_hint_value=provided_hint_value,
message="my error message")
| UseOperatorOrProvidedHintUnlessContradictingTest |
python | django__django | tests/admin_inlines/admin.py | {
"start": 9114,
"end": 9208
} | class ____(admin.ModelAdmin):
inlines = [ClassTabularHorizontal]
| ClassAdminTabularHorizontal |
python | django__django | tests/model_formsets/models.py | {
"start": 229,
"end": 299
} | class ____(Author):
write_speed = models.IntegerField()
| BetterAuthor |
python | pytorch__pytorch | test/dynamo/test_graph_deduplication.py | {
"start": 2380,
"end": 3868
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[10, 10]", L_y_: "f32[10, 20]"):
subgraph_0 = self.subgraph_0
l_x_ = L_x_
l_y_ = L_y_
o1: "f32[10, 20]" = torch.sin(l_y_)
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_); invoke_subgraph = None
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, o1); o1 = None
getitem_1: "f32[]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
invoke_subgraph_2 = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_x_, l_y_); subgraph_0 = l_x_ = l_y_ = None
getitem_2: "f32[]" = invoke_subgraph_2[0]; invoke_subgraph_2 = None
o4: "f32[]" = getitem_2 * getitem_2; getitem_2 = None
mul_1: "f32[]" = getitem_1 * o4; getitem_1 = o4 = None
return (mul_1,)
class subgraph_0(torch.nn.Module):
def forward(self, subgraph_input_l_x_, subgraph_input_l_y_):
x0: "f32[10, 10]" = subgraph_input_l_x_ + 1; subgraph_input_l_x_ = None
y0: "f32[10, 20]" = subgraph_input_l_y_ + 2; subgraph_input_l_y_ = None
sum_1: "f32[]" = x0.sum(); x0 = None
sum_2: "f32[]" = y0.sum(); y0 = None
z: "f32[]" = sum_1 + sum_2; sum_1 = sum_2 = None
return (z,)
""",
)
self.assertExpectedInline(
graph_str(fw_graphs[0]),
"""\
| GraphModule |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_value_lengths.py | {
"start": 994,
"end": 3179
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.value_length.equals"
condition_value_keys = ("value",)
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, value, _metrics, **kwargs):
column_lengths, _, _ = _metrics.get(
f"column_values.value_length.{MetricPartialFunctionTypeSuffixes.MAP.value}"
)
return column_lengths == value
@column_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column, value, _metrics, **kwargs):
column_lengths, _, _ = _metrics.get(
f"column_values.value_length.{MetricPartialFunctionTypeSuffixes.MAP.value}"
)
return column_lengths == value
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, value, _metrics, **kwargs):
column_lengths, _, _ = _metrics.get(
f"column_values.value_length.{MetricPartialFunctionTypeSuffixes.MAP.value}"
)
return column_lengths == value
@classmethod
@override
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if (
metric.metric_name
== f"column_values.value_length.equals.{MetricPartialFunctionTypeSuffixes.CONDITION.value}" # noqa: E501 # FIXME CoP
):
dependencies[
f"column_values.value_length.{MetricPartialFunctionTypeSuffixes.MAP.value}"
] = MetricConfiguration(
metric_name=f"column_values.value_length.{MetricPartialFunctionTypeSuffixes.MAP.value}",
metric_domain_kwargs=metric.metric_domain_kwargs,
)
return dependencies
| ColumnValuesValueLengthEquals |
python | numba__numba | numba/core/errors.py | {
"start": 23252,
"end": 23714
} | class ____(TypingError):
"""
Failure in typing a Literal type
"""
pass
# These Exception classes are just Numba copies of their Python equivalents for
# use internally in cases where we want e.g. type inference to keep on trying.
# Exceptions extending from NumbaError are considered "special" by Numba's
# internals and are treated differently to standard Python exceptions which are
# permitted to just propagate up the stack.
| LiteralTypingError |
python | huggingface__transformers | src/transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py | {
"start": 1346,
"end": 5166
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 224, "width": 224}
do_resize = True
do_rescale = True
do_normalize = True
apply_ocr = True
ocr_lang = None
tesseract_config = ""
valid_kwargs = LayoutLMv3ImageProcessorKwargs
def __init__(self, **kwargs: Unpack[LayoutLMv3ImageProcessorKwargs]):
super().__init__(**kwargs)
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[LayoutLMv3ImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
apply_ocr: bool,
ocr_lang: Optional[str],
tesseract_config: Optional[str],
return_tensors: Optional[Union[str, TensorType]],
disable_grouping: Optional[bool],
**kwargs,
) -> BatchFeature:
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self, "pytesseract")
words_batch = []
boxes_batch = []
for image in images:
if image.is_cuda:
logger.warning_once(
"apply_ocr can only be performed on cpu. Tensors will be transferred to cpu before processing."
)
words, boxes = apply_tesseract(
image.cpu(), ocr_lang, tesseract_config, input_data_format=ChannelDimension.FIRST
)
words_batch.append(words)
boxes_batch.append(boxes)
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_center_crop:
stacked_images = self.center_crop(stacked_images, crop_size)
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
data = BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
if apply_ocr:
data["words"] = words_batch
data["boxes"] = boxes_batch
return data
__all__ = ["LayoutLMv3ImageProcessorFast"]
| LayoutLMv3ImageProcessorFast |
python | ray-project__ray | rllib/utils/schedules/exponential_schedule.py | {
"start": 294,
"end": 1833
} | class ____(Schedule):
"""Exponential decay schedule from `initial_p` to `final_p`.
Reduces output over `schedule_timesteps`. After this many time steps
always returns `final_p`.
"""
def __init__(
self,
schedule_timesteps: int,
framework: Optional[str] = None,
initial_p: float = 1.0,
decay_rate: float = 0.1,
):
"""Initializes a ExponentialSchedule instance.
Args:
schedule_timesteps: Number of time steps for which to
linearly anneal initial_p to final_p.
framework: The framework descriptor string, e.g. "tf",
"torch", or None.
initial_p: Initial output value.
decay_rate: The percentage of the original value after
100% of the time has been reached (see formula above).
>0.0: The smaller the decay-rate, the stronger the decay.
1.0: No decay at all.
"""
super().__init__(framework=framework)
assert schedule_timesteps > 0
self.schedule_timesteps = schedule_timesteps
self.initial_p = initial_p
self.decay_rate = decay_rate
@override(Schedule)
def _value(self, t: TensorType) -> TensorType:
"""Returns the result of: initial_p * decay_rate ** (`t`/t_max)."""
if self.framework == "torch" and torch and isinstance(t, torch.Tensor):
t = t.float()
return self.initial_p * self.decay_rate ** (t / self.schedule_timesteps)
| ExponentialSchedule |
python | kamyu104__LeetCode-Solutions | Python/get-maximum-in-generated-array.py | {
"start": 507,
"end": 968
} | class ____(object):
def getMaximumGenerated(self, n):
"""
:type n: int
:rtype: int
"""
if n == 0:
return 0
nums = [0]*(n+1)
nums[1] = 1
result = 1
for i in xrange(2, n+1):
if i%2 == 0:
nums[i] = nums[i//2]
else:
nums[i] = nums[i//2] + nums[i//2+1]
result = max(result, nums[i])
return result
| Solution2 |
python | tensorflow__tensorflow | third_party/xla/.github/workflows/github_api.py | {
"start": 1021,
"end": 5232
} | class ____:
"""Wraps the GitHub REST API."""
def __init__(self, token: Optional[str] = None):
self._session = requests.Session()
self._session.headers["Accept"] = "application/vnd.github+json"
if token:
self._session.headers["Authorization"] = f"token {token}"
def _make_request(
self, verb: str, endpoint: str, **kwargs: dict[str, Any]
) -> requests.Response:
"""Helper method to make a request and raise an HTTPError if one occurred.
Arguments:
verb: The HTTP verb to use
endpoint: The endpoint to make the request to
**kwargs: The json that will be sent as the body of the request.
Returns:
a requests.Response object containing the response from the API.
Raises:
requests.exceptions.HTTPError
"""
res = self._session.request(
verb,
urllib.parse.urljoin("https://api.github.com", endpoint),
json=kwargs,
)
res.raise_for_status()
return res.json()
def get_commit(self, repo: str, commit_id: str) -> requests.Response:
"""Gets a commit by it's SHA-1 hash.
https://docs.github.com/en/rest/commits/commits?apiVersion=2022-11-28#get-a-
commit
Arguments:
repo: a string of the form `owner/repo_name`, e.g. openxla/xla.
commit_id: a string describing the commit to get, e.g. `deadbeef` or
`HEAD`.
Returns:
a requests.Response object containing the response from the API.
Raises:
requests.exceptions.HTTPError
"""
endpoint = f"repos/{repo}/commits/{commit_id}"
return self._make_request("GET", endpoint)
def write_issue_comment(
self, repo: str, issue_number: int, body: str
) -> requests.Response:
"""Writes a comment on an issue (or PR).
https://docs.github.com/en/rest/issues/comments?apiVersion=2022-11-
28#create-an-issue-comment
Arguments:
repo: a string of the form `owner/repo_name`, e.g. openxla/xla
issue_number: the issue (or PR) to comment on
body: the body of the comment
Returns:
a requests.Response object containing the response from the API.
Raises:
requests.exceptions.HTTPError
"""
endpoint = f"repos/{repo}/issues/{issue_number}/comments"
return self._make_request("POST", endpoint, body=body)
def set_issue_status(
self, repo: str, issue_number: int, status: str
) -> requests.Response:
"""Sets the status of an issue (or PR).
https://docs.github.com/en/rest/issues/issues?apiVersion=2022-11-28#update-
an-issue
Arguments:
repo: a string of the form `owner/repo_name`, e.g. openxla/xla
issue_number: the issue (or PR) to set the status of
status: the status to set
Returns:
a requests.Response object containing the response from the API.
Raises:
requests.exceptions.HTTPError
"""
endpoint = f"repos/{repo}/issues/{issue_number}"
return self._make_request("POST", endpoint, status=status)
def add_issue_labels(
self, repo: str, issue_number: int, labels: list[str]
) -> requests.Response:
"""Adds labels to an issue (or PR).
https://docs.github.com/en/actions/managing-issues-and-pull-requests/adding-labels-to-issues
Arguments:
repo: a string of the form `owner/repo_name`, e.g. openxla/xla
issue_number: the issue (or PR) to set the status of
labels: the labels to add to the issue
Returns:
a requests.Response object containing the response from the API.
Raises:
requests.exceptions.HTTPError
"""
endpoint = f"repos/{repo}/issues/{issue_number}/labels"
return self._make_request("POST", endpoint, labels=labels)
def get_user_orgs(self, username: str) -> requests.Response:
"""Gets all public org memberships for a user.
https://docs.github.com/en/rest/orgs/orgs?apiVersion=2022-11-28#list-organizations-for-a-user
Arguments:
username: The user's GitHub username as a string.
Returns:
a requests.Response object containing the response from the API.
Raises:
requests.exceptions.HTTPError
"""
endpoint = f"users/{username}/orgs"
return self._make_request("GET", endpoint, username=username)
| GitHubAPI |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/langhelpers.py | {
"start": 47543,
"end": 48329
} | class ____(Generic[_T]):
def __init__(self, func: Callable[..., _T]):
self.func = func
self.clslevel = func
self.setfn: Optional[Callable[..., Any]] = None
def __get__(self, instance: Any, owner: Any) -> _T:
if instance is None:
clsval = self.clslevel(owner)
return clsval
else:
return self.func(instance)
def __set__(self, instance: Any, value: Any) -> None:
assert self.setfn is not None
self.setfn(instance, value)
def setter(self, func: Callable[..., Any]) -> rw_hybridproperty[_T]:
self.setfn = func
return self
def classlevel(self, func: Callable[..., Any]) -> rw_hybridproperty[_T]:
self.clslevel = func
return self
| rw_hybridproperty |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis13.py | {
"start": 315,
"end": 1593
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis13.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "scatter"})
chart.axis_ids = [54045312, 54043776]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.set_y_axis({"min": 0, "max": 16})
chart.set_x_axis({"min": 0, "max": 6})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_db_command.py | {
"start": 26242,
"end": 38248
} | class ____:
@classmethod
def setup_class(cls):
cls.parser = cli_parser.get_parser()
@pytest.mark.parametrize("timezone", ["UTC", "Europe/Berlin", "America/Los_Angeles"])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_date_timezone_omitted(self, run_cleanup_mock, timezone):
"""
When timezone omitted we should always expect that the timestamp is
coerced to tz-aware with default timezone
"""
timestamp = "2021-01-01 00:00:00"
with patch("airflow.settings.TIMEZONE", pendulum.timezone(timezone)):
args = self.parser.parse_args(["db", "clean", "--clean-before-timestamp", f"{timestamp}", "-y"])
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=False,
clean_before_timestamp=pendulum.parse(timestamp, tz=timezone),
verbose=False,
confirm=False,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize("timezone", ["UTC", "Europe/Berlin", "America/Los_Angeles"])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_date_timezone_supplied(self, run_cleanup_mock, timezone):
"""
When tz included in the string then default timezone should not be used.
"""
timestamp = "2021-01-01 00:00:00+03:00"
with patch("airflow.settings.TIMEZONE", pendulum.timezone(timezone)):
args = self.parser.parse_args(["db", "clean", "--clean-before-timestamp", f"{timestamp}", "-y"])
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=False,
clean_before_timestamp=pendulum.parse(timestamp),
verbose=False,
confirm=False,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(("confirm_arg", "expected"), [(["-y"], False), ([], True)])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_confirm(self, run_cleanup_mock, confirm_arg, expected):
"""
When ``-y`` provided, ``confirm`` should be false.
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*confirm_arg,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=False,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=expected,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(("extra_arg", "expected"), [(["--skip-archive"], True), ([], False)])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_skip_archive(self, run_cleanup_mock, extra_arg, expected):
"""
When ``--skip-archive`` provided, ``skip_archive`` should be True (False otherwise).
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*extra_arg,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=False,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=True,
skip_archive=expected,
batch_size=None,
)
@pytest.mark.parametrize(("dry_run_arg", "expected"), [(["--dry-run"], True), ([], False)])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_dry_run(self, run_cleanup_mock, dry_run_arg, expected):
"""
When tz included in the string then default timezone should not be used.
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*dry_run_arg,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=expected,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=True,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(
("extra_args", "expected"), [(["--tables", "hello, goodbye"], ["hello", "goodbye"]), ([], None)]
)
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_tables(self, run_cleanup_mock, extra_args, expected):
"""
When tz included in the string then default timezone should not be used.
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*extra_args,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=expected,
dry_run=False,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=True,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(("extra_args", "expected"), [(["--verbose"], True), ([], False)])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_verbose(self, run_cleanup_mock, extra_args, expected):
"""
When tz included in the string then default timezone should not be used.
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*extra_args,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=False,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=expected,
confirm=True,
skip_archive=False,
batch_size=None,
)
@pytest.mark.parametrize(("extra_args", "expected"), [(["--batch-size", "1234"], 1234), ([], None)])
@patch("airflow.cli.commands.db_command.run_cleanup")
def test_batch_size(self, run_cleanup_mock, extra_args, expected):
"""
batch_size should be forwarded to run_cleanup with correct type.
"""
args = self.parser.parse_args(
[
"db",
"clean",
"--clean-before-timestamp",
"2021-01-01",
*extra_args,
]
)
db_command.cleanup_tables(args)
run_cleanup_mock.assert_called_once_with(
table_names=None,
dry_run=False,
clean_before_timestamp=pendulum.parse("2021-01-01 00:00:00Z"),
verbose=False,
confirm=True,
skip_archive=False,
batch_size=expected,
)
@patch("airflow.cli.commands.db_command.export_archived_records")
@patch("airflow.cli.commands.db_command.os.path.isdir", return_value=True)
def test_export_archived_records(self, os_mock, export_archived_mock):
args = self.parser.parse_args(
[
"db",
"export-archived",
"--output-path",
"path",
]
)
db_command.export_archived(args)
export_archived_mock.assert_called_once_with(
export_format="csv", output_path="path", table_names=None, drop_archives=False, needs_confirm=True
)
@pytest.mark.parametrize(
("extra_args", "expected"), [(["--tables", "hello, goodbye"], ["hello", "goodbye"]), ([], None)]
)
@patch("airflow.cli.commands.db_command.export_archived_records")
@patch("airflow.cli.commands.db_command.os.path.isdir", return_value=True)
def test_tables_in_export_archived_records_command(
self, os_mock, export_archived_mock, extra_args, expected
):
args = self.parser.parse_args(
[
"db",
"export-archived",
"--output-path",
"path",
*extra_args,
]
)
db_command.export_archived(args)
export_archived_mock.assert_called_once_with(
export_format="csv",
output_path="path",
table_names=expected,
drop_archives=False,
needs_confirm=True,
)
@pytest.mark.parametrize(("extra_args", "expected"), [(["--drop-archives"], True), ([], False)])
@patch("airflow.cli.commands.db_command.export_archived_records")
@patch("airflow.cli.commands.db_command.os.path.isdir", return_value=True)
def test_drop_archives_in_export_archived_records_command(
self, os_mock, export_archived_mock, extra_args, expected
):
args = self.parser.parse_args(
[
"db",
"export-archived",
"--output-path",
"path",
*extra_args,
]
)
db_command.export_archived(args)
export_archived_mock.assert_called_once_with(
export_format="csv",
output_path="path",
table_names=None,
drop_archives=expected,
needs_confirm=True,
)
@pytest.mark.parametrize(
("extra_args", "expected"), [(["--tables", "hello, goodbye"], ["hello", "goodbye"]), ([], None)]
)
@patch("airflow.cli.commands.db_command.drop_archived_tables")
def test_tables_in_drop_archived_records_command(self, mock_drop_archived_records, extra_args, expected):
args = self.parser.parse_args(
[
"db",
"drop-archived",
*extra_args,
]
)
db_command.drop_archived(args)
mock_drop_archived_records.assert_called_once_with(table_names=expected, needs_confirm=True)
@pytest.mark.parametrize(("extra_args", "expected"), [(["-y"], False), ([], True)])
@patch("airflow.cli.commands.db_command.drop_archived_tables")
def test_confirm_in_drop_archived_records_command(self, mock_drop_archived_records, extra_args, expected):
args = self.parser.parse_args(
[
"db",
"drop-archived",
*extra_args,
]
)
db_command.drop_archived(args)
mock_drop_archived_records.assert_called_once_with(table_names=None, needs_confirm=expected)
def test_get_version_revision():
heads: dict[str, str] = {
"2.10.0": "22ed7efa9da2",
"2.10.3": "5f2621c13b39",
"3.0.0": "29ce7909c52b",
"3.0.3": "fe199e1abd77",
"3.1.0": "808787349f22",
}
assert db_command._get_version_revision("3.1.0", heads) == "808787349f22"
assert db_command._get_version_revision("3.1.1", heads) == "808787349f22"
assert db_command._get_version_revision("2.11.1", heads) == "5f2621c13b39"
assert db_command._get_version_revision("2.10.1", heads) == "22ed7efa9da2"
assert db_command._get_version_revision("2.0.0", heads) is None
@pytest.mark.parametrize(
("raw", "expected"),
[
("pa!sw0rd#", '"pa!sw0rd#"'),
('he"llo', '"he\\"llo"'),
("path\\file", '"path\\\\file"'),
(None, ""),
],
)
def test_quote_mysql_password_for_cnf(raw, expected):
password = db_command._quote_mysql_password_for_cnf(raw)
assert password == expected
| TestCLIDBClean |
python | rapidsai__cudf | python/cudf/cudf/core/udf/groupby_typing.py | {
"start": 9582,
"end": 9752
} | class ____(AbstractTemplate):
key = "GroupType.idxmin"
def generic(self, args, kws):
return nb_signature(self.this.index_type, recvr=self.this)
| GroupIdxMin |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py | {
"start": 4450,
"end": 6383
} | class ____(SequenceCandidate):
"""A lazy sequence to provide candidates to the resolver.
The intended usage is to return this from `find_matches()` so the resolver
can iterate through the sequence multiple times, but only access the index
page when remote packages are actually needed. This improve performances
when suitable candidates are already installed on disk.
"""
def __init__(
self,
get_infos: Callable[[], Iterator[IndexCandidateInfo]],
installed: Optional[Candidate],
prefers_installed: bool,
incompatible_ids: Set[int],
):
self._get_infos = get_infos
self._installed = installed
self._prefers_installed = prefers_installed
self._incompatible_ids = incompatible_ids
def __getitem__(self, index: Any) -> Any:
# Implemented to satisfy the ABC check. This is not needed by the
# resolver, and should not be used by the provider either (for
# performance reasons).
raise NotImplementedError("don't do this")
def __iter__(self) -> Iterator[Candidate]:
infos = self._get_infos()
if not self._installed:
iterator = _iter_built(infos)
elif self._prefers_installed:
iterator = _iter_built_with_prepended(self._installed, infos)
else:
iterator = _iter_built_with_inserted(self._installed, infos)
return (c for c in iterator if id(c) not in self._incompatible_ids)
def __len__(self) -> int:
# Implemented to satisfy the ABC check. This is not needed by the
# resolver, and should not be used by the provider either (for
# performance reasons).
raise NotImplementedError("don't do this")
@functools.lru_cache(maxsize=1)
def __bool__(self) -> bool:
if self._prefers_installed and self._installed:
return True
return any(self)
| FoundCandidates |
python | marshmallow-code__marshmallow | src/marshmallow/exceptions.py | {
"start": 1828,
"end": 1961
} | class ____(NameError):
"""Raised when an invalid operation is performed on the serializer
class registry.
"""
| RegistryError |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_ordered_dict.py | {
"start": 41647,
"end": 43159
} | class ____:
def test_add_after_full(self):
c = self.type2test(2)
c['t1'] = 1
c['t2'] = 2
c['t3'] = 3
self.assertEqual(c.counts, {'get': 0, 'set': 3, 'del': 0})
self.assertEqual(list(c), ['t2', 't3'])
self.assertEqual(c.counts, {'get': 0, 'set': 3, 'del': 0})
def test_popitem(self):
c = self.type2test(3)
for i in range(1, 4):
c[i] = i
self.assertEqual(c.popitem(last=False), (1, 1))
self.assertEqual(c.popitem(last=True), (3, 3))
self.assertEqual(c.counts, {'get': 0, 'set': 3, 'del': 0})
def test_pop(self):
c = self.type2test(3)
for i in range(1, 4):
c[i] = i
self.assertEqual(c.counts, {'get': 0, 'set': 3, 'del': 0})
self.assertEqual(c.pop(2), 2)
self.assertEqual(c.counts, {'get': 0, 'set': 3, 'del': 0})
self.assertEqual(c.pop(4, 0), 0)
self.assertEqual(c.counts, {'get': 0, 'set': 3, 'del': 0})
self.assertRaises(KeyError, c.pop, 4)
self.assertEqual(c.counts, {'get': 0, 'set': 3, 'del': 0})
def test_change_order_on_get(self):
c = self.type2test(3)
for i in range(1, 4):
c[i] = i
self.assertEqual(list(c), list(range(1, 4)))
self.assertEqual(c.counts, {'get': 0, 'set': 3, 'del': 0})
self.assertEqual(c[2], 2)
self.assertEqual(c.counts, {'get': 1, 'set': 3, 'del': 0})
self.assertEqual(list(c), [1, 3, 2])
| SimpleLRUCacheTests |
python | pytorch__pytorch | test/functorch/test_ac.py | {
"start": 2096,
"end": 14415
} | class ____(TestCase):
def setUp(self):
super().setUp()
torch.set_default_device("cuda")
def test_rematerializes_cheap(self):
def f(x, w):
x = x.cos()
x = torch.mm(x, w)
return x.sum()
x = torch.randn(512, 512, requires_grad=True)
w = torch.randn(512, 512, requires_grad=True)
def call():
return f(x, w)
eager_mem, eager_flops = get_mem_and_flops(call)
self.assertEqual(eager_mem, 1.0)
mem_10, flops_10 = get_mem_and_flops(call, memory_budget=1.0)
# Recomputing `.cos()` is not free here.
self.assertEqual(mem_10, 1.0)
self.assertEqual(eager_flops, flops_10)
mem_5, flops_5 = get_mem_and_flops(call, memory_budget=0.5)
# We can just recompute `x.cos()` here to only depend on the inputs
self.assertEqual(mem_5, 0.0)
self.assertEqual(flops_5, eager_flops)
def test_matmul_even_chain(self):
def f(x, ws):
x = x.cos()
for w in ws:
x = torch.mm(x, w).cos()
return x.sum()
x = torch.randn(512, 512, requires_grad=True)
ws = [torch.randn(512, 512, requires_grad=True) for _ in range(5)]
def call():
return f(x, ws)
_, eager_flops = get_mem_and_flops(call)
for budget in range(11):
mem, flops = get_mem_and_flops(call, memory_budget=budget / 10)
if budget <= 5:
# We start saving the matmuls
self.assertEqual(mem, budget)
self.assertEqual(flops, eager_flops + (5 - budget))
elif budget < 10:
# We're only recomputing the `cos` operations
self.assertEqual(mem, 5.0)
self.assertEqual(flops, eager_flops)
elif budget == 10:
self.assertEqual(mem, 10.0)
self.assertEqual(flops, eager_flops)
def test_matmul_uneven_chain(self):
# This function is constructed so that we are saving one input of size
# [512, in_dim] for each w
# In addition, every matmul has a same ratio of compute to "memory
# saved", so this test is essentially testing our knapsack solving
def f(x, ws):
xs = [torch.mm(x, w).cos() for w in ws]
return sum(x.sum() for x in xs)
x = torch.randn(512, 512, requires_grad=True)
def make_weights(w_shapes):
ws = []
for dim in w_shapes:
ws.append(torch.randn(512, dim * 512, requires_grad=True))
return ws
weight_configs = [
(
[11, 3, 4, 2],
[
18, # 11 + 4 + 3
17, # 11 + 4 + 2
16, # 11 + 3 + 2
15, # 11 + 4
14, # 11 + 3
13, # 11 + 2
11, # 11 + 2
7, # 4 + 3
6, # 4 + 2
5, # 3 + 2
],
),
(
[3, 5, 11, 17, 14],
[
42, # 17 + 14 + 9
30, # 11 + 15 + 5
19, # 11 + 5 + 3
8, # 5 + 3
3, # 3
],
),
]
random.seed(0)
random_arr = [random.randint(0, 50) for _ in range(10)]
exact_sums = []
for i in range(10):
random.shuffle(random_arr)
exact_sums.append(sum(random_arr[:i]))
weight_configs.append((random_arr, exact_sums))
for weight_shapes, exact_solves in weight_configs:
ws = make_weights(weight_shapes)
def call():
return f(x, ws)
eager_mem, _ = get_mem_and_flops(call)
total_mem = sum(weight_shapes)
self.assertEqual(eager_mem, sum(weight_shapes))
for mem_achieved in exact_solves:
mem, _ = get_mem_and_flops(call, memory_budget=mem_achieved / total_mem)
self.assertEqual(mem, mem_achieved)
# needs CUDA, but this test file all needs CUDA.
@unittest.skipIf(not has_triton(), "test needs triton")
def test_custom_triton_kernel(self):
@triton.jit
def relu_kernel_(inp_ptr, out_ptr, sz, BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(0)
block = tl.arange(0, BLOCK_SIZE) + pid * BLOCK_SIZE
msk = block < sz
inp = tl.load(inp_ptr + block, mask=msk)
relu = tl.where(inp < 0, 0, inp)
tl.store(out_ptr + block, relu, mask=msk)
@torch._library.triton_op("testac::triton_relu", mutates_args=())
def triton_relu(x: torch.Tensor) -> torch.Tensor:
y = torch.empty_like(x)
sz = y.numel()
BLOCK_SIZE = 256
grid = (triton.cdiv(sz, BLOCK_SIZE),)
torch._library.capture_triton(relu_kernel_)[grid](x, y, sz, BLOCK_SIZE)
return y
@torch._library.triton_op("testac::triton_relu_backward", mutates_args=())
def triton_relu_backward(grad_out: torch.Tensor) -> torch.Tensor:
grad_x = torch.empty_like(grad_out)
sz = grad_out.numel()
BLOCK_SIZE = 256
grid = (triton.cdiv(sz, BLOCK_SIZE),)
# I know this is wrong, but whatever..
torch._library.capture_triton(relu_kernel_)[grid](
grad_out, grad_x, sz, BLOCK_SIZE
)
return grad_x
def _triton_relu_backward(ctx, grad_out: torch.Tensor) -> torch.Tensor:
return triton_relu_backward(grad_out)
def _triton_relu_setup_context(ctx, inputs, output):
pass
triton_relu.register_autograd(
_triton_relu_backward,
setup_context=_triton_relu_setup_context,
)
@register_flop_formula(
[torch.ops.testac.triton_relu, torch.ops.testac.triton_relu_backward]
)
def triton_relu_flops(inp_shape, *args, **kwargs):
return prod(inp_shape)
def f(x, ws):
x = torch.ops.testac.triton_relu(x)
for w in ws:
x = torch.ops.testac.triton_relu(torch.mm(x, w))
return x.sum()
x = torch.randn(512, 512, requires_grad=True, device="cuda")
ws = [
torch.randn(512, 512, requires_grad=True, device="cuda") for _ in range(5)
]
def call():
return f(x, ws)
expected = call()
for budget in range(11):
memory_budget = budget / 10
torch._dynamo.reset()
with config.patch(activation_memory_budget=memory_budget):
if memory_budget is not None:
f_compile = torch.compile(
call, backend="aot_eager_decomp_partition"
)
self.assertEqual(expected, f_compile())
def test_prioritize_cheaper_matmul(self):
def f(xs, ws):
xs = [torch.mm(x, w).cos() for x, w in zip(xs, ws)]
return sum(x.sum() for x in xs)
x1, w1 = create_pair(1, 4)
x2, w2 = create_pair(2, 2)
def call():
return f([x1, x2], [w1, w2])
eager_mem, eager_flops = get_mem_and_flops(call)
self.assertEqual(eager_mem, 8)
self.assertEqual(eager_flops, 24)
comp_mem, comp_flops = get_mem_and_flops(call, memory_budget=0.5)
self.assertEqual(comp_mem, 4)
# We are recomputing x1 @ w1 here!
self.assertEqual(comp_flops, eager_flops + 4)
@config.patch(activation_memory_budget_runtime_estimator="profile")
def test_profile(self):
def f(x, ws):
x = x.cos()
for w in ws:
x = torch.mm(x, w).cos()
return x.sum()
x = torch.randn(512, 512, requires_grad=True)
ws = [torch.randn(512, 512, requires_grad=True) for _ in range(5)]
def call():
return f(x, ws)
_, eager_flops = get_mem_and_flops(call)
mem, flops = get_mem_and_flops(call, memory_budget=0.2)
# We start saving the matmuls
self.assertEqual(mem, 2)
self.assertEqual(flops, eager_flops + 3)
def test_prioritize_cheaper_matmul2(self):
def f(xs, ws):
xs = [torch.mm(x, w).cos() for x, w in zip(xs, ws)]
return sum(x.sum() for x in xs)
data = [(4, 4), (6, 2), (2, 6)]
xs, ws = zip(*[create_pair(a, b) for a, b in data])
def call():
return f(xs, ws)
eager_mem, eager_flops = get_mem_and_flops(call)
self.assertEqual(eager_mem, 40)
self.assertEqual(eager_flops, 320)
mem, flops = get_mem_and_flops(call, memory_budget=28 / eager_mem)
# Save w1 and w2
self.assertEqual(mem, 28)
# We're recomputing w3 (the cheap one!)
self.assertEqual(flops - eager_flops, 2 * 2 * 6)
mem, flops = get_mem_and_flops(call, memory_budget=16 / eager_mem)
# Save w2. Note that even though saving w1 gets us closer to our memory
# limit, w2 is actually *more* FLOPs than w1!
self.assertEqual(mem, 12)
self.assertEqual(flops - eager_flops, 2 * 2 * 6 + 4 * 4 * 4)
def test_attention_vs_linear(self):
def f(x, w):
orig_shape = x.shape
x = x.reshape(1, 1, x.shape[0], x.shape[1])
# I know this isn't technically right lol
x = torch.nn.functional.scaled_dot_product_attention(
x, x, x, is_causal=False
).reshape(*orig_shape)
x = torch.mm(x, w)
x = x.cos()
return x.sum()
def try_seq_length(S, D, expected_recompute):
x = torch.randn(S * 512, D * 512, requires_grad=True)
w = torch.randn(D * 512, D * 512, requires_grad=True)
def call():
return f(x, w)
with FlopCounterMode(display=False) as mode:
call()
mm_flops = mode.get_flop_counts()["Global"][torch.ops.aten.mm]
attn_flops = mode.get_total_flops() - mm_flops
mm_flops /= 512**3 * 2
attn_flops /= 512**3 * 2
eager_mem, eager_flops = get_mem_and_flops(call)
self.assertEqual(eager_mem, S * D * 2)
mem, flops = get_mem_and_flops(
call, memory_budget=0.6
) # Force it to recompute one of mm or attn
self.assertEqual(mem, S * D)
if expected_recompute == "attn":
expected_flops = attn_flops
else:
expected_flops = mm_flops
self.assertEqual(flops - eager_flops, expected_flops)
# General behind this test is that if sequence length * 2 > D, then
# attention is more expensive than the linear.
try_seq_length(1, 1, "mm")
try_seq_length(1, 3, "attn")
try_seq_length(2, 2, "mm")
try_seq_length(2, 1, "mm")
try_seq_length(2, 5, "attn")
try_seq_length(4, 7, "mm")
try_seq_length(4, 9, "attn")
def test_manual_ac(self):
# test that manual checkpoint boundaries are respected
# when autoac is set
def f(x):
tmp1 = torch.matmul(x, x.T)
tmp1 = torch.matmul(tmp1, tmp1)
tmp1 = torch.matmul(tmp1, tmp1)
out = torch.matmul(tmp1, x)
return out
def g(x):
x = checkpoint(f, x, use_reentrant=False)
x = checkpoint(f, x, use_reentrant=False)
return x
x = torch.randn(64, 1024, requires_grad=True)
def call():
return g(x).sum()
eager_mem, eager_flops = get_mem_and_flops(call)
# give the memory budget logic a value that should cause it to run,
# but not recompute the matmuls
mem, flops = get_mem_and_flops(call, memory_budget=0.01)
self.assertEqual(mem, eager_mem)
self.assertEqual(flops, eager_flops)
if __name__ == "__main__":
# I'm using the cuda memory allocator to verify memory allocations
if HAS_CUDA_AND_TRITON and not TEST_WITH_ROCM:
run_tests()
| MemoryBudgetTest |
python | mlflow__mlflow | mlflow/store/artifact/databricks_tracking_artifact_repo.py | {
"start": 611,
"end": 4167
} | class ____(ArtifactRepository, ABC):
"""
Base artifact repository for interacting with tracking artifacts in a Databricks workspace.
If operations using the Databricks SDK fail for any reason, this repository automatically
falls back to using the `DatabricksArtifactRepository`, ensuring operational resilience.
This is an abstract base class that should be extended by specific tracking artifact
repositories (e.g., for runs, logged models, etc.).
"""
def __init__(
self, artifact_uri: str, tracking_uri: str | None = None, registry_uri: str | None = None
) -> None:
super().__init__(artifact_uri, tracking_uri, registry_uri)
m = self._get_uri_regex().search(artifact_uri)
if not m:
raise MlflowException.invalid_parameter_value(
f"Invalid artifact URI: {artifact_uri}. Expected URI of the form "
f"{self._get_expected_uri_format()}"
)
experiment_id = m.group("experiment_id")
relative_path = m.group("relative_path") or ""
root_path = self._build_root_path(experiment_id, m, relative_path)
self.databricks_sdk_repo = DatabricksSdkArtifactRepository(root_path)
self.databricks_artifact_repo = DatabricksArtifactRepository(artifact_uri)
@abstractmethod
def _get_uri_regex(self) -> re.Pattern[str]:
"""Return the regex pattern for matching URIs of this type."""
@abstractmethod
def _get_expected_uri_format(self) -> str:
"""Return a description of the expected URI format."""
@abstractmethod
def _build_root_path(self, experiment_id: str, match: re.Match, relative_path: str) -> str:
"""Build the root path for the Databricks SDK repository."""
def log_artifact(self, local_file: str, artifact_path: str | None = None) -> None:
try:
self.databricks_sdk_repo.log_artifact(local_file, artifact_path)
except Exception as e:
_logger.debug(
_FALLBACK_MESSAGE_TEMPLATE.format(operation="log_artifact") % str(e),
exc_info=True,
)
self.databricks_artifact_repo.log_artifact(local_file, artifact_path)
def log_artifacts(self, local_dir: str, artifact_path: str | None = None) -> None:
try:
self.databricks_sdk_repo.log_artifacts(local_dir, artifact_path)
except Exception as e:
_logger.debug(
_FALLBACK_MESSAGE_TEMPLATE.format(operation="log_artifacts") % str(e),
exc_info=True,
)
self.databricks_artifact_repo.log_artifacts(local_dir, artifact_path)
def list_artifacts(self, path: str | None = None) -> list[FileInfo]:
try:
return self.databricks_sdk_repo.list_artifacts(path)
except Exception as e:
_logger.debug(
_FALLBACK_MESSAGE_TEMPLATE.format(operation="list_artifacts") % str(e),
exc_info=True,
)
return self.databricks_artifact_repo.list_artifacts(path)
def _download_file(self, remote_file_path: str, local_path: str) -> None:
try:
self.databricks_sdk_repo._download_file(remote_file_path, local_path)
except Exception as e:
_logger.debug(
_FALLBACK_MESSAGE_TEMPLATE.format(operation="download_file") % str(e),
exc_info=True,
)
self.databricks_artifact_repo._download_file(remote_file_path, local_path)
| DatabricksTrackingArtifactRepository |
python | gevent__gevent | src/gevent/tests/test__socketpair.py | {
"start": 78,
"end": 951
} | class ____(unittest.TestCase):
def test_makefile(self):
msg = b'hello world'
x, y = socket.socketpair()
x.sendall(msg)
x.close()
with y.makefile('rb') as f:
read = f.read()
self.assertEqual(msg, read)
y.close()
@unittest.skipUnless(hasattr(socket, 'fromfd'),
'Needs socket.fromfd')
def test_fromfd(self):
msg = b'hello world'
x, y = socket.socketpair()
xx = socket.fromfd(x.fileno(), x.family, socket.SOCK_STREAM)
x.close()
yy = socket.fromfd(y.fileno(), y.family, socket.SOCK_STREAM)
y.close()
xx.sendall(msg)
xx.close()
with yy.makefile('rb') as f:
read = f.read()
self.assertEqual(msg, read)
yy.close()
if __name__ == '__main__':
unittest.main()
| TestSocketpair |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_value_counts.py | {
"start": 776,
"end": 7876
} | class ____(ColumnAggregateMetricProvider):
metric_name = "column.value_counts"
value_keys = ("sort", "collate")
default_kwarg_values = {"sort": "value", "collate": None}
@metric_value(engine=PandasExecutionEngine)
def _pandas(
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict[str, str],
metric_value_kwargs: Dict[str, Optional[str]],
**kwargs,
) -> pd.Series:
sort: str = metric_value_kwargs.get("sort") or cls.default_kwarg_values["sort"]
collate: Optional[str] = metric_value_kwargs.get(
"collate", cls.default_kwarg_values["collate"]
)
if sort not in ["value", "count", "none"]:
raise ValueError("sort must be either 'value', 'count', or 'none'") # noqa: TRY003 # FIXME CoP
if collate is not None:
raise ValueError("collate parameter is not supported in PandasDataset") # noqa: TRY003 # FIXME CoP
df: pd.DataFrame
accessor_domain_kwargs: Dict[str, str]
df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
metric_domain_kwargs, MetricDomainTypes.COLUMN
)
column: str = accessor_domain_kwargs["column"]
counts: pd.Series = df[column].value_counts()
if sort == "value":
try:
counts.sort_index(inplace=True)
except TypeError:
# Having values of multiple types in a object dtype column (e.g., strings and floats) # noqa: E501 # FIXME CoP
# raises a TypeError when the sorting method performs comparisons.
# Related to the noqa E721 below: numpy / pandas implements equality, see https://github.com/astral-sh/ruff/issues/9570
if df[column].dtype == object:
counts.index = counts.index.astype(str)
counts.sort_index(inplace=True)
elif sort == "counts":
counts.sort_values(inplace=True)
counts.name = "count"
counts.index.name = "value"
return counts
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: Dict[str, str],
metric_value_kwargs: Dict[str, Optional[str]],
**kwargs,
) -> pd.Series:
sort: str = metric_value_kwargs.get("sort") or cls.default_kwarg_values["sort"]
collate: Optional[str] = metric_value_kwargs.get(
"collate", cls.default_kwarg_values["collate"]
)
if sort not in ["value", "count", "none"]:
raise ValueError("sort must be either 'value', 'count', or 'none'") # noqa: TRY003 # FIXME CoP
if collate is not None:
raise ValueError("collate parameter is not supported in PandasDataset") # noqa: TRY003 # FIXME CoP
selectable: sqlalchemy.Selectable
accessor_domain_kwargs: Dict[str, str]
selectable, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
metric_domain_kwargs, MetricDomainTypes.COLUMN
)
column: str = accessor_domain_kwargs["column"]
query: sqlalchemy.Select
if hasattr(sa.column(column), "is_not"):
query = (
sa.select(
sa.column(column).label("value"),
sa.func.count(sa.column(column)).label("count"),
)
.where(sa.column(column).is_not(None))
.group_by(sa.column(column))
)
else:
query = (
sa.select(
sa.column(column).label("value"),
sa.func.count(sa.column(column)).label("count"),
)
.where(sa.column(column).isnot(None))
.group_by(sa.column(column))
)
if sort == "value":
# NOTE: depending on the way the underlying database collates columns,
# ordering can vary. postgresql collate "C" matches default sort
# for python and most other systems, but is not universally supported,
# so we use the default sort for the system, unless specifically overridden
if collate is not None:
query = query.order_by(sa.column(column).collate(collate))
else:
query = query.order_by(sa.column(column))
elif sort == "count":
query = query.order_by(sa.column("count").desc())
results: List[sqlalchemy.Row] = execution_engine.execute_query( # type: ignore[assignment] # FIXME CoP
query.select_from(selectable) # type: ignore[arg-type] # FIXME CoP
).fetchall()
# Numpy does not always infer the correct DataTypes for SqlAlchemy Row, so we cannot use vectorized approach. # noqa: E501 # FIXME CoP
series = pd.Series(
data=[row[1] for row in results],
index=pd.Index(data=[row[0] for row in results], name="value"),
name="count",
)
return series
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: Dict[str, str],
metric_value_kwargs: Dict[str, Optional[str]],
**kwargs,
) -> pd.Series:
sort: str = metric_value_kwargs.get("sort") or cls.default_kwarg_values["sort"]
collate: Optional[str] = metric_value_kwargs.get(
"collate", cls.default_kwarg_values["collate"]
)
if sort not in ["value", "count", "none"]:
raise ValueError("sort must be either 'value', 'count', or 'none'") # noqa: TRY003 # FIXME CoP
if collate is not None:
raise ValueError("collate parameter is not supported in SparkDFDataset") # noqa: TRY003 # FIXME CoP
df: pyspark.DataFrame
accessor_domain_kwargs: Dict[str, str]
df, _, accessor_domain_kwargs = execution_engine.get_compute_domain(
metric_domain_kwargs, MetricDomainTypes.COLUMN
)
column: str = accessor_domain_kwargs["column"]
value_counts_df: pyspark.DataFrame = (
df.select(column).where(F.col(column).isNotNull()).groupBy(column).count()
)
if sort == "value":
value_counts_df = value_counts_df.orderBy(column)
elif sort == "count":
value_counts_df = value_counts_df.orderBy(F.desc("count"))
value_counts: List[pyspark.Row] = value_counts_df.collect()
# Numpy does not always infer the correct DataTypes for Spark df, so we cannot use vectorized approach. # noqa: E501 # FIXME CoP
values: Iterable[Any]
counts: Iterable[int]
if len(value_counts) > 0:
values, counts = zip(*value_counts, strict=False)
else:
values = []
counts = []
series = pd.Series(
counts,
index=pd.Index(data=values, name="value"),
name="count",
)
return series
| ColumnValueCounts |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/execution_tests/pipes_tests/in_process_client.py | {
"start": 1874,
"end": 2152
} | class ____(dg.PipesContextInjector):
@contextmanager
def inject_context(self, context_data: "PipesContextData") -> Iterator[PipesParams]:
yield {}
def no_messages_debug_text(self) -> str:
return "In-process context injection."
| InProcessContextInjector |
python | getsentry__sentry | src/sentry/api/endpoints/auth_index.py | {
"start": 1914,
"end": 5447
} | class ____(Endpoint):
"""
Base endpoint to manage session authentication. Shared between
AuthIndexEndpoint and StaffAuthIndexEndpoint (in getsentry)
"""
owner = ApiOwner.ENTERPRISE
authentication_classes = (QuietBasicAuthentication, SessionAuthentication)
permission_classes = ()
def get(self, request: Request) -> Response:
if not request.user.is_authenticated:
return Response(status=status.HTTP_400_BAD_REQUEST)
user = promote_request_rpc_user(request)
return Response(serialize(user, user, DetailedSelfUserSerializer()))
@staticmethod
def _reauthenticate_with_sso(request: Request, org_id: int) -> None:
"""
If a user without a password is hitting this, it means they need to re-identify with SSO.
"""
redirect = request.META.get("HTTP_REFERER", None)
if not url_has_allowed_host_and_scheme(redirect, allowed_hosts=(request.get_host(),)):
redirect = None
initiate_login(request, redirect)
organization_context = organization_service.get_organization_by_id(
id=org_id, include_teams=False, include_projects=False
)
assert organization_context, "Failed to fetch organization in _reauthenticate_with_sso"
raise SsoRequired(
organization=organization_context.organization,
request=request,
after_login_redirect=redirect,
)
@staticmethod
def _verify_user_via_inputs(validator: AuthVerifyValidator, request: Request) -> bool:
assert request.user.is_authenticated
# See if we have a u2f challenge/response
if "challenge" in validator.validated_data and "response" in validator.validated_data:
try:
interface = Authenticator.objects.get_interface(request.user, "u2f")
assert isinstance(interface, U2fInterface)
if not interface.is_enrolled():
raise LookupError()
challenge = json.loads(validator.validated_data["challenge"])
response = json.loads(validator.validated_data["response"])
authenticated = interface.validate_response(request, challenge, response)
if not authenticated:
logger.warning(
"u2f_authentication.verification_failed",
extra={"user": request.user.id},
)
else:
metrics.incr("auth.2fa.success", sample_rate=1.0, skip_internal=False)
return authenticated
except ValueError as err:
logger.warning(
"u2f_authentication.value_error",
extra={"user": request.user.id, "error_message": err},
)
except LookupError:
logger.warning(
"u2f_authentication.interface_not_enrolled",
extra={"validated_data": validator.validated_data, "user": request.user.id},
)
# attempt password authentication
elif "password" in validator.validated_data:
authenticated = promote_request_rpc_user(request).check_password(
validator.validated_data["password"]
)
if authenticated:
metrics.incr("auth.password.success", sample_rate=1.0, skip_internal=False)
return authenticated
return False
@control_silo_endpoint
| BaseAuthIndexEndpoint |
python | ray-project__ray | rllib/callbacks/tests/test_callbacks_on_env_runner.py | {
"start": 3271,
"end": 3687
} | class ____(RLlibCallback):
def on_episode_created(
self,
*,
episode,
worker=None,
env_runner=None,
metrics_logger=None,
base_env=None,
env=None,
policies=None,
rl_module=None,
env_index: int,
**kwargs,
) -> None:
print("Some code here to test the expected error on new API stack!")
| OnEpisodeCreatedCallback |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_contextlib.py | {
"start": 1812,
"end": 3849
} | class ____(__TestCase):
def test_enter(self):
with torch._dynamo.error_on_graph_break(False):
class DefaultEnter(AbstractContextManager):
def __exit__(self, *args):
super().__exit__(*args)
manager = DefaultEnter()
self.assertIs(manager.__enter__(), manager)
def test_slots(self):
with torch._dynamo.error_on_graph_break(False):
class DefaultContextManager(AbstractContextManager):
__slots__ = ()
def __exit__(self, *args):
super().__exit__(*args)
with self.assertRaises(AttributeError):
DefaultContextManager().var = 42
def test_exit_is_abstract(self):
with torch._dynamo.error_on_graph_break(False):
class MissingExit(AbstractContextManager):
pass
with self.assertRaises(TypeError):
MissingExit()
def test_structural_subclassing(self):
with torch._dynamo.error_on_graph_break(False):
class ManagerFromScratch:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return None
self.assertTrue(issubclass(ManagerFromScratch, AbstractContextManager))
with torch._dynamo.error_on_graph_break(False):
class DefaultEnter(AbstractContextManager):
def __exit__(self, *args):
super().__exit__(*args)
self.assertTrue(issubclass(DefaultEnter, AbstractContextManager))
with torch._dynamo.error_on_graph_break(False):
class NoEnter(ManagerFromScratch):
__enter__ = None
self.assertFalse(issubclass(NoEnter, AbstractContextManager))
with torch._dynamo.error_on_graph_break(False):
class NoExit(ManagerFromScratch):
__exit__ = None
self.assertFalse(issubclass(NoExit, AbstractContextManager))
| TestAbstractContextManager |
python | walkccc__LeetCode | solutions/2276. Count Integers in Intervals/2276.py | {
"start": 42,
"end": 678
} | class ____:
def __init__(self):
self.intervals = SortedDict()
self.cnt = 0
def add(self, left: int, right: int) -> None:
while self._isOverlapped(left, right):
i = self.intervals.bisect_right(right) - 1
l, r = self.intervals.popitem(i)
left = min(left, l)
right = max(right, r)
self.cnt -= r - l + 1
self.intervals[left] = right
self.cnt += right - left + 1
def count(self) -> int:
return self.cnt
def _isOverlapped(self, left: int, right: int) -> bool:
i = self.intervals.bisect_right(right)
return i > 0 and self.intervals.peekitem(i - 1)[1] >= left
| CountIntervals |
python | tiangolo__fastapi | docs_src/openapi_callbacks/tutorial001.py | {
"start": 124,
"end": 234
} | class ____(BaseModel):
id: str
title: Union[str, None] = None
customer: str
total: float
| Invoice |
python | PyCQA__pylint | tests/functional/p/pragma_after_backslash.py | {
"start": 112,
"end": 277
} | class ____:
"""block-disable test"""
def meth3(self):
"""test one line disabling"""
print(self.bla) \
# pylint: disable=no-member
| Foo |
python | automl__auto-sklearn | test/test_metric/test_metrics.py | {
"start": 14880,
"end": 24719
} | class ____(unittest.TestCase):
def test_unsupported_task_type(self):
y_true = np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
y_pred = np.array(
[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
)
scorer = autosklearn.metrics.accuracy
raised = False
try:
calculate_scores(y_true, y_pred, 6, scorer)
except NotImplementedError:
raised = True
self.assertTrue(raised)
def test_classification_scoring_functions(self):
scoring_functions = list(autosklearn.metrics.CLASSIFICATION_METRICS.values())
scoring_functions.remove(autosklearn.metrics.accuracy)
fail_metrics = ["precision_samples", "recall_samples", "f1_samples"]
success_metrics = list(autosklearn.metrics.CLASSIFICATION_METRICS.keys())
for metric in fail_metrics:
success_metrics.remove(metric)
y_true = np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
y_pred = np.array(
[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
)
score_dict = calculate_scores(
y_true,
y_pred,
BINARY_CLASSIFICATION,
[autosklearn.metrics.accuracy],
scoring_functions=scoring_functions,
)
self.assertIsInstance(score_dict, dict)
self.assertTrue(len(success_metrics), len(score_dict))
for metric in fail_metrics:
self.assertNotIn(metric, score_dict.keys())
for metric in success_metrics:
self.assertIn(metric, score_dict.keys())
self.assertAlmostEqual(
autosklearn.metrics.CLASSIFICATION_METRICS[metric]._optimum,
score_dict[metric],
)
def test_regression_scoring_functions(self):
scoring_functions = list(autosklearn.metrics.REGRESSION_METRICS.values())
scoring_functions.remove(autosklearn.metrics.root_mean_squared_error)
metrics = list(autosklearn.metrics.REGRESSION_METRICS.keys())
metrics.remove("mean_squared_log_error")
y_true = np.array([1, 2, 3, -4])
y_pred = y_true.copy()
score_dict = calculate_scores(
y_true,
y_pred,
REGRESSION,
[autosklearn.metrics.root_mean_squared_error],
scoring_functions=scoring_functions,
)
self.assertIsInstance(score_dict, dict)
self.assertTrue(len(metrics), len(score_dict))
for metric in metrics:
self.assertIn(metric, score_dict.keys())
self.assertAlmostEqual(
autosklearn.metrics.REGRESSION_METRICS[metric]._optimum,
score_dict[metric],
)
def test_classification_only_metric(self):
y_true = np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
y_pred = np.array(
[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
)
scorer = autosklearn.metrics.accuracy
score = calculate_scores(y_true, y_pred, BINARY_CLASSIFICATION, [scorer])[
"accuracy"
]
previous_score = scorer._optimum
self.assertAlmostEqual(score, previous_score)
def test_regression_only_metric(self):
y_true = np.array([1, 2, 3, 4])
y_pred = y_true.copy()
scorer = autosklearn.metrics.root_mean_squared_error
score = calculate_scores(y_true, y_pred, REGRESSION, [scorer])[
"root_mean_squared_error"
]
previous_score = scorer._optimum
self.assertAlmostEqual(score, previous_score)
def test_calculate_losses():
# In a 0-1 ranged scorer, make sure that the loss
# has an expected positive value
y_pred = np.array([0, 1, 0, 1, 1, 1, 0, 0, 0, 0])
y_true = np.array([0, 1, 0, 1, 1, 0, 0, 0, 0, 0])
score = sklearn.metrics.accuracy_score(y_true, y_pred)
assert {"accuracy": pytest.approx(score)} == calculate_scores(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[autosklearn.metrics.accuracy],
)
assert {"accuracy": pytest.approx(1.0 - score)} == calculate_losses(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[autosklearn.metrics.accuracy],
)
# Test two metrics
score_dict = calculate_scores(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[
autosklearn.metrics.accuracy,
autosklearn.metrics.balanced_accuracy,
],
)
expected_score_dict = {
"accuracy": 0.9,
"balanced_accuracy": 0.9285714285714286,
}
loss_dict = calculate_losses(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[
autosklearn.metrics.accuracy,
autosklearn.metrics.balanced_accuracy,
],
)
for expected_metric, expected_score in expected_score_dict.items():
assert pytest.approx(expected_score) == score_dict[expected_metric]
assert pytest.approx(1 - expected_score) == loss_dict[expected_metric]
# Test no metric
with pytest.raises(
ValueError, match="Number of metrics to compute must be greater than zero."
):
calculate_scores(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[],
)
with pytest.raises(
ValueError, match="Number of metrics to compute must be greater than zero."
):
calculate_scores(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[],
scoring_functions=[
autosklearn.metrics.accuracy,
autosklearn.metrics.balanced_accuracy,
],
)
# Test the same metric twice
accuracy_fixture = {"accuracy": pytest.approx(0.9)}
assert accuracy_fixture == calculate_scores(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[autosklearn.metrics.accuracy, autosklearn.metrics.accuracy],
)
assert accuracy_fixture == calculate_scores(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[autosklearn.metrics.accuracy],
scoring_functions=[autosklearn.metrics.accuracy],
)
assert accuracy_fixture == calculate_scores(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[autosklearn.metrics.accuracy],
scoring_functions=[autosklearn.metrics.accuracy, autosklearn.metrics.accuracy],
)
# Test the same name for multiple metrics!
bogus_accuracy = autosklearn.metrics.make_scorer(
"accuracy",
score_func=sklearn.metrics.roc_auc_score,
)
with pytest.raises(ValueError, match="used multiple times"):
calculate_scores(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[autosklearn.metrics.accuracy, bogus_accuracy],
)
# Test additional scoring functions
score_dict = calculate_scores(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[autosklearn.metrics.accuracy],
scoring_functions=[
autosklearn.metrics.accuracy,
autosklearn.metrics.balanced_accuracy,
],
)
expected_score_dict = {
"accuracy": 0.9,
"balanced_accuracy": 0.9285714285714286,
}
loss_dict = calculate_losses(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metrics=[autosklearn.metrics.accuracy],
scoring_functions=[
autosklearn.metrics.accuracy,
autosklearn.metrics.balanced_accuracy,
],
)
for expected_metric, expected_score in expected_score_dict.items():
assert pytest.approx(expected_score) == score_dict[expected_metric]
assert pytest.approx(1 - expected_score) == loss_dict[expected_metric]
# Lastly make sure that metrics whose optimum is zero
# are also properly working
y_true = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
y_pred = np.array([0.11, 0.22, 0.33, 0.44, 0.55, 0.66])
score = sklearn.metrics.mean_squared_error(y_true, y_pred)
assert {"mean_squared_error": pytest.approx(0 - score)} == calculate_scores(
solution=y_true,
prediction=y_pred,
task_type=REGRESSION,
metrics=[autosklearn.metrics.mean_squared_error],
)
assert {"mean_squared_error": pytest.approx(score)} == calculate_losses(
solution=y_true,
prediction=y_pred,
task_type=REGRESSION,
metrics=[autosklearn.metrics.mean_squared_error],
)
def test_calculate_metric():
# metric to be maximized
y_pred = np.array([0, 1, 0, 1, 1, 1, 0, 0, 0, 0])
y_true = np.array([0, 1, 0, 1, 1, 0, 0, 0, 0, 0])
score = sklearn.metrics.accuracy_score(y_true, y_pred)
assert pytest.approx(score) == compute_single_metric(
solution=y_true,
prediction=y_pred,
task_type=BINARY_CLASSIFICATION,
metric=autosklearn.metrics.accuracy,
)
# metric to be minimized
y_true = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
y_pred = np.array([0.11, 0.22, 0.33, 0.44, 0.55, 0.66])
score = sklearn.metrics.mean_squared_error(y_true, y_pred)
assert pytest.approx(score) == compute_single_metric(
solution=y_true,
prediction=y_pred,
task_type=REGRESSION,
metric=autosklearn.metrics.mean_squared_error,
)
| TestCalculateScore |
python | huggingface__transformers | src/transformers/models/sam3_tracker/processing_sam3_tracker.py | {
"start": 1511,
"end": 23564
} | class ____(ProcessorMixin):
r"""
Constructs a SAM3_TRACKER processor which wraps a SAM3_TRACKER image processor and an 2D points & Bounding boxes processor into a
single processor.
[`Sam3TrackerProcessor`] offers all the functionalities of [`Sam3TrackerImageProcessorFast`] and [`Sam3TrackerVideoProcessor`]. See the docstring of
[`~Sam3TrackerImageProcessorFast.__call__`] and [`~Sam3TrackerVideoProcessor.__call__`] for more information.
Args:
image_processor (`Sam3TrackerImageProcessorFast`):
An instance of [`Sam3TrackerImageProcessorFast`].
target_size (`int`, *optional*):
The target size (target_size, target_size) to which the image will be resized.
point_pad_value (`int`, *optional*, defaults to -10):
The value used for padding input points.
"""
def __init__(self, image_processor, target_size: Optional[int] = None, point_pad_value: int = -10, **kwargs):
super().__init__(image_processor, **kwargs)
self.point_pad_value = point_pad_value
self.target_size = target_size if target_size is not None else self.image_processor.size["height"]
def __call__(
self,
images: Optional[ImageInput] = None,
segmentation_maps: Optional[ImageInput] = None,
input_points: Optional[Union[list[list[list[list[float]]]], torch.Tensor]] = None,
input_labels: Optional[Union[list[list[list[int]]], torch.Tensor]] = None,
input_boxes: Optional[Union[list[list[list[float]]], torch.Tensor]] = None,
original_sizes: Optional[Union[list[list[float]], torch.Tensor]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> BatchEncoding:
r"""
This method uses [`Sam3TrackerImageProcessorFast.__call__`] method to prepare image(s) for the model. It also prepares 2D
points and bounding boxes for the model if they are provided.
Args:
images (`ImageInput`, *optional*):
The image(s) to process.
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to process.
input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*):
The points to add to the frame.
input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*):
The labels for the points.
input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
The bounding boxes to add to the frame.
original_sizes (`list[list[float]]`, `torch.Tensor`, *optional*):
The original sizes of the images.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return.
**kwargs:
Additional keyword arguments to pass to the image processor.
Returns:
A [`BatchEncoding`] with the following fields:
- `pixel_values` (`torch.Tensor`): The processed image(s).
- `original_sizes` (`list[list[float]]`): The original sizes of the images.
- `labels` (`torch.Tensor`): The processed segmentation maps (if provided).
- `input_points` (`torch.Tensor`): The processed points.
- `input_labels` (`torch.Tensor`): The processed labels.
- `input_boxes` (`torch.Tensor`): The processed bounding boxes.
"""
if images is not None:
encoding_image_processor = self.image_processor(
images,
segmentation_maps=segmentation_maps,
return_tensors=return_tensors,
**kwargs,
)
elif original_sizes is not None:
if isinstance(original_sizes, torch.Tensor):
original_sizes = original_sizes.cpu().tolist()
encoding_image_processor = BatchEncoding({"original_sizes": original_sizes}, tensor_type=return_tensors)
else:
raise ValueError("Either images or original_sizes must be provided")
# pop arguments that are not used in the forward but used nevertheless
original_sizes = encoding_image_processor["original_sizes"]
# Check original_sizes is of length 1 or len(images)
if images is not None and len(original_sizes) != 1 and len(original_sizes) != len(images):
raise ValueError(
"original_sizes must be of length 1 or len(images). If you are passing a single image, you must pass a single original_size."
)
# Process input points, labels, and boxes if provided
if input_points is not None or input_labels is not None or input_boxes is not None:
# Validate and convert inputs to standardized format
processed_points = self._validate_single_input(
input_points,
expected_depth=4,
input_name="points",
expected_format="[image level, object level, point level, point coordinates]",
expected_coord_size=2,
)
processed_labels = self._validate_single_input(
input_labels,
expected_depth=3,
input_name="labels",
expected_format="[image level, object level, point level]",
)
processed_boxes = self._validate_single_input(
input_boxes,
expected_depth=3,
input_name="boxes",
expected_format="[image level, box level, box coordinates]",
expected_coord_size=4,
)
# Get padding requirements for all inputs
if processed_points is not None:
points_max_dims = self._get_nested_dimensions(processed_points)[:3]
if processed_labels is not None:
labels_max_dims = self._get_nested_dimensions(processed_labels)[:3]
if processed_boxes is not None:
boxes_max_dims = self._get_nested_dimensions(processed_boxes)[:2]
# Ensure points and labels have consistent dimensions
if processed_points is not None and processed_labels is not None:
if points_max_dims != labels_max_dims:
raise ValueError(
"Input points and labels have inconsistent dimensions. Please ensure they have the same dimensions."
)
# Check that boxes don't need padding (model limitation)
if processed_boxes is not None and len(processed_boxes) >= 2:
if any(len(img_boxes) < boxes_max_dims[1] for img_boxes in processed_boxes):
raise ValueError(
"Input boxes have inconsistent dimensions that would require padding, "
"but boxes cannot be padded due to model limitations. "
"Please ensure all images have the same number of boxes."
)
# Pad and normalize all inputs to final tensor format
if processed_points is not None:
padded_points = self._pad_nested_list(processed_points, points_max_dims + [2])
final_points = torch.tensor(padded_points, dtype=torch.float32)
self._normalize_tensor_coordinates(final_points, original_sizes, preserve_padding=True)
encoding_image_processor.update({"input_points": final_points})
if processed_labels is not None:
padded_labels = self._pad_nested_list(processed_labels, labels_max_dims)
final_labels = torch.tensor(padded_labels, dtype=torch.int64)
encoding_image_processor.update({"input_labels": final_labels})
if processed_boxes is not None:
final_boxes = torch.tensor(processed_boxes, dtype=torch.float32)
self._normalize_tensor_coordinates(final_boxes, original_sizes, is_bounding_box=True)
encoding_image_processor.update({"input_boxes": final_boxes})
return encoding_image_processor
def _normalize_coordinates(
self, target_size: int, coords: "torch.Tensor", original_size, is_bounding_box=False
) -> "torch.Tensor":
"""
Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
Args:
target_size (`int`):
The target size of the image.
coords (`torch.Tensor`):
The coordinates to be normalized.
original_size (`tuple`):
The original size of the image.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether the coordinates are bounding boxes.
"""
old_h, old_w = original_size
new_h, new_w = target_size, target_size
coords = deepcopy(coords).float()
if is_bounding_box:
coords = coords.reshape(-1, 2, 2)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
coords = coords.reshape(-1, 4)
return coords
def _convert_to_nested_list(self, data, expected_depth, current_depth=0):
"""
Recursively convert various input formats (tensors, numpy arrays, lists) to nested lists.
Args:
data: Input data in any format
expected_depth: Expected nesting depth
current_depth: Current depth in recursion
Returns:
Nested list representation of the data
"""
if data is None:
return None
# Convert tensor/numpy to list if we're at a leaf level or if it's a multi-dimensional array
if isinstance(data, torch.Tensor): # PyTorch tensor
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small tensor
return data.numpy().tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, np.ndarray): # NumPy array
if current_depth == expected_depth - 2 or len(data.shape) <= 2: # At coordinate level or small array
return data.tolist()
else:
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, list):
if current_depth == expected_depth:
# We've reached the expected depth, return as is
return data
else:
# Continue recursion
return [self._convert_to_nested_list(item, expected_depth, current_depth + 1) for item in data]
elif isinstance(data, (int, float)):
return data
else:
raise TypeError(f"Unsupported data type: {type(data)}")
def _get_nested_dimensions(self, nested_list, max_dims=None):
"""
Get the maximum dimensions at each level of nesting.
Args:
nested_list (`list`):
Nested list structure.
max_dims (`list`, *optional*):
Current maximum dimensions (for recursion).
Returns:
`list`: A list of maximum dimensions for each nesting level.
"""
if max_dims is None:
max_dims = []
if not isinstance(nested_list, list):
return max_dims
if len(max_dims) == 0:
max_dims.append(len(nested_list))
else:
max_dims[0] = max(max_dims[0], len(nested_list))
if len(nested_list) > 0:
for item in nested_list:
if isinstance(item, list):
sub_dims = self._get_nested_dimensions(item)
# Merge sub_dims into max_dims
for i, dim in enumerate(sub_dims):
if i + 1 >= len(max_dims):
max_dims.append(dim)
else:
max_dims[i + 1] = max(max_dims[i + 1], dim)
return max_dims
def _pad_nested_list(self, nested_list, target_dims, current_level=0, pad_value=None):
"""
Recursively pad a nested list to match target dimensions.
Args:
nested_list (`list`):
Nested list to pad.
target_dims (`list`):
Target dimensions for each level.
current_level (`int`, *optional*, defaults to 0):
Current nesting level.
pad_value (`int`, *optional*):
Value to use for padding.
Returns:
`list`: The padded nested list.
"""
if pad_value is None:
pad_value = self.point_pad_value
if current_level >= len(target_dims):
return nested_list
# Ensure we have a list
if not isinstance(nested_list, list):
nested_list = [nested_list]
# Pad current level
current_size = len(nested_list)
target_size = target_dims[current_level]
# Pad with appropriate values
if current_level == len(target_dims) - 1:
# At the coordinate level, pad with pad_value
nested_list.extend([pad_value] * (target_size - current_size))
else:
# At higher levels, pad with nested structures
if current_size > 0:
# Create appropriately sized template
if current_level < len(target_dims) - 2:
# For non-coordinate levels, create empty nested structure
template_dims = target_dims[current_level + 1 :]
template = self._create_empty_nested_structure(template_dims, pad_value)
else:
# For coordinate level, create list of pad_values
template = [pad_value] * target_dims[current_level + 1]
nested_list.extend([deepcopy(template) for _ in range(target_size - current_size)])
else:
# Create from scratch
template_dims = target_dims[current_level + 1 :]
template = self._create_empty_nested_structure(template_dims, pad_value)
nested_list.extend([deepcopy(template) for _ in range(target_size)])
# Recursively pad sublists
if current_level < len(target_dims) - 1:
for i in range(len(nested_list)):
if isinstance(nested_list[i], list):
nested_list[i] = self._pad_nested_list(nested_list[i], target_dims, current_level + 1, pad_value)
return nested_list
def _create_empty_nested_structure(self, dims, pad_value):
"""
Create an empty nested structure with given dimensions filled with pad_value.
Args:
dims (`list`):
The dimensions of the nested structure.
pad_value (`int`):
The value to fill the structure with.
"""
if len(dims) == 1:
return [pad_value] * dims[0]
else:
return [self._create_empty_nested_structure(dims[1:], pad_value) for _ in range(dims[0])]
def _get_nesting_level(self, input_list):
"""
Get the nesting level of a list structure.
Args:
input_list (`list`):
The list to get the nesting level of.
"""
if isinstance(input_list, list):
if len(input_list) == 0:
return 1
return 1 + self._get_nesting_level(input_list[0])
elif isinstance(input_list, (np.ndarray, torch.Tensor)):
# For arrays/tensors, the nesting level is the number of dimensions
return len(input_list.shape)
return 0
def _validate_single_input(
self,
data: Union[torch.Tensor, np.ndarray, list],
expected_depth: int,
input_name: str,
expected_format: str,
expected_coord_size: Optional[int] = None,
) -> list:
"""
Validate a single input by ensuring proper nesting and raising an error if the input is not valid.
Args:
data (`torch.Tensor`, `np.ndarray`, or `list`):
Input data to process.
expected_depth (`int`):
Expected nesting depth.
input_name (`str`):
Name of the input for error messages.
expected_format (`str`):
The expected format of the input.
expected_coord_size (`int`, *optional*):
Expected coordinate size (2 for points, 4 for boxes, None for labels).
.
"""
if data is None:
return None
# Handle tensors and numpy arrays first
if isinstance(data, (torch.Tensor, np.ndarray)):
# For tensors/arrays, we can directly check the number of dimensions
if data.ndim != expected_depth:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_depth} dimensions. The expected nesting format is {expected_format}. Got {data.ndim} dimensions."
)
elif expected_coord_size is not None:
if data.shape[-1] != expected_coord_size:
raise ValueError(
f"Input {input_name} must be a tensor/array with {expected_coord_size} as the last dimension, got {data.shape[-1]}."
)
return self._convert_to_nested_list(data, expected_depth)
# Handle nested lists
if isinstance(data, list):
current_depth = self._get_nesting_level(data)
if current_depth != expected_depth:
raise ValueError(
f"Input {input_name} must be a nested list with {expected_depth} levels. The expected nesting format is {expected_format}. Got {current_depth} levels."
)
return self._convert_to_nested_list(data, expected_depth)
def _normalize_tensor_coordinates(self, tensor, original_sizes, is_bounding_box=False, preserve_padding=False):
"""
Helper method to normalize coordinates in a tensor across multiple images.
Args:
tensor (`torch.Tensor`):
Input tensor with coordinates.
original_sizes (`list`):
Original image sizes.
is_bounding_box (`bool`, *optional*, defaults to `False`):
Whether coordinates are bounding boxes.
preserve_padding (`bool`, *optional*, defaults to `False`):
Whether to preserve padding values (for points).
"""
if preserve_padding:
# For points: avoid normalizing pad values
mask = tensor != self.point_pad_value
coord_mask = mask.all(dim=-1, keepdim=True)
for img_idx in range(len(original_sizes)):
if img_idx < tensor.shape[0]:
original_size = original_sizes[img_idx] if img_idx < len(original_sizes) else original_sizes[0]
normalized_coords = self._normalize_coordinates(
self.target_size, tensor[img_idx], original_size, is_bounding_box=is_bounding_box
)
if preserve_padding:
# Only update non-padded values
img_mask = coord_mask[img_idx]
tensor[img_idx] = torch.where(
img_mask.expand_as(tensor[img_idx]), normalized_coords, tensor[img_idx]
)
else:
tensor[img_idx] = normalized_coords
def post_process_masks(
self,
masks,
original_sizes,
mask_threshold=0.0,
binarize=True,
max_hole_area=0.0,
max_sprinkle_area=0.0,
apply_non_overlapping_constraints=False,
**kwargs,
):
"""
Remove padding and upscale masks to the original image size.
Args:
masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
The original sizes of each image before it was resized to the model's expected input shape, in (height,
width) format.
mask_threshold (`float`, *optional*, defaults to 0.0):
Threshold for binarization and post-processing operations.
binarize (`bool`, *optional*, defaults to `True`):
Whether to binarize the masks.
max_hole_area (`float`, *optional*, defaults to 0.0):
The maximum area of a hole to fill.
max_sprinkle_area (`float`, *optional*, defaults to 0.0):
The maximum area of a sprinkle to fill.
apply_non_overlapping_constraints (`bool`, *optional*, defaults to `False`):
Whether to apply non-overlapping constraints to the masks.
Returns:
(`torch.Tensor`): Batched masks in batch_size, num_channels, height, width) format, where (height, width)
is given by original_size.
"""
return self.image_processor.post_process_masks(
masks,
original_sizes,
mask_threshold,
binarize,
max_hole_area,
max_sprinkle_area,
apply_non_overlapping_constraints,
**kwargs,
)
@property
def model_input_names(self):
image_processor_input_names = self.image_processor.model_input_names
return list(image_processor_input_names + ["original_sizes"])
__all__ = ["Sam3TrackerProcessor"]
| Sam3TrackerProcessor |
python | getsentry__sentry | src/sentry/preprod/analytics.py | {
"start": 652,
"end": 883
} | class ____(analytics.Event):
organization_id: int
project_id: int
user_id: int | None = None
artifact_id: str
@analytics.eventclass("preprod_artifact.api.get_build_details")
| PreprodArtifactApiSizeAnalysisDownloadEvent |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_organization_issue_timeseries.py | {
"start": 334,
"end": 18032
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-issue-timeseries"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.url = reverse(self.endpoint, args=(self.organization.slug,))
self.end = datetime.now(tz=timezone.utc)
self.just_before_now = self.end - timedelta(microseconds=100)
self.start = self.just_before_now - timedelta(hours=1)
self.project1 = self.create_project(teams=[self.team], slug="foo")
self.project2 = self.create_project(teams=[self.team], slug="bar")
self.releases = [
self.create_release(self.project1, version="1.0.0"),
self.create_release(self.project2, version="1.1.0"),
self.create_release(self.project2, version="1.2.0"),
self.create_release(self.project2, version="1.3.0"),
self.create_release(self.project2, version="1.4.0"),
self.create_release(self.project2, version="1.5.0"),
]
# Release issues.
self.create_group(
project=self.project1,
status=0,
first_seen=self.end,
first_release=self.releases[0],
type=1,
)
self.create_group(
project=self.project1,
status=1,
first_seen=self.start,
first_release=self.releases[0],
type=2,
)
self.create_group(
project=self.project2,
status=1,
first_seen=self.end,
first_release=self.releases[1],
type=3,
)
self.create_group(
project=self.project2,
status=2,
first_seen=self.end,
first_release=self.releases[1],
type=4,
)
self.create_group(
project=self.project2,
status=2,
first_seen=self.end,
first_release=self.releases[1],
type=FeedbackGroup.type_id,
)
# Time based issues.
self.create_group(project=self.project1, status=0, first_seen=self.end, type=1)
self.create_group(
project=self.project1,
status=1,
first_seen=self.just_before_now,
resolved_at=self.end,
type=2,
)
self.create_group(
project=self.project2,
status=1,
first_seen=self.start,
resolved_at=self.start + timedelta(microseconds=100),
type=3,
)
self.create_group(project=self.project2, status=2, first_seen=self.start, type=4)
self.create_group(
project=self.project2, status=2, first_seen=self.start, type=FeedbackGroup.type_id
)
def do_request(self, data: dict[str, Any], url: str | None = None) -> Any:
return self.client.get(self.url if url is None else url, data=data, format="json")
def test_get_invalid_interval(self) -> None:
response = self.do_request(
{
"start": self.start,
"end": self.end,
"interval": "0",
"category": "issue",
"yAxis": "count(new_issues)",
"groupBy": ["release"],
},
)
assert response.status_code == 400
assert response.json() == {"detail": "Interval cannot result in a zero duration."}
def test_get_too_much_granularity(self) -> None:
response = self.do_request(
{
"statsPeriod": "14d",
"interval": "1001",
"category": "issue",
"yAxis": "count(new_issues)",
"groupBy": ["release"],
},
)
assert response.status_code == 400
assert response.json() == {"detail": "Invalid Interval"}
def test_get_invalid_category(self) -> None:
response = self.do_request(
{
"start": self.start,
"end": self.end,
"interval": "1h",
"category": "foo",
"yAxis": "count(new_issues)",
"groupBy": ["release"],
},
)
assert response.status_code == 400
assert response.json() == {
"detail": "Invalid issue category. Valid options are 'issue' and 'feedback'."
}
def test_get_invalid_groupby(self) -> None:
response = self.do_request(
{
"start": self.start,
"end": self.end,
"interval": "1h",
"category": "issue",
"yAxis": "count(new_issues)",
"groupBy": ["foo"],
},
)
assert response.status_code == 400
assert response.json() == {"detail": "The only supported groupBy is currently release."}
def test_get_new_issues(self) -> None:
response = self.do_request(
{
"start": self.start,
"end": self.end,
"interval": "1h",
"category": "issue",
"yAxis": "count(new_issues)",
},
)
assert response.status_code == 200, response.content
assert response.data["meta"] == {
"dataset": "issue",
"start": self.start.timestamp() * 1000,
"end": self.end.timestamp() * 1000,
}
assert len(response.data["timeSeries"]) == 1
timeseries = response.data["timeSeries"][0]
assert len(timeseries["values"]) == 2
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": 3,
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": 5,
},
]
assert timeseries["meta"] == {
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
def test_get_resolved_issues(self) -> None:
response = self.do_request(
{
"start": self.start,
"end": self.end,
"interval": "1h",
"category": "issue",
"yAxis": "count(resolved_issues)",
},
)
assert response.status_code == 200, response.content
assert response.data["meta"] == {
"dataset": "issue",
"start": self.start.timestamp() * 1000,
"end": self.end.timestamp() * 1000,
}
assert len(response.data["timeSeries"]) == 1
timeseries = response.data["timeSeries"][0]
assert len(timeseries["values"]) == 2
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": 1,
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": 1,
},
]
assert timeseries["meta"] == {
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
def test_get_new_and_resolved(self) -> None:
response = self.do_request(
{
"start": self.start,
"end": self.end,
"interval": "1h",
"category": "issue",
"yAxis": ["count(new_issues)", "count(resolved_issues)"],
},
)
assert response.status_code == 200, response.content
assert response.data["meta"] == {
"dataset": "issue",
"start": self.start.timestamp() * 1000,
"end": self.end.timestamp() * 1000,
}
assert len(response.data["timeSeries"]) == 2
timeseries = response.data["timeSeries"][0]
assert timeseries["yAxis"] == "count(new_issues)"
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": 3,
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": 5,
},
]
assert timeseries["meta"] == {
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
timeseries = response.data["timeSeries"][1]
assert timeseries["yAxis"] == "count(resolved_issues)"
assert len(timeseries["values"]) == 2
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": 1,
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": 1,
},
]
assert timeseries["meta"] == {
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
def test_groupby_release(self) -> None:
response = self.do_request(
{
"start": self.start,
"end": self.end,
"interval": "1h",
"category": "issue",
"yAxis": "count(new_issues)",
"groupBy": ["release"],
},
)
assert response.status_code == 200, response.content
assert response.data["meta"] == {
"dataset": "issue",
"start": self.start.timestamp() * 1000,
"end": self.end.timestamp() * 1000,
}
assert len(response.data["timeSeries"]) == 2
timeseries = response.data["timeSeries"][0]
assert timeseries["groupBy"] == [{"key": "release", "value": "1.0.0"}]
assert len(timeseries["values"]) == 2
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": 1,
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": 1,
},
]
assert timeseries["meta"] == {
"isOther": False,
"order": 0,
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
timeseries = response.data["timeSeries"][1]
assert timeseries["groupBy"] == [{"key": "release", "value": "1.1.0"}]
assert len(timeseries["values"]) == 2
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": 0,
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": 2,
},
]
assert timeseries["meta"] == {
"isOther": False,
"order": 1,
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
def test_get_feedback(self) -> None:
response = self.do_request(
{
"start": self.start,
"end": self.end,
"interval": "1h",
"category": "feedback",
"yAxis": "count(new_issues)",
},
)
assert response.status_code == 200, response.content
assert response.data["meta"] == {
"dataset": "feedback",
"start": self.start.timestamp() * 1000,
"end": self.end.timestamp() * 1000,
}
assert len(response.data["timeSeries"]) == 1
timeseries = response.data["timeSeries"][0]
assert len(timeseries["values"]) == 2
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": 1,
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": 1,
},
]
assert timeseries["meta"] == {
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
def test_other_with_new_issues(self) -> None:
# Release issues.
for release in self.releases:
self.create_group(
project=self.project1, status=0, first_seen=self.end, first_release=release, type=1
)
response = self.do_request(
{
"start": self.start,
"end": self.end,
"interval": "1h",
"category": "issue",
"yAxis": "count(new_issues)",
"groupBy": ["release"],
},
)
assert response.status_code == 200, response.content
assert response.data["meta"] == {
"dataset": "issue",
"start": self.start.timestamp() * 1000,
"end": self.end.timestamp() * 1000,
}
assert len(response.data["timeSeries"]) == 6
values = [(1, 2), (0, 3), (0, 1), (0, 1), (0, 1)]
for index, timeseries in enumerate(response.data["timeSeries"][:5]):
assert timeseries["yAxis"] == "count(new_issues)"
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": values[index][0],
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": values[index][1],
},
], index
assert timeseries["meta"] == {
"isOther": False,
"order": index,
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
timeseries = response.data["timeSeries"][-1]
assert timeseries["yAxis"] == "count(new_issues)"
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": 0,
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": 1,
},
]
assert timeseries["meta"] == {
"isOther": True,
"order": 5,
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
def test_other_with_resolved_issues(self) -> None:
# Release issues.
for release in self.releases:
self.create_group(
project=self.project2,
status=1,
first_seen=self.start,
resolved_at=self.start + timedelta(microseconds=100),
first_release=release,
type=3,
)
response = self.do_request(
{
"start": self.start,
"end": self.end,
"interval": "1h",
"category": "issue",
"yAxis": "count(resolved_issues)",
"groupBy": ["release"],
},
)
assert response.status_code == 200, response.content
assert response.data["meta"] == {
"dataset": "issue",
"start": self.start.timestamp() * 1000,
"end": self.end.timestamp() * 1000,
}
assert len(response.data["timeSeries"]) == 6
values = [(2, 0), (1, 1), (1, 0), (1, 0), (1, 0)]
for index, timeseries in enumerate(response.data["timeSeries"][:5]):
assert timeseries["yAxis"] == "count(resolved_issues)"
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": values[index][0],
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": values[index][1],
},
], index
assert timeseries["meta"] == {
"isOther": False,
"order": index,
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
timeseries = response.data["timeSeries"][-1]
assert timeseries["yAxis"] == "count(resolved_issues)"
assert timeseries["values"] == [
{
"incomplete": False,
"timestamp": self.start.timestamp() * 1000,
"value": 1,
},
{
"incomplete": False,
"timestamp": self.just_before_now.timestamp() * 1000,
"value": 0,
},
]
assert timeseries["meta"] == {
"isOther": True,
"order": 5,
"valueType": "integer",
"valueUnit": None,
"interval": 3_600_000,
}
| OrganizationIssueMetricsTestCase |
python | langchain-ai__langchain | libs/langchain/langchain_classic/memory/entity.py | {
"start": 1959,
"end": 2753
} | class ____(BaseEntityStore):
"""In-memory Entity store."""
store: dict[str, str | None] = {}
@override
def get(self, key: str, default: str | None = None) -> str | None:
return self.store.get(key, default)
@override
def set(self, key: str, value: str | None) -> None:
self.store[key] = value
@override
def delete(self, key: str) -> None:
del self.store[key]
@override
def exists(self, key: str) -> bool:
return key in self.store
@override
def clear(self) -> None:
return self.store.clear()
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
| InMemoryEntityStore |
python | google__pytype | pytype/tests/test_abc1.py | {
"start": 129,
"end": 3631
} | class ____(test_base.BaseTest):
"""Tests for @abc.abstractmethod."""
def test_instantiate_pyi_abstract_class(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import abc
class Example(metaclass=abc.ABCMeta):
@abc.abstractmethod
def foo(self) -> None: ...
""",
)
errors = self.CheckWithErrors(
"""
import foo
foo.Example() # not-instantiable[e]
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e": r"foo\.Example.*foo"})
def test_stray_abstractmethod(self):
errors = self.CheckWithErrors("""
import abc
class Example: # ignored-abstractmethod[e]
@abc.abstractmethod
def foo(self):
pass
""")
self.assertErrorRegexes(errors, {"e": r"foo.*Example"})
def test_multiple_inheritance_implementation_pyi(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import abc
class Interface(metaclass=abc.ABCMeta):
@abc.abstractmethod
def foo(self): ...
class X(Interface): ...
class Implementation(Interface):
def foo(self) -> int: ...
class Foo(X, Implementation): ...
""",
)
self.Check(
"""
import foo
foo.Foo().foo()
""",
pythonpath=[d.path],
)
def test_multiple_inheritance_error_pyi(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import abc
class X: ...
class Interface(metaclass=abc.ABCMeta):
@abc.abstractmethod
def foo(self): ...
class Foo(X, Interface): ...
""",
)
errors = self.CheckWithErrors(
"""
import foo
foo.Foo().foo() # not-instantiable[e]
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e": r"foo\.Foo.*foo"})
def test_abc_metaclass_from_decorator(self):
with test_utils.Tempdir() as d:
d.create_file(
"six.pyi",
"""
from typing import TypeVar, Callable
T = TypeVar('T')
def add_metaclass(metaclass: type) -> Callable[[T], T]: ...
""",
)
self.Check(
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Foo:
@abc.abstractmethod
def foo(self):
pass
""",
pythonpath=[d.path],
)
def test_abc_child_metaclass(self):
with test_utils.Tempdir() as d:
d.create_file(
"six.pyi",
"""
from typing import TypeVar, Callable
T = TypeVar('T')
def add_metaclass(metaclass: type) -> Callable[[T], T]: ...
""",
)
self.Check(
"""
import abc
import six
class ABCChild(abc.ABCMeta):
pass
@six.add_metaclass(ABCChild)
class Foo:
@abc.abstractmethod
def foo(self):
pass
""",
pythonpath=[d.path],
)
def test_misplaced_abstractproperty(self):
errors = self.CheckWithErrors("""
import abc
@abc.abstractproperty
class Example:
pass
Example() # not-callable[e]
""")
self.assertErrorRegexes(errors, {"e": r"'abstractproperty' object"})
if __name__ == "__main__":
test_base.main()
| AbstractMethodTests |
python | ray-project__ray | python/ray/serve/tests/test_config_files/grpc_deployment.py | {
"start": 244,
"end": 1779
} | class ____:
def __call__(self, user_message):
greeting = f"Hello {user_message.name} from {user_message.foo}"
num_x2 = user_message.num * 2
user_response = serve_pb2.UserDefinedResponse(
greeting=greeting,
num_x2=num_x2,
)
return user_response
def Method1(self, user_message):
greeting = f"Hello {user_message.name} from method1"
num_x2 = user_message.num * 3
user_response = serve_pb2.UserDefinedResponse(
greeting=greeting,
num_x2=num_x2,
)
return user_response
@serve.multiplexed(max_num_models_per_replica=1)
async def get_model(self, model_id: str) -> str:
return f"loading model: {model_id}"
async def Method2(self, user_message):
model_id = serve.get_multiplexed_model_id()
model = await self.get_model(model_id)
user_response = serve_pb2.UserDefinedResponse(
greeting=f"Method2 called model, {model}",
)
return user_response
def Streaming(self, user_message):
for i in range(10):
greeting = f"{i}: Hello {user_message.name} from {user_message.foo}"
num_x2 = user_message.num * 2 + i
user_response = serve_pb2.UserDefinedResponse(
greeting=greeting,
num_x2=num_x2,
)
yield user_response
g = GrpcDeployment.options(name="grpc-deployment").bind()
@serve.deployment(ray_actor_options={"num_cpus": 0})
| GrpcDeployment |
python | pennersr__django-allauth | allauth/socialaccount/providers/gitea/provider.py | {
"start": 217,
"end": 771
} | class ____(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get("html_url")
def get_avatar_url(self):
return self.account.extra_data.get("avatar_url")
def to_str(self):
dflt = super(GiteaAccount, self).to_str()
return next(
value
for value in (
self.account.extra_data.get("username", None),
self.account.extra_data.get("login", None),
dflt,
)
if value is not None
)
| GiteaAccount |
python | Pylons__pyramid | tests/test_csrf.py | {
"start": 1739,
"end": 3306
} | class ____(unittest.TestCase):
def _makeOne(self, **kw):
from pyramid.csrf import SessionCSRFStoragePolicy
return SessionCSRFStoragePolicy(**kw)
def test_register_session_csrf_policy(self):
from pyramid.csrf import SessionCSRFStoragePolicy
from pyramid.interfaces import ICSRFStoragePolicy
config = Configurator()
config.set_csrf_storage_policy(self._makeOne())
config.commit()
policy = config.registry.queryUtility(ICSRFStoragePolicy)
self.assertTrue(isinstance(policy, SessionCSRFStoragePolicy))
def test_it_creates_a_new_token(self):
request = DummyRequest(session={})
policy = self._makeOne()
policy._token_factory = lambda: 'foo'
self.assertEqual(policy.get_csrf_token(request), 'foo')
def test_get_csrf_token_returns_the_new_token(self):
request = DummyRequest(session={'_csrft_': 'foo'})
policy = self._makeOne()
self.assertEqual(policy.get_csrf_token(request), 'foo')
token = policy.new_csrf_token(request)
self.assertNotEqual(token, 'foo')
self.assertEqual(token, policy.get_csrf_token(request))
def test_check_csrf_token(self):
request = DummyRequest(session={})
policy = self._makeOne()
self.assertFalse(policy.check_csrf_token(request, 'foo'))
request.session = {'_csrft_': 'foo'}
self.assertTrue(policy.check_csrf_token(request, 'foo'))
self.assertFalse(policy.check_csrf_token(request, 'bar'))
| TestSessionCSRFStoragePolicy |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_compat.py | {
"start": 1701,
"end": 2966
} | class ____:
x: Union[int, "Bar"] | None
Bar.__signature__ = signature(Bar).replace( # type: ignore
parameters=[
Parameter(
"x",
Parameter.POSITIONAL_OR_KEYWORD,
# ruff reports a false-positive UP007 here, since int | ForwardRef("Bar")
# errors on 3.10. We can change this once we drop 3.10.
#
# see also https://github.com/astral-sh/ruff/issues/20883
annotation=Union[int, ForwardRef("Bar"), None], # type: ignore # noqa: UP007
)
]
)
@pytest.mark.parametrize("obj,expected", [(Foo, Foo | None), (Bar, int | Bar | None)])
def test_resolve_fwd_refs(obj, expected):
# See: https://github.com/HypothesisWorks/hypothesis/issues/3519
assert get_type_hints(obj)["x"] == expected
def func(a, b: int, *c: str, d: int | None = None):
pass
@pytest.mark.parametrize(
"pf, names",
[
(partial(func, 1), "b c d"),
(partial(func, 1, 2), "c d"),
(partial(func, 1, 2, 3, 4, 5), "c d"), # varargs don't fill
(partial(func, 1, 2, 3, d=4), "c d"), # kwonly args just get new defaults
],
)
def test_get_hints_through_partial(pf, names):
assert set(get_type_hints(pf)) == set(names.split())
@dataclass
| Bar |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/test_component_scaffolding.py | {
"start": 1096,
"end": 5394
} | class ____(dg.Scaffolder[BaseModel]):
@classmethod
def get_scaffold_params(cls) -> type[BaseModel]:
return NoParams
@dg.scaffold_with(TestScaffolderWithDefaults)
def fn_with_scaffolder_with_defaults() -> None: ...
@dg.scaffold_with(TestScaffolderWithoutDefaults)
def fn_with_scaffolder_without_defaults() -> None: ...
@dg.scaffold_with(NoParamsScaffolder)
def fn_with_no_params_scaffolder() -> None: ...
def test_parse_params_model_no_params() -> None:
"""Test when json_params is None."""
assert (
parse_params_model(obj=fn_with_scaffolder_with_defaults, json_params=None)
== TestParamsModelWithDefaults()
)
with pytest.raises(ValidationError) as exc_info:
assert exc_info
assert parse_params_model(obj=fn_with_scaffolder_without_defaults, json_params=None)
assert parse_params_model(obj=fn_with_no_params_scaffolder, json_params=None) == NoParams()
def test_parse_params_model_no_scaffolder() -> None:
"""Test when object has no scaffolder."""
def no_scaffolder_fn() -> None: ...
with pytest.raises(Exception) as exc_info:
parse_params_model(obj=no_scaffolder_fn, json_params='{"name": "test", "age": 30}')
assert "must be decorated with @scaffold_with" in str(exc_info.value)
def test_parse_params_model_valid_params() -> None:
"""Test when valid JSON params are provided."""
result = parse_params_model(
obj=fn_with_scaffolder_with_defaults, json_params='{"name": "test", "age": 30}'
)
assert isinstance(result, TestParamsModelWithDefaults)
assert result.name == "test"
assert result.age == 30
def test_parse_params_model_empty_params() -> None:
"""Test when no JSON params are provided but scaffolder accepts params."""
result = parse_params_model(obj=fn_with_scaffolder_with_defaults, json_params="{}")
assert isinstance(result, TestParamsModelWithDefaults)
assert result.name is None
assert result.age is None
def test_parse_params_model_without_defaults_empty_params() -> None:
"""Test when no JSON params are provided and model has required fields."""
with pytest.raises(ValidationError) as exc_info:
parse_params_model(obj=fn_with_scaffolder_without_defaults, json_params="{}")
assert "validation error" in str(exc_info.value).lower()
assert "field required" in str(exc_info.value).lower()
def test_parse_params_model_without_defaults_valid_params() -> None:
"""Test when valid JSON params are provided for model with required fields."""
result = parse_params_model(
obj=fn_with_scaffolder_without_defaults,
json_params='{"name": "test", "age": 30, "is_active": true}',
)
assert isinstance(result, TestParamsModelWithoutDefaults)
assert result.name == "test"
assert result.age == 30
assert result.is_active is True
def test_parse_params_model_no_params_but_provided() -> None:
"""Test when scaffolder doesn't accept params but JSON params are provided."""
with pytest.raises(Exception) as exc_info:
parse_params_model(
obj=fn_with_no_params_scaffolder, json_params='{"name": "test", "age": 30}'
)
assert "Input should be null" in str(exc_info.value)
def test_parse_params_model_validation_error() -> None:
"""Test when JSON params fail validation."""
with pytest.raises(ValidationError) as exc_info:
parse_params_model(
obj=fn_with_scaffolder_with_defaults, json_params='{"name": "test", "age": "invalid"}'
)
assert "validation error" in str(exc_info.value).lower()
def test_parse_params_model_invalid_json() -> None:
"""Test when invalid JSON is provided."""
with pytest.raises(ValidationError) as exc_info:
parse_params_model(obj=fn_with_scaffolder_with_defaults, json_params="invalid json")
assert "invalid json" in str(exc_info.value).lower()
def test_scaffold_object():
with temp_code_location_bar():
scaffold_object(
Path("bar/components/qux"),
"dagster_test.components.SimplePipesScriptComponent",
'{"asset_key": "my_asset", "filename": "my_asset.py"}',
"yaml",
project_root=None,
)
assert Path("bar/components/qux/my_asset.py").exists()
| NoParamsScaffolder |
python | ray-project__ray | python/ray/serve/_private/common.py | {
"start": 32976,
"end": 33665
} | class ____:
"""Report from a replica on ongoing requests.
Args:
replica_id: The replica ID of the replica.
aggregated_metrics: A map of metric name to the aggregated value over the past
look_back_period_s seconds at the replica.
metrics: A map of metric name to the list of values running at that replica
over the past look_back_period_s seconds. This is a list because
we take multiple measurements over time.
timestamp: The time at which this report was created.
"""
replica_id: ReplicaID
aggregated_metrics: Dict[str, float]
metrics: Dict[str, TimeSeries]
timestamp: float
| ReplicaMetricReport |
python | python-excel__xlrd | xlrd/formula.py | {
"start": 26018,
"end": 28870
} | class ____(object):
"""
Used in evaluating formulas.
The following table describes the kinds and how their values
are represented.
.. raw:: html
<table border="1" cellpadding="7">
<tr>
<th>Kind symbol</th>
<th>Kind number</th>
<th>Value representation</th>
</tr>
<tr>
<td>oBOOL</td>
<td align="center">3</td>
<td>integer: 0 => False; 1 => True</td>
</tr>
<tr>
<td>oERR</td>
<td align="center">4</td>
<td>None, or an int error code (same as XL_CELL_ERROR in the Cell class).
</td>
</tr>
<tr>
<td>oMSNG</td>
<td align="center">5</td>
<td>Used by Excel as a placeholder for a missing (not supplied) function
argument. Should *not* appear as a final formula result. Value is None.</td>
</tr>
<tr>
<td>oNUM</td>
<td align="center">2</td>
<td>A float. Note that there is no way of distinguishing dates.</td>
</tr>
<tr>
<td>oREF</td>
<td align="center">-1</td>
<td>The value is either None or a non-empty list of
absolute Ref3D instances.<br>
</td>
</tr>
<tr>
<td>oREL</td>
<td align="center">-2</td>
<td>The value is None or a non-empty list of
fully or partially relative Ref3D instances.
</td>
</tr>
<tr>
<td>oSTRG</td>
<td align="center">1</td>
<td>A Unicode string.</td>
</tr>
<tr>
<td>oUNK</td>
<td align="center">0</td>
<td>The kind is unknown or ambiguous. The value is None</td>
</tr>
</table>
"""
#: None means that the actual value of the operand is a variable
#: (depends on cell data), not a constant.
value = None
#: oUNK means that the kind of operand is not known unambiguously.
kind = oUNK
#: The reconstituted text of the original formula. Function names will be
#: in English irrespective of the original language, which doesn't seem
#: to be recorded anywhere. The separator is ",", not ";" or whatever else
#: might be more appropriate for the end-user's locale; patches welcome.
text = '?'
def __init__(self, akind=None, avalue=None, arank=0, atext='?'):
if akind is not None:
self.kind = akind
if avalue is not None:
self.value = avalue
self.rank = arank
# rank is an internal gizmo (operator precedence);
# it's used in reconstructing formula text.
self.text = atext
def __repr__(self):
kind_text = okind_dict.get(self.kind, "?Unknown kind?")
return "Operand(kind=%s, value=%r, text=%r)" \
% (kind_text, self.value, self.text)
| Operand |
python | pypa__installer | tests/test_utils.py | {
"start": 974,
"end": 1466
} | class ____:
@pytest.mark.parametrize(
"string, expected",
[
# Noop
(
"package-1",
"package-1",
),
# PEP 508 canonicalization
(
"ABC..12",
"abc-12",
),
],
)
def test_valid_cases(self, string, expected):
got = canonicalize_name(string)
assert expected == got, (expected, got)
| TestCanonicalizeDistributionName |
python | pypa__packaging | tests/test_tags.py | {
"start": 5129,
"end": 5765
} | class ____:
def test_sys_implementation_name(self, monkeypatch: pytest.MonkeyPatch) -> None:
class MockImplementation:
pass
mock_implementation = MockImplementation()
mock_implementation.name = "sillywalk" # type: ignore[attr-defined]
monkeypatch.setattr(sys, "implementation", mock_implementation, raising=False)
assert tags.interpreter_name() == "sillywalk"
def test_interpreter_short_names(
self, mock_interpreter_name: Callable[[str], bool]
) -> None:
mock_interpreter_name("cpython")
assert tags.interpreter_name() == "cp"
| TestInterpreterName |
python | tqdm__tqdm | tests/tests_contrib_logging.py | {
"start": 2123,
"end": 3100
} | class ____:
def test_should_return_none_for_no_handlers(self):
assert _get_first_found_console_logging_handler([]) is None
def test_should_return_none_without_stream_handler(self):
handler = logging.handlers.MemoryHandler(capacity=1)
assert _get_first_found_console_logging_handler([handler]) is None
def test_should_return_none_for_stream_handler_not_stdout_or_stderr(self):
handler = logging.StreamHandler(StringIO())
assert _get_first_found_console_logging_handler([handler]) is None
def test_should_return_stream_handler_if_stream_is_stdout(self):
handler = logging.StreamHandler(sys.stdout)
assert _get_first_found_console_logging_handler([handler]) == handler
def test_should_return_stream_handler_if_stream_is_stderr(self):
handler = logging.StreamHandler(sys.stderr)
assert _get_first_found_console_logging_handler([handler]) == handler
| TestGetFirstFoundConsoleLoggingHandler |
python | protocolbuffers__protobuf | python/google/protobuf/text_format.py | {
"start": 2510,
"end": 13075
} | class ____(object):
def __init__(self, as_utf8):
self._writer = io.StringIO()
def write(self, val):
return self._writer.write(val)
def close(self):
return self._writer.close()
def getvalue(self):
return self._writer.getvalue()
def MessageToString(
message,
as_utf8=_as_utf8_default,
as_one_line=False,
use_short_repeated_primitives=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
double_format=None,
use_field_number=False,
descriptor_pool=None,
indent=0,
message_formatter=None,
print_unknown_fields=False,
force_colon=False) -> str:
"""Convert protobuf message to text format.
Double values can be formatted compactly with 15 digits of
precision (which is the most that IEEE 754 "double" can guarantee)
using double_format='.15g'. To ensure that converting to text and back to a
proto will result in an identical value, double_format='.17g' should be used.
Args:
message: The protocol buffers message.
as_utf8: Return unescaped Unicode for non-ASCII characters.
as_one_line: Don't introduce newlines between fields.
use_short_repeated_primitives: Use short repeated format for primitives.
pointy_brackets: If True, use angle brackets instead of curly braces for
nesting.
use_index_order: If True, fields of a proto message will be printed using
the order defined in source code instead of the field number, extensions
will be printed at the end of the message and their relative order is
determined by the extension number. By default, use the field number
order.
float_format (str): Deprecated. If set, use this to specify float field
formatting (per the "Format Specification Mini-Language"); otherwise,
shortest float that has same value in wire will be printed. Also affect
double field if double_format is not set but float_format is set.
double_format (str): Deprecated. If set, use this to specify double field
formatting (per the "Format Specification Mini-Language"); if it is not
set but float_format is set, use float_format. Otherwise, use ``str()``
use_field_number: If True, print field numbers instead of names.
descriptor_pool (DescriptorPool): Descriptor pool used to resolve Any types.
indent (int): The initial indent level, in terms of spaces, for pretty
print.
message_formatter (function(message, indent, as_one_line) -> unicode|None):
Custom formatter for selected sub-messages (usually based on message
type). Use to pretty print parts of the protobuf for easier diffing.
print_unknown_fields: If True, unknown fields will be printed.
force_colon: If set, a colon will be added after the field name even if the
field is a proto message.
Returns:
str: A string of the text formatted protocol buffer message.
"""
out = TextWriter(as_utf8)
printer = _Printer(
out,
indent,
as_utf8,
as_one_line,
use_short_repeated_primitives,
pointy_brackets,
use_index_order,
float_format,
double_format,
use_field_number,
descriptor_pool,
message_formatter,
print_unknown_fields=print_unknown_fields,
force_colon=force_colon)
printer.PrintMessage(message)
result = out.getvalue()
out.close()
if as_one_line:
return result.rstrip()
return result
def MessageToBytes(message, **kwargs) -> bytes:
"""Convert protobuf message to encoded text format. See MessageToString."""
text = MessageToString(message, **kwargs)
if isinstance(text, bytes):
return text
codec = 'utf-8' if kwargs.get('as_utf8') else 'ascii'
return text.encode(codec)
def _IsMapEntry(field):
return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type.has_options and
field.message_type.GetOptions().map_entry)
def _IsGroupLike(field):
"""Determines if a field is consistent with a proto2 group.
Args:
field: The field descriptor.
Returns:
True if this field is group-like, false otherwise.
"""
# Groups are always tag-delimited.
if field.type != descriptor.FieldDescriptor.TYPE_GROUP:
return False
# Group fields always are always the lowercase type name.
if field.name != field.message_type.name.lower():
return False
if field.message_type.file != field.file:
return False
# Group messages are always defined in the same scope as the field. File
# level extensions will compare NULL == NULL here, which is why the file
# comparison above is necessary to ensure both come from the same file.
return (
field.message_type.containing_type == field.extension_scope
if field.is_extension
else field.message_type.containing_type == field.containing_type
)
def PrintMessage(message,
out,
indent=0,
as_utf8=_as_utf8_default,
as_one_line=False,
use_short_repeated_primitives=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
double_format=None,
use_field_number=False,
descriptor_pool=None,
message_formatter=None,
print_unknown_fields=False,
force_colon=False):
"""Convert the message to text format and write it to the out stream.
Args:
message: The Message object to convert to text format.
out: A file handle to write the message to.
indent: The initial indent level for pretty print.
as_utf8: Return unescaped Unicode for non-ASCII characters.
as_one_line: Don't introduce newlines between fields.
use_short_repeated_primitives: Use short repeated format for primitives.
pointy_brackets: If True, use angle brackets instead of curly braces for
nesting.
use_index_order: If True, print fields of a proto message using the order
defined in source code instead of the field number. By default, use the
field number order.
float_format: If set, use this to specify float field formatting
(per the "Format Specification Mini-Language"); otherwise, shortest
float that has same value in wire will be printed. Also affect double
field if double_format is not set but float_format is set.
double_format: If set, use this to specify double field formatting
(per the "Format Specification Mini-Language"); if it is not set but
float_format is set, use float_format. Otherwise, str() is used.
use_field_number: If True, print field numbers instead of names.
descriptor_pool: A DescriptorPool used to resolve Any types.
message_formatter: A function(message, indent, as_one_line): unicode|None
to custom format selected sub-messages (usually based on message type).
Use to pretty print parts of the protobuf for easier diffing.
print_unknown_fields: If True, unknown fields will be printed.
force_colon: If set, a colon will be added after the field name even if
the field is a proto message.
"""
printer = _Printer(
out=out, indent=indent, as_utf8=as_utf8,
as_one_line=as_one_line,
use_short_repeated_primitives=use_short_repeated_primitives,
pointy_brackets=pointy_brackets,
use_index_order=use_index_order,
float_format=float_format,
double_format=double_format,
use_field_number=use_field_number,
descriptor_pool=descriptor_pool,
message_formatter=message_formatter,
print_unknown_fields=print_unknown_fields,
force_colon=force_colon)
printer.PrintMessage(message)
def PrintField(field,
value,
out,
indent=0,
as_utf8=_as_utf8_default,
as_one_line=False,
use_short_repeated_primitives=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
double_format=None,
message_formatter=None,
print_unknown_fields=False,
force_colon=False):
"""Print a single field name/value pair."""
printer = _Printer(out, indent, as_utf8, as_one_line,
use_short_repeated_primitives, pointy_brackets,
use_index_order, float_format, double_format,
message_formatter=message_formatter,
print_unknown_fields=print_unknown_fields,
force_colon=force_colon)
printer.PrintField(field, value)
def PrintFieldValue(field,
value,
out,
indent=0,
as_utf8=_as_utf8_default,
as_one_line=False,
use_short_repeated_primitives=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
double_format=None,
message_formatter=None,
print_unknown_fields=False,
force_colon=False):
"""Print a single field value (not including name)."""
printer = _Printer(out, indent, as_utf8, as_one_line,
use_short_repeated_primitives, pointy_brackets,
use_index_order, float_format, double_format,
message_formatter=message_formatter,
print_unknown_fields=print_unknown_fields,
force_colon=force_colon)
printer.PrintFieldValue(field, value)
def _BuildMessageFromTypeName(type_name, descriptor_pool):
"""Returns a protobuf message instance.
Args:
type_name: Fully-qualified protobuf message type name string.
descriptor_pool: DescriptorPool instance.
Returns:
A Message instance of type matching type_name, or None if the a Descriptor
wasn't found matching type_name.
"""
# pylint: disable=g-import-not-at-top
if descriptor_pool is None:
from google.protobuf import descriptor_pool as pool_mod
descriptor_pool = pool_mod.Default()
from google.protobuf import message_factory
try:
message_descriptor = descriptor_pool.FindMessageTypeByName(type_name)
except KeyError:
return None
message_type = message_factory.GetMessageClass(message_descriptor)
return message_type()
# These values must match WireType enum in //google/protobuf/wire_format.h.
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
| TextWriter |
python | tiangolo__fastapi | scripts/people.py | {
"start": 1736,
"end": 1837
} | class ____(BaseModel):
totalCount: int
nodes: list[DiscussionsCommentsNode]
| DiscussionsComments |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_text.py | {
"start": 23378,
"end": 28718
} | class ____(Data2VecTextPreTrainedModel):
_no_split_modules = ["Data2VecTextEmbeddings", "Data2VecTextLayer"]
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = Data2VecTextEmbeddings(config)
self.encoder = Data2VecTextEncoder(config)
self.pooler = Data2VecTextPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if input_ids is not None:
device = input_ids.device
seq_length = input_ids.shape[1]
else:
device = inputs_embeds.device
seq_length = inputs_embeds.shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
attention_mask, encoder_attention_mask = self._create_attention_masks(
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
embedding_output=embedding_output,
encoder_hidden_states=encoder_hidden_states,
cache_position=cache_position,
past_key_values=past_key_values,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_ids=position_ids,
**kwargs,
)
sequence_output = encoder_outputs.last_hidden_state
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
)
def _create_attention_masks(
self,
attention_mask,
encoder_attention_mask,
embedding_output,
encoder_hidden_states,
cache_position,
past_key_values,
):
if self.config.is_decoder:
attention_mask = create_causal_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
)
else:
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
)
if encoder_attention_mask is not None:
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
return attention_mask, encoder_attention_mask
| Data2VecTextModel |
python | numba__numba | numba/tests/test_polynomial.py | {
"start": 1826,
"end": 4481
} | class ____(TestPolynomialBase):
def assert_no_domain_change(self, name, cfunc, args):
msg = name + "() argument must not cause a domain change."
self.assert_error(cfunc, args, msg)
@needs_lapack
def test_roots(self):
cfunc = jit(nopython=True)(roots_fn)
default_resolution = np.finfo(np.float64).resolution
def check(a, **kwargs):
expected = roots_fn(a, **kwargs)
got = cfunc(a, **kwargs)
# eigen decomposition used so type specific impl
# will be used in numba whereas a wide type impl
# will be used in numpy, so compare using a more
# fuzzy comparator
if a.dtype in self.dtypes:
resolution = np.finfo(a.dtype).resolution
else:
# this is for integer types when roots() will cast to float64
resolution = default_resolution
np.testing.assert_allclose(
expected,
got,
rtol=10 * resolution,
atol=100 * resolution # zeros tend to be fuzzy
)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, **kwargs)
# test vectors in real space
# contrived examples to trip branches
r_vectors = (
np.array([1]),
np.array([1, 3, 2]),
np.array([0, 0, 0]),
np.array([1, 6, 11, 6]),
np.array([0, 0, 0, 1, 3, 2]),
np.array([1, 1, 0, 0, 0]),
np.array([0, 0, 1, 0, 0, 0])
)
# test loop real space
for v, dtype in \
product(r_vectors, [np.int32, np.int64] + list(self.dtypes)):
a = v.astype(dtype)
check(a)
c_vectors = (
np.array([1 + 1j]),
np.array([1, 3 + 1j, 2]),
np.array([0, 0 + 0j, 0]),
np.array([1, 6 + 1j, 11, 6]),
np.array([0, 0, 0, 1 + 1j, 3, 2]),
np.array([1 + 1j, 1, 0, 0, 0]),
np.array([0, 0, 1 + 1j, 0, 0, 0])
)
# test loop complex space
for v, dtype in product(c_vectors, self.dtypes[2:]):
a = v.astype(dtype)
check(a)
# check input with dimension > 1 raises
self.assert_1d_input(cfunc, (np.arange(4.).reshape(2, 2),))
# check real input with complex roots raises
x = np.array([7., 2., 0., 1.])
self.assert_no_domain_change("eigvals", cfunc, (x,))
# but works fine if type conv to complex first
cfunc(x.astype(np.complex128))
| TestPoly1D |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 66626,
"end": 67340
} | class ____(Operation):
def call(self, x):
return backend.numpy.cosh(x)
def compute_output_spec(self, x):
dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx()))
if dtype == "int64":
dtype = backend.floatx()
else:
dtype = dtypes.result_type(dtype, float)
return KerasTensor(x.shape, dtype=dtype)
@keras_export(["keras.ops.cosh", "keras.ops.numpy.cosh"])
def cosh(x):
"""Hyperbolic cosine, element-wise.
Arguments:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
"""
if any_symbolic_tensors((x,)):
return Cosh().symbolic_call(x)
return backend.numpy.cosh(x)
| Cosh |
python | pytorch__pytorch | torchgen/model.py | {
"start": 1683,
"end": 2670
} | class ____(Enum):
function = auto()
method = auto()
# Default kernel namespace
DEFAULT_KERNEL_NAMESPACE = "at::native"
# NOTE: Keep the list in sync with `DispatchKey` in c10/core/DispatchKey.h
BACKEND_COMPONENTS = [
"CPU",
"CUDA",
"HIP",
"XLA",
"MTIA",
"MPS",
"IPU",
"XPU",
"HPU",
"VE",
"Lazy",
"Meta",
"PrivateUse1",
"PrivateUse2",
"PrivateUse3",
]
FUNCTIONALITY_KEYS = [
"",
"Quantized",
"Sparse",
"SparseCsr",
"NestedTensor",
"Autograd",
]
# This list guards dispatches that can be used in derivatives.yaml
# For now we omit AutogradFunctionality and AutogradOther
AUTOGRAD_KEYS = ["AutogradNestedTensor"] + [
"Autograd" + component for component in BACKEND_COMPONENTS
]
FRAGMENT_NAMESPACES = {"quantized", "quantized_decomposed"}
# This doesn't have to be in sync with the header, it only needs to contain
# entries that we actually use in the codegen or want pyi entries for
| Variant |
python | gevent__gevent | src/gevent/testing/flaky.py | {
"start": 1455,
"end": 1662
} | class ____(unittest.SkipTest):
"""
A unittest exception that causes the test to be skipped when raised.
Use this carefully, it is a code smell and indicates an undebugged problem.
"""
| FlakyTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_font02.py | {
"start": 315,
"end": 1819
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_font02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [43945344, 45705856]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_title({"name": "Title"})
chart.set_x_axis(
{
"name": "XXX",
"name_font": {"bold": 0, "italic": 1},
"num_font": {"size": 11, "bold": 1, "italic": 1},
}
)
chart.set_y_axis(
{
"name": "YYY",
"name_font": {"bold": 1, "italic": 1},
"num_font": {"size": 9, "bold": 0, "italic": 1},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | weaviate__weaviate-python-client | weaviate/gql/aggregate.py | {
"start": 426,
"end": 1791
} | class ____:
query: Optional[str]
alpha: Optional[float]
vector: Optional[List[float]]
properties: Optional[List[str]]
target_vectors: Optional[List[str]]
max_vector_distance: Optional[List[str]]
def __init__(self, content: dict) -> None:
self.query = content.get("query")
self.alpha = content.get("alpha")
self.vector = content.get("vector")
self.properties = content.get("properties")
self.target_vectors = content.get("targetVectors")
self.max_vector_distance = content.get("maxVectorDistance")
def __str__(self) -> str:
ret = ""
if self.query is not None:
ret += f"query: {_sanitize_str(self.query)}"
if self.vector is not None:
ret += f", vector: {self.vector}"
if self.alpha is not None:
ret += f", alpha: {self.alpha}"
if self.properties is not None and len(self.properties) > 0:
props = '","'.join(self.properties)
ret += f', properties: ["{props}"]'
if self.target_vectors is not None:
target_vectors = '","'.join(self.target_vectors)
ret += f', targetVectors: ["{target_vectors}"]'
if self.max_vector_distance is not None:
ret += f", maxVectorDistance:{self.max_vector_distance}"
return "hybrid:{" + ret + "}"
| Hybrid |
python | mlflow__mlflow | mlflow/store/artifact/artifact_repo.py | {
"start": 16836,
"end": 18769
} | class ____(ABC):
@abstractmethod
def create_multipart_upload(
self, local_file: str, num_parts: int, artifact_path: str | None = None
) -> CreateMultipartUploadResponse:
"""
Initiate a multipart upload and retrieve the pre-signed upload URLS and upload id.
Args:
local_file: Path of artifact to upload.
num_parts: Number of parts to upload. Only required by S3 and GCS.
artifact_path: Directory within the run's artifact directory in which to upload the
artifact.
"""
@abstractmethod
def complete_multipart_upload(
self,
local_file: str,
upload_id: str,
parts: list[MultipartUploadPart],
artifact_path: str | None = None,
) -> None:
"""
Complete a multipart upload.
Args:
local_file: Path of artifact to upload.
upload_id: The upload ID. Only required by S3 and GCS.
parts: A list containing the metadata of each part that has been uploaded.
artifact_path: Directory within the run's artifact directory in which to upload the
artifact.
"""
@abstractmethod
def abort_multipart_upload(
self,
local_file: str,
upload_id: str,
artifact_path: str | None = None,
) -> None:
"""
Abort a multipart upload.
Args:
local_file: Path of artifact to upload.
upload_id: The upload ID. Only required by S3 and GCS.
artifact_path: Directory within the run's artifact directory in which to upload the
artifact.
"""
def verify_artifact_path(artifact_path):
if artifact_path and path_not_unique(artifact_path):
raise MlflowException(
f"Invalid artifact path: '{artifact_path}'. {bad_path_message(artifact_path)}"
)
| MultipartUploadMixin |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/circular2.py | {
"start": 326,
"end": 358
} | class ____(D):
pass
E.a_attr
| F |
python | gevent__gevent | src/greentest/3.11/test_signal.py | {
"start": 27750,
"end": 31736
} | class ____(unittest.TestCase):
def setUp(self):
self.hndl_called = False
self.hndl_count = 0
self.itimer = None
self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
def tearDown(self):
signal.signal(signal.SIGALRM, self.old_alarm)
if self.itimer is not None: # test_itimer_exc doesn't change this attr
# just ensure that itimer is stopped
signal.setitimer(self.itimer, 0)
def sig_alrm(self, *args):
self.hndl_called = True
def sig_vtalrm(self, *args):
self.hndl_called = True
if self.hndl_count > 3:
# it shouldn't be here, because it should have been disabled.
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
"timer.")
elif self.hndl_count == 3:
# disable ITIMER_VIRTUAL, this function shouldn't be called anymore
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
self.hndl_count += 1
def sig_prof(self, *args):
self.hndl_called = True
signal.setitimer(signal.ITIMER_PROF, 0)
def test_itimer_exc(self):
# XXX I'm assuming -1 is an invalid itimer, but maybe some platform
# defines it ?
self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
# Negative times are treated as zero on some platforms.
if 0:
self.assertRaises(signal.ItimerError,
signal.setitimer, signal.ITIMER_REAL, -1)
def test_itimer_real(self):
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1.0)
signal.pause()
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform in ('netbsd5',),
'itimer not reliable (does not mix well with threading) on some BSDs.')
def test_itimer_virtual(self):
self.itimer = signal.ITIMER_VIRTUAL
signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
signal.setitimer(self.itimer, 0.3, 0.2)
for _ in support.busy_retry(60.0, error=False):
# use up some virtual time by doing real work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
# sig_vtalrm handler stopped this itimer
break
else:
# bpo-8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# virtual itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
def test_itimer_prof(self):
self.itimer = signal.ITIMER_PROF
signal.signal(signal.SIGPROF, self.sig_prof)
signal.setitimer(self.itimer, 0.2, 0.2)
for _ in support.busy_retry(60.0, error=False):
# do some work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
# sig_prof handler stopped this itimer
break
else:
# bpo-8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# profiling itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
def test_setitimer_tiny(self):
# bpo-30807: C setitimer() takes a microsecond-resolution interval.
# Check that float -> timeval conversion doesn't round
# the interval down to zero, which would disable the timer.
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1e-6)
time.sleep(1)
self.assertEqual(self.hndl_called, True)
| ItimerTest |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 73372,
"end": 73569
} | class ____(_PrintableStructure):
_fields_ = [
("ucodeType", c_uint8),
("major", c_uint),
("minor", c_uint),
("subMinor", c_uint)
]
| c_nvmlNvlinkFirmwareVersion_t |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/config.py | {
"start": 4996,
"end": 5220
} | class ____(BaseConfig):
bypass_reason: Optional[str] = Field(
default=None, description="Reason why the Metadata `AllowedHosts` check should be skipped for this certified connector."
)
| AllowedHostsConfiguration |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_bigtable.py | {
"start": 25957,
"end": 31207
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook")
def test_delete_execute(self, mock_hook):
op = BigtableDeleteTableOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=PROJECT_ID, instance_id=INSTANCE_ID, table_id=TABLE_ID
)
@pytest.mark.parametrize(
("missing_attribute", "project_id", "instance_id", "table_id"),
[
("instance_id", PROJECT_ID, "", TABLE_ID),
("table_id", PROJECT_ID, INSTANCE_ID, ""),
],
)
@mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook")
def test_empty_attribute(self, mock_hook, missing_attribute, project_id, instance_id, table_id):
with pytest.raises(AirflowException) as ctx:
BigtableDeleteTableOperator(
project_id=project_id,
instance_id=instance_id,
table_id=table_id,
task_id="id",
gcp_conn_id=GCP_CONN_ID,
)
err = ctx.value
assert str(err) == f"Empty parameter: {missing_attribute}"
mock_hook.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook")
def test_deleting_table_that_doesnt_exists(self, mock_hook):
op = BigtableDeleteTableOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Table not found.")
)
op.execute(None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=PROJECT_ID, instance_id=INSTANCE_ID, table_id=TABLE_ID
)
@mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook")
def test_deleting_table_that_doesnt_exists_empty_project_id(self, mock_hook):
op = BigtableDeleteTableOperator(
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.NotFound("Table not found.")
)
op.execute(None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=None, instance_id=INSTANCE_ID, table_id=TABLE_ID
)
@mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook")
def test_deleting_table_when_instance_doesnt_exists(self, mock_hook):
op = BigtableDeleteTableOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.get_instance.return_value = None
with pytest.raises(AirflowException) as ctx:
op.execute(None)
err = ctx.value
assert str(err) == f"Dependency: instance '{INSTANCE_ID}' does not exist."
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_table.assert_not_called()
@mock.patch("airflow.providers.google.cloud.operators.bigtable.BigtableHook")
def test_different_error_reraised(self, mock_hook):
op = BigtableDeleteTableOperator(
project_id=PROJECT_ID,
instance_id=INSTANCE_ID,
table_id=TABLE_ID,
task_id="id",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_table.side_effect = mock.Mock(
side_effect=google.api_core.exceptions.GoogleAPICallError("error")
)
with pytest.raises(google.api_core.exceptions.GoogleAPICallError):
op.execute(None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.delete_table.assert_called_once_with(
project_id=PROJECT_ID, instance_id=INSTANCE_ID, table_id=TABLE_ID
)
| TestBigtableTableDelete |
python | rq__rq | tests/test_callbacks.py | {
"start": 7702,
"end": 11387
} | class ____(RQTestCase):
def test_success_callback(self):
"""Test success callback is executed only when job is successful"""
queue = Queue(connection=self.connection)
worker = SimpleWorker([queue], connection=self.connection)
# Callback is executed when job is successfully executed
job = queue.enqueue(say_hello, on_success=save_result)
worker.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
self.assertEqual(self.connection.get('success_callback:%s' % job.id).decode(), job.return_value())
job = queue.enqueue(div_by_zero, on_success=save_result)
worker.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertFalse(self.connection.exists('success_callback:%s' % job.id))
# test string callbacks
job = queue.enqueue(say_hello, on_success=Callback('tests.fixtures.save_result'))
worker.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
self.assertEqual(self.connection.get('success_callback:%s' % job.id).decode(), job.return_value())
job = queue.enqueue(div_by_zero, on_success=Callback('tests.fixtures.save_result'))
worker.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertFalse(self.connection.exists('success_callback:%s' % job.id))
def test_erroneous_success_callback(self):
"""Test exception handling when executing success callback"""
queue = Queue(connection=self.connection)
worker = Worker([queue], connection=self.connection)
# If success_callback raises an error, job will is considered as failed
job = queue.enqueue(say_hello, on_success=erroneous_callback)
worker.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FAILED)
# test string callbacks
job = queue.enqueue(say_hello, on_success=Callback('tests.fixtures.erroneous_callback'))
worker.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FAILED)
def test_failure_callback(self):
"""Test failure callback is executed only when job a fails"""
queue = Queue(connection=self.connection)
worker = SimpleWorker([queue], connection=self.connection)
# Callback is executed when job is successfully executed
job = queue.enqueue(div_by_zero, on_failure=save_exception)
worker.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FAILED)
job.refresh()
print(job.exc_info)
self.assertIn('div_by_zero', self.connection.get('failure_callback:%s' % job.id).decode())
job = queue.enqueue(div_by_zero, on_success=save_result)
worker.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertFalse(self.connection.exists('failure_callback:%s' % job.id))
# test string callbacks
job = queue.enqueue(div_by_zero, on_failure=Callback('tests.fixtures.save_exception'))
worker.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FAILED)
job.refresh()
print(job.exc_info)
self.assertIn('div_by_zero', self.connection.get('failure_callback:%s' % job.id).decode())
job = queue.enqueue(div_by_zero, on_success=Callback('tests.fixtures.save_result'))
worker.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertFalse(self.connection.exists('failure_callback:%s' % job.id))
# TODO: add test case for error while executing failure callback
| WorkerCallbackTestCase |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 3303,
"end": 3525
} | class ____(models.Model):
parent = models.ForeignKey(Parent, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
def __str__(self):
return f"{self.name} - child of {self.parent.name}"
| Child |
python | Lightning-AI__lightning | src/lightning/pytorch/utilities/model_summary/model_summary_deepspeed.py | {
"start": 1847,
"end": 4691
} | class ____(ModelSummary):
@override
def summarize(self) -> dict[str, DeepSpeedLayerSummary]: # type: ignore[override]
summary = OrderedDict((name, DeepSpeedLayerSummary(module)) for name, module in self.named_modules)
if self._model.example_input_array is not None:
self._forward_example_input()
for layer in summary.values():
layer.detach_hook()
if self._max_depth >= 1:
# remove summary entries with depth > max_depth
for k in [k for k in summary if k.count(".") >= self._max_depth]:
del summary[k]
return summary
@property
@override
def total_parameters(self) -> int:
return sum(deepspeed_param_size(p) if not _tensor_has_shape(p) else 0 for p in self._model.parameters())
@property
@override
def trainable_parameters(self) -> int:
return sum(
deepspeed_param_size(p) if not _tensor_has_shape(p) else 0
for p in self._model.parameters()
if p.requires_grad
)
@property
def parameters_per_layer(self) -> list[int]:
return [layer.average_shard_parameters for layer in self._layer_summary.values()]
@override
def _get_summary_data(self) -> list[tuple[str, list[str]]]:
"""Makes a summary listing with:
Layer Name, Layer Type, Number of Parameters, Input Sizes, Output Sizes, Model Size
"""
arrays = [
(" ", list(map(str, range(len(self._layer_summary))))),
("Name", self.layer_names),
("Type", self.layer_types),
("Params", list(map(get_human_readable_count, self.param_nums))),
("Params per Device", list(map(get_human_readable_count, self.parameters_per_layer))),
("Mode", ["train" if mode else "eval" for mode in self.training_modes]),
("FLOPs", list(map(get_human_readable_count, (sum(x.values()) for x in self.flop_counts.values())))),
]
if self._model.example_input_array is not None:
arrays.append(("In sizes", [str(x) for x in self.in_sizes]))
arrays.append(("Out sizes", [str(x) for x in self.out_sizes]))
total_leftover_params = self.total_parameters - self.total_layer_params
if total_leftover_params > 0:
self._add_leftover_params_to_summary(arrays, total_leftover_params)
return arrays
@override
def _add_leftover_params_to_summary(self, arrays: list[tuple[str, list[str]]], total_leftover_params: int) -> None:
"""Add summary of params not associated with module or layer to model summary."""
super()._add_leftover_params_to_summary(arrays, total_leftover_params)
layer_summaries = dict(arrays)
layer_summaries["Params per Device"].append(NOT_APPLICABLE)
| DeepSpeedSummary |
python | cython__cython | Cython/Shadow.py | {
"start": 8144,
"end": 8332
} | class ____(PointerType):
def __init__(self, value=None):
if value is None:
self._items = [None] * self._n
else:
super().__init__(value)
| ArrayType |
python | google__jax | jax/_src/numpy/lax_numpy.py | {
"start": 134261,
"end": 337959
} | class ____(Protocol):
def __call__(self, array: ArrayLike, /, *,
axis: int | None = None,
keepdims: bool = False) -> Array: ...
def _broadcast_to_pairs(nvals: PadValueLike, nd: int, name: str) -> PadValue:
try:
nvals = np.asarray(tree_map(
lambda x: core.concrete_or_error(None, x, context=f"{name} argument of jnp.pad"),
nvals))
except ValueError as e:
# In numpy 1.24
if "array has an inhomogeneous shape" in str(e):
raise TypeError(f'`{name}` entries must be the same shape: {nvals}') from e
raise
def as_scalar_dim(v):
if core.is_dim(v) or not np.shape(v):
return v
else:
raise TypeError(f'`{name}` entries must be the same shape: {nvals}')
if nvals.shape == (nd, 2):
# ((before_1, after_1), ..., (before_N, after_N))
return tuple((as_scalar_dim(nval[0]), as_scalar_dim(nval[1])) for nval in nvals)
elif nvals.shape == (1, 2):
# ((before, after),)
v1_2 = as_scalar_dim(nvals[0, 0]), as_scalar_dim(nvals[0, 1])
return tuple(v1_2 for i in range(nd))
elif nvals.shape == (2,):
# (before, after) (not in the numpy docstring but works anyway)
v1_2 = as_scalar_dim(nvals[0]), as_scalar_dim(nvals[1])
return tuple(v1_2 for i in range(nd))
elif nvals.shape == (1,):
# (pad,)
v = as_scalar_dim(nvals[0])
return tuple((v, v) for i in range(nd))
elif nvals.shape == ():
# pad
v = as_scalar_dim(nvals.flat[0])
return tuple((v, v) for i in range(nd))
else:
raise ValueError(f"jnp.pad: {name} with {nd=} has unsupported shape {nvals.shape}. "
f"Valid shapes are ({nd}, 2), (1, 2), (2,), (1,), or ().")
def _check_no_padding(axis_padding: tuple[Any, Any], mode: str):
if (axis_padding[0] > 0 or axis_padding[1] > 0):
msg = "Cannot apply '{}' padding to empty axis"
raise ValueError(msg.format(mode))
def _pad_constant(array: Array, pad_width: PadValue[int], constant_values: Array) -> Array:
nd = np.ndim(array)
constant_values = lax._convert_element_type(
constant_values, array.dtype, dtypes.is_weakly_typed(array))
constant_values_nd = np.ndim(constant_values)
if constant_values_nd == 0:
widths = [(low, high, 0) for (low, high) in pad_width]
return lax.pad(array, constant_values, widths)
if constant_values_nd == 1:
if constant_values.shape[-1] == 1:
widths = [(low, high, 0) for (low, high) in pad_width]
return lax.pad(array, squeeze(constant_values), widths)
elif constant_values.shape[-1] != 2:
raise ValueError("jnp.pad: constant_values has unsupported shape "
f"{constant_values.shape}. If the shape is 1D or 2D, the "
"last dimension must be of size 1 or 2.")
constant_values = broadcast_to(constant_values, (nd, 2))
for i in range(nd):
widths = [(0, 0, 0)] * nd
if pad_width[i][0] != 0:
widths[i] = (pad_width[i][0], 0, 0)
array = lax.pad(array, constant_values[i, 0], widths)
if pad_width[i][1] != 0:
widths[i] = (0, pad_width[i][1], 0)
array = lax.pad(array, constant_values[i, 1], widths)
return array
def _pad_wrap(array: Array, pad_width: PadValue[int]) -> Array:
for i in range(np.ndim(array)):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], "wrap")
continue
size = array.shape[i]
left_repeats, left_remainder = divmod(pad_width[i][0], size)
right_repeats, right_remainder = divmod(pad_width[i][1], size)
total_repeats = left_repeats + right_repeats + 1
parts = []
if left_remainder > 0:
parts += [lax_slicing.slice_in_dim(array, size - left_remainder, size, axis=i)]
parts += total_repeats * [array]
if right_remainder > 0:
parts += [lax_slicing.slice_in_dim(array, 0, right_remainder, axis=i)]
array = lax.concatenate(parts, dimension=i)
return array
def _pad_symmetric_or_reflect(array: Array, pad_width: PadValue[int],
mode: str, reflect_type: str) -> Array:
assert mode in ("symmetric", "reflect")
assert reflect_type in ("even", "odd")
for i in range(np.ndim(array)):
axis_size = array.shape[i]
if axis_size == 0:
_check_no_padding(pad_width[i], mode)
continue
if pad_width[i][0] == 0 and pad_width[i][1] == 0:
continue
def build_padding(array, padding, before):
if before:
edge = lax_slicing.slice_in_dim(array, 0, 1, axis=i)
else:
edge = lax_slicing.slice_in_dim(array, -1, None, axis=i)
# Try to give nicer error messages for unsupported shape polymorphic uses
shape_poly_error_msg = lambda: (
"Shape polymorphism is supported for jnp.pad with 'reflect' or "
"'symmetric' padding mode only when it is possible to determine "
f"at lowering time that the axis size (= {axis_size}) is larger than 1 "
f"and larger or equal than the padding length (= {padding}). "
f"Error while handling {'left' if before else 'right'} padding on axis {i}.")
try:
# We check that we can determine all comparisons.
offset = 1 if (mode == "reflect" and axis_size > 1) else 0
has_poly_dim = not core.is_constant_shape((axis_size, padding))
# For shape polymorphism, ensure the loop below ends after 1 iteration
if has_poly_dim and not (axis_size > 1 and axis_size - offset >= padding):
raise ValueError(shape_poly_error_msg())
except core.InconclusiveDimensionOperation as e:
raise ValueError(shape_poly_error_msg()) from e
while padding > 0:
curr_pad = min(padding, axis_size - offset)
padding -= curr_pad
if has_poly_dim: assert padding == 0
if before:
start = offset
stop = offset + curr_pad
else:
start = -(curr_pad + offset)
stop = None if (mode == "symmetric" or axis_size == 1) else -1
x = lax_slicing.slice_in_dim(array, start, stop, axis=i)
x = flip(x, axis=i)
if reflect_type == 'odd':
x = 2 * edge - x
if axis_size > 1:
if before:
edge = lax_slicing.slice_in_dim(x, 0, 1, axis=i)
else:
edge = lax_slicing.slice_in_dim(x, -1, None, axis=i)
if before:
array = lax.concatenate([x, array], dimension=i)
else:
array = lax.concatenate([array, x], dimension=i)
return array
array = build_padding(array, pad_width[i][0], before=True)
array = build_padding(array, pad_width[i][1], before=False)
return array
def _pad_edge(array: Array, pad_width: PadValue[int]) -> Array:
nd = np.ndim(array)
for i in range(nd):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], "edge")
continue
n = array.shape[i]
npad_before, npad_after = pad_width[i]
edge_before = lax_slicing.slice_in_dim(array, 0, 1, axis=i)
pad_before = repeat(edge_before, npad_before, axis=i)
edge_after = lax_slicing.slice_in_dim(array, n-1, n, axis=i)
pad_after = repeat(edge_after, npad_after, axis=i)
array = lax.concatenate([pad_before, array, pad_after], dimension=i)
return array
def _pad_linear_ramp(array: Array, pad_width: PadValue[int],
end_values: PadValue[ArrayLike]) -> Array:
for axis in range(np.ndim(array)):
edge_before = lax_slicing.slice_in_dim(array, 0, 1, axis=axis)
edge_after = lax_slicing.slice_in_dim(array, -1, None, axis=axis)
ramp_before = array_creation.linspace(
start=end_values[axis][0],
stop=edge_before.squeeze(axis), # Dimension is replaced by linspace
num=pad_width[axis][0],
endpoint=False,
dtype=array.dtype,
axis=axis
)
ramp_before = lax._convert_element_type(
ramp_before, weak_type=dtypes.is_weakly_typed(array))
ramp_after = array_creation.linspace(
start=end_values[axis][1],
stop=edge_after.squeeze(axis), # Dimension is replaced by linspace
num=pad_width[axis][1],
endpoint=False,
dtype=array.dtype,
axis=axis
)
ramp_after = lax._convert_element_type(
ramp_after, weak_type=dtypes.is_weakly_typed(array))
# Reverse linear space in appropriate dimension
ramp_after = flip(ramp_after, axis)
array = lax.concatenate([ramp_before, array, ramp_after], dimension=axis)
return array
def _pad_stats(array: Array, pad_width: PadValue[int],
stat_length: PadValue[int] | None,
stat_func: PadStatFunc) -> Array:
nd = np.ndim(array)
for i in range(nd):
if stat_length is None:
stat_before = stat_func(array, axis=i, keepdims=True)
stat_after = stat_before
else:
array_length = array.shape[i]
length_before, length_after = stat_length[i]
if length_before == 0 or length_after == 0:
raise ValueError("stat_length of 0 yields no value for padding")
# Limit stat_length to length of array.
length_before = min(length_before, array_length)
length_after = min(length_after, array_length)
slice_before = lax_slicing.slice_in_dim(array, 0, length_before, axis=i)
slice_after = lax_slicing.slice_in_dim(array, -length_after, None, axis=i)
stat_before = stat_func(slice_before, axis=i, keepdims=True)
stat_after = stat_func(slice_after, axis=i, keepdims=True)
if np.issubdtype(array.dtype, np.integer):
stat_before = round(stat_before)
stat_after = round(stat_after)
stat_before = lax._convert_element_type(
stat_before, array.dtype, dtypes.is_weakly_typed(array))
stat_after = lax._convert_element_type(
stat_after, array.dtype, dtypes.is_weakly_typed(array))
npad_before, npad_after = pad_width[i]
pad_before = repeat(stat_before, npad_before, axis=i)
pad_after = repeat(stat_after, npad_after, axis=i)
array = lax.concatenate([pad_before, array, pad_after], dimension=i)
return array
def _pad_empty(array: Array, pad_width: PadValue[int]) -> Array:
  """Pad each axis with uninitialized values.

  Note: jax.numpy.empty = jax.numpy.zeros, so "empty" padding is zero padding.
  """
  for axis in range(np.ndim(array)):
    n_before, n_after = pad_width[axis]
    before = array_creation.empty_like(
        array,
        shape=array.shape[:axis] + (n_before,) + array.shape[axis + 1:])
    after = array_creation.empty_like(
        array,
        shape=array.shape[:axis] + (n_after,) + array.shape[axis + 1:])
    array = lax.concatenate([before, array, after], dimension=axis)
  return array
def _pad_func(array: Array, pad_width: PadValue[int], func: Callable[..., Any], **kwargs) -> Array:
  """Pad ``array`` using a user-supplied callable, applied along each axis."""
  pad_width = _broadcast_to_pairs(pad_width, np.ndim(array), "pad_width")
  # Start from a zero-padded array; the callable fills in the pad regions.
  result = _pad_constant(array, pad_width, asarray(0))
  for ax in range(np.ndim(result)):
    result = apply_along_axis(func, ax, result, pad_width[ax], ax, kwargs)
  return result
@api.jit(static_argnums=(1, 2, 4, 5, 6))
def _pad(array: ArrayLike, pad_width: PadValueLike[int], mode: str,
         constant_values: ArrayLike, stat_length: PadValueLike[int],
         end_values: PadValueLike[ArrayLike], reflect_type: str):
  """Validate pad arguments and dispatch to the mode-specific implementation.

  Args:
    array: the array to pad.
    pad_width: per-axis (before, after) pad widths (broadcast below).
    mode: one of the pad modes already vetted by :func:`pad`.
    constant_values: used only for ``mode='constant'``.
    stat_length: used only for the statistic modes.
    end_values: used only for ``mode='linear_ramp'``.
    reflect_type: used only for ``mode in ('reflect', 'symmetric')``.

  Returns:
    The padded array; zero-dimensional inputs are returned unchanged.

  Raises:
    ValueError: if ``pad_width`` has the wrong shape or negative entries.
  """
  array = asarray(array)
  nd = np.ndim(array)
  if nd == 0:
    # Nothing to pad for a scalar array.
    return array

  stat_funcs: dict[str, PadStatFunc] = {
      "maximum": reductions.amax,
      "minimum": reductions.amin,
      "mean": reductions.mean,
      "median": reductions.median
  }

  pad_width = _broadcast_to_pairs(pad_width, nd, "pad_width")
  pad_width_arr = np.array(pad_width)
  if pad_width_arr.shape != (nd, 2):
    raise ValueError(f"Expected pad_width to have shape {(nd, 2)}; got {pad_width_arr.shape}.")

  if np.any(pad_width_arr < 0):
    raise ValueError("index can't contain negative values")

  if mode == "constant":
    return _pad_constant(array, pad_width, asarray(constant_values))

  elif mode == "wrap":
    return _pad_wrap(array, pad_width)

  elif mode in ("symmetric", "reflect"):
    return _pad_symmetric_or_reflect(array, pad_width, str(mode), reflect_type)

  elif mode == "edge":
    return _pad_edge(array, pad_width)

  elif mode == "linear_ramp":
    end_values = _broadcast_to_pairs(end_values, nd, "end_values")
    return _pad_linear_ramp(array, pad_width, end_values)

  elif mode in stat_funcs:
    if stat_length is not None:
      stat_length = _broadcast_to_pairs(stat_length, nd, "stat_length")
    return _pad_stats(array, pad_width, stat_length, stat_funcs[str(mode)])

  elif mode == "empty":
    return _pad_empty(array, pad_width)

  else:
    # Bug fix: the two implicitly-concatenated fragments previously lacked a
    # separating space, yielding "...unsupported andnot implemented modes".
    assert False, ("Should not be reached since pad already handled unsupported "
                   "and not implemented modes")
@export
def pad(array: ArrayLike, pad_width: PadValueLike[int | Array | np.ndarray],
        mode: str | Callable[..., Any] = "constant", **kwargs) -> Array:
  """Add padding to an array.

  JAX implementation of :func:`numpy.pad`.

  Args:
    array: array to pad.
    pad_width: specify the pad width for each dimension of an array. Padding widths
      may be separately specified for *before* and *after* the array. Options are:

      - ``int`` or ``(int,)``: pad each array dimension with the same number of values
        both before and after.
      - ``(before, after)``: pad each array with ``before`` elements before, and ``after``
        elements after
      - ``((before_1, after_1), (before_2, after_2), ... (before_N, after_N))``: specify
        distinct ``before`` and ``after`` values for each array dimension.

    mode: a string or callable. Supported pad modes are:

      - ``'constant'`` (default): pad with a constant value, which defaults to zero.
      - ``'empty'``: pad with empty values (i.e. zero)
      - ``'edge'``: pad with the edge values of the array.
      - ``'wrap'``: pad by wrapping the array.
      - ``'linear_ramp'``: pad with a linear ramp to specified ``end_values``.
      - ``'maximum'``: pad with the maximum value.
      - ``'mean'``: pad with the mean value.
      - ``'median'``: pad with the median value.
      - ``'minimum'``: pad with the minimum value.
      - ``'reflect'``: pad by reflection.
      - ``'symmetric'``: pad by symmetric reflection.
      - ``<callable>``: a callable function. See Notes below.

    constant_values: referenced for ``mode = 'constant'``. Specify the constant value
      to pad with.
    stat_length: referenced for ``mode in ['maximum', 'mean', 'median', 'minimum']``.
      An integer or tuple specifying the number of edge values to use when calculating
      the statistic.
    end_values: referenced for ``mode = 'linear_ramp'``. Specify the end values to
      ramp the padding values to.
    reflect_type: referenced for ``mode in ['reflect', 'symmetric']``. Specify whether
      to use even or odd reflection.

  Returns:
    A padded copy of ``array``.

  Notes:
    When ``mode`` is callable, it should have the following signature::

      def pad_func(row: Array, pad_width: tuple[int, int],
                   iaxis: int, kwargs: dict) -> Array:
        ...

    Here ``row`` is a 1D slice of the padded array along axis ``iaxis``, with the pad
    values filled with zeros. ``pad_width`` is a tuple specifying the ``(before, after)``
    padding sizes, and ``kwargs`` are any additional keyword arguments passed to the
    :func:`jax.numpy.pad` function.

    Note that while in NumPy, the function should modify ``row`` in-place, in JAX the
    function should return the modified ``row``. In JAX, the custom padding function
    will be mapped across the padded axis using the :func:`jax.vmap` transformation.

  See also:
    - :func:`jax.numpy.resize`: resize an array
    - :func:`jax.numpy.tile`: create a larger array by tiling a smaller array.
    - :func:`jax.numpy.repeat`: create a larger array by repeating values of a smaller array.

  Examples:
    Pad a 1-dimensional array with zeros:

    >>> x = jnp.array([10, 20, 30, 40])
    >>> jnp.pad(x, 2)
    Array([ 0,  0, 10, 20, 30, 40,  0,  0], dtype=int32)
    >>> jnp.pad(x, (2, 4))
    Array([ 0,  0, 10, 20, 30, 40,  0,  0,  0,  0], dtype=int32)

    Pad a 1-dimensional array with specified values:

    >>> jnp.pad(x, 2, constant_values=99)
    Array([99, 99, 10, 20, 30, 40, 99, 99], dtype=int32)

    Pad a 1-dimensional array with the mean array value:

    >>> jnp.pad(x, 2, mode='mean')
    Array([25, 25, 10, 20, 30, 40, 25, 25], dtype=int32)

    Pad a 1-dimensional array with reflected values:

    >>> jnp.pad(x, 2, mode='reflect')
    Array([30, 20, 10, 20, 30, 40, 30, 20], dtype=int32)

    Pad a 2-dimensional array with different paddings in each dimension:

    >>> x = jnp.array([[1, 2, 3],
    ...                [4, 5, 6]])
    >>> jnp.pad(x, ((1, 2), (3, 0)))
    Array([[0, 0, 0, 0, 0, 0],
           [0, 0, 0, 1, 2, 3],
           [0, 0, 0, 4, 5, 6],
           [0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0]], dtype=int32)

    Pad a 1-dimensional array with a custom padding function:

    >>> def custom_pad(row, pad_width, iaxis, kwargs):
    ...   # row represents a 1D slice of the zero-padded array.
    ...   before, after = pad_width
    ...   before_value = kwargs.get('before_value', 0)
    ...   after_value = kwargs.get('after_value', 0)
    ...   row = row.at[:before].set(before_value)
    ...   return row.at[len(row) - after:].set(after_value)
    >>> x = jnp.array([2, 3, 4])
    >>> jnp.pad(x, 2, custom_pad, before_value=-10, after_value=10)
    Array([-10, -10,   2,   3,   4,  10,  10], dtype=int32)
  """
  array = util.ensure_arraylike("pad", array)
  pad_width = _broadcast_to_pairs(pad_width, np.ndim(array), "pad_width")
  if pad_width and not all(core.is_dim(p[0]) and core.is_dim(p[1])
                           for p in pad_width):
    raise TypeError('`pad_width` must be of integral type.')

  # A callable mode bypasses the string-mode dispatch entirely.
  if callable(mode):
    return _pad_func(asarray(array), pad_width, mode, **kwargs)

  # Each string mode accepts a distinct (possibly empty) set of kwargs.
  allowed_kwargs = {
      'empty': [], 'edge': [], 'wrap': [],
      'constant': ['constant_values'],
      'linear_ramp': ['end_values'],
      'maximum': ['stat_length'],
      'mean': ['stat_length'],
      'median': ['stat_length'],
      'minimum': ['stat_length'],
      'reflect': ['reflect_type'],
      'symmetric': ['reflect_type'],
  }
  try:
    allowed = allowed_kwargs[mode]
  except KeyError:
    msg = "Unimplemented padding mode '{}' for np.pad."
    raise NotImplementedError(msg.format(mode))
  extra = set(kwargs) - set(allowed)
  if extra:
    raise ValueError("unsupported keyword arguments for mode '{}': {}"
                     .format(mode, extra))

  # Fill in mode-specific defaults and dispatch to the jitted implementation.
  return _pad(array, pad_width, mode,
              kwargs.get('constant_values', 0),
              kwargs.get('stat_length', None),
              kwargs.get('end_values', 0),
              kwargs.get('reflect_type', "even"))
### Array-creation functions
@export
def stack(arrays: np.ndarray | Array | Sequence[ArrayLike],
          axis: int = 0, out: None = None, dtype: DTypeLike | None = None) -> Array:
  """Join arrays along a new axis.

  JAX implementation of :func:`numpy.stack`.

  Args:
    arrays: a sequence of arrays to stack; each must have the same shape. If a
      single array is given it will be treated equivalently to
      `arrays = unstack(arrays)`, but the implementation will avoid explicit
      unstacking.
    axis: specify the axis along which to stack.
    out: unused by JAX
    dtype: optional dtype of the resulting array. If not specified, the dtype
      will be determined via type promotion rules described in :ref:`type-promotion`.

  Returns:
    the stacked result.

  See also:
    - :func:`jax.numpy.unstack`: inverse of ``stack``.
    - :func:`jax.numpy.concatenate`: concatenation along existing axes.
    - :func:`jax.numpy.vstack`: stack vertically, i.e. along axis 0.
    - :func:`jax.numpy.hstack`: stack horizontally, i.e. along axis 1.
    - :func:`jax.numpy.dstack`: stack depth-wise, i.e. along axis 2.
    - :func:`jax.numpy.column_stack`: stack columns.

  Examples:
    >>> x = jnp.array([1, 2, 3])
    >>> y = jnp.array([4, 5, 6])
    >>> jnp.stack([x, y])
    Array([[1, 2, 3],
           [4, 5, 6]], dtype=int32)
    >>> jnp.stack([x, y], axis=1)
    Array([[1, 4],
           [2, 5],
           [3, 6]], dtype=int32)

    :func:`~jax.numpy.unstack` performs the inverse operation:

    >>> arr = jnp.stack([x, y], axis=1)
    >>> x, y = jnp.unstack(arr, axis=1)
    >>> x
    Array([1, 2, 3], dtype=int32)
    >>> y
    Array([4, 5, 6], dtype=int32)
  """
  if not len(arrays):
    raise ValueError("Need at least one array to stack.")
  if out is not None:
    raise NotImplementedError("The 'out' argument to jnp.stack is not supported.")
  if isinstance(arrays, (np.ndarray, Array)):
    # Fast path: a single array stacks along its leading axis without
    # materializing the unstacked pieces.
    axis = _canonicalize_axis(axis, arrays.ndim)
    return concatenate(expand_dims(arrays, axis + 1), axis=axis, dtype=dtype)
  arrays = util.ensure_arraylike_tuple("stack", arrays)
  shape0 = np.shape(arrays[0])
  axis = _canonicalize_axis(axis, len(shape0) + 1)
  expanded = []
  for arr in arrays:
    if np.shape(arr) != shape0:
      raise ValueError("All input arrays must have the same shape.")
    expanded.append(expand_dims(arr, axis))
  return concatenate(expanded, axis=axis, dtype=dtype)
@export
@api.jit(static_argnames="axis")
def unstack(x: ArrayLike, /, *, axis: int = 0) -> tuple[Array, ...]:
  """Unstack an array along an axis.

  JAX implementation of :func:`array_api.unstack`.

  Args:
    x: array to unstack. Must have ``x.ndim >= 1``.
    axis: integer axis along which to unstack. Must satisfy
      ``-x.ndim <= axis < x.ndim``.

  Returns:
    tuple of unstacked arrays.

  See also:
    - :func:`jax.numpy.stack`: inverse of ``unstack``
    - :func:`jax.numpy.split`: split array into batches along an axis.

  Examples:
    >>> arr = jnp.array([[1, 2, 3],
    ...                  [4, 5, 6]])
    >>> arrs = jnp.unstack(arr)
    >>> print(*arrs)
    [1 2 3] [4 5 6]

    :func:`~jax.numpy.stack` provides the inverse of this:

    >>> jnp.stack(arrs)
    Array([[1, 2, 3],
           [4, 5, 6]], dtype=int32)
  """
  x = util.ensure_arraylike("unstack", x)
  if x.ndim == 0:
    raise ValueError(
      "Unstack requires arrays with rank > 0, however a scalar array was "
      "passed."
    )
  # Split into size-1 slabs along `axis`, then squeeze that axis away.
  slabs = lax.split(x, (1,) * x.shape[axis], axis=axis)
  return tuple(lax.squeeze(slab, (axis,)) for slab in slabs)
@export
def tile(A: ArrayLike, reps: DimSize | Sequence[DimSize]) -> Array:
  """Construct an array by repeating ``A`` along specified dimensions.

  JAX implementation of :func:`numpy.tile`.

  If ``A`` is an array of shape ``(d1, d2, ..., dn)`` and ``reps`` is a sequence of integers,
  the resulting array will have a shape of ``(reps[0] * d1, reps[1] * d2, ..., reps[n] * dn)``,
  with ``A`` tiled along each dimension.

  Args:
    A: input array to be repeated. Can be of any shape or dimension.
    reps: specifies the number of repetitions along each axis.

  Returns:
    a new array where the input array has been repeated according to ``reps``.

  See also:
    - :func:`jax.numpy.repeat`: Construct an array from repeated elements.
    - :func:`jax.numpy.broadcast_to`: Broadcast an array to a specified shape.

  Examples:
    >>> arr = jnp.array([1, 2])
    >>> jnp.tile(arr, 2)
    Array([1, 2, 1, 2], dtype=int32)
    >>> arr = jnp.array([[1, 2],
    ...                  [3, 4,]])
    >>> jnp.tile(arr, (2, 1))
    Array([[1, 2],
           [3, 4],
           [1, 2],
           [3, 4]], dtype=int32)
  """
  A = util.ensure_arraylike("tile", A)
  # Normalize reps to a tuple; a bare scalar means "repeat along one axis".
  if np.iterable(reps):
    reps_tup: tuple[DimSize, ...] = tuple(reps)  # type: ignore[arg-type]
  else:
    reps_tup = (reps,)
  reps_tup = tuple(operator.index(rep) if core.is_constant_dim(rep) else rep
                   for rep in reps_tup)
  # Left-pad the shorter of (shape, reps) with 1s so both have equal rank.
  nd = max(len(reps_tup), np.ndim(A))
  A_shape = (1,) * (nd - np.ndim(A)) + np.shape(A)
  reps_tup = (1,) * (nd - len(reps_tup)) + reps_tup
  # Interleave unit axes, broadcast them to the rep counts, then collapse.
  interleaved = reshape(A, [dim for size in A_shape for dim in [1, size]])
  expanded = broadcast_to(interleaved,
                          [k for pair in zip(reps_tup, A_shape) for k in pair])
  return reshape(expanded, tuple(np.multiply(A_shape, reps_tup)))
def _concatenate_array(arr: ArrayLike, axis: int | None,
                       dtype: DTypeLike | None = None) -> Array:
  """Fast path for concatenation when the input is an ndarray rather than a list."""
  arr = asarray(arr, dtype=dtype)
  if arr.ndim == 0 or arr.shape[0] == 0:
    raise ValueError("Need at least one array to concatenate.")
  if axis is None:
    # axis=None flattens everything into a single 1D result.
    return lax.reshape(arr, (arr.size,))
  if arr.ndim == 1:
    raise ValueError("Zero-dimensional arrays cannot be concatenated.")
  axis = _canonicalize_axis(axis, arr.ndim - 1)
  # Fold the leading (stacking) dimension into `axis` with a single
  # transposing reshape instead of an explicit concatenate.
  new_shape = (arr.shape[1:axis + 1]
               + (arr.shape[0] * arr.shape[axis + 1],)
               + arr.shape[axis + 2:])
  perm = [*range(1, axis + 1), 0, *range(axis + 1, arr.ndim)]
  return lax.reshape(arr, new_shape, perm)
@export
def concatenate(arrays: np.ndarray | Array | Sequence[ArrayLike],
                axis: int | None = 0, dtype: DTypeLike | None = None) -> Array:
  """Join arrays along an existing axis.

  JAX implementation of :func:`numpy.concatenate`.

  Args:
    arrays: a sequence of arrays to concatenate; each must have the same shape
      except along the specified axis. If a single array is given it will be
      treated equivalently to `arrays = unstack(arrays)`, but the implementation
      will avoid explicit unstacking.
    axis: specify the axis along which to concatenate.
    dtype: optional dtype of the resulting array. If not specified, the dtype
      will be determined via type promotion rules described in :ref:`type-promotion`.

  Returns:
    the concatenated result.

  See also:
    - :func:`jax.lax.concatenate`: XLA concatenation API.
    - :func:`jax.numpy.concat`: Array API version of this function.
    - :func:`jax.numpy.stack`: concatenate arrays along a new axis.

  Examples:
    One-dimensional concatenation:

    >>> x = jnp.arange(3)
    >>> y = jnp.zeros(3, dtype=int)
    >>> jnp.concatenate([x, y])
    Array([0, 1, 2, 0, 0, 0], dtype=int32)

    Two-dimensional concatenation:

    >>> x = jnp.ones((2, 3))
    >>> y = jnp.zeros((2, 1))
    >>> jnp.concatenate([x, y], axis=1)
    Array([[1., 1., 1., 0.],
           [1., 1., 1., 0.]], dtype=float32)
  """
  if isinstance(arrays, (np.ndarray, Array)):
    return _concatenate_array(arrays, axis, dtype=dtype)
  arrays = util.ensure_arraylike_tuple("concatenate", arrays)
  if not len(arrays):
    raise ValueError("Need at least one array to concatenate.")
  if axis is None:
    # axis=None: flatten every input and join along axis 0.
    return concatenate([ravel(a) for a in arrays], axis=0, dtype=dtype)
  if np.ndim(arrays[0]) == 0:
    raise ValueError("Zero-dimensional arrays cannot be concatenated.")
  axis = _canonicalize_axis(axis, np.ndim(arrays[0]))
  if dtype is None:
    chunks = util.promote_dtypes(*arrays)
  else:
    chunks = [asarray(arr, dtype=dtype) for arr in arrays]
  # lax.concatenate can be slow to compile for wide concatenations, so form a
  # tree of concatenations as a workaround especially for op-by-op mode.
  # (https://github.com/jax-ml/jax/issues/653).
  fanout = 16
  while len(chunks) > 1:
    chunks = [lax.concatenate(chunks[start:start + fanout], axis)
              for start in range(0, len(chunks), fanout)]
  return chunks[0]
@export
def concat(arrays: Sequence[ArrayLike], /, *, axis: int | None = 0) -> Array:
  """Join arrays along an existing axis.

  JAX implementation of :func:`array_api.concat`.

  Args:
    arrays: a sequence of arrays to concatenate; each must have the same shape
      except along the specified axis. If a single array is given it will be
      treated equivalently to `arrays = unstack(arrays)`, but the implementation
      will avoid explicit unstacking.
    axis: specify the axis along which to concatenate.

  Returns:
    the concatenated result.

  See also:
    - :func:`jax.lax.concatenate`: XLA concatenation API.
    - :func:`jax.numpy.concatenate`: NumPy version of this function.
    - :func:`jax.numpy.stack`: concatenate arrays along a new axis.

  Examples:
    One-dimensional concatenation:

    >>> x = jnp.arange(3)
    >>> y = jnp.zeros(3, dtype=int)
    >>> jnp.concat([x, y])
    Array([0, 1, 2, 0, 0, 0], dtype=int32)

    Two-dimensional concatenation:

    >>> x = jnp.ones((2, 3))
    >>> y = jnp.zeros((2, 1))
    >>> jnp.concat([x, y], axis=1)
    Array([[1., 1., 1., 0.],
           [1., 1., 1., 0.]], dtype=float32)
  """
  # Array API spelling of concatenate: validate, then delegate.
  util.check_arraylike("concat", *arrays)
  return concatenate(arrays, axis=axis)
@export
def vstack(tup: np.ndarray | Array | Sequence[ArrayLike],
           dtype: DTypeLike | None = None) -> Array:
  """Vertically stack arrays.

  JAX implementation of :func:`numpy.vstack`.

  For arrays of two or more dimensions, this is equivalent to
  :func:`jax.numpy.concatenate` with ``axis=0``.

  Args:
    tup: a sequence of arrays to stack; each must have the same shape along all
      but the first axis. If a single array is given it will be treated
      equivalently to `tup = unstack(tup)`, but the implementation will avoid
      explicit unstacking.
    dtype: optional dtype of the resulting array. If not specified, the dtype
      will be determined via type promotion rules described in :ref:`type-promotion`.

  Returns:
    the stacked result.

  See also:
    - :func:`jax.numpy.stack`: stack along arbitrary axes
    - :func:`jax.numpy.concatenate`: concatenation along existing axes.
    - :func:`jax.numpy.hstack`: stack horizontally, i.e. along axis 1.
    - :func:`jax.numpy.dstack`: stack depth-wise, i.e. along axis 2.

  Examples:
    Scalar values:

    >>> jnp.vstack([1, 2, 3])
    Array([[1],
           [2],
           [3]], dtype=int32, weak_type=True)

    1D arrays:

    >>> x = jnp.arange(4)
    >>> y = jnp.ones(4)
    >>> jnp.vstack([x, y])
    Array([[0., 1., 2., 3.],
           [1., 1., 1., 1.]], dtype=float32)

    2D arrays:

    >>> x = x.reshape(1, 4)
    >>> y = y.reshape(1, 4)
    >>> jnp.vstack([x, y])
    Array([[0., 1., 2., 3.],
           [1., 1., 1., 1.]], dtype=float32)
  """
  promoted: Array | list[Array]
  if isinstance(tup, (np.ndarray, Array)):
    # A single array: promote each leading-axis slice to 2D in a batch.
    promoted = api.vmap(atleast_2d)(tup)
  else:
    # TODO(jakevdp): Non-array input deprecated 2023-09-22; change to error.
    util.check_arraylike("vstack", *tup, emit_warning=True)
    promoted = [atleast_2d(m) for m in tup]
  return concatenate(promoted, axis=0, dtype=dtype)
@export
def hstack(tup: np.ndarray | Array | Sequence[ArrayLike],
           dtype: DTypeLike | None = None) -> Array:
  """Horizontally stack arrays.

  JAX implementation of :func:`numpy.hstack`.

  For arrays of one or more dimensions, this is equivalent to
  :func:`jax.numpy.concatenate` with ``axis=1``.

  Args:
    tup: a sequence of arrays to stack; each must have the same shape along all
      but the second axis. Input arrays will be promoted to at least rank 1.
      If a single array is given it will be treated equivalently to
      `tup = unstack(tup)`, but the implementation will avoid explicit unstacking.
    dtype: optional dtype of the resulting array. If not specified, the dtype
      will be determined via type promotion rules described in :ref:`type-promotion`.

  Returns:
    the stacked result.

  See also:
    - :func:`jax.numpy.stack`: stack along arbitrary axes
    - :func:`jax.numpy.concatenate`: concatenation along existing axes.
    - :func:`jax.numpy.vstack`: stack vertically, i.e. along axis 0.
    - :func:`jax.numpy.dstack`: stack depth-wise, i.e. along axis 2.

  Examples:
    Scalar values:

    >>> jnp.hstack([1, 2, 3])
    Array([1, 2, 3], dtype=int32, weak_type=True)

    1D arrays:

    >>> x = jnp.arange(3)
    >>> y = jnp.ones(3)
    >>> jnp.hstack([x, y])
    Array([0., 1., 2., 1., 1., 1.], dtype=float32)

    2D arrays:

    >>> x = x.reshape(3, 1)
    >>> y = y.reshape(3, 1)
    >>> jnp.hstack([x, y])
    Array([[0., 1.],
           [1., 1.],
           [2., 1.]], dtype=float32)
  """
  promoted: Array | list[Array]
  if isinstance(tup, (np.ndarray, Array)):
    promoted = api.vmap(atleast_1d)(tup)
    # Rank of each stacked element is one below the batched array's rank.
    first_ndim = promoted.ndim - 1
  else:
    # TODO(jakevdp): Non-array input deprecated 2023-09-22; change to error.
    util.check_arraylike("hstack", *tup, emit_warning=True)
    promoted = [atleast_1d(m) for m in tup]
    first_ndim = promoted[0].ndim
  # 1D inputs join along axis 0; higher-rank inputs join along axis 1.
  return concatenate(promoted, axis=0 if first_ndim == 1 else 1, dtype=dtype)
@export
def dstack(tup: np.ndarray | Array | Sequence[ArrayLike],
           dtype: DTypeLike | None = None) -> Array:
  """Stack arrays depth-wise.

  JAX implementation of :func:`numpy.dstack`.

  For arrays of three or more dimensions, this is equivalent to
  :func:`jax.numpy.concatenate` with ``axis=2``.

  Args:
    tup: a sequence of arrays to stack; each must have the same shape along all
      but the third axis. Input arrays will be promoted to at least rank 3. If a
      single array is given it will be treated equivalently to `tup = unstack(tup)`,
      but the implementation will avoid explicit unstacking.
    dtype: optional dtype of the resulting array. If not specified, the dtype
      will be determined via type promotion rules described in :ref:`type-promotion`.

  Returns:
    the stacked result.

  See also:
    - :func:`jax.numpy.stack`: stack along arbitrary axes
    - :func:`jax.numpy.concatenate`: concatenation along existing axes.
    - :func:`jax.numpy.vstack`: stack vertically, i.e. along axis 0.
    - :func:`jax.numpy.hstack`: stack horizontally, i.e. along axis 1.

  Examples:
    Scalar values:

    >>> jnp.dstack([1, 2, 3])
    Array([[[1, 2, 3]]], dtype=int32, weak_type=True)

    1D arrays:

    >>> x = jnp.arange(3)
    >>> y = jnp.ones(3)
    >>> jnp.dstack([x, y])
    Array([[[0., 1.],
            [1., 1.],
            [2., 1.]]], dtype=float32)

    2D arrays:

    >>> x = x.reshape(1, 3)
    >>> y = y.reshape(1, 3)
    >>> jnp.dstack([x, y])
    Array([[[0., 1.],
            [1., 1.],
            [2., 1.]]], dtype=float32)
  """
  promoted: Array | list[Array]
  if isinstance(tup, (np.ndarray, Array)):
    promoted = api.vmap(atleast_3d)(tup)
  else:
    # TODO(jakevdp): Non-array input deprecated 2023-09-22; change to error.
    util.check_arraylike("dstack", *tup, emit_warning=True)
    tup = util.ensure_arraylike_tuple("dstack", tup)
    promoted = [atleast_3d(m) for m in tup]
  return concatenate(promoted, axis=2, dtype=dtype)
@export
def column_stack(tup: np.ndarray | Array | Sequence[ArrayLike]) -> Array:
  """Stack arrays column-wise.

  JAX implementation of :func:`numpy.column_stack`.

  For arrays of two or more dimensions, this is equivalent to
  :func:`jax.numpy.concatenate` with ``axis=1``.

  Args:
    tup: a sequence of arrays to stack; each must have the same leading dimension.
      Input arrays will be promoted to at least rank 2. If a single array is given
      it will be treated equivalently to `tup = unstack(tup)`, but the implementation
      will avoid explicit unstacking.
    dtype: optional dtype of the resulting array. If not specified, the dtype
      will be determined via type promotion rules described in :ref:`type-promotion`.

  Returns:
    the stacked result.

  See also:
    - :func:`jax.numpy.stack`: stack along arbitrary axes
    - :func:`jax.numpy.concatenate`: concatenation along existing axes.
    - :func:`jax.numpy.vstack`: stack vertically, i.e. along axis 0.
    - :func:`jax.numpy.hstack`: stack horizontally, i.e. along axis 1.
    - :func:`jax.numpy.dstack`: stack depth-wise, i.e. along axis 2.

  Examples:
    Scalar values:

    >>> jnp.column_stack([1, 2, 3])
    Array([[1, 2, 3]], dtype=int32, weak_type=True)

    1D arrays:

    >>> x = jnp.arange(3)
    >>> y = jnp.ones(3)
    >>> jnp.column_stack([x, y])
    Array([[0., 1.],
           [1., 1.],
           [2., 1.]], dtype=float32)

    2D arrays:

    >>> x = x.reshape(3, 1)
    >>> y = y.reshape(3, 1)
    >>> jnp.column_stack([x, y])
    Array([[0., 1.],
           [1., 1.],
           [2., 1.]], dtype=float32)
  """
  columns: Array | list[Array] | np.ndarray
  if isinstance(tup, (np.ndarray, Array)):
    # Sub-2D slices become columns via a transposed atleast_2d; rank>=3
    # inputs pass through unchanged.
    columns = api.vmap(lambda x: atleast_2d(x).T)(tup) if tup.ndim < 3 else tup
  else:
    # TODO(jakevdp): Non-array input deprecated 2023-09-22; change to error.
    util.check_arraylike("column_stack", *tup, emit_warning=True)
    columns = [atleast_2d(arr).T if arr.ndim < 2 else arr
               for arr in map(asarray, tup)]
  return concatenate(columns, axis=1)
@export
def choose(a: ArrayLike, choices: Array | np.ndarray | Sequence[ArrayLike],
           out: None = None, mode: str = 'raise') -> Array:
  """Construct an array by stacking slices of choice arrays.

  JAX implementation of :func:`numpy.choose`.

  The semantics of this function can be confusing, but in the simplest case where
  ``a`` is a one-dimensional array, ``choices`` is a two-dimensional array, and
  all entries of ``a`` are in-bounds (i.e. ``0 <= a_i < len(choices)``), then the
  function is equivalent to the following::

    def choose(a, choices):
      return jnp.array([choices[a_i, i] for i, a_i in enumerate(a)])

  In the more general case, ``a`` may have any number of dimensions and ``choices``
  may be an arbitrary sequence of broadcast-compatible arrays. In this case, again
  for in-bound indices, the logic is equivalent to::

    def choose(a, choices):
      a, *choices = jnp.broadcast_arrays(a, *choices)
      choices = jnp.array(choices)
      return jnp.array([choices[a[idx], *idx] for idx in np.ndindex(a.shape)])

  The only additional complexity comes from the ``mode`` argument, which controls
  the behavior for out-of-bound indices in ``a`` as described below.

  Args:
    a: an N-dimensional array of integer indices.
    choices: an array or sequence of arrays. All arrays in the sequence must be
      mutually broadcast compatible with ``a``.
    out: unused by JAX
    mode: specify the out-of-bounds indexing mode; one of ``'raise'`` (default),
      ``'wrap'``, or ``'clip'``. Note that the default mode of ``'raise'`` is
      not compatible with JAX transformations.

  Returns:
    an array containing stacked slices from ``choices`` at the indices
    specified by ``a``. The shape of the result is
    ``broadcast_shapes(a.shape, *(c.shape for c in choices))``.

  See also:
    - :func:`jax.lax.switch`: choose between N functions based on an index.

  Examples:
    Here is the simplest case of a 1D index array with a 2D choice array,
    in which case this chooses the indexed value from each column:

    >>> choices = jnp.array([[ 1,  2,  3,  4],
    ...                      [ 5,  6,  7,  8],
    ...                      [ 9, 10, 11, 12]])
    >>> a = jnp.array([2, 0, 1, 0])
    >>> jnp.choose(a, choices)
    Array([9, 2, 7, 4], dtype=int32)

    The ``mode`` argument specifies what to do with out-of-bound indices;
    options are to either ``wrap`` or ``clip``:

    >>> a2 = jnp.array([2, 0, 1, 4])  # last index out-of-bound
    >>> jnp.choose(a2, choices, mode='clip')
    Array([ 9,  2,  7, 12], dtype=int32)
    >>> jnp.choose(a2, choices, mode='wrap')
    Array([9, 2, 7, 8], dtype=int32)

    In the more general case, ``choices`` may be a sequence of array-like
    objects with any broadcast-compatible shapes.

    >>> choice_1 = jnp.array([1, 2, 3, 4])
    >>> choice_2 = 99
    >>> choice_3 = jnp.array([[10],
    ...                       [20],
    ...                       [30]])
    >>> a = jnp.array([[0, 1, 2, 0],
    ...                [1, 2, 0, 1],
    ...                [2, 0, 1, 2]])
    >>> jnp.choose(a, [choice_1, choice_2, choice_3], mode='wrap')
    Array([[ 1, 99, 10,  4],
           [99, 20,  3, 99],
           [30,  2, 99, 30]], dtype=int32)
  """
  if out is not None:
    raise NotImplementedError("The 'out' argument to jnp.choose is not supported.")
  a, *choices = util.ensure_arraylike_tuple('choose', (a, *choices))
  if not issubdtype(a.dtype, np.integer):
    raise ValueError("`a` array must be integer typed")
  n_choices = len(choices)

  # Normalize indices according to the out-of-bounds mode.
  if mode == 'raise':
    # 'raise' needs concrete values, so it cannot be traced/jitted.
    idx: Array = core.concrete_or_error(asarray, a,
      "The error occurred because jnp.choose was jit-compiled"
      " with mode='raise'. Use mode='wrap' or mode='clip' instead.")
    if reductions.any((idx < 0) | (idx >= n_choices)):
      raise ValueError("invalid entry in choice array")
  elif mode == 'wrap':
    idx = asarray(a) % n_choices
  elif mode == 'clip':
    idx = clip(a, 0, n_choices - 1)
  else:
    raise ValueError(f"mode={mode!r} not understood. Must be 'raise', 'wrap', or 'clip'")

  # Gather: for each position, pick from the idx-th choice array.
  idx, *choices = broadcast_arrays(idx, *choices)
  return array(choices)[(idx,) + indices(idx.shape, sparse=True)]
def _atleast_nd(x: ArrayLike, n: int) -> Array:
  """Promote ``x`` to at least ``n`` dimensions by prepending unit axes."""
  ndim = np.ndim(x)
  if ndim >= n:
    return asarray(x)
  return lax.broadcast(x, (1,) * (n - ndim))
def _block(xs: ArrayLike | list[ArrayLike]) -> tuple[Array, int]:
  """Recursively assemble a nested list of blocks.

  Returns the assembled array together with the nesting depth (1 for a leaf),
  so the caller can concatenate siblings along the correct trailing axis.
  """
  if isinstance(xs, tuple):
    raise ValueError("jax.numpy.block does not allow tuples, got {}"
                     .format(xs))
  if not isinstance(xs, list):
    # Leaf: any array-like value.
    return asarray(xs), 1
  if len(xs) == 0:
    raise ValueError("jax.numpy.block does not allow empty list arguments")
  xs_tup, depths = unzip2([_block(x) for x in xs])
  if any(d != depths[0] for d in depths[1:]):
    raise ValueError("Mismatched list depths in jax.numpy.block")
  # Promote all siblings to a common rank before joining them.
  rank = max(depths[0], max(np.ndim(x) for x in xs_tup))
  leaves = tuple(_atleast_nd(x, rank) for x in xs_tup)
  return concatenate(leaves, axis=-depths[0]), depths[0] + 1
@export
@api.jit
def block(arrays: ArrayLike | list[ArrayLike]) -> Array:
  """Create an array from a list of blocks.

  JAX implementation of :func:`numpy.block`.

  Args:
    arrays: an array, or nested list of arrays which will be concatenated
      together to form the final array.

  Returns:
    a single array constructed from the inputs.

  See also:
    - :func:`concatenate`, :func:`concat`: concatenate arrays along an existing axis.
    - :func:`stack`, :func:`vstack`, :func:`hstack`, :func:`dstack` concatenate
      arrays along a new axis.

  Examples:
    consider these blocks:

    >>> zeros = jnp.zeros((2, 2))
    >>> ones = jnp.ones((2, 2))
    >>> twos = jnp.full((2, 2), 2)
    >>> threes = jnp.full((2, 2), 3)

    Passing a single array to :func:`block` returns the array:

    >>> jnp.block(zeros)
    Array([[0., 0.],
           [0., 0.]], dtype=float32)

    Passing a simple list of arrays concatenates them along the last axis:

    >>> jnp.block([zeros, ones])
    Array([[0., 0., 1., 1.],
           [0., 0., 1., 1.]], dtype=float32)

    Passing a doubly-nested list of arrays concatenates the inner list along
    the last axis, and the outer list along the second-to-last axis:

    >>> jnp.block([[zeros, ones],
    ...            [twos, threes]])
    Array([[0., 0., 1., 1.],
           [0., 0., 1., 1.],
           [2., 2., 3., 3.],
           [2., 2., 3., 3.]], dtype=float32)

    Note that blocks need not align in all dimensions, though the size along the axis
    of concatenation must match. For example, this is valid because after the inner,
    horizontal concatenation, the resulting blocks have a valid shape for the outer,
    vertical concatenation.

    >>> a = jnp.zeros((2, 1))
    >>> b = jnp.ones((2, 3))
    >>> c = jnp.full((1, 2), 2)
    >>> d = jnp.full((1, 2), 3)
    >>> jnp.block([[a, b], [c, d]])
    Array([[0., 1., 1., 1.],
           [0., 1., 1., 1.],
           [2., 2., 3., 3.]], dtype=float32)

    Note also that this logic generalizes to blocks in 3 or more dimensions.
    Here's a 3-dimensional block-wise array:

    >>> x = jnp.arange(6).reshape((1, 2, 3))
    >>> blocks = [[[x for i in range(3)] for j in range(4)] for k in range(5)]
    >>> jnp.block(blocks).shape
    (5, 8, 9)
  """
  # _block returns (assembled_array, depth); only the array is needed here.
  return _block(arrays)[0]
# Overloads for atleast_1d: zero arguments yield an empty list, a single
# argument yields a single Array, and two or more arguments yield a list.
@overload
def atleast_1d() -> list[Array]:
  ...
@overload
def atleast_1d(x: ArrayLike, /) -> Array:
  ...
@overload
def atleast_1d(x: ArrayLike, y: ArrayLike, /, *arys: ArrayLike) -> list[Array]:
  ...
@export
@api.jit
def atleast_1d(*arys: ArrayLike) -> Array | list[Array]:
  """Convert inputs to arrays with at least 1 dimension.

  JAX implementation of :func:`numpy.atleast_1d`.

  Args:
    zero or more arraylike arguments.

  Returns:
    an array or list of arrays corresponding to the input values. Arrays
    of shape ``()`` are converted to shape ``(1,)``, and arrays with other
    shapes are returned unchanged.

  See also:
    - :func:`jax.numpy.asarray`
    - :func:`jax.numpy.atleast_2d`
    - :func:`jax.numpy.atleast_3d`

  Examples:
    Scalar arguments are converted to 1D, length-1 arrays:

    >>> x = jnp.float32(1.0)
    >>> jnp.atleast_1d(x)
    Array([1.], dtype=float32)

    Higher dimensional inputs are returned unchanged:

    >>> y = jnp.arange(4)
    >>> jnp.atleast_1d(y)
    Array([0, 1, 2, 3], dtype=int32)

    Multiple arguments can be passed to the function at once, in which
    case a list of results is returned:

    >>> jnp.atleast_1d(x, y)
    [Array([1.], dtype=float32), Array([0, 1, 2, 3], dtype=int32)]
  """
  util.check_arraylike("atleast_1d", *arys, emit_warning=True)
  results = [array(a, copy=False, ndmin=1) for a in arys]
  # A single argument returns a bare array rather than a one-element list.
  return results[0] if len(results) == 1 else results
# Overloads for atleast_2d: zero arguments yield an empty list, a single
# argument yields a single Array, and two or more arguments yield a list.
@overload
def atleast_2d() -> list[Array]:
  ...
@overload
def atleast_2d(x: ArrayLike, /) -> Array:
  ...
@overload
def atleast_2d(x: ArrayLike, y: ArrayLike, /, *arys: ArrayLike) -> list[Array]:
  ...
@export
@api.jit
def atleast_2d(*arys: ArrayLike) -> Array | list[Array]:
  """Convert inputs to arrays with at least 2 dimensions.

  JAX implementation of :func:`numpy.atleast_2d`.

  Args:
    zero or more arraylike arguments.

  Returns:
    an array or list of arrays corresponding to the input values. Arrays
    of shape ``()`` are converted to shape ``(1, 1)``, 1D arrays of shape
    ``(N,)`` are converted to shape ``(1, N)``, and arrays of all other
    shapes are returned unchanged.

  See also:
    - :func:`jax.numpy.asarray`
    - :func:`jax.numpy.atleast_1d`
    - :func:`jax.numpy.atleast_3d`

  Examples:
    Scalar arguments are converted to 2D, size-1 arrays:

    >>> x = jnp.float32(1.0)
    >>> jnp.atleast_2d(x)
    Array([[1.]], dtype=float32)

    One-dimensional arguments have a unit dimension prepended to the shape:

    >>> y = jnp.arange(4)
    >>> jnp.atleast_2d(y)
    Array([[0, 1, 2, 3]], dtype=int32)

    Higher dimensional inputs are returned unchanged:

    >>> z = jnp.ones((2, 3))
    >>> jnp.atleast_2d(z)
    Array([[1., 1., 1.],
           [1., 1., 1.]], dtype=float32)

    Multiple arguments can be passed to the function at once, in which
    case a list of results is returned:

    >>> jnp.atleast_2d(x, y)
    [Array([[1.]], dtype=float32), Array([[0, 1, 2, 3]], dtype=int32)]
  """
  # TODO(jakevdp): Non-array input deprecated 2023-09-22; change to error.
  util.check_arraylike("atleast_2d", *arys, emit_warning=True)
  results = [array(a, copy=False, ndmin=2) for a in arys]
  # A single argument returns a bare array rather than a one-element list.
  return results[0] if len(results) == 1 else results
# Overloads give precise static types for atleast_3d: with no arguments it
# returns a (empty) list; with exactly one argument it returns a single Array;
# with two or more arguments it returns a list of Arrays.
@overload
def atleast_3d() -> list[Array]:
  ...
@overload
def atleast_3d(x: ArrayLike, /) -> Array:
  ...
@overload
def atleast_3d(x: ArrayLike, y: ArrayLike, /, *arys: ArrayLike) -> list[Array]:
  ...
@export
@api.jit
def atleast_3d(*arys: ArrayLike) -> Array | list[Array]:
  """Ensure that every input has at least three dimensions.

  JAX implementation of :func:`numpy.atleast_3d`.

  Args:
    zero or more arraylike arguments.

  Returns:
    an array, or list of arrays, corresponding to the inputs. Inputs of
    shape ``()`` become shape ``(1, 1, 1)``, 1D inputs of shape ``(N,)``
    become shape ``(1, N, 1)``, 2D inputs of shape ``(M, N)`` become shape
    ``(M, N, 1)``, and inputs with three or more dimensions are returned
    with their shape unchanged.

  See also:
    - :func:`jax.numpy.asarray`
    - :func:`jax.numpy.atleast_1d`
    - :func:`jax.numpy.atleast_2d`

  Examples:
    Scalar arguments are converted to 3D, size-1 arrays:

    >>> x = jnp.float32(1.0)
    >>> jnp.atleast_3d(x)
    Array([[[1.]]], dtype=float32)

    1D arrays have a unit dimension prepended and appended:

    >>> y = jnp.arange(4)
    >>> jnp.atleast_3d(y).shape
    (1, 4, 1)

    2D arrays have a unit dimension appended:

    >>> z = jnp.ones((2, 3))
    >>> jnp.atleast_3d(z).shape
    (2, 3, 1)

    Multiple arguments can be passed to the function at once, in which
    case a list of results is returned:

    >>> x3, y3 = jnp.atleast_3d(x, y)
    >>> print(x3)
    [[[1.]]]
    >>> print(y3)
    [[[0]
      [1]
      [2]
      [3]]]
  """
  # TODO(jakevdp): Non-array input deprecated 2023-09-22; change to error.
  util.check_arraylike("atleast_3d", *arys, emit_warning=True)
  if len(arys) != 1:
    # Multiple (or zero) arguments: map over each one independently.
    return [atleast_3d(a) for a in arys]
  arr = asarray(arys[0])
  # For each input rank, the unit dimensions that must be inserted to reach
  # rank 3; ranks >= 3 are left untouched.
  inserted_dims = {0: (0, 1, 2), 1: (0, 2), 2: (2,)}
  if arr.ndim in inserted_dims:
    arr = lax.expand_dims(arr, dimensions=inserted_dims[arr.ndim])
  return arr
@export
def astype(x: ArrayLike, dtype: DTypeLike | None,
           /, *, copy: bool = False,
           device: xc.Device | Sharding | None = None) -> Array:
  """Convert an array to a specified dtype.

  JAX implementation of :func:`numpy.astype`.

  This is implemented via :func:`jax.lax.convert_element_type`, which may
  have slightly different behavior than :func:`numpy.astype` in some cases.
  In particular, the details of float-to-int and int-to-float casts are
  implementation dependent.

  Args:
    x: input array to convert
    dtype: output dtype; if None, defaults to the default float dtype.
    copy: if True, then always return a copy. If False (default) then only
      return a copy if necessary.
    device: optionally specify the device to which the output will be committed.

  Returns:
    An array with the same shape as ``x``, containing values of the specified
    dtype.

  See Also:
    - :func:`jax.lax.convert_element_type`: lower-level function for XLA-style
      dtype conversions.

  Examples:
    >>> x = jnp.array([0, 1, 2, 3])
    >>> x
    Array([0, 1, 2, 3], dtype=int32)
    >>> x.astype('float32')
    Array([0.0, 1.0, 2.0, 3.0], dtype=float32)

    >>> y = jnp.array([0.0, 0.5, 1.0])
    >>> y.astype(int)   # truncates fractional values
    Array([0, 0, 1], dtype=int32)
  """
  x_arr = util.ensure_arraylike("astype", x)
  if dtype is None:
    dtype = dtypes.default_float_dtype()
  else:
    dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "astype")
  if issubdtype(x_arr.dtype, np.complexfloating):
    if dtypes.isdtype(dtype, ("integral", "real floating")):
      # Complex -> real cast: emit a deprecation warning now; this will become
      # a ValueError in the future.
      deprecations.warn(
        "jax-numpy-astype-complex-to-real",
        "Casting from complex to real dtypes will soon raise a ValueError. "
        "Please first use jnp.real or jnp.imag to take the real/imaginary "
        "component of your input.",
        stacklevel=2)
    elif np.dtype(dtype) == bool:
      # convert_element_type(complex, bool) has the wrong semantics.
      x_arr = (x_arr != lax._const(x_arr, 0))
  # We offer a more specific warning than the usual ComplexWarning so we prefer
  # to issue our warning.
  result = lax._convert_element_type(
      x_arr, dtype, sharding=util.canonicalize_device_to_sharding(device),
      warn_on_complex_to_real_cast=False)
  # copy=True forces a fresh buffer even when the dtype is unchanged.
  return lax._array_copy(result) if copy else result
@export
def copy(a: ArrayLike, order: str | None = None) -> Array:
  """Return a copy of the input array.

  JAX implementation of :func:`numpy.copy`.

  Args:
    a: arraylike object to copy
    order: not implemented in JAX

  Returns:
    a copy of the input array ``a``.

  See Also:
    - :func:`jax.numpy.array`: create an array with or without a copy.
    - :meth:`jax.Array.copy`: same function accessed as an array method.

  Examples:
    Since JAX arrays are immutable, in most cases explicit array copies
    are not necessary. One exception is when using a function with donated
    arguments (see the ``donate_argnums`` argument to :func:`jax.jit`).

    >>> f = jax.jit(lambda x: 2 * x, donate_argnums=0)
    >>> x = jnp.arange(4)
    >>> y = f(x)
    >>> print(y)
    [0 2 4 6]

    Because we marked ``x`` as being donated, the original array is no longer
    available:

    >>> print(x)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    RuntimeError: Array has been deleted with shape=int32[4].

    In situations like this, an explicit copy will let you keep access to the
    original buffer:

    >>> x = jnp.arange(4)
    >>> y = f(x.copy())
    >>> print(y)
    [0 2 4 6]
    >>> print(x)
    [0 1 2 3]
  """
  util.check_arraylike("copy", a)
  # Delegate to array() with copy forced on.
  return array(a, order=order, copy=True)
@export
def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = False) -> Array:
  """Check whether two arrays are element-wise equal.

  JAX implementation of :func:`numpy.array_equal`.

  Args:
    a1: first input array to compare.
    a2: second input array to compare.
    equal_nan: Boolean. If ``True``, NaNs in ``a1`` will be considered
      equal to NaNs in ``a2``. Default is ``False``.

  Returns:
    Boolean scalar array indicating whether the input arrays are element-wise equal.

  See Also:
    - :func:`jax.numpy.allclose`
    - :func:`jax.numpy.array_equiv`

  Examples:
    >>> jnp.array_equal(jnp.array([1, 2, 3]), jnp.array([1, 2, 3]))
    Array(True, dtype=bool)
    >>> jnp.array_equal(jnp.array([1, 2, 3]), jnp.array([1, 2]))
    Array(False, dtype=bool)
    >>> jnp.array_equal(jnp.array([1, 2, 3]), jnp.array([1, 2, 4]))
    Array(False, dtype=bool)
    >>> jnp.array_equal(jnp.array([1, 2, float('nan')]),
    ...                 jnp.array([1, 2, float('nan')]))
    Array(False, dtype=bool)
    >>> jnp.array_equal(jnp.array([1, 2, float('nan')]),
    ...                 jnp.array([1, 2, float('nan')]), equal_nan=True)
    Array(True, dtype=bool)
  """
  arr1, arr2 = asarray(a1), asarray(a2)
  # Arrays of different shapes are never equal; no element-wise work needed.
  if np.shape(arr1) != np.shape(arr2):
    return array(False, dtype=bool)
  elementwise_eq = asarray(arr1 == arr2)
  if equal_nan:
    # Positions where both inputs are NaN also count as equal.
    both_nan = ufuncs.logical_and(ufuncs.isnan(arr1), ufuncs.isnan(arr2))
    elementwise_eq = ufuncs.logical_or(elementwise_eq, both_nan)
  return reductions.all(elementwise_eq)
@export
def array_equiv(a1: ArrayLike, a2: ArrayLike) -> Array:
  """Check whether two arrays are element-wise equal after broadcasting.

  JAX implementation of :func:`numpy.array_equiv`.

  This function will return ``False`` if the input arrays cannot be broadcasted
  to the same shape.

  Args:
    a1: first input array to compare.
    a2: second input array to compare.

  Returns:
    Boolean scalar array indicating whether the input arrays are
    element-wise equal after broadcasting.

  See Also:
    - :func:`jax.numpy.allclose`
    - :func:`jax.numpy.array_equal`

  Examples:
    >>> jnp.array_equiv(jnp.array([1, 2, 3]), jnp.array([1, 2, 3]))
    Array(True, dtype=bool)
    >>> jnp.array_equiv(jnp.array([1, 2, 3]), jnp.array([1, 2, 4]))
    Array(False, dtype=bool)
    >>> jnp.array_equiv(jnp.array([[1, 2, 3], [1, 2, 3]]),
    ...                 jnp.array([1, 2, 3]))
    Array(True, dtype=bool)
  """
  arr1, arr2 = asarray(a1), asarray(a2)
  try:
    comparison = ufuncs.equal(arr1, arr2)
  except ValueError:
    # shapes are not broadcastable
    return array(False)
  return reductions.all(comparison)
# General np.from* style functions mostly delegate to numpy.
@export
def frombuffer(buffer: bytes | Any, dtype: DTypeLike = float,
               count: int = -1, offset: int = 0) -> Array:
  r"""Interpret a buffer as a 1-D JAX array.

  JAX implementation of :func:`numpy.frombuffer`.

  Args:
    buffer: an object containing the data. It must be either a bytes object with
      a length that is an integer multiple of the dtype element size, or
      it must be an object exporting the `Python buffer interface`_.
    dtype: optional. Desired data type for the array. Default is ``float64``.
      This specifies the dtype used to parse the buffer, but note that after parsing,
      64-bit values will be cast to 32-bit JAX arrays if the ``jax_enable_x64``
      flag is set to ``False``.
    count: optional integer specifying the number of items to read from the buffer.
      If -1 (default), all items from the buffer are read.
    offset: optional integer specifying the number of bytes to skip at the beginning
      of the buffer. Default is 0.

  Returns:
    A 1-D JAX array representing the interpreted data from the buffer.

  See also:
    - :func:`jax.numpy.fromstring`: convert a string of text into 1-D JAX array.

  Examples:
    Using a bytes buffer:

    >>> buf = b"\x00\x01\x02\x03\x04"
    >>> jnp.frombuffer(buf, dtype=jnp.uint8)
    Array([0, 1, 2, 3, 4], dtype=uint8)
    >>> jnp.frombuffer(buf, dtype=jnp.uint8, offset=1)
    Array([1, 2, 3, 4], dtype=uint8)

    Constructing a JAX array via the Python buffer interface, using Python's
    built-in :mod:`array` module.

    >>> from array import array
    >>> pybuffer = array('i', [0, 1, 2, 3, 4])
    >>> jnp.frombuffer(pybuffer, dtype=jnp.int32)
    Array([0, 1, 2, 3, 4], dtype=int32)

  .. _Python buffer interface: https://docs.python.org/3/c-api/buffer.html
  """
  # Parse on host via NumPy, then transfer the result into a JAX array.
  host_array = np.frombuffer(buffer=buffer, dtype=dtype, count=count, offset=offset)
  return asarray(host_array)
@export
def fromfile(*args, **kwargs):
  """Unimplemented JAX wrapper for jnp.fromfile.

  This function is left deliberately unimplemented because it may be non-pure and thus
  unsafe for use with JIT and other JAX transformations. Consider using
  ``jnp.asarray(np.fromfile(...))`` instead, although care should be taken if ``np.fromfile``
  is used within jax transformations because of its potential side-effect of consuming the
  file object; for more information see `Common Gotchas: Pure Functions
  <https://docs.jax.dev/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions>`_.
  """
  # Note: "within jax transformations" (not "a jax transformations"), matching
  # the docstring above.
  raise NotImplementedError(
    "jnp.fromfile() is not implemented because it may be non-pure and thus unsafe for use "
    "with JIT and other JAX transformations. Consider using jnp.asarray(np.fromfile(...)) "
    "instead, although care should be taken if np.fromfile is used within jax transformations "
    "because of its potential side-effect of consuming the file object; for more information see "
    "https://docs.jax.dev/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions")
@export
def fromiter(*args, **kwargs):
  """Unimplemented JAX wrapper for jnp.fromiter.

  This function is left deliberately unimplemented because it may be non-pure and thus
  unsafe for use with JIT and other JAX transformations. Consider using
  ``jnp.asarray(np.fromiter(...))`` instead, although care should be taken if ``np.fromiter``
  is used within jax transformations because of its potential side-effect of consuming the
  iterable object; for more information see `Common Gotchas: Pure Functions
  <https://docs.jax.dev/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions>`_.
  """
  # Note: "within jax transformations" (not "a jax transformations"), matching
  # the docstring above.
  raise NotImplementedError(
    "jnp.fromiter() is not implemented because it may be non-pure and thus unsafe for use "
    "with JIT and other JAX transformations. Consider using jnp.asarray(np.fromiter(...)) "
    "instead, although care should be taken if np.fromiter is used within jax transformations "
    "because of its potential side-effect of consuming the iterable object; for more information see "
    "https://docs.jax.dev/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions")
@export
def from_dlpack(x: Any, /, *, device: xc.Device | Sharding | None = None,
                copy: bool | None = None) -> Array:
  """Construct a JAX array from a DLPack-compatible object.

  JAX implementation of :func:`numpy.from_dlpack`.

  Args:
    x: An object that implements the DLPack_ protocol via the ``__dlpack__``
      and ``__dlpack_device__`` methods, or a legacy DLPack tensor on either
      CPU or GPU.
    device: An optional :class:`~jax.Device` or :class:`~jax.sharding.Sharding`,
      representing the single device onto which the returned array should be placed.
      If given, then the result is committed to the device. If unspecified,
      the resulting array will be unpacked onto the same device it originated from.
      Setting ``device`` to a device different from the source of ``external_array``
      will require a copy, meaning ``copy`` must be set to either ``True`` or ``None``.
    copy: An optional boolean, controlling whether or not a copy is performed.
      If ``copy=True`` then a copy is always performed, even if unpacked onto the
      same device. If ``copy=False`` then the copy is never performed and will raise
      an error if necessary. When ``copy=None`` (default) then a copy may be performed
      if needed for a device transfer.

  Returns:
    A JAX array of the input buffer.

  Note:
    While JAX arrays are always immutable, dlpack buffers cannot be marked as
    immutable, and it is possible for processes external to JAX to mutate them
    in-place. If a JAX Array is constructed from a dlpack buffer without copying
    and the source buffer is later modified in-place, it may lead to undefined
    behavior when using the associated JAX array.

  Examples:
    Passing data between NumPy and JAX via DLPack_:

    >>> import numpy as np
    >>> rng = np.random.default_rng(42)
    >>> x_numpy = rng.random(4, dtype='float32')
    >>> print(x_numpy)
    [0.08925092 0.773956   0.6545715  0.43887842]
    >>> hasattr(x_numpy, "__dlpack__")  # NumPy supports the DLPack interface
    True

    >>> import jax.numpy as jnp
    >>> x_jax = jnp.from_dlpack(x_numpy)
    >>> print(x_jax)
    [0.08925092 0.773956   0.6545715  0.43887842]
    >>> hasattr(x_jax, "__dlpack__")  # JAX supports the DLPack interface
    True

    >>> x_numpy_round_trip = np.from_dlpack(x_jax)
    >>> print(x_numpy_round_trip)
    [0.08925092 0.773956   0.6545715  0.43887842]

  .. _DLPack: https://dmlc.github.io/dlpack
  """
  # Imported lazily to avoid a circular import; aliased so it does not shadow
  # this function's own name.
  from jax.dlpack import from_dlpack as _jax_from_dlpack  # pylint: disable=g-import-not-at-top
  return _jax_from_dlpack(x, device=device, copy=copy)
@export
def fromfunction(function: Callable[..., Array], shape: Any,
                 *, dtype: DTypeLike = float, **kwargs) -> Array:
  """Build an array by applying a function over a grid of indices.

  JAX implementation of :func:`numpy.fromfunction`. The JAX implementation
  differs in that it dispatches via :func:`jax.vmap`, and so unlike in NumPy
  the function logically operates on scalar inputs, and need not explicitly
  handle broadcasted inputs (See *Examples* below).

  Args:
    function: a function that takes *N* dynamic scalars and outputs a scalar.
    shape: a length-*N* tuple of integers specifying the output shape.
    dtype: optionally specify the dtype of the inputs. Defaults to floating-point.
    kwargs: additional keyword arguments are passed statically to ``function``.

  Returns:
    An array of shape ``shape`` if ``function`` returns a scalar, or in general
    a pytree of arrays with leading dimensions ``shape``, as determined by the
    output of ``function``.

  See also:
    - :func:`jax.vmap`: the core transformation that the :func:`fromfunction`
      API is built on.

  Examples:
    Generate a multiplication table of a given shape:

    >>> jnp.fromfunction(jnp.multiply, shape=(3, 6), dtype=int)
    Array([[ 0,  0,  0,  0,  0,  0],
           [ 0,  1,  2,  3,  4,  5],
           [ 0,  2,  4,  6,  8, 10]], dtype=int32)

    When ``function`` returns a non-scalar the output will have leading
    dimension of ``shape``:

    >>> def f(x):
    ...   return (x + 1) * jnp.arange(3)
    >>> jnp.fromfunction(f, shape=(2,))
    Array([[0., 1., 2.],
           [0., 2., 4.]], dtype=float32)

    ``function`` may return multiple results, in which case each is mapped
    independently:

    >>> def f(x, y):
    ...   return x + y, x * y
    >>> x_plus_y, x_times_y = jnp.fromfunction(f, shape=(3, 5))
    >>> print(x_plus_y)
    [[0. 1. 2. 3. 4.]
     [1. 2. 3. 4. 5.]
     [2. 3. 4. 5. 6.]]
    >>> print(x_times_y)
    [[0. 0. 0. 0. 0.]
     [0. 1. 2. 3. 4.]
     [0. 2. 4. 6. 8.]]

    The JAX implementation differs slightly from NumPy's implementation. In
    :func:`numpy.fromfunction`, the function is expected to explicitly operate
    element-wise on the full grid of input values:

    >>> def f(x, y):
    ...   print(f"{x.shape = }\\n{y.shape = }")
    ...   return x + y
    ...
    >>> np.fromfunction(f, (2, 3))
    x.shape = (2, 3)
    y.shape = (2, 3)
    array([[0., 1., 2.],
           [1., 2., 3.]])

    In :func:`jax.numpy.fromfunction`, the function is vectorized via
    :func:`jax.vmap`, and so is expected to operate on scalar values:

    >>> jnp.fromfunction(f, (2, 3))
    x.shape = ()
    y.shape = ()
    Array([[0., 1., 2.],
           [1., 2., 3.]], dtype=float32)
  """
  shape = core.canonicalize_shape(shape, context="shape argument of jnp.fromfunction()")
  ndim = len(shape)
  vmapped = function
  for axis in range(ndim):
    # Wrap one vmap layer per output axis. The in_axes tuple is built in
    # reverse so that, after all layers are applied, axis 0 is outermost.
    axes = tuple(0 if j == axis else None for j in reversed(range(ndim)))
    vmapped = api.vmap(vmapped, in_axes=axes)
  index_arrays = (arange(size, dtype=dtype) for size in shape)
  return vmapped(*index_arrays, **kwargs)
@export
def fromstring(string: str, dtype: DTypeLike = float, count: int = -1, *, sep: str) -> Array:
  """Parse a string of text into a 1-D JAX array.

  JAX implementation of :func:`numpy.fromstring`.

  Args:
    string: input string containing the data.
    dtype: optional. Desired data type for the array. Default is ``float``.
    count: optional integer specifying the number of items to read from the string.
      If -1 (default), all items are read.
    sep: the string used to separate values in the input string.

  Returns:
    A 1-D JAX array containing the parsed data from the input string.

  See also:
    - :func:`jax.numpy.frombuffer`: construct a JAX array from an object
      that implements the buffer interface.

  Examples:
    >>> jnp.fromstring("1 2 3", dtype=int, sep=" ")
    Array([1, 2, 3], dtype=int32)
    >>> jnp.fromstring("0.1, 0.2, 0.3", dtype=float, count=2, sep=",")
    Array([0.1, 0.2], dtype=float32)
  """
  # Parse on host via NumPy, then transfer the result into a JAX array.
  parsed = np.fromstring(string=string, dtype=dtype, count=count, sep=sep)
  return asarray(parsed)
@export
def eye(N: DimSize, M: DimSize | None = None,
        k: int | ArrayLike = 0,
        dtype: DTypeLike | None = None,
        *, device: xc.Device | Sharding | None = None) -> Array:
  """Create a square or rectangular identity matrix.

  JAX implementation of :func:`numpy.eye`.

  Args:
    N: integer specifying the first dimension of the array.
    M: optional integer specifying the second dimension of the array;
      defaults to the same value as ``N``.
    k: optional integer specifying the offset of the diagonal. Use positive
      values for upper diagonals, and negative values for lower diagonals.
      Default is zero.
    dtype: optional dtype; defaults to floating point.
    device: optional :class:`~jax.Device` or :class:`~jax.sharding.Sharding`
      to which the created array will be committed.

  Returns:
    Identity array of shape ``(N, M)``, or ``(N, N)`` if ``M`` is not specified.

  See also:
    :func:`jax.numpy.identity`: Simpler API for generating square identity matrices.

  Examples:
    A simple 3x3 identity matrix:

    >>> jnp.eye(3)
    Array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]], dtype=float32)

    Integer identity matrices with offset diagonals:

    >>> jnp.eye(3, k=1, dtype=int)
    Array([[0, 1, 0],
           [0, 0, 1],
           [0, 0, 0]], dtype=int32)
    >>> jnp.eye(3, k=-1, dtype=int)
    Array([[0, 0, 0],
           [1, 0, 0],
           [0, 1, 0]], dtype=int32)

    Non-square identity matrix:

    >>> jnp.eye(3, 5, k=1)
    Array([[0., 1., 0., 0., 0.],
           [0., 0., 1., 0., 0.],
           [0., 0., 0., 1., 0.]], dtype=float32)
  """
  # TODO(vfdev-5): optimize putting the array directly on the device specified
  # instead of putting it on default device and then on the specific device
  result = _eye(N, M=M, k=k, dtype=dtype)
  return result if device is None else api.device_put(result, device=device)
def _eye(N: DimSize, M: DimSize | None = None,
         k: int | ArrayLike = 0,
         dtype: DTypeLike | None = None) -> Array:
  """Implementation of :func:`jax.numpy.eye`, without device placement.

  Generates the matrix directly on-device by comparing row and column iotas
  rather than transferring a host-constructed array.

  Raises:
    ValueError: if ``k`` is not a scalar integer, or if either dimension
      is negative.
  """
  dtype = dtypes.check_and_canonicalize_user_dtype(
      float if dtype is None else dtype, "eye")
  if isinstance(k, int):
    # Clamp Python ints into int32 range so the comparison below is valid.
    # (Fixed: the error-message string previously had a stray leading backtick.)
    k = lax._clip_int_to_valid_range(k, np.int32,
                                     "argument `k` of jax.numpy.eye")
  offset = util.ensure_arraylike("eye", k)
  if not (offset.shape == () and dtypes.issubdtype(offset.dtype, np.integer)):
    raise ValueError(f"k must be a scalar integer; got {k}")
  N_int = core.canonicalize_dim(N, "argument 'N' of jnp.eye()")
  M_int = N_int if M is None else core.canonicalize_dim(M, "argument 'M' of jnp.eye()")
  if N_int < 0 or M_int < 0:
    raise ValueError(f"negative dimensions are not allowed, got {N} and {M}")
  i = lax.broadcasted_iota(offset.dtype, (N_int, M_int), 0)
  j = lax.broadcasted_iota(offset.dtype, (N_int, M_int), 1)
  # Entry (i, j) is 1 exactly when j - i == k, i.e. on the k-th diagonal.
  return (i + offset == j).astype(dtype)
@export
def identity(n: DimSize, dtype: DTypeLike | None = None) -> Array:
  """Create a square identity matrix.

  JAX implementation of :func:`numpy.identity`.

  Args:
    n: integer specifying the size of each array dimension.
    dtype: optional dtype; defaults to floating point.

  Returns:
    Identity array of shape ``(n, n)``.

  See also:
    :func:`jax.numpy.eye`: non-square and/or offset identity matrices.

  Examples:
    A simple 3x3 identity matrix:

    >>> jnp.identity(3)
    Array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]], dtype=float32)

    A 2x2 integer identity matrix:

    >>> jnp.identity(2, dtype=int)
    Array([[1, 0],
           [0, 1]], dtype=int32)
  """
  # Canonicalize the user dtype here (None is passed through to eye's default).
  canonical_dtype = (None if dtype is None
                     else dtypes.check_and_canonicalize_user_dtype(dtype, "identity"))
  return eye(n, dtype=canonical_dtype)
@export
def arange(start: ArrayLike | DimSize, stop: ArrayLike | DimSize | None = None,
           step: ArrayLike | None = None, dtype: DTypeLike | None = None,
           *, device: xc.Device | Sharding | None = None,
           out_sharding: NamedSharding | P | None = None) -> Array:
  """Create an array of evenly-spaced values.

  JAX implementation of :func:`numpy.arange`, implemented in terms of
  :func:`jax.lax.iota`.

  Similar to Python's :func:`range` function, this can be called with a few
  different positional signatures:

  - ``jnp.arange(stop)``: generate values from 0 to ``stop``, stepping by 1.
  - ``jnp.arange(start, stop)``: generate values from ``start`` to ``stop``,
    stepping by 1.
  - ``jnp.arange(start, stop, step)``: generate values from ``start`` to ``stop``,
    stepping by ``step``.

  Like with Python's :func:`range` function, the starting value is inclusive,
  and the stop value is exclusive.

  Args:
    start: start of the interval, inclusive.
    stop: optional end of the interval, exclusive. If not specified, then
      ``(start, stop) = (0, start)``
    step: optional step size for the interval. Default = 1.
    dtype: optional dtype for the returned array; if not specified it will
      be determined via type promotion of `start`, `stop`, and `step`.
    device: (optional) :class:`~jax.Device` or :class:`~jax.sharding.Sharding`
      to which the created array will be committed.
    out_sharding: (optional) :class:`~jax.NamedSharding` or :class:`~jax.P` to
      which the created array will be committed. Use `out_sharding` argument,
      if using explicit sharding
      (https://docs.jax.dev/en/latest/notebooks/explicit-sharding.html)

  Returns:
    Array of evenly-spaced values from ``start`` to ``stop``, separated by ``step``.

  Note:
    Using ``arange`` with a floating-point ``step`` argument can lead to unexpected
    results due to accumulation of floating-point errors, especially with
    lower-precision data types like ``float8_*`` and ``bfloat16``.
    To avoid precision errors, consider generating a range of integers, and scaling
    it to the desired range. For example, instead of this::

       jnp.arange(-1, 1, 0.01, dtype='bfloat16')

    it can be more accurate to generate a sequence of integers, and scale them::

       (jnp.arange(-100, 100) * 0.01).astype('bfloat16')

  Examples:
    Single-argument version specifies only the ``stop`` value:

    >>> jnp.arange(4)
    Array([0, 1, 2, 3], dtype=int32)

    Passing a floating-point ``stop`` value leads to a floating-point result:

    >>> jnp.arange(4.0)
    Array([0., 1., 2., 3.], dtype=float32)

    Two-argument version specifies ``start`` and ``stop``, with ``step=1``:

    >>> jnp.arange(1, 6)
    Array([1, 2, 3, 4, 5], dtype=int32)

    Three-argument version specifies ``start``, ``stop``, and ``step``:

    >>> jnp.arange(0, 2, 0.5)
    Array([0. , 0.5, 1. , 1.5], dtype=float32)

  See Also:
    - :func:`jax.numpy.linspace`: generate a fixed number of evenly-spaced values.
    - :func:`jax.lax.iota`: directly generate integer sequences in XLA.
  """
  sharding = util.choose_device_or_out_sharding(
      device, out_sharding, 'jnp.arange')
  if sharding is not None and sharding._is_concrete:
    # Concrete shardings are applied after construction via device_put.
    return api.device_put(_arange(start, stop=stop, step=step, dtype=dtype),
                          sharding)
  assert sharding is None or isinstance(sharding, NamedSharding)
  return _arange(start, stop=stop, step=step, dtype=dtype,
                 out_sharding=sharding)
def _arange(start: ArrayLike | DimSize, stop: ArrayLike | DimSize | None = None,
            step: ArrayLike | None = None, dtype: DTypeLike | None = None,
            out_sharding: NamedSharding | None = None) -> Array:
  # Shared implementation of jnp.arange. Handles the three call signatures
  # arange(stop), arange(start, stop), and arange(start, stop, step), plus
  # symbolic (dynamic-shape) dimension arguments.
  if dtype is not None:
    dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "arange")
  if not config.dynamic_shapes.value:
    util.check_arraylike("arange", start)
    if stop is None and step is None:
      # Single-argument form: the first positional argument is really 'stop'.
      start = core.concrete_or_error(None, start, "It arose in the jnp.arange argument 'stop'")
    else:
      start = core.concrete_or_error(None, start, "It arose in the jnp.arange argument 'start'")
    util.check_arraylike_or_none("arange", None, stop, step)
    stop = core.concrete_or_error(None, stop, "It arose in the jnp.arange argument 'stop'")
    step = core.concrete_or_error(None, step, "It arose in the jnp.arange argument 'step'")
  # All arguments must be scalars; report the user-facing argument name.
  start_name = "stop" if stop is None and step is None else "start"
  for name, val in [(start_name, start), ("stop", stop), ("step", step)]:
    if val is not None and np.ndim(val) != 0:
      raise ValueError(f"jax.numpy.arange: arguments must be scalars; got {name}={val}")
  if any(core.is_symbolic_dim(v) for v in (start, stop, step)):
    # Some dynamic shapes: normalize to the three-argument form, then defer
    # to the symbolic-dimension implementation.
    if stop is None and step is None:
      stop = start
      start = 0
      step = 1
    elif stop is not None and step is None:
      step = 1
    return _arange_dynamic(start, stop, step, dtype or dtypes.default_int_dtype())
  if dtype is None:
    # NumPy-style dtype inference via type promotion of the given arguments.
    dtype = result_type(start, *(x for x in [stop, step] if x is not None))
  dtype = dtypes.jax_dtype(dtype)
  if stop is None and step is None:
    # arange(stop): lower directly to an iota of length ceil(stop).
    start_dtype = _dtype(start)
    if (not dtypes.issubdtype(start_dtype, np.integer) and
        not dtypes.issubdtype(start_dtype, dtypes.extended)):
      ceil_ = ufuncs.ceil if isinstance(start, core.Tracer) else np.ceil
      start = ceil_(start).astype(int)
    return lax.broadcasted_iota(dtype, (start,), 0, out_sharding=out_sharding)  # type: ignore[arg-type]
  else:
    if step is None and start == 0 and stop is not None:
      # arange(0, stop) is also just an iota.
      return lax.broadcasted_iota(dtype, (np.ceil(stop).astype(int),), 0,
                                  out_sharding=out_sharding)
    # General case: compute on host with NumPy, then transfer.
    return array(np.arange(start, stop=stop, step=step, dtype=dtype),
                 device=out_sharding)
def _arange_dynamic(
    start: DimSize, stop: DimSize, step: DimSize, dtype: DTypeLike) -> Array:
  # Here if at least one of start, stop, step are dynamic.
  if any(not core.is_dim(v) for v in (start, stop, step)):
    raise ValueError(
        "In arange with non-constant arguments all of start, stop, and step "
        f"must be either dimension expressions or integers: start={start}, "
        f"stop={stop}, step={step}")
  # Must resolve statically if step is {<0, ==0, >0}
  try:
    if step == 0:
      raise ValueError("arange has step == 0")
    step_gt_0 = (step > 0)
  except core.InconclusiveDimensionOperation as e:
    # The sign of step could not be determined symbolically; surface a
    # clearer error than the raw InconclusiveDimensionOperation.
    raise core.InconclusiveDimensionOperation(
        f"In arange with non-constant arguments the step ({step}) must " +
        f"be resolved statically if it is > 0 or < 0.\nDetails: {e}")
  # Number of elements: ceil(|stop - start| / |step|), clamped at zero for
  # empty ranges.
  gap = step if step_gt_0 else - step
  distance = (stop - start) if step_gt_0 else (start - stop)
  size = core.max_dim(0, distance + gap - 1) // gap
  return (array(start, dtype=dtype) +
          array(step, dtype=dtype) * lax.iota(dtype, size))
@export
def meshgrid(*xi: ArrayLike, copy: bool = True, sparse: bool = False,
             indexing: str = 'xy') -> list[Array]:
  """Construct N-dimensional grid arrays from N 1-dimensional vectors.

  JAX implementation of :func:`numpy.meshgrid`.

  Args:
    xi: N arrays to convert to a grid.
    copy: whether to copy the input arrays. JAX supports only ``copy=True``,
      though under JIT compilation the compiler may opt to avoid copies.
    sparse: if False (default), then each returned array will be of shape
      ``[len(x1), len(x2), ..., len(xN)]``. If True, then returned arrays
      will be of shape ``[1, 1, ..., len(xi), ..., 1, 1]``.
    indexing: options are ``'xy'`` for cartesian indexing (default) or ``'ij'``
      for matrix indexing.

  Returns:
    A length-N list of grid arrays.

  See also:
    - :func:`jax.numpy.indices`: generate a grid of indices.
    - :obj:`jax.numpy.mgrid`: create a meshgrid using indexing syntax.
    - :obj:`jax.numpy.ogrid`: create an open meshgrid using indexing syntax.

  Examples:
    For the following examples, we'll use these 1D arrays as inputs:

    >>> x = jnp.array([1, 2])
    >>> y = jnp.array([10, 20, 30])

    2D cartesian mesh grid:

    >>> x_grid, y_grid = jnp.meshgrid(x, y)
    >>> print(x_grid)
    [[1 2]
     [1 2]
     [1 2]]
    >>> print(y_grid)
    [[10 10]
     [20 20]
     [30 30]]

    2D sparse cartesian mesh grid:

    >>> x_grid, y_grid = jnp.meshgrid(x, y, sparse=True)
    >>> print(x_grid)
    [[1 2]]
    >>> print(y_grid)
    [[10]
     [20]
     [30]]

    2D matrix-index mesh grid:

    >>> x_grid, y_grid = jnp.meshgrid(x, y, indexing='ij')
    >>> print(x_grid)
    [[1 1 1]
     [2 2 2]]
    >>> print(y_grid)
    [[10 20 30]
     [10 20 30]]
  """
  args = list(util.ensure_arraylike_tuple("meshgrid", tuple(xi)))
  if not copy:
    raise ValueError("jax.numpy.meshgrid only supports copy=True")
  if indexing not in ["xy", "ij"]:
    raise ValueError(f"Valid values for indexing are 'xy' and 'ij', got {indexing}")
  if any(a.ndim != 1 for a in args):
    raise ValueError("Arguments to jax.numpy.meshgrid must be 1D, got shapes "
                     f"{[a.shape for a in args]}")
  # 'xy' indexing is 'ij' indexing with the first two dimensions swapped:
  # swap the inputs before broadcasting, and swap the outputs back after.
  if indexing == "xy" and len(args) >= 2:
    args[0], args[1] = args[1], args[0]
  shape = [1 if sparse else a.shape[0] for a in args]
  _a_shape = lambda i, a: [*shape[:i], a.shape[0], *shape[i + 1:]] if sparse else shape
  output = [lax.broadcast_in_dim(a, _a_shape(i, a), (i,)) for i, a in enumerate(args)]
  if indexing == "xy" and len(args) >= 2:
    output[0], output[1] = output[1], output[0]
  return output
@export
@api.jit
def i0(x: ArrayLike) -> Array:
  r"""Calculate modified Bessel function of first kind, zeroth order.

  JAX implementation of :func:`numpy.i0`.

  Modified Bessel function of first kind, zeroth order is defined by:

  .. math::

     \mathrm{i0}(x) = I_0(x) = \sum_{k=0}^{\infty} \frac{(x^2/4)^k}{(k!)^2}

  Args:
    x: scalar or array. Specifies the argument of Bessel function. Complex inputs
      are not supported.

  Returns:
    An array containing the corresponding values of the modified Bessel function
    of ``x``.

  See also:
    - :func:`jax.scipy.special.i0`: Calculates the modified Bessel function of
      zeroth order.
    - :func:`jax.scipy.special.i1`: Calculates the modified Bessel function of
      first order.
    - :func:`jax.scipy.special.i0e`: Calculates the exponentially scaled modified
      Bessel function of zeroth order.

  Examples:
    >>> x = jnp.array([-2, -1, 0, 1, 2])
    >>> jnp.i0(x)
    Array([2.2795851, 1.266066 , 1.0000001, 1.266066 , 2.2795851], dtype=float32)
  """
  promoted, = util.promote_args_inexact("i0", x)
  if issubdtype(promoted.dtype, np.floating):
    return _i0(promoted)
  # Complex (and any other non-floating) inputs are rejected.
  raise ValueError(f"Unsupported input type to jax.numpy.i0: {promoted.dtype}")
@custom_jvp
def _i0(x):
  # i0(x) = exp(|x|) * i0e(|x|): computing via the exponentially-scaled
  # Bessel function keeps the intermediate values well-scaled.
  abs_x = lax.abs(x)
  return lax.mul(lax.exp(abs_x), lax_special.bessel_i0e(abs_x))
@_i0.defjvp
def _i0_jvp(primals, tangents):
  # Use the automatic JVP of _i0's body, but override the tangent at x == 0
  # with 0.0 — presumably because abs(x) is not differentiable there.
  primal_out, tangent_out = api.jvp(_i0.fun, primals, tangents)
  return primal_out, where(primals[0] == 0, 0.0, tangent_out)
@export
def ix_(*args: ArrayLike) -> tuple[Array, ...]:
  """Return a multi-dimensional grid (open mesh) from N one-dimensional sequences.

  JAX implementation of :func:`numpy.ix_`.

  Args:
    *args: N one-dimensional arrays

  Returns:
    Tuple of Jax arrays forming an open mesh, each with N dimensions.

  See Also:
    - :obj:`jax.numpy.ogrid`
    - :obj:`jax.numpy.mgrid`
    - :func:`jax.numpy.meshgrid`

  Examples:
    >>> rows = jnp.array([0, 2])
    >>> cols = jnp.array([1, 3])
    >>> open_mesh = jnp.ix_(rows, cols)
    >>> [grid.shape for grid in open_mesh]
    [(2, 1), (1, 2)]
    >>> x = jnp.array([[10, 20, 30, 40],
    ...                [50, 60, 70, 80],
    ...                [90, 100, 110, 120],
    ...                [130, 140, 150, 160]])
    >>> x[open_mesh]
    Array([[ 20,  40],
           [100, 120]], dtype=int32)
  """
  arrays = util.ensure_arraylike_tuple("ix", args)
  ndim = len(arrays)
  meshes = []
  for axis, arr in enumerate(arrays):
    if len(arr.shape) != 1:
      msg = "Arguments to jax.numpy.ix_ must be 1-dimensional, got shape {}"
      raise ValueError(msg.format(arr.shape))
    if arr.dtype == bool:
      raise NotImplementedError(
          "Boolean arguments to jax.numpy.ix_ are not implemented")
    # Each input keeps its extent only along its own axis: (1, ..., len, ..., 1).
    target_shape = [1] * ndim
    target_shape[axis] = arr.shape[0]
    if arr.size == 0:
      # Numpy uses an integer index type for empty arrays.
      meshes.append(lax.full(target_shape, np.zeros((), np.intp)))
    else:
      meshes.append(lax.broadcast_in_dim(arr, target_shape, (axis,)))
  return tuple(meshes)
@overload
def indices(dimensions: Sequence[int], dtype: DTypeLike | None = None,
            sparse: Literal[False] = False) -> Array: ...
@overload
def indices(dimensions: Sequence[int], dtype: DTypeLike | None = None,
            *, sparse: Literal[True]) -> tuple[Array, ...]: ...
@overload
def indices(dimensions: Sequence[int], dtype: DTypeLike | None = None,
            sparse: bool = False) -> Array | tuple[Array, ...]: ...
@export
def indices(dimensions: Sequence[int], dtype: DTypeLike | None = None,
            sparse: bool = False) -> Array | tuple[Array, ...]:
  """Generate arrays of grid indices.

  JAX implementation of :func:`numpy.indices`.

  Args:
    dimensions: the shape of the grid.
    dtype: the dtype of the indices (defaults to integer).
    sparse: if True, then return sparse indices. Default is False, which
      returns dense indices.

  Returns:
    An array of shape ``(len(dimensions), *dimensions)`` If ``sparse`` is False,
    or a sequence of arrays of the same length as ``dimensions`` if ``sparse`` is True.

  See also:
    - :func:`jax.numpy.meshgrid`: generate a grid from arbitrary input arrays.
    - :obj:`jax.numpy.mgrid`: generate dense indices using a slicing syntax.
    - :obj:`jax.numpy.ogrid`: generate sparse indices using a slicing syntax.

  Examples:
    >>> jnp.indices((2, 3))
    Array([[[0, 0, 0],
            [1, 1, 1]],
    <BLANKLINE>
           [[0, 1, 2],
            [0, 1, 2]]], dtype=int32)
    >>> jnp.indices((2, 3), sparse=True)
    (Array([[0],
           [1]], dtype=int32), Array([[0, 1, 2]], dtype=int32))
  """
  dtype = dtypes.check_and_canonicalize_user_dtype(
      int if dtype is None else dtype, "indices")
  dims = tuple(
      core.concrete_or_error(operator.index, d, "dimensions argument of jnp.indices")
      for d in dims_arg) if (dims_arg := dimensions) is not None else ()
  ndim = len(dims)
  grids = []
  for axis, extent in enumerate(dims):
    ramp = lax.iota(dtype, extent)
    # Dense output broadcasts each ramp to the full grid shape; sparse output
    # keeps the extent only along the ramp's own axis.
    if sparse:
      shape = (1,) * axis + (extent,) + (1,) * (ndim - axis - 1)
    else:
      shape = dims
    grids.append(lax.broadcast_in_dim(ramp, shape, (axis,)))
  if sparse:
    return tuple(grids)
  return stack(grids, 0) if grids else array([], dtype=dtype)
@export
def repeat(a: ArrayLike, repeats: ArrayLike, axis: int | None = None, *,
           total_repeat_length: int | None = None,
           out_sharding: NamedSharding | P | None = None) -> Array:
  """Construct an array from repeated elements.

  JAX implementation of :func:`numpy.repeat`.

  Args:
    a: N-dimensional array
    repeats: 1D integer array specifying the number of repeats. Must match the
      length of the repeated axis.
    axis: integer specifying the axis of ``a`` along which to construct the
      repeated array. If None (default) then ``a`` is first flattened.
    total_repeat_length: this must be specified statically for ``jnp.repeat``
      to be compatible with :func:`~jax.jit` and other JAX transformations.
      If ``sum(repeats)`` is larger than the specified ``total_repeat_length``,
      the remaining values will be discarded. If ``sum(repeats)`` is smaller
      than ``total_repeat_length``, the final value will be repeated.
    out_sharding: optional ``NamedSharding`` or ``PartitionSpec`` for the
      result; when provided, the repeat is evaluated under ``auto_axes`` with
      this output sharding.

  Returns:
    an array constructed from repeated values of ``a``.

  See Also:
    - :func:`jax.numpy.tile`: repeat a full array rather than individual values.

  Examples:
    Repeat each value twice along the last axis:

    >>> a = jnp.array([[1, 2],
    ...                [3, 4]])
    >>> jnp.repeat(a, 2, axis=-1)
    Array([[1, 1, 2, 2],
           [3, 3, 4, 4]], dtype=int32)

    If ``axis`` is not specified, the input array will be flattened:

    >>> jnp.repeat(a, 2)
    Array([1, 1, 2, 2, 3, 3, 4, 4], dtype=int32)

    Pass an array to ``repeats`` to repeat each value a different number of times:

    >>> repeats = jnp.array([2, 3])
    >>> jnp.repeat(a, repeats, axis=1)
    Array([[1, 1, 2, 2, 2],
           [3, 3, 4, 4, 4]], dtype=int32)

    In order to use ``repeat`` within ``jit`` and other JAX transformations, the
    size of the output must be specified statically using ``total_repeat_length``:

    >>> jit_repeat = jax.jit(jnp.repeat, static_argnames=['axis', 'total_repeat_length'])
    >>> jit_repeat(a, repeats, axis=1, total_repeat_length=5)
    Array([[1, 1, 2, 2, 2],
           [3, 3, 4, 4, 4]], dtype=int32)

    If `total_repeat_length` is smaller than ``sum(repeats)``, the result will be truncated:

    >>> jit_repeat(a, repeats, axis=1, total_repeat_length=4)
    Array([[1, 1, 2, 2],
           [3, 3, 4, 4]], dtype=int32)

    If it is larger, then the additional entries will be filled with the final value:

    >>> jit_repeat(a, repeats, axis=1, total_repeat_length=7)
    Array([[1, 1, 2, 2, 2, 2, 2],
           [3, 3, 4, 4, 4, 4, 4]], dtype=int32)
  """
  if out_sharding is not None:
    # Caller provided an explicit output sharding: evaluate under auto_axes.
    return _auto_repeat(_repeat, a, repeats, axis, total_repeat_length,
                        out_sharding)
  ctx_mesh = get_abstract_mesh()
  if ctx_mesh._any_axis_explicit:
    aval = core.typeof(a)
    # Under a mesh with explicit axes, an output sharding can be inferred only
    # when the repeated axis itself is unsharded; otherwise the caller must
    # supply one via out_sharding.
    if axis is None or aval.sharding.spec[axis] is not None:
      raise ValueError(
          "Please pass sharding to `jnp.repeat` via `out_sharding` parameter.")
    assert axis is not None and aval.sharding.spec[axis] is None
    out_sharding = (NamedSharding(ctx_mesh, P())
                    if aval.sharding.mesh.empty else aval.sharding)
    return _auto_repeat(_repeat, a, repeats, axis, total_repeat_length,
                        out_sharding)
  try:
    return _repeat(repeats, a, axis=axis,
                   total_repeat_length=total_repeat_length)
  except core.ShardingTypeError as e:
    # Surface sharding-type failures as a ValueError with actionable advice.
    raise ValueError(
        "Please pass sharding to `jnp.repeat` via `out_sharding` parameter.")
def _auto_repeat(fun, a, repeats, axis, total_repeat_length, out_sharding):
  """Run ``fun`` (the repeat implementation) under ``auto_axes`` with the
  requested output sharding applied to the result."""
  out_sharding = canonicalize_sharding(out_sharding, 'repeat')
  if total_repeat_length is None:
    # Without a static total_repeat_length, `repeats` is bound via partial
    # outside the auto_axes-wrapped call rather than passed as an operand —
    # presumably so it stays concrete for sizing the output; only `a` is an
    # operand. TODO(review): confirm this rationale.
    return auto_axes(partial(fun, repeats, axis=axis,
                             total_repeat_length=total_repeat_length),
                     out_sharding=out_sharding,
                     axes=out_sharding.mesh.explicit_axes  # type: ignore
                     )(a)
  else:
    # With a static output length, both `repeats` and `a` are passed as
    # operands of the wrapped function.
    return auto_axes(
        partial(fun, axis=axis, total_repeat_length=total_repeat_length),
        out_sharding=out_sharding,
        axes=out_sharding.mesh.explicit_axes  # type: ignore
    )(repeats, a)
def _repeat(repeats: ArrayLike, a: ArrayLike, *, axis: int | None = None,
            total_repeat_length: int | None = None) -> Array:
  """Core implementation of :func:`repeat`; see its docstring for semantics.

  Note: ``repeats`` is the leading parameter (unlike the public API) so that
  ``_auto_repeat`` can either close over it or pass it as the first operand.
  """
  if core.is_dim(repeats):
    util.check_arraylike("repeat", a)
  else:
    util.check_arraylike("repeat", a, repeats)
  arr = asarray(a)
  if axis is None:
    # NumPy semantics: with no axis, operate on the flattened input.
    arr = arr.ravel()
    axis = 0
  axis = core.concrete_or_error(operator.index, axis, "'axis' argument of jnp.repeat()")
  assert isinstance(axis, int)  # to appease mypy
  if core.is_symbolic_dim(repeats):
    if total_repeat_length is not None:
      raise ValueError("jnp.repeat with a non-constant `repeats` is supported only "
        f"when `total_repeat_length` is None. ({repeats=} {total_repeat_length=})")
  # If total_repeat_length is not given, use a default.
  if total_repeat_length is None:
    repeats = core.concrete_or_error(None, repeats,
      "When jit-compiling jnp.repeat, the total number of repeats must be static. "
      "To fix this, either specify a static value for `repeats`, or pass a static "
      "value to `total_repeat_length`.")
    # Fast path for when repeats is a scalar.
    if np.ndim(repeats) == 0 and np.ndim(arr) != 0:
      # Insert a unit axis after `axis`, broadcast it to `repeats`, and merge
      # the two axes back together with a reshape.
      input_shape = arr.shape
      axis = _canonicalize_axis(axis, len(input_shape))
      aux_axis = axis + 1
      aux_shape: list[DimSize] = list(input_shape)
      aux_shape.insert(aux_axis, operator.index(repeats) if core.is_constant_dim(repeats) else repeats)  # type: ignore
      arr = lax.broadcast_in_dim(
        arr, aux_shape, [i for i in range(len(aux_shape)) if i != aux_axis])
      result_shape: list[DimSize] = list(input_shape)
      result_shape[axis] *= repeats
      return arr.reshape(result_shape)
    repeats = np.ravel(repeats)
    if arr.ndim != 0:
      repeats = np.broadcast_to(repeats, [arr.shape[axis]])
    total_repeat_length = np.sum(repeats)
  else:
    repeats = ravel(repeats)
    if arr.ndim != 0:
      repeats = broadcast_to(repeats, [arr.shape[axis]])
  # Special case when a is a scalar.
  if arr.ndim == 0:
    if np.shape(repeats) == (1,):
      return array_creation.full([total_repeat_length], arr)
    else:
      raise ValueError('`repeat` with a scalar parameter `a` is only '
      'implemented for scalar values of the parameter `repeats`.')
  # Special case if total_repeat_length is zero.
  if total_repeat_length == 0:
    result_shape = list(arr.shape)
    result_shape[axis] = 0
    return reshape(array([], dtype=arr.dtype), result_shape)
  # If repeats is on a zero sized axis, then return the array.
  if arr.shape[axis] == 0:
    return arr
  # This implementation of repeat avoids having to instantiate a large
  # intermediate tensor.
  # Modify repeats from e.g. [1,2,0,5] -> [0,1,2,0] for exclusive repeat.
  exclusive_repeats = roll(repeats, shift=1).at[0].set(0)
  # Cumsum to get indices of new number in repeated tensor, e.g. [0, 1, 3, 3]
  scatter_indices = reductions.cumsum(exclusive_repeats)
  # Scatter these onto a zero buffer, e.g. [1,1,0,2,0,0,0,0]
  block_split_indicators = array_creation.zeros([total_repeat_length], dtype='int32')
  block_split_indicators = block_split_indicators.at[scatter_indices].add(1)
  # Cumsum again to get scatter indices for repeat, e.g. [0,1,1,3,3,3,3,3]
  gather_indices = reductions.cumsum(block_split_indicators) - 1
  return indexing.take(arr, gather_indices, axis=axis)
@export
@api.jit(static_argnames=('axis',))
def trapezoid(y: ArrayLike, x: ArrayLike | None = None, dx: ArrayLike = 1.0,
              axis: int = -1) -> Array:
  r"""
  Integrate along the given axis using the composite trapezoidal rule.

  JAX implementation of :func:`numpy.trapezoid`. The trapezoidal rule
  approximates the integral under a curve by summing the areas of trapezoids
  formed between adjacent data points.

  Args:
    y: array of data to integrate.
    x: optional array of sample points corresponding to the ``y`` values. If not
      provided, ``x`` defaults to equally spaced with spacing given by ``dx``.
    dx: The spacing between sample points when `x` is None (default: 1.0).
    axis: The axis along which to integrate (default: -1)

  Returns:
    The definite integral approximated by the trapezoidal rule.

  Examples:
    Integrate over a regular grid, with spacing 1.0:

    >>> y = jnp.array([1, 2, 3, 2, 3, 2, 1])
    >>> jnp.trapezoid(y, dx=1.0)
    Array(13., dtype=float32)

    Integrate over an irregular grid:

    >>> x = jnp.array([0, 2, 5, 7, 10, 15, 20])
    >>> jnp.trapezoid(y, x)
    Array(43., dtype=float32)
  """
  # TODO(phawkins): remove this annotation after fixing jnp types.
  spacings: Array
  if x is None:
    y = util.ensure_arraylike('trapezoid', y)
    y_arr, = util.promote_dtypes_inexact(y)
    spacings = asarray(dx)
  else:
    y, x = util.ensure_arraylike('trapezoid', y, x)
    y_arr, x_arr = util.promote_dtypes_inexact(y, x)
    if x_arr.ndim == 1:
      spacings = diff(x_arr)
    else:
      spacings = moveaxis(diff(x_arr, axis=axis), axis, -1)
  # Work along the trailing axis: sum of dx * (y[i] + y[i+1]) / 2.
  y_arr = moveaxis(y_arr, axis, -1)
  adjacent_sums = y_arr[..., 1:] + y_arr[..., :-1]
  return 0.5 * (spacings * adjacent_sums).sum(-1)
@export
def tri(N: int, M: int | None = None, k: int = 0, dtype: DTypeLike | None = None) -> Array:
  r"""Return an array with ones on and below the diagonal and zeros elsewhere.

  JAX implementation of :func:`numpy.tri`

  Args:
    N: int. Dimension of the rows of the returned array.
    M: optional, int. Dimension of the columns of the returned array. If not
      specified, then ``M = N``.
    k: optional, int, default=0. Specifies the sub-diagonal on and below which
      the array is filled with ones. ``k=0`` refers to main diagonal, ``k<0``
      refers to sub-diagonal below the main diagonal and ``k>0`` refers to
      sub-diagonal above the main diagonal.
    dtype: optional, data type of the returned array. The default type is float.

  Returns:
    An array of shape ``(N, M)`` with ones on and below the sub-diagonal
    specified by ``k`` and zeros elsewhere.

  See also:
    - :func:`jax.numpy.tril`: Returns a lower triangle of an array.
    - :func:`jax.numpy.triu`: Returns an upper triangle of an array.

  Examples:
    >>> jnp.tri(3)
    Array([[1., 0., 0.],
           [1., 1., 0.],
           [1., 1., 1.]], dtype=float32)
    >>> jnp.tri(3, 4, k=-1)
    Array([[0., 0., 0., 0.],
           [1., 0., 0., 0.],
           [1., 1., 0., 0.]], dtype=float32)
  """
  if dtype is not None:
    dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "tri")
  else:
    # TODO(phawkins): this is a strange default.
    dtype = np.dtype(np.float32)
  n_cols = N if M is None else M
  return lax._tri(dtype, (N, n_cols), k)
@export
@api.jit(static_argnames=('k',))
def tril(m: ArrayLike, k: int = 0) -> Array:
  r"""Return lower triangle of an array.

  JAX implementation of :func:`numpy.tril`

  Args:
    m: input array. Must have ``m.ndim >= 2``.
    k: optional, int, default=0. Specifies the sub-diagonal above which the
      elements of the array are set to zero. ``k=0`` refers to main diagonal,
      ``k<0`` refers to sub-diagonal below the main diagonal and ``k>0`` refers
      to sub-diagonal above the main diagonal.

  Returns:
    An array with the same shape as the input, with elements above the
    sub-diagonal specified by ``k`` set to zero. When ``m.ndim > 2`` the
    operation is applied batch-wise over the trailing two axes.

  See also:
    - :func:`jax.numpy.triu`: Returns an upper triangle of an array.
    - :func:`jax.numpy.tri`: Returns an array with ones on and below the
      diagonal and zeros elsewhere.

  Examples:
    >>> x = jnp.array([[1, 2, 3, 4],
    ...                [5, 6, 7, 8],
    ...                [9, 10, 11, 12]])
    >>> jnp.tril(x)
    Array([[ 1,  0,  0,  0],
           [ 5,  6,  0,  0],
           [ 9, 10, 11,  0]], dtype=int32)
    >>> jnp.tril(x, k=1)
    Array([[ 1,  2,  0,  0],
           [ 5,  6,  7,  0],
           [ 9, 10, 11, 12]], dtype=int32)
  """
  m = util.ensure_arraylike("tril", m)
  shape = np.shape(m)
  if len(shape) < 2:
    raise ValueError("Argument to jax.numpy.tril must be at least 2D")
  nrows, ncols = shape[-2:]
  # Boolean mask selecting the lower triangle, broadcast over any batch dims.
  keep = lax.broadcast(tri(nrows, ncols, k=k, dtype=bool), shape[:-2])
  return lax.select(keep, m, array_creation.zeros_like(m))
@export
@api.jit(static_argnames=('k',))
def triu(m: ArrayLike, k: int = 0) -> Array:
  r"""Return upper triangle of an array.

  JAX implementation of :func:`numpy.triu`

  Args:
    m: input array. Must have ``m.ndim >= 2``.
    k: optional, int, default=0. Specifies the sub-diagonal below which the
      elements of the array are set to zero. ``k=0`` refers to main diagonal,
      ``k<0`` refers to sub-diagonal below the main diagonal and ``k>0`` refers
      to sub-diagonal above the main diagonal.

  Returns:
    An array with the same shape as the input, with elements below the
    sub-diagonal specified by ``k`` set to zero. When ``m.ndim > 2`` the
    operation is applied batch-wise over the trailing two axes.

  See also:
    - :func:`jax.numpy.tril`: Returns a lower triangle of an array.
    - :func:`jax.numpy.tri`: Returns an array with ones on and below the
      diagonal and zeros elsewhere.

  Examples:
    >>> x = jnp.array([[1, 2, 3],
    ...                [4, 5, 6],
    ...                [7, 8, 9],
    ...                [10, 11, 12]])
    >>> jnp.triu(x)
    Array([[1, 2, 3],
           [0, 5, 6],
           [0, 0, 9],
           [0, 0, 0]], dtype=int32)
    >>> jnp.triu(x, k=-1)
    Array([[ 1,  2,  3],
           [ 4,  5,  6],
           [ 0,  8,  9],
           [ 0,  0, 12]], dtype=int32)
  """
  m = util.ensure_arraylike("triu", m)
  shape = np.shape(m)
  if len(shape) < 2:
    raise ValueError("Argument to jax.numpy.triu must be at least 2D")
  nrows, ncols = shape[-2:]
  # tri(..., k - 1) marks the strictly-lower region, i.e. the part to zero.
  zero_out = lax.broadcast(tri(nrows, ncols, k=k - 1, dtype=bool), shape[:-2])
  return lax.select(zero_out, array_creation.zeros_like(m), m)
@export
@api.jit(static_argnames=('axis1', 'axis2', 'dtype'))
def trace(a: ArrayLike, offset: int | ArrayLike = 0, axis1: int = 0, axis2: int = 1,
          dtype: DTypeLike | None = None, out: None = None) -> Array:
  """Calculate sum of the diagonal of input along the given axes.

  JAX implementation of :func:`numpy.trace`.

  Args:
    a: input array. Must have ``a.ndim >= 2``.
    offset: optional, int, default=0. Diagonal offset from the main diagonal.
      Can be positive or negative.
    axis1: optional, default=0. The first axis along which to take the sum of
      diagonal. Must be a static integer value.
    axis2: optional, default=1. The second axis along which to take the sum of
      diagonal. Must be a static integer value.
    dtype: optional. The dtype of the output array. Should be provided as static
      argument in JIT compilation.
    out: Not used by JAX.

  Returns:
    An array of dimension ``a.ndim - 2`` containing the sum of the diagonal
    elements along axes (axis1, axis2).

  See also:
    - :func:`jax.numpy.diag`: Returns the specified diagonal or constructs a diagonal
      array
    - :func:`jax.numpy.diagonal`: Returns the specified diagonal of an array.
    - :func:`jax.numpy.diagflat`: Returns a 2-D array with the flattened input array
      laid out on the diagonal.

  Examples:
    >>> x = jnp.arange(1, 9).reshape(2, 2, 2)
    >>> jnp.trace(x)
    Array([ 8, 10], dtype=int32)
    >>> jnp.trace(x, axis1=1, axis2=2)
    Array([ 5, 13], dtype=int32)
  """
  a = util.ensure_arraylike("trace", a)
  if out is not None:
    raise NotImplementedError("The 'out' argument to jnp.trace is not supported.")
  rank = np.ndim(a)
  if _canonicalize_axis(axis1, rank) == _canonicalize_axis(axis2, rank):
    raise ValueError(f"axis1 and axis2 can not be same. axis1={axis1} and axis2={axis2}")
  if dtype is not None:
    dtype = dtypes.check_and_canonicalize_user_dtype(dtype, "trace")
  full_shape = np.shape(a)
  # Move the two traced axes to the end, zero everything off the (offset)
  # diagonal, then reduce over the trailing two axes.
  moved = moveaxis(a, (axis1, axis2), (-2, -1))
  diag_mask = eye(full_shape[axis1], full_shape[axis2], k=offset, dtype=bool)
  masked = where(diag_mask, moved, array_creation.zeros_like(moved))
  return reductions.sum(masked, axis=(-2, -1), dtype=dtype)
@export
def mask_indices(n: int,
                 mask_func: Callable[[ArrayLike, int], Array],
                 k: int = 0, *, size: int | None = None) -> tuple[Array, Array]:
  """Return indices of a mask of an (n, n) array.

  Args:
    n: static integer array dimension.
    mask_func: a function that takes a shape ``(n, n)`` array and
      an optional offset ``k``, and returns a shape ``(n, n)`` mask.
      Examples of functions with this signature are
      :func:`~jax.numpy.triu` and :func:`~jax.numpy.tril`.
    k: a scalar value passed to ``mask_func``.
    size: optional argument specifying the static size of the output arrays.
      This is passed to :func:`~jax.numpy.nonzero` when generating the indices
      from the mask.

  Returns:
    a tuple of indices where ``mask_func`` is nonzero.

  See also:
    - :func:`jax.numpy.triu_indices`: compute ``mask_indices`` for :func:`~jax.numpy.triu`.
    - :func:`jax.numpy.tril_indices`: compute ``mask_indices`` for :func:`~jax.numpy.tril`.

  Examples:
    >>> jnp.mask_indices(3, jnp.triu)
    (Array([0, 0, 0, 1, 1, 2], dtype=int32), Array([0, 1, 2, 1, 2, 2], dtype=int32))
    >>> jnp.mask_indices(3, jnp.tril)
    (Array([0, 1, 1, 2, 2, 2], dtype=int32), Array([0, 0, 1, 0, 1, 2], dtype=int32))
  """
  # Materialize the mask on an all-ones array and read off its nonzero indices.
  mask = mask_func(array_creation.ones((n, n)), k)
  rows, cols = nonzero(mask, size=size)
  return rows, cols
def _triu_size(n, m, k):
if k < 0:
return n * m - _triu_size(m, n, (1 - k))
elif k >= m:
return 0
else:
mk = core.min_dim(n, m - k)
return mk * (mk + 1) // 2 + mk * (m - k - mk)
@export
def triu_indices(n: DimSize, k: DimSize = 0, m: DimSize | None = None) -> tuple[Array, Array]:
  """Return the indices of upper triangle of an array of size ``(n, m)``.

  JAX implementation of :func:`numpy.triu_indices`.

  Args:
    n: int. Number of rows of the array for which the indices are returned.
    k: optional, int, default=0. Specifies the sub-diagonal on and above which
      the indices of upper triangle are returned. ``k=0`` refers to main diagonal,
      ``k<0`` refers to sub-diagonal below the main diagonal and ``k>0`` refers
      to sub-diagonal above the main diagonal.
    m: optional, int. Number of columns of the array for which the indices are
      returned. If not specified, then ``m = n``.

  Returns:
    A tuple of two arrays containing the indices of the upper triangle, one along
    each axis.

  See also:
    - :func:`jax.numpy.tril_indices`: Returns the indices of lower triangle of an
      array of size ``(n, m)``.
    - :func:`jax.numpy.triu_indices_from`: Returns the indices of upper triangle
      of a given array.
    - :func:`jax.numpy.tril_indices_from`: Returns the indices of lower triangle
      of a given array.

  Examples:
    >>> jnp.triu_indices(3)
    (Array([0, 0, 0, 1, 1, 2], dtype=int32), Array([0, 1, 2, 1, 2, 2], dtype=int32))
    >>> jnp.triu_indices(3, k=1)
    (Array([0, 0, 1], dtype=int32), Array([1, 2, 2], dtype=int32))
    >>> jnp.triu_indices(3, m=2)
    (Array([0, 0, 1], dtype=int32), Array([0, 1, 1], dtype=int32))
  """
  n = core.concrete_dim_or_error(n, "n argument of jnp.triu_indices")
  k = core.concrete_dim_or_error(k, "k argument of jnp.triu_indices")
  if m is None:
    m = n
  else:
    m = core.concrete_dim_or_error(m, "m argument of jnp.triu_indices")
  # The static `size` lets nonzero produce fixed-shape outputs.
  mask = triu(array_creation.ones((n, m)), k=k)
  rows, cols = nonzero(mask, size=_triu_size(n, m, k))
  return rows, cols
@export
def tril_indices(n: DimSize, k: DimSize = 0, m: DimSize | None = None) -> tuple[Array, Array]:
  """Return the indices of lower triangle of an array of size ``(n, m)``.

  JAX implementation of :func:`numpy.tril_indices`.

  Args:
    n: int. Number of rows of the array for which the indices are returned.
    k: optional, int, default=0. Specifies the sub-diagonal on and below which
      the indices of lower triangle are returned. ``k=0`` refers to main diagonal,
      ``k<0`` refers to sub-diagonal below the main diagonal and ``k>0`` refers
      to sub-diagonal above the main diagonal.
    m: optional, int. Number of columns of the array for which the indices are
      returned. If not specified, then ``m = n``.

  Returns:
    A tuple of two arrays containing the indices of the lower triangle, one along
    each axis.

  See also:
    - :func:`jax.numpy.triu_indices`: Returns the indices of upper triangle of an
      array of size ``(n, m)``.
    - :func:`jax.numpy.triu_indices_from`: Returns the indices of upper triangle
      of a given array.
    - :func:`jax.numpy.tril_indices_from`: Returns the indices of lower triangle
      of a given array.

  Examples:
    If only ``n`` is provided in input, the indices of lower triangle of an array
    of size ``(n, n)`` array are returned.

    >>> jnp.tril_indices(3)
    (Array([0, 1, 1, 2, 2, 2], dtype=int32), Array([0, 0, 1, 0, 1, 2], dtype=int32))

    If both ``n`` and ``m`` are provided in input, the indices of lower triangle
    of an ``(n, m)`` array are returned.

    >>> jnp.tril_indices(3, m=2)
    (Array([0, 1, 1, 2, 2], dtype=int32), Array([0, 0, 1, 0, 1], dtype=int32))

    If ``k = 1``, the indices on and below the first sub-diagonal above the main
    diagonal are returned.

    >>> jnp.tril_indices(3, k=1)
    (Array([0, 0, 1, 1, 1, 2, 2, 2], dtype=int32), Array([0, 1, 0, 1, 2, 0, 1, 2], dtype=int32))

    If ``k = -1``, the indices on and below the first sub-diagonal below the main
    diagonal are returned.

    >>> jnp.tril_indices(3, k=-1)
    (Array([1, 2, 2], dtype=int32), Array([0, 0, 1], dtype=int32))
  """
  # BUGFIX: the error messages previously said "jnp.triu_indices" (copy-paste
  # from triu_indices); they now correctly name jnp.tril_indices.
  n = core.concrete_dim_or_error(n, "n argument of jnp.tril_indices")
  k = core.concrete_dim_or_error(k, "k argument of jnp.tril_indices")
  m = n if m is None else core.concrete_dim_or_error(m, "m argument of jnp.tril_indices")
  # The lower triangle of an (n, m) array with offset k has the same number of
  # elements as the upper triangle of the transposed (m, n) array with offset
  # -k, hence _triu_size(m, n, -k) as the static output size.
  i, j = nonzero(tril(array_creation.ones((n, m)), k=k), size=_triu_size(m, n, -k))
  return i, j
@export
def triu_indices_from(arr: ArrayLike | SupportsShape, k: int = 0) -> tuple[Array, Array]:
  """Return the indices of upper triangle of a given array.

  JAX implementation of :func:`numpy.triu_indices_from`.

  Args:
    arr: input array. Must have ``arr.ndim == 2``.
    k: optional, int, default=0. Specifies the sub-diagonal on and above which
      the indices of upper triangle are returned. ``k=0`` refers to main diagonal,
      ``k<0`` refers to sub-diagonal below the main diagonal and ``k>0`` refers
      to sub-diagonal above the main diagonal.

  Returns:
    A tuple of two arrays containing the indices of the upper triangle, one along
    each axis.

  See also:
    - :func:`jax.numpy.tril_indices_from`: Returns the indices of lower triangle
      of a given array.
    - :func:`jax.numpy.triu_indices`: Returns the indices of upper triangle of an
      array of size ``(n, m)``.
    - :func:`jax.numpy.triu`: Return an upper triangle of an array.

  Examples:
    >>> arr = jnp.array([[1, 2, 3],
    ...                  [4, 5, 6],
    ...                  [7, 8, 9]])
    >>> jnp.triu_indices_from(arr)
    (Array([0, 0, 0, 1, 1, 2], dtype=int32), Array([0, 1, 2, 1, 2, 2], dtype=int32))
    >>> arr[jnp.triu_indices_from(arr)]
    Array([1, 2, 3, 5, 6, 9], dtype=int32)
  """
  # Anything exposing a .shape attribute is accepted as-is; other inputs are
  # validated and converted first.
  if not hasattr(arr, "shape"):
    arr = util.ensure_arraylike("triu_indices_from", arr)
  shape = arr.shape
  if len(shape) != 2:
    raise ValueError("Only 2-D inputs are accepted")
  n_rows, n_cols = shape
  return triu_indices(n_rows, k=k, m=n_cols)
@export
def tril_indices_from(arr: ArrayLike | SupportsShape, k: int = 0) -> tuple[Array, Array]:
  """Return the indices of lower triangle of a given array.

  JAX implementation of :func:`numpy.tril_indices_from`.

  Args:
    arr: input array. Must have ``arr.ndim == 2``.
    k: optional, int, default=0. Specifies the sub-diagonal on and below which
      the indices of lower triangle are returned. ``k=0`` refers to main diagonal,
      ``k<0`` refers to sub-diagonal below the main diagonal and ``k>0`` refers
      to sub-diagonal above the main diagonal.

  Returns:
    A tuple of two arrays containing the indices of the lower triangle, one along
    each axis.

  See also:
    - :func:`jax.numpy.triu_indices_from`: Returns the indices of upper triangle
      of a given array.
    - :func:`jax.numpy.tril_indices`: Returns the indices of lower triangle of an
      array of size ``(n, m)``.
    - :func:`jax.numpy.tril`: Returns a lower triangle of an array

  Examples:
    >>> arr = jnp.array([[1, 2, 3],
    ...                  [4, 5, 6],
    ...                  [7, 8, 9]])
    >>> jnp.tril_indices_from(arr)
    (Array([0, 1, 1, 2, 2, 2], dtype=int32), Array([0, 0, 1, 0, 1, 2], dtype=int32))
    >>> arr[jnp.tril_indices_from(arr)]
    Array([1, 4, 5, 7, 8, 9], dtype=int32)
  """
  # Anything exposing a .shape attribute is accepted as-is; other inputs are
  # validated and converted first.
  if not hasattr(arr, "shape"):
    arr = util.ensure_arraylike("tril_indices_from", arr)
  shape = arr.shape
  if len(shape) != 2:
    raise ValueError("Only 2-D inputs are accepted")
  n_rows, n_cols = shape
  return tril_indices(n_rows, k=k, m=n_cols)
@export
def fill_diagonal(a: ArrayLike, val: ArrayLike, wrap: bool = False, *,
                  inplace: bool = True) -> Array:
  """Return a copy of the array with the diagonal overwritten.

  JAX implementation of :func:`numpy.fill_diagonal`.

  The semantics of :func:`numpy.fill_diagonal` are to modify arrays in-place, which
  is not possible for JAX's immutable arrays. The JAX version returns a modified
  copy of the input, and adds the ``inplace`` parameter which must be set to
  ``False`` by the user as a reminder of this API difference.

  Args:
    a: input array. Must have ``a.ndim >= 2``. If ``a.ndim >= 3``, then all
      dimensions must be the same size.
    val: scalar or array with which to fill the diagonal. If an array, it will
      be flattened and repeated to fill the diagonal entries.
    wrap: Not implemented by JAX. Only the default value of ``False`` is supported.
    inplace: must be set to False to indicate that the input is not modified
      in-place, but rather a modified copy is returned.

  Returns:
    A copy of ``a`` with the diagonal set to ``val``.

  Examples:
    >>> x = jnp.zeros((3, 3), dtype=int)
    >>> jnp.fill_diagonal(x, jnp.array([1, 2, 3]), inplace=False)
    Array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3]], dtype=int32)

    Unlike :func:`numpy.fill_diagonal`, the input ``x`` is not modified.

    If the diagonal value has too many entries, it will be truncated

    >>> jnp.fill_diagonal(x, jnp.arange(100, 200), inplace=False)
    Array([[100,   0,   0],
           [  0, 101,   0],
           [  0,   0, 102]], dtype=int32)

    If the diagonal has too few entries, it will be repeated:

    >>> x = jnp.zeros((4, 4), dtype=int)
    >>> jnp.fill_diagonal(x, jnp.array([3, 4]), inplace=False)
    Array([[3, 0, 0, 0],
           [0, 4, 0, 0],
           [0, 0, 3, 0],
           [0, 0, 0, 4]], dtype=int32)

    For non-square arrays, the diagonal of the leading square slice is filled:

    >>> x = jnp.zeros((3, 5), dtype=int)
    >>> jnp.fill_diagonal(x, 1, inplace=False)
    Array([[1, 0, 0, 0, 0],
           [0, 1, 0, 0, 0],
           [0, 0, 1, 0, 0]], dtype=int32)

    And for square N-dimensional arrays, the N-dimensional diagonal is filled:

    >>> y = jnp.zeros((2, 2, 2))
    >>> jnp.fill_diagonal(y, 1, inplace=False)
    Array([[[1., 0.],
            [0., 0.]],
    <BLANKLINE>
           [[0., 0.],
            [0., 1.]]], dtype=float32)
  """
  if inplace:
    raise NotImplementedError("JAX arrays are immutable, must use inplace=False")
  if wrap:
    raise NotImplementedError("wrap=True is not implemented, must use wrap=False")
  a, val = util.ensure_arraylike("fill_diagonal", a, val)
  if a.ndim < 2:
    raise ValueError("array must be at least 2-d")
  if a.ndim > 2 and not all(n == a.shape[0] for n in a.shape[1:]):
    raise ValueError("All dimensions of input must be of equal length")
  # The diagonal length is bounded by the smallest dimension; for non-square
  # 2-D inputs this fills the diagonal of the leading square slice.
  n = min(a.shape)
  idx = diag_indices(n, a.ndim)
  # Scalars are broadcast directly; array values are flattened and tiled (or
  # truncated) to exactly n entries before being scattered onto the diagonal.
  return a.at[idx].set(val if val.ndim == 0 else _tile_to_size(val.ravel(), n))
@export
def diag_indices(n: int, ndim: int = 2) -> tuple[Array, ...]:
  """Return indices for accessing the main diagonal of a multidimensional array.

  JAX implementation of :func:`numpy.diag_indices`.

  Args:
    n: int. The size of each dimension of the square array.
    ndim: optional, int, default=2. The number of dimensions of the array.

  Returns:
    A tuple of arrays, each of length `n`, containing the indices to access
    the main diagonal.

  See also:
    - :func:`jax.numpy.diag_indices_from`
    - :func:`jax.numpy.diagonal`

  Examples:
    >>> jnp.diag_indices(3)
    (Array([0, 1, 2], dtype=int32), Array([0, 1, 2], dtype=int32))
    >>> jnp.diag_indices(4, ndim=3)
    (Array([0, 1, 2, 3], dtype=int32),
     Array([0, 1, 2, 3], dtype=int32),
     Array([0, 1, 2, 3], dtype=int32))
  """
  # Both arguments must be static integers: they determine output shape.
  n = core.concrete_or_error(operator.index, n, "'n' argument of jnp.diag_indices()")
  ndim = core.concrete_or_error(operator.index, ndim, "'ndim' argument of jnp.diag_indices()")
  for arg_name, arg_val in [("n", n), ("ndim", ndim)]:
    if arg_val < 0:
      raise ValueError(
          f"{arg_name} argument to diag_indices must be nonnegative, got {arg_val}")
  index_dtype = lax_utils.int_dtype_for_dim(n, signed=True)
  # We'd give the correct output values with int32, but use the default dtype to
  # match NumPy type semantics if x64 mode is enabled for now.
  if index_dtype == np.dtype(np.int32):
    index_dtype = dtypes.default_int_dtype()
  diag = lax.iota(index_dtype, n)
  # The same index array is reused along every dimension of the diagonal.
  return tuple(diag for _ in range(ndim))
@export
def diag_indices_from(arr: ArrayLike) -> tuple[Array, ...]:
  """Return indices for accessing the main diagonal of a given array.

  JAX implementation of :func:`numpy.diag_indices_from`.

  Args:
    arr: Input array. Must be at least 2-dimensional and have equal length along
      all dimensions.

  Returns:
    A tuple of arrays containing the indices to access the main diagonal of
    the input array.

  See also:
    - :func:`jax.numpy.diag_indices`
    - :func:`jax.numpy.diagonal`

  Examples:
    >>> arr = jnp.array([[1, 2, 3],
    ...                  [4, 5, 6],
    ...                  [7, 8, 9]])
    >>> jnp.diag_indices_from(arr)
    (Array([0, 1, 2], dtype=int32), Array([0, 1, 2], dtype=int32))
    >>> arr = jnp.array([[[1, 2], [3, 4]],
    ...                  [[5, 6], [7, 8]]])
    >>> jnp.diag_indices_from(arr)
    (Array([0, 1], dtype=int32),
     Array([0, 1], dtype=int32),
     Array([0, 1], dtype=int32))
  """
  arr = util.ensure_arraylike("diag_indices_from", arr)
  shape = np.shape(arr)
  # A diagonal only makes sense for a square array of rank >= 2.
  if len(shape) < 2:
    raise ValueError("input array must be at least 2-d")
  if any(dim != shape[0] for dim in shape[1:]):
    raise ValueError("All dimensions of input must be of equal length")
  return diag_indices(shape[0], ndim=len(shape))
@export
@api.jit(static_argnames=('offset', 'axis1', 'axis2'))
def diagonal(a: ArrayLike, offset: int = 0, axis1: int = 0,
             axis2: int = 1) -> Array:
  """Returns the specified diagonal of an array.

  JAX implementation of :func:`numpy.diagonal`.

  The JAX version always returns a copy of the input, although if this is used
  within a JIT compilation, the compiler may avoid the copy.

  Args:
    a: Input array. Must be at least 2-dimensional.
    offset: optional, default=0. Diagonal offset from the main diagonal.
      Must be a static integer value. Can be positive or negative.
    axis1: optional, default=0. The first axis along which to take the diagonal.
    axis2: optional, default=1. The second axis along which to take the diagonal.

  Returns:
    A 1D array for 2D input, and in general a N-1 dimensional array
    for N-dimensional input.

  See also:
    - :func:`jax.numpy.diag`
    - :func:`jax.numpy.diagflat`

  Examples:
    >>> x = jnp.array([[1, 2, 3],
    ...                [4, 5, 6],
    ...                [7, 8, 9]])
    >>> jnp.diagonal(x)
    Array([1, 5, 9], dtype=int32)
    >>> jnp.diagonal(x, offset=1)
    Array([2, 6], dtype=int32)
    >>> jnp.diagonal(x, offset=-1)
    Array([4, 8], dtype=int32)
  """
  a = util.ensure_arraylike("diagonal", a)
  if np.ndim(a) < 2:
    raise ValueError("diagonal requires an array of at least two dimensions.")
  # offset must be static: it determines the output shape.
  offset = core.concrete_or_error(operator.index, offset, "'offset' argument of jnp.diagonal()")
  def _default_diag(a):
    # General path: move the two diagonal axes last, then gather the
    # diagonal with advanced indexing along those trailing axes.
    a_shape = np.shape(a)
    a = moveaxis(a, (axis1, axis2), (-2, -1))
    # Number of diagonal elements, clipped at 0 for offsets outside the array.
    diag_size = max(
        0, min(a_shape[axis1] + min(offset, 0), a_shape[axis2] - max(offset, 0))
    )
    i = arange(diag_size)
    j = arange(abs(offset), abs(offset) + diag_size)
    # Positive offset shifts the column index; negative shifts the row index.
    return a[..., i, j] if offset >= 0 else a[..., j, i]
  # The mosaic lowering rule for diag is only defined for square arrays.
  # TODO(mvoz): Add support for offsets.
  if np.shape(a)[0] != np.shape(a)[1] or np.ndim(a) != 2 or offset != 0 or a.dtype == bool:
    return _default_diag(a)
  else:
    # Square 2D main diagonal: on mosaic, compute mask-and-reduce instead
    # of a gather (which mosaic does not lower for this case).
    a_shape_eye = eye(np.shape(a)[0], dtype=a.dtype)
    def _mosaic_diag(a):
      def _sum(x, axis):
        return lax.reduce(
            x,
            np.array(0, x.dtype),
            lax.add if x.dtype != bool else lax.bitwise_or,
            (axis,),
        )
      # Zero out off-diagonal entries via the identity mask, then reduce.
      return _sum(lax.mul(a_shape_eye, a), axis=0)
    return control_flow.platform_dependent(a, default=_default_diag, mosaic=_mosaic_diag)
@export
def diag(v: ArrayLike, k: int = 0) -> Array:
  """Returns the specified diagonal or constructs a diagonal array.

  JAX implementation of :func:`numpy.diag`.

  The JAX version always returns a copy of the input, although if this is used
  within a JIT compilation, the compiler may avoid the copy.

  Args:
    v: Input array. Can be a 1-D array to create a diagonal matrix or a
      2-D array to extract a diagonal.
    k: optional, default=0. Diagonal offset. Positive values place the diagonal
      above the main diagonal, negative values place it below the main diagonal.

  Returns:
    If `v` is a 2-D array, a 1-D array containing the diagonal elements.
    If `v` is a 1-D array, a 2-D array with the input elements placed along the
    specified diagonal.

  See also:
    - :func:`jax.numpy.diagflat`
    - :func:`jax.numpy.diagonal`

  Examples:
    Creating a diagonal matrix from a 1-D array:

    >>> jnp.diag(jnp.array([1, 2, 3]))
    Array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3]], dtype=int32)

    Specifying a diagonal offset:

    >>> jnp.diag(jnp.array([1, 2, 3]), k=1)
    Array([[0, 1, 0, 0],
           [0, 0, 2, 0],
           [0, 0, 0, 3],
           [0, 0, 0, 0]], dtype=int32)

    Extracting a diagonal from a 2-D array:

    >>> x = jnp.array([[1, 2, 3],
    ...                [4, 5, 6],
    ...                [7, 8, 9]])
    >>> jnp.diag(x)
    Array([1, 5, 9], dtype=int32)
  """
  # Validate the input, make the offset static, and defer to the jitted helper.
  arr = util.ensure_arraylike("diag", v)
  offset = operator.index(k)
  return _diag(arr, offset)
@api.jit(static_argnames=('k',))
def _diag(v: Array, k: int):
  # Dispatch on rank: a 2D input extracts a diagonal, a 1D input builds
  # a diagonal matrix; anything else is an error.
  ndim = np.ndim(v)
  if ndim == 2:
    return diagonal(v, offset=k)
  if ndim != 1:
    raise ValueError("diag input must be 1d or 2d")
  # 1D case: pad the vector so it lines up with the k-th diagonal of an
  # (n x n) matrix, then scatter it there via a boolean identity mask.
  size = np.shape(v)[0] + abs(k)
  scalar_zero = lax.full_like(v, shape=(), fill_value=0)
  padded = lax.pad(v, scalar_zero, ((max(0, k), max(0, -k), 0),))
  mask = eye(size, k=k, dtype=bool)
  return where(mask, padded, array_creation.zeros_like(padded))
@export
def diagflat(v: ArrayLike, k: int = 0) -> Array:
  """Return a 2-D array with the flattened input array laid out on the diagonal.

  JAX implementation of :func:`numpy.diagflat`.

  This differs from `np.diagflat` for some scalar values of `v`. JAX always returns
  a two-dimensional array, whereas NumPy may return a scalar depending on the type
  of `v`.

  Args:
    v: Input array. Can be N-dimensional but is flattened to 1D.
    k: optional, default=0. Diagonal offset. Positive values place the diagonal
      above the main diagonal, negative values place it below the main diagonal.

  Returns:
    A 2D array with the input elements placed along the diagonal with the
    specified offset (k). The remaining entries are filled with zeros.

  See also:
    - :func:`jax.numpy.diag`
    - :func:`jax.numpy.diagonal`

  Examples:
    >>> jnp.diagflat(jnp.array([1, 2, 3]))
    Array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3]], dtype=int32)
    >>> jnp.diagflat(jnp.array([1, 2, 3]), k=1)
    Array([[0, 1, 0, 0],
           [0, 0, 2, 0],
           [0, 0, 0, 3],
           [0, 0, 0, 0]], dtype=int32)
    >>> a = jnp.array([[1, 2],
    ...                [3, 4]])
    >>> jnp.diagflat(a)
    Array([[1, 0, 0, 0],
           [0, 2, 0, 0],
           [0, 0, 3, 0],
           [0, 0, 0, 4]], dtype=int32)
  """
  util.check_arraylike("diagflat", v)
  flat = ravel(v)
  # Output is square with side large enough to hold the offset diagonal.
  n = len(flat) + abs(k)
  out = array_creation.zeros(n * n, dtype=flat.dtype)
  idx = arange(0, n - abs(k))
  # Flattened positions of the k-th diagonal of an (n x n) matrix.
  offsets = idx + k + idx * n if k >= 0 else idx + (idx - k) * n
  return out.at[offsets].set(flat).reshape(n, n)
# TODO(jakevdp): add support for N-dimensional inputs as in NumPy v2.2
@export
def trim_zeros(filt: ArrayLike, trim: str ='fb',
               axis: int | Sequence[int] | None = None) -> Array:
  """Trim leading and/or trailing zeros of the input array.

  JAX implementation of :func:`numpy.trim_zeros`.

  Args:
    filt: N-dimensional input array.
    trim: string, optional, default = ``fb``. Specifies from which end the input
      is trimmed.

      - ``f`` - trims only the leading zeros.
      - ``b`` - trims only the trailing zeros.
      - ``fb`` - trims both leading and trailing zeros.
    axis: optional axis or axes along which to trim. If not specified, trim along
      all axes of the array.

  Returns:
    An array containing the trimmed input with same dtype as ``filt``.

  Examples:
    One-dimensional input:

    >>> x = jnp.array([0, 0, 2, 0, 1, 4, 3, 0, 0, 0])
    >>> jnp.trim_zeros(x)
    Array([2, 0, 1, 4, 3], dtype=int32)
    >>> jnp.trim_zeros(x, trim='f')
    Array([2, 0, 1, 4, 3, 0, 0, 0], dtype=int32)
    >>> jnp.trim_zeros(x, trim='b')
    Array([0, 0, 2, 0, 1, 4, 3], dtype=int32)

    Two-dimensional input:

    >>> x = jnp.zeros((4, 5)).at[1:3, 1:4].set(1)
    >>> x
    Array([[0., 0., 0., 0., 0.],
           [0., 1., 1., 1., 0.],
           [0., 1., 1., 1., 0.],
           [0., 0., 0., 0., 0.]], dtype=float32)
    >>> jnp.trim_zeros(x)
    Array([[1., 1., 1.],
           [1., 1., 1.]], dtype=float32)
    >>> jnp.trim_zeros(x, trim='f')
    Array([[1., 1., 1., 0.],
           [1., 1., 1., 0.],
           [0., 0., 0., 0.]], dtype=float32)
    >>> jnp.trim_zeros(x, axis=0)
    Array([[0., 1., 1., 1., 0.],
           [0., 1., 1., 1., 0.]], dtype=float32)
    >>> jnp.trim_zeros(x, axis=1)
    Array([[0., 0., 0.],
           [1., 1., 1.],
           [1., 1., 1.],
           [0., 0., 0.]], dtype=float32)
  """
  filt = util.ensure_arraylike("trim_zeros", filt)
  # The output shape depends on the array *values*, so the input must be
  # concrete: trim_zeros cannot be traced under jit.
  core.concrete_or_error(None, filt,
    "Error arose in the `filt` argument of trim_zeros()")
  axis_set = set(_canonicalize_axis_tuple(axis, filt.ndim))
  # Nothing to do if no axes are selected or neither end is being trimmed.
  if not axis_set or ('f' not in trim.lower() and 'b' not in trim.lower()):
    return filt
  def _get_slice(x: Array, ax: int) -> slice:
    # Compute the slice selecting the nonzero extent of `x` along axis `ax`.
    if ax not in axis_set:
      return slice(None)
    # Reduce over all other axes: mask[i] is True if any element with
    # index i along `ax` is nonzero.
    mask = x.any(axis=[i for i in range(x.ndim) if i != ax])
    if not mask.any():
      # All-zero along this axis: trim to empty.
      return slice(0, 0)
    # argmax on a boolean mask finds the first True (first nonzero position).
    start = int(mask.argmax()) if 'f' in trim.lower() else None
    stop = x.shape[ax] - int(mask[::-1].argmax()) if 'b' in trim.lower() else None
    return slice(start, stop)
  return filt[*(_get_slice(filt, ax) for ax in range(filt.ndim))]
def trim_zeros_tol(filt, tol, trim='fb'):
  """Trim leading/trailing entries of ``filt`` whose magnitude is below ``tol``.

  Like :func:`trim_zeros`, but an entry counts as "zero" when
  ``abs(entry) < tol``. ``trim`` is ``'f'`` (front), ``'b'`` (back), or
  ``'fb'`` (both). Requires a concrete (non-traced) input, since the
  output shape depends on the data.
  """
  filt = core.concrete_or_error(asarray, filt,
    "Error arose in the `filt` argument of trim_zeros_tol()")
  nz = (ufuncs.abs(filt) < tol)  # True where the entry is treated as zero.
  if reductions.all(nz):
    # Every entry is below tolerance: return an empty array of the same dtype.
    return array_creation.empty(0, _dtype(filt))
  # argmin on a boolean array finds the first False, i.e. the first entry
  # at or above tolerance from the respective end.
  start = argmin(nz) if 'f' in trim.lower() else 0
  end = argmin(nz[::-1]) if 'b' in trim.lower() else 0
  return filt[start:len(filt) - end]
@export
@api.jit(static_argnames=('axis',))
def append(
    arr: ArrayLike, values: ArrayLike, axis: int | None = None
) -> Array:
  """Return a new array with values appended to the end of the original array.

  JAX implementation of :func:`numpy.append`.

  Args:
    arr: original array.
    values: values to be appended to the array. The ``values`` must have
      the same number of dimensions as ``arr``, and all dimensions must
      match except in the specified axis.
    axis: axis along which to append values. If None (default), both ``arr``
      and ``values`` will be flattened before appending.

  Returns:
    A new array with values appended to ``arr``.

  See also:
    - :func:`jax.numpy.insert`
    - :func:`jax.numpy.delete`

  Examples:
    >>> a = jnp.array([1, 2, 3])
    >>> b = jnp.array([4, 5, 6])
    >>> jnp.append(a, b)
    Array([1, 2, 3, 4, 5, 6], dtype=int32)

    Appending along a specific axis:

    >>> a = jnp.array([[1, 2],
    ...                [3, 4]])
    >>> b = jnp.array([[5, 6]])
    >>> jnp.append(a, b, axis=0)
    Array([[1, 2],
           [3, 4],
           [5, 6]], dtype=int32)

    Appending along a trailing axis:

    >>> a = jnp.array([[1, 2, 3],
    ...                [4, 5, 6]])
    >>> b = jnp.array([[7], [8]])
    >>> jnp.append(a, b, axis=1)
    Array([[1, 2, 3, 7],
           [4, 5, 6, 8]], dtype=int32)
  """
  # With an explicit axis, this is a plain concatenation; otherwise both
  # inputs are flattened first, matching NumPy semantics.
  if axis is not None:
    return concatenate([arr, values], axis=axis)
  return concatenate([ravel(arr), ravel(values)], 0)
@export
def delete(
  arr: ArrayLike,
  obj: ArrayLike | slice,
  axis: int | None = None,
  *,
  assume_unique_indices: bool = False,
) -> Array:
  """Delete entry or entries from an array.

  JAX implementation of :func:`numpy.delete`.

  Args:
    arr: array from which entries will be deleted.
    obj: index, indices, or slice to be deleted.
    axis: axis along which entries will be deleted.
    assume_unique_indices: In case of array-like integer (not boolean) indices,
      assume the indices are unique, and perform the deletion in a way that is
      compatible with JIT and other JAX transformations.

  Returns:
    Copy of ``arr`` with specified indices deleted.

  Note:
    ``delete()`` usually requires the index specification to be static. If the
    index is an integer array that is guaranteed to contain unique entries, you
    may specify ``assume_unique_indices=True`` to perform the operation in a
    manner that does not require static indices.

  See also:
    - :func:`jax.numpy.insert`: insert entries into an array.

  Examples:
    Delete entries from a 1D array:

    >>> a = jnp.array([4, 5, 6, 7, 8, 9])
    >>> jnp.delete(a, 2)
    Array([4, 5, 7, 8, 9], dtype=int32)
    >>> jnp.delete(a, slice(1, 4))  # delete a[1:4]
    Array([4, 8, 9], dtype=int32)
    >>> jnp.delete(a, slice(None, None, 2))  # delete a[::2]
    Array([5, 7, 9], dtype=int32)

    Delete entries from a 2D array along a specified axis:

    >>> a2 = jnp.array([[4, 5, 6],
    ...                 [7, 8, 9]])
    >>> jnp.delete(a2, 1, axis=1)
    Array([[4, 6],
           [7, 9]], dtype=int32)

    Delete multiple entries via a sequence of indices:

    >>> indices = jnp.array([0, 1, 3])
    >>> jnp.delete(a, indices)
    Array([6, 8, 9], dtype=int32)

    This will fail under :func:`~jax.jit` and other transformations, because
    the output shape cannot be known with the possibility of duplicate indices:

    >>> jax.jit(jnp.delete)(a, indices)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      ...
    ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: traced array with shape int32[3].

    If you can ensure that the indices are unique, pass ``assume_unique_indices``
    to allow this to be executed under JIT:

    >>> jit_delete = jax.jit(jnp.delete, static_argnames=['assume_unique_indices'])
    >>> jit_delete(a, indices, assume_unique_indices=True)
    Array([6, 8, 9], dtype=int32)
  """
  a = util.ensure_arraylike("delete", arr)
  if axis is None:
    # No axis: operate on the flattened array, matching NumPy.
    a = a.ravel()
    axis = 0
  axis = _canonicalize_axis(axis, a.ndim)
  # Case 1: obj is a static integer.
  try:
    obj = operator.index(obj)  # type: ignore[arg-type]
    obj = _canonicalize_axis(obj, a.shape[axis])
  except TypeError:
    pass
  else:
    # Remove the single entry by concatenating the slices before and after it.
    idx = tuple(slice(None) for i in range(axis))
    return concatenate([a[idx + (slice(0, obj),)], a[idx + (slice(obj + 1, None),)]], axis=axis)
  # Case 2: obj is a static slice.
  if isinstance(obj, slice):
    # A slice always yields unique indices, so fall through to the
    # JIT-compatible unique-indices path below.
    obj = arange(a.shape[axis])[obj]
    assume_unique_indices = True
  # Case 3: obj is an array
  # NB: pass both arrays to check for appropriate error message.
  util.check_arraylike("delete", a, obj)
  # Can't use ensure_arraylike here because obj may be static.
  if hasattr(obj, "__jax_array__"):
    obj = obj.__jax_array__()
  # Case 3a: unique integer indices; delete in a JIT-compatible way
  if issubdtype(_dtype(obj), np.integer) and assume_unique_indices:
    obj = asarray(obj).ravel()
    # Normalize negative indices and clip to the valid range.
    obj = clip(where(obj < 0, obj + a.shape[axis], obj), 0, a.shape[axis])
    obj = sort(obj)
    # Convert deletion positions into cumulative offsets: after sorting,
    # subtracting the rank gives the gap each kept index must skip.
    obj -= arange(len(obj), dtype=obj.dtype)  # type: ignore
    i = arange(a.shape[axis] - obj.size, dtype=obj.dtype)
    i += (i[None, :] >= obj[:, None]).sum(0, dtype=i.dtype)
    return a[(slice(None),) * axis + (i,)]
  # Case 3b: non-unique indices: must be static.
  obj_array = core.concrete_or_error(np.asarray, obj, "'obj' array argument of jnp.delete()")
  if issubdtype(obj_array.dtype, np.integer):
    # TODO(jakevdp): in theory this could be done dynamically if obj has no duplicates,
    # but this would require the complement of lax.gather.
    mask = np.ones(a.shape[axis], dtype=bool)
    mask[obj_array] = False
  elif obj_array.dtype == bool:
    if obj_array.shape != (a.shape[axis],):
      raise ValueError("np.delete(arr, obj): for boolean indices, obj must be one-dimensional "
                       "with length matching specified axis.")
    mask = ~obj_array
  else:
    raise ValueError(f"np.delete(arr, obj): got obj.dtype={obj_array.dtype}; must be integer or bool.")
  # Keep only the unmasked entries along the target axis.
  return a[tuple(slice(None) for i in range(axis)) + (mask,)]
@export
def insert(arr: ArrayLike, obj: ArrayLike | slice, values: ArrayLike,
           axis: int | None = None) -> Array:
  """Insert entries into an array at specified indices.

  JAX implementation of :func:`numpy.insert`.

  Args:
    arr: array object into which values will be inserted.
    obj: slice or array of indices specifying insertion locations.
    values: array of values to be inserted.
    axis: specify the insertion axis in the case of multi-dimensional
      arrays. If unspecified, ``arr`` will be flattened.

  Returns:
    A copy of ``arr`` with values inserted at the specified locations.

  See also:
    - :func:`jax.numpy.delete`: delete entries from an array.

  Examples:
    Inserting a single value:

    >>> x = jnp.arange(5)
    >>> jnp.insert(x, 2, 99)
    Array([ 0,  1, 99,  2,  3,  4], dtype=int32)

    Inserting multiple identical values using a slice:

    >>> jnp.insert(x, slice(None, None, 2), -1)
    Array([-1,  0,  1, -1,  2,  3, -1,  4], dtype=int32)

    Inserting multiple values using an index:

    >>> indices = jnp.array([4, 2, 5])
    >>> values = jnp.array([10, 11, 12])
    >>> jnp.insert(x, indices, values)
    Array([ 0,  1, 11,  2,  3, 10,  4, 12], dtype=int32)

    Inserting columns into a 2D array:

    >>> x = jnp.array([[1, 2, 3],
    ...                [4, 5, 6]])
    >>> indices = jnp.array([1, 3])
    >>> values = jnp.array([[10, 11],
    ...                     [12, 13]])
    >>> jnp.insert(x, indices, values, axis=1)
    Array([[ 1, 10,  2,  3, 11],
           [ 4, 12,  5,  6, 13]], dtype=int32)
  """
  # For a slice obj, a placeholder 0 is validated in its place.
  a, _, values_arr = util.ensure_arraylike("insert", arr, 0 if isinstance(obj, slice) else obj, values)
  if axis is None:
    # No axis: operate on the flattened array, matching NumPy.
    a = ravel(a)
    axis = 0
  axis = core.concrete_or_error(None, axis, "axis argument of jnp.insert()")
  axis = _canonicalize_axis(axis, a.ndim)
  if isinstance(obj, slice):
    # Expand the slice into explicit insertion indices.
    indices = arange(*obj.indices(a.shape[axis]))
  else:
    indices = asarray(obj)
  if indices.ndim > 1:
    raise ValueError("jnp.insert(): obj must be a slice, a one-dimensional "
                     f"array, or a scalar; got {obj}")
  if not np.issubdtype(indices.dtype, np.integer):
    if indices.size == 0 and not isinstance(obj, Array):
      # An empty non-integer sequence (e.g. []) is accepted and coerced.
      indices = indices.astype(int)
    else:
      # Note: np.insert allows boolean inputs but the behavior is deprecated.
      raise ValueError("jnp.insert(): index array must be "
                       f"integer typed; got {obj}")
  values_arr = array(values_arr, ndmin=a.ndim, dtype=a.dtype, copy=False)
  if indices.size == 1:
    # A single (scalar or length-1) index broadcasts values along the axis:
    # all inserted entries go at the same logical position.
    index = ravel(indices)[0]
    if indices.ndim == 0:
      values_arr = moveaxis(values_arr, 0, axis)
    indices = array_creation.full(values_arr.shape[axis], index)
  n_input = a.shape[axis]
  n_insert = broadcast_shapes(indices.shape, (values_arr.shape[axis],))[0]
  out_shape = list(a.shape)
  out_shape[axis] += n_insert
  out = array_creation.zeros_like(a, shape=tuple(out_shape))
  # Normalize negative indices and clamp to the valid insertion range.
  indices = where(indices < 0, indices + n_input, indices)
  indices = clip(indices, 0, n_input)
  # Shift each insertion index by the number of insertions occurring before
  # it, yielding final positions of the inserted values in the output.
  values_ind = indices.at[argsort(indices)].add(arange(n_insert, dtype=indices.dtype))
  # The remaining output positions receive the original array entries.
  arr_mask = array_creation.ones(n_input + n_insert, dtype=bool).at[values_ind].set(False)
  arr_ind = where(arr_mask, size=n_input)[0]
  out = out.at[(slice(None),) * axis + (values_ind,)].set(values_arr)
  out = out.at[(slice(None),) * axis + (arr_ind,)].set(a)
  return out
@export
def apply_along_axis(
    func1d: Callable, axis: int, arr: ArrayLike, *args, **kwargs
) -> Array:
  """Apply a function to 1D array slices along an axis.

  JAX implementation of :func:`numpy.apply_along_axis`. While NumPy implements
  this iteratively, JAX implements this via :func:`jax.vmap`, and so ``func1d``
  must be compatible with ``vmap``.

  Args:
    func1d: a callable function with signature ``func1d(arr, /, *args, **kwargs)``
      where ``*args`` and ``**kwargs`` are the additional positional and keyword
      arguments passed to :func:`apply_along_axis`.
    axis: integer axis along which to apply the function.
    arr: the array over which to apply the function.
    args, kwargs: additional positional and keyword arguments are passed through
      to ``func1d``.

  Returns:
    The result of ``func1d`` applied along the specified axis.

  See also:
    - :func:`jax.vmap`: a more direct way to create a vectorized version of a function.
    - :func:`jax.numpy.apply_over_axes`: repeatedly apply a function over multiple axes.
    - :func:`jax.numpy.vectorize`: create a vectorized version of a function.

  Examples:
    A simple example in two dimensions, where the function is applied either row-wise
    or column-wise:

    >>> x = jnp.array([[1, 2, 3],
    ...                [4, 5, 6]])
    >>> def func1d(x):
    ...   return jnp.sum(x ** 2)
    >>> jnp.apply_along_axis(func1d, 0, x)
    Array([17, 29, 45], dtype=int32)
    >>> jnp.apply_along_axis(func1d, 1, x)
    Array([14, 77], dtype=int32)

    For 2D inputs, this can be equivalently expressed using :func:`jax.vmap`,
    though note that `vmap` specifies the mapped axis rather than the applied axis:

    >>> jax.vmap(func1d, in_axes=1)(x)  # same as applying along axis 0
    Array([17, 29, 45], dtype=int32)
    >>> jax.vmap(func1d, in_axes=0)(x)  # same as applying along axis 1
    Array([14, 77], dtype=int32)

    For 3D inputs, :func:`apply_along_axis` is equivalent to mapping over two
    dimensions:

    >>> x_3d = jnp.arange(24).reshape(2, 3, 4)
    >>> jnp.apply_along_axis(func1d, 2, x_3d)
    Array([[  14,  126,  366],
           [ 734, 1230, 1854]], dtype=int32)
    >>> jax.vmap(jax.vmap(func1d))(x_3d)
    Array([[  14,  126,  366],
           [ 734, 1230, 1854]], dtype=int32)

    The applied function may also take arbitrary positional or keyword arguments,
    which should be passed directly as additional arguments to :func:`apply_along_axis`:

    >>> def func1d(x, exponent):
    ...   return jnp.sum(x ** exponent)
    >>> jnp.apply_along_axis(func1d, 0, x, exponent=3)
    Array([ 65, 133, 243], dtype=int32)
  """
  util.check_arraylike("apply_along_axis", arr)
  ndim = np.ndim(arr)
  axis = _canonicalize_axis(axis, ndim)

  def mapped(slice_1d):
    # Bind the extra positional/keyword arguments to func1d.
    return func1d(slice_1d, *args, **kwargs)

  # vmap over every axis after `axis` (mapping them to trailing output axes),
  # then over every axis before it, leaving only the target axis for func1d.
  for in_ax in range(1, ndim - axis):
    mapped = api.vmap(mapped, in_axes=in_ax, out_axes=-1)
  for _ in range(axis):
    mapped = api.vmap(mapped, in_axes=0, out_axes=0)
  return mapped(arr)
@export
def apply_over_axes(func: Callable[[ArrayLike, int], Array], a: ArrayLike,
                    axes: Sequence[int]) -> Array:
  """Apply a function repeatedly over specified axes.

  JAX implementation of :func:`numpy.apply_over_axes`.

  Args:
    func: the function to apply, with signature ``func(Array, int) -> Array``, and
      where ``y = func(x, axis)`` must satisfy ``y.ndim in [x.ndim, x.ndim - 1]``.
    a: N-dimensional array over which to apply the function.
    axes: the sequence of axes over which to apply the function.

  Returns:
    An N-dimensional array containing the result of the repeated function application.

  See also:
    - :func:`jax.numpy.apply_along_axis`: apply a 1D function along a single axis.

  Examples:
    This function is designed to have similar semantics to typical associative
    :mod:`jax.numpy` reductions over one or more axes with ``keepdims=True``.
    For example:

    >>> x = jnp.array([[1, 2, 3],
    ...                [4, 5, 6]])
    >>> jnp.apply_over_axes(jnp.sum, x, [0])
    Array([[5, 7, 9]], dtype=int32)
    >>> jnp.sum(x, [0], keepdims=True)
    Array([[5, 7, 9]], dtype=int32)
    >>> jnp.apply_over_axes(jnp.min, x, [1])
    Array([[1],
           [4]], dtype=int32)
    >>> jnp.min(x, [1], keepdims=True)
    Array([[1],
           [4]], dtype=int32)
    >>> jnp.apply_over_axes(jnp.prod, x, [0, 1])
    Array([[720]], dtype=int32)
    >>> jnp.prod(x, [0, 1], keepdims=True)
    Array([[720]], dtype=int32)
  """
  result = util.ensure_arraylike("apply_over_axes", a)
  for axis in axes:
    reduced = func(result, axis)
    if reduced.ndim == result.ndim - 1:
      # The function dropped the axis; restore it to preserve rank
      # (keepdims-style semantics).
      reduced = expand_dims(reduced, axis)
    elif reduced.ndim != result.ndim:
      raise ValueError("function is not returning an array of the correct shape")
    result = reduced
  return result
@export
@api.jit(static_argnames=('axisa', 'axisb', 'axisc', 'axis'))
def cross(a, b, axisa: int = -1, axisb: int = -1, axisc: int = -1,
          axis: int | None = None):
  r"""Compute the (batched) cross product of two arrays.

  JAX implementation of :func:`numpy.cross`.

  This computes the 2-dimensional or 3-dimensional cross product,

  .. math::

     c = a \times b

  In 3 dimensions, ``c`` is a length-3 array. In 2 dimensions, ``c`` is
  a scalar.

  Args:
    a: N-dimensional array. ``a.shape[axisa]`` indicates the dimension of
      the cross product, and must be 2 or 3.
    b: N-dimensional array. Must have ``b.shape[axisb] == a.shape[axisb]``,
      and other dimensions of ``a`` and ``b`` must be broadcast compatible.
    axisa: specicy the axis of ``a`` along which to compute the cross product.
    axisb: specicy the axis of ``b`` along which to compute the cross product.
    axisc: specicy the axis of ``c`` along which the cross product result
      will be stored.
    axis: if specified, this overrides ``axisa``, ``axisb``, and ``axisc``
      with a single value.

  Returns:
    The array ``c`` containing the (batched) cross product of ``a`` and ``b``
    along the specified axes.

  See also:
    - :func:`jax.numpy.linalg.cross`: an array API compatible function for
      computing cross products over 3-vectors.

  Examples:
    A 2-dimensional cross product returns a scalar:

    >>> a = jnp.array([1, 2])
    >>> b = jnp.array([3, 4])
    >>> jnp.cross(a, b)
    Array(-2, dtype=int32)

    A 3-dimensional cross product returns a length-3 vector:

    >>> a = jnp.array([1, 2, 3])
    >>> b = jnp.array([4, 5, 6])
    >>> jnp.cross(a, b)
    Array([-3,  6, -3], dtype=int32)

    With multi-dimensional inputs, the cross-product is computed along
    the last axis by default. Here's a batched 3-dimensional cross
    product, operating on the rows of the inputs:

    >>> a = jnp.array([[1, 2, 3],
    ...                [3, 4, 3]])
    >>> b = jnp.array([[2, 3, 2],
    ...                [4, 5, 6]])
    >>> jnp.cross(a, b)
    Array([[-5,  4, -1],
           [ 9, -6, -1]], dtype=int32)

    Specifying axis=0 makes this a batched 2-dimensional cross product,
    operating on the columns of the inputs:

    >>> jnp.cross(a, b, axis=0)
    Array([-2, -2, 12], dtype=int32)

    Equivalently, we can independently specify the axis of the inputs ``a``
    and ``b`` and the output ``c``:

    >>> jnp.cross(a, b, axisa=0, axisb=0, axisc=0)
    Array([-2, -2, 12], dtype=int32)
  """
  # TODO(jakevdp): NumPy 2.0 deprecates 2D inputs. Follow suit here.
  util.check_arraylike("cross", a, b)
  if axis is not None:
    # A single `axis` overrides all three per-array axis arguments.
    axisa = axisb = axisc = axis
  a = moveaxis(a, axisa, -1)
  b = moveaxis(b, axisb, -1)
  dim_a, dim_b = a.shape[-1], b.shape[-1]
  if dim_a not in (2, 3) or dim_b not in (2, 3):
    raise ValueError("Dimension must be either 2 or 3 for cross product")
  if dim_a == 2 and dim_b == 2:
    # 2D x 2D: the result is the scalar z-component.
    return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]
  # Treat 2-vectors as 3-vectors with a zero z-component.
  a0, a1 = a[..., 0], a[..., 1]
  a2 = a[..., 2] if dim_a == 3 else array_creation.zeros_like(a0)
  b0, b1 = b[..., 0], b[..., 1]
  b2 = b[..., 2] if dim_b == 3 else array_creation.zeros_like(b0)
  result = array([a1 * b2 - a2 * b1, a2 * b0 - a0 * b2, a0 * b1 - a1 * b0])
  return moveaxis(result, 0, axisc)
@export
@api.jit
def kron(a: ArrayLike, b: ArrayLike) -> Array:
  """Compute the Kronecker product of two input arrays.

  JAX implementation of :func:`numpy.kron`.

  The Kronecker product is an operation on two matrices of arbitrary size that
  produces a block matrix. Each element of the first matrix ``a`` is multiplied by
  the entire second matrix ``b``. If ``a`` has shape (m, n) and ``b``
  has shape (p, q), the resulting matrix will have shape (m * p, n * q).

  Args:
    a: first input array with any shape.
    b: second input array with any shape.

  Returns:
    A new array representing the Kronecker product of the inputs ``a`` and ``b``.
    The shape of the output is the element-wise product of the input shapes.

  See also:
    - :func:`jax.numpy.outer`: compute the outer product of two arrays.

  Examples:
    >>> a = jnp.array([[1, 2],
    ...                [3, 4]])
    >>> b = jnp.array([[5, 6],
    ...                [7, 8]])
    >>> jnp.kron(a, b)
    Array([[ 5,  6, 10, 12],
           [ 7,  8, 14, 16],
           [15, 18, 20, 24],
           [21, 24, 28, 32]], dtype=int32)
  """
  util.check_arraylike("kron", a, b)
  a, b = util.promote_dtypes(a, b)
  # Pad the lower-rank operand with leading unit dimensions so ranks match.
  rank = max(np.ndim(a), np.ndim(b))
  if np.ndim(a) < rank:
    a = expand_dims(a, range(rank - np.ndim(a)))
  if np.ndim(b) < rank:
    b = expand_dims(b, range(rank - np.ndim(b)))
  # Interleave singleton axes so broadcasting forms the outer product
  # per dimension, then collapse each (a_dim, b_dim) pair.
  a_expanded = expand_dims(a, range(1, 2 * rank, 2))
  b_expanded = expand_dims(b, range(0, 2 * rank, 2))
  product = lax.mul(a_expanded, b_expanded)
  out_shape = tuple(da * db for da, db in zip(np.shape(a), np.shape(b)))
  return reshape(product, out_shape)
@export
@api.jit(static_argnames=('N', 'increasing'))
def vander(
    x: ArrayLike, N: int | None = None, increasing: bool = False
) -> Array:
  """Generate a Vandermonde matrix.

  JAX implementation of :func:`numpy.vander`.

  Args:
    x: input array. Must have ``x.ndim == 1``.
    N: int, optional, default=None. Specifies the number of the columns the
      output matrix. If not specified, ``N = len(x)``.
    increasing: bool, optional, default=False. Specifies the order of the powers
      of the columns. If ``True``, the powers increase from left to right,
      :math:`[x^0, x^1, ..., x^{(N-1)}]`. By default, the powers decrease from left to
      right :math:`[x^{(N-1)}, ..., x^1, x^0]`.

  Returns:
    An array of shape ``[len(x), N]`` containing the generated Vandermonde matrix.

  Examples:
    >>> x = jnp.array([1, 2, 3, 4])
    >>> jnp.vander(x)
    Array([[ 1,  1,  1,  1],
           [ 8,  4,  2,  1],
           [27,  9,  3,  1],
           [64, 16,  4,  1]], dtype=int32)

    If ``N = 2``, generates a Vandermonde matrix with ``2`` columns.

    >>> jnp.vander(x, N=2)
    Array([[1, 1],
           [2, 1],
           [3, 1],
           [4, 1]], dtype=int32)

    Generates the Vandermonde matrix in increasing order of powers, when
    ``increasing=True``.

    >>> jnp.vander(x, increasing=True)
    Array([[ 1,  1,  1,  1],
           [ 1,  2,  4,  8],
           [ 1,  3,  9, 27],
           [ 1,  4, 16, 64]], dtype=int32)
  """
  x = util.ensure_arraylike("vander", x)
  if x.ndim != 1:
    raise ValueError("x must be a one-dimensional array")
  if N is None:
    N = x.shape[0]
  else:
    # N determines the output shape, so it must be a static integer.
    N = core.concrete_or_error(operator.index, N, "'N' argument of jnp.vander()")
  if N < 0:
    raise ValueError("N must be nonnegative")
  powers = lax.iota(x.dtype, N)
  if not increasing:
    # Reverse the exponent order: [N-1, ..., 1, 0].
    powers = lax.sub(lax._const(powers, N - 1), powers)
  # Broadcast x (as a column) against the exponent row.
  return ufuncs.power(x[..., None], expand_dims(powers, tuple(range(x.ndim))))
### Misc
@export
def argwhere(
  a: ArrayLike,
  *,
  size: int | None = None,
  fill_value: ArrayLike | None = None,
) -> Array:
  """Find the indices of nonzero array elements

  JAX implementation of :func:`numpy.argwhere`.

  ``jnp.argwhere(x)`` is essentially equivalent to ``jnp.column_stack(jnp.nonzero(x))``
  with special handling for zero-dimensional (i.e. scalar) inputs.

  Because the size of the output of ``argwhere`` is data-dependent, the function is not
  typically compatible with JIT. The JAX version adds the optional ``size`` argument, which
  specifies the size of the leading dimension of the output - it must be specified statically
  for ``jnp.argwhere`` to be compiled with non-static operands. See :func:`jax.numpy.nonzero`
  for a full discussion of ``size`` and its semantics.

  Args:
    a: array for which to find nonzero elements
    size: optional integer specifying statically the number of expected nonzero elements.
      This must be specified in order to use ``argwhere`` within JAX transformations like
      :func:`jax.jit`. See :func:`jax.numpy.nonzero` for more information.
    fill_value: optional array specifying the fill value when ``size`` is specified.
      See :func:`jax.numpy.nonzero` for more information.

  Returns:
    a two-dimensional array of shape ``[size, x.ndim]``. If ``size`` is not specified as
    an argument, it is equal to the number of nonzero elements in ``x``.

  See Also:
    - :func:`jax.numpy.where`
    - :func:`jax.numpy.nonzero`

  Examples:
    Two-dimensional array:

    >>> x = jnp.array([[1, 0, 2],
    ...                [0, 3, 0]])
    >>> jnp.argwhere(x)
    Array([[0, 0],
           [0, 2],
           [1, 1]], dtype=int32)

    Equivalent computation using :func:`jax.numpy.column_stack` and :func:`jax.numpy.nonzero`:

    >>> jnp.column_stack(jnp.nonzero(x))
    Array([[0, 0],
           [0, 2],
           [1, 1]], dtype=int32)

    Special case for zero-dimensional (i.e. scalar) inputs:

    >>> jnp.argwhere(1)
    Array([], shape=(1, 0), dtype=int32)
    >>> jnp.argwhere(0)
    Array([], shape=(0, 0), dtype=int32)
  """
  a = util.ensure_arraylike("argwhere", a)
  # Stack the per-dimension index arrays into rows of (index-tuple) form.
  indices = nonzero(atleast_1d(a), size=size, fill_value=fill_value)
  stacked = transpose(vstack(indices))
  ndim = np.ndim(a)
  if ndim == 0:
    # Scalar input: the result has zero columns; the row count encodes
    # whether the scalar was nonzero.
    return stacked[:0].reshape(stacked.shape[0], 0)
  return stacked.reshape(stacked.shape[0], ndim)
@export
def argmax(a: ArrayLike, axis: int | None = None, out: None = None,
           keepdims: bool | None = None) -> Array:
  """Return the index of the maximum value of an array.

  JAX implementation of :func:`numpy.argmax`.

  Args:
    a: input array
    axis: optional integer specifying the axis along which to find the maximum
      value. If ``axis`` is not specified, ``a`` will be flattened.
    out: unused by JAX
    keepdims: if True, then return an array with the same number of dimensions
      as ``a``.

  Returns:
    an array containing the index of the maximum value along the specified axis.

  See also:
    - :func:`jax.numpy.argmin`: return the index of the minimum value.
    - :func:`jax.numpy.nanargmax`: compute ``argmax`` while ignoring NaN values.

  Note:
    When the maximum value occurs more than once along a particular axis, the
    smallest index is returned.

  Examples:
    >>> x = jnp.array([1, 3, 5, 4, 2])
    >>> jnp.argmax(x)
    Array(2, dtype=int32)
    >>> x = jnp.array([[1, 3, 2],
    ...                [5, 4, 1]])
    >>> jnp.argmax(x, axis=1)
    Array([1, 0], dtype=int32)
    >>> jnp.argmax(x, axis=1, keepdims=True)
    Array([[1],
           [0]], dtype=int32)
  """
  arr = util.ensure_arraylike("argmax", a)
  if out is not None:
    raise NotImplementedError("The 'out' argument to jnp.argmax is not supported.")
  # Normalize axis to a static int (or None for a flattened argmax).
  axis_index = None if axis is None else operator.index(axis)
  return _argmax(arr, axis_index, keepdims=bool(keepdims))
@api.jit(static_argnames=('axis', 'keepdims'), inline=True)
def _argmax(a: Array, axis: int | None = None, keepdims: bool = False) -> Array:
if axis is None:
dims = list(range(np.ndim(a)))
a = ravel(a)
axis = 0
else:
dims = [axis]
if a.shape[axis] == 0:
raise ValueError("attempt to get argmax of an empty sequence")
# TODO(phawkins): use an int64 index if the dimension is large enough.
result = lax.argmax(a, _canonicalize_axis(axis, a.ndim), int)
return expand_dims(result, dims) if keepdims else result
@export
def argmin(a: ArrayLike, axis: int | None = None, out: None = None,
keepdims: bool | None = None) -> Array:
"""Return the index of the minimum value of an array.
JAX implementation of :func:`numpy.argmin`.
Args:
a: input array
axis: optional integer specifying the axis along which to find the minimum
value. If ``axis`` is not specified, ``a`` will be flattened.
out: unused by JAX
keepdims: if True, then return an array with the same number of dimensions
as ``a``.
Returns:
an array containing the index of the minimum value along the specified axis.
Note:
When the minimum value occurs more than once along a particular axis, the
smallest index is returned.
See also:
- :func:`jax.numpy.argmax`: return the index of the maximum value.
- :func:`jax.numpy.nanargmin`: compute ``argmin`` while ignoring NaN values.
Examples:
>>> x = jnp.array([1, 3, 5, 4, 2])
>>> jnp.argmin(x)
Array(0, dtype=int32)
>>> x = jnp.array([[1, 3, 2],
... [5, 4, 1]])
>>> jnp.argmin(x, axis=1)
Array([0, 2], dtype=int32)
>>> jnp.argmin(x, axis=1, keepdims=True)
Array([[0],
[2]], dtype=int32)
"""
arr = util.ensure_arraylike("argmin", a)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.argmin is not supported.")
return _argmin(arr, None if axis is None else operator.index(axis),
keepdims=bool(keepdims))
@api.jit(static_argnames=('axis', 'keepdims'), inline=True)
def _argmin(a: Array, axis: int | None = None, keepdims: bool = False) -> Array:
if axis is None:
dims = list(range(np.ndim(a)))
a = ravel(a)
axis = 0
else:
dims = [axis]
if a.shape[axis] == 0:
raise ValueError("attempt to get argmin of an empty sequence")
# TODO(phawkins): use an int64 index if the dimension is large enough.
result = lax.argmin(a, _canonicalize_axis(axis, a.ndim), int)
return expand_dims(result, dims) if keepdims else result
@export
def nanargmax(
a: ArrayLike,
axis: int | None = None,
out: None = None,
keepdims: bool | None = None,
) -> Array:
"""Return the index of the maximum value of an array, ignoring NaNs.
JAX implementation of :func:`numpy.nanargmax`.
Args:
a: input array
axis: optional integer specifying the axis along which to find the maximum
value. If ``axis`` is not specified, ``a`` will be flattened.
out: unused by JAX
keepdims: if True, then return an array with the same number of dimensions
as ``a``.
Returns:
an array containing the index of the maximum value along the specified axis.
Note:
In the case of an axis with all-NaN values, the returned index will be -1.
This differs from the behavior of :func:`numpy.nanargmax`, which raises an error.
See also:
- :func:`jax.numpy.argmax`: return the index of the maximum value.
- :func:`jax.numpy.nanargmin`: compute ``argmin`` while ignoring NaN values.
Examples:
>>> x = jnp.array([1, 3, 5, 4, jnp.nan])
Using a standard :func:`~jax.numpy.argmax` leads to potentially unexpected results:
>>> jnp.argmax(x)
Array(4, dtype=int32)
Using ``nanargmax`` returns the index of the maximum non-NaN value.
>>> jnp.nanargmax(x)
Array(2, dtype=int32)
>>> x = jnp.array([[1, 3, jnp.nan],
... [5, 4, jnp.nan]])
>>> jnp.nanargmax(x, axis=1)
Array([1, 0], dtype=int32)
>>> jnp.nanargmax(x, axis=1, keepdims=True)
Array([[1],
[0]], dtype=int32)
"""
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.nanargmax is not supported.")
a = util.ensure_arraylike("nanargmax", a)
return _nanargmax(a, None if axis is None else operator.index(axis), keepdims=bool(keepdims))
@api.jit(static_argnames=('axis', 'keepdims'))
def _nanargmax(a: Array, axis: int | None = None, keepdims: bool = False):
if not issubdtype(a.dtype, np.inexact):
return argmax(a, axis=axis, keepdims=keepdims)
nan_mask = ufuncs.isnan(a)
a = where(nan_mask, -np.inf, a)
res = argmax(a, axis=axis, keepdims=keepdims)
return where(reductions.all(nan_mask, axis=axis, keepdims=keepdims), -1, res)
@export
def nanargmin(
a: ArrayLike,
axis: int | None = None,
out: None = None,
keepdims: bool | None = None,
) -> Array:
"""Return the index of the minimum value of an array, ignoring NaNs.
JAX implementation of :func:`numpy.nanargmin`.
Args:
a: input array
axis: optional integer specifying the axis along which to find the maximum
value. If ``axis`` is not specified, ``a`` will be flattened.
out: unused by JAX
keepdims: if True, then return an array with the same number of dimensions
as ``a``.
Returns:
an array containing the index of the minimum value along the specified axis.
Note:
In the case of an axis with all-NaN values, the returned index will be -1.
This differs from the behavior of :func:`numpy.nanargmin`, which raises an error.
See also:
- :func:`jax.numpy.argmin`: return the index of the minimum value.
- :func:`jax.numpy.nanargmax`: compute ``argmax`` while ignoring NaN values.
Examples:
>>> x = jnp.array([jnp.nan, 3, 5, 4, 2])
>>> jnp.nanargmin(x)
Array(4, dtype=int32)
>>> x = jnp.array([[1, 3, jnp.nan],
... [5, 4, jnp.nan]])
>>> jnp.nanargmin(x, axis=1)
Array([0, 1], dtype=int32)
>>> jnp.nanargmin(x, axis=1, keepdims=True)
Array([[0],
[1]], dtype=int32)
"""
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.nanargmin is not supported.")
a = util.ensure_arraylike("nanargmin", a)
return _nanargmin(a, None if axis is None else operator.index(axis), keepdims=bool(keepdims))
@api.jit(static_argnames=('axis', 'keepdims'))
def _nanargmin(a: Array, axis: int | None = None, keepdims : bool = False):
if not issubdtype(a.dtype, np.inexact):
return argmin(a, axis=axis, keepdims=keepdims)
nan_mask = ufuncs.isnan(a)
a = where(nan_mask, np.inf, a)
res = argmin(a, axis=axis, keepdims=keepdims)
return where(reductions.all(nan_mask, axis=axis, keepdims=keepdims), -1, res)
@api.jit(static_argnums=(2,))
def _roll_dynamic(a: Array, shift: Array, axis: Sequence[int]) -> Array:
b_shape = lax.broadcast_shapes(shift.shape, np.shape(axis))
if len(b_shape) != 1:
msg = "'shift' and 'axis' arguments to roll must be scalars or 1D arrays"
raise ValueError(msg)
for x, i in zip(broadcast_to(shift, b_shape),
np.broadcast_to(axis, b_shape)):
a_shape_i = array(a.shape[i], dtype=np.int32)
x = ufuncs.remainder(lax.convert_element_type(x, np.int32),
lax.max(a_shape_i, np.int32(1)))
a_concat = lax.concatenate((a, a), i)
a = lax_slicing.dynamic_slice_in_dim(a_concat, a_shape_i - x, a.shape[i], axis=i)
return a
@api.jit(static_argnums=(1, 2))
def _roll_static(a: Array, shift: Sequence[int], axis: Sequence[int]) -> Array:
for ax, s in zip(*np.broadcast_arrays(axis, shift)):
if a.shape[ax] == 0:
continue
i = (-s) % a.shape[ax]
a = lax.concatenate([lax_slicing.slice_in_dim(a, i, a.shape[ax], axis=ax),
lax_slicing.slice_in_dim(a, 0, i, axis=ax)],
dimension=ax)
return a
@export
def roll(a: ArrayLike, shift: ArrayLike | Sequence[int],
axis: int | Sequence[int] | None = None) -> Array:
"""Roll the elements of an array along a specified axis.
JAX implementation of :func:`numpy.roll`.
Args:
a: input array.
shift: the number of positions to shift the specified axis. If an integer,
all axes are shifted by the same amount. If a tuple, the shift for each
axis is specified individually.
axis: the axis or axes to roll. If ``None``, the array is flattened, shifted,
and then reshaped to its original shape.
Returns:
A copy of ``a`` with elements rolled along the specified axis or axes.
See also:
- :func:`jax.numpy.rollaxis`: roll the specified axis to a given position.
Examples:
>>> a = jnp.array([0, 1, 2, 3, 4, 5])
>>> jnp.roll(a, 2)
Array([4, 5, 0, 1, 2, 3], dtype=int32)
Roll elements along a specific axis:
>>> a = jnp.array([[ 0, 1, 2, 3],
... [ 4, 5, 6, 7],
... [ 8, 9, 10, 11]])
>>> jnp.roll(a, 1, axis=0)
Array([[ 8, 9, 10, 11],
[ 0, 1, 2, 3],
[ 4, 5, 6, 7]], dtype=int32)
>>> jnp.roll(a, [2, 3], axis=[0, 1])
Array([[ 5, 6, 7, 4],
[ 9, 10, 11, 8],
[ 1, 2, 3, 0]], dtype=int32)
"""
arr = util.ensure_arraylike("roll", a)
if axis is None:
return roll(arr.ravel(), shift, 0).reshape(arr.shape)
axis = _ensure_index_tuple(axis)
axis = tuple(_canonicalize_axis(ax, arr.ndim) for ax in axis)
try:
shift = _ensure_index_tuple(shift)
except TypeError:
return _roll_dynamic(arr, asarray(shift), axis)
else:
return _roll_static(arr, shift, axis)
@export
@api.jit(static_argnames=('axis', 'start'))
def rollaxis(a: ArrayLike, axis: int, start: int = 0) -> Array:
"""Roll the specified axis to a given position.
JAX implementation of :func:`numpy.rollaxis`.
This function exists for compatibility with NumPy, but in most cases the newer
:func:`jax.numpy.moveaxis` instead, because the meaning of its arguments is
more intuitive.
Args:
a: input array.
axis: index of the axis to roll forward.
start: index toward which the axis will be rolled (default = 0). After
normalizing negative axes, if ``start <= axis``, the axis is rolled to
the ``start`` index; if ``start > axis``, the axis is rolled until the
position before ``start``.
Returns:
Copy of ``a`` with rolled axis.
Notes:
Unlike :func:`numpy.rollaxis`, :func:`jax.numpy.rollaxis` will return a copy rather
than a view of the input array. However, under JIT, the compiler will optimize away
such copies when possible, so this doesn't have performance impacts in practice.
See also:
- :func:`jax.numpy.moveaxis`: newer API with clearer semantics than ``rollaxis``;
this should be preferred to ``rollaxis`` in most cases.
- :func:`jax.numpy.swapaxes`: swap two axes.
- :func:`jax.numpy.transpose`: general permutation of axes.
Examples:
>>> a = jnp.ones((2, 3, 4, 5))
Roll axis 2 to the start of the array:
>>> jnp.rollaxis(a, 2).shape
(4, 2, 3, 5)
Roll axis 1 to the end of the array:
>>> jnp.rollaxis(a, 1, a.ndim).shape
(2, 4, 5, 3)
Equivalent of these two with :func:`~jax.numpy.moveaxis`
>>> jnp.moveaxis(a, 2, 0).shape
(4, 2, 3, 5)
>>> jnp.moveaxis(a, 1, -1).shape
(2, 4, 5, 3)
"""
a = util.ensure_arraylike("rollaxis", a)
start = core.concrete_or_error(operator.index, start, "'start' argument of jnp.rollaxis()")
a_ndim = np.ndim(a)
axis = _canonicalize_axis(axis, a_ndim)
if not (-a_ndim <= start <= a_ndim):
raise ValueError(f"{start=} must satisfy {-a_ndim}<=start<={a_ndim}")
if start < 0:
start += a_ndim
if start > axis:
start -= 1
return moveaxis(a, axis, start)
@export
@api.jit(static_argnames=('axis', 'bitorder'))
def packbits(a: ArrayLike, axis: int | None = None, bitorder: str = "big") -> Array:
"""Pack array of bits into a uint8 array.
JAX implementation of :func:`numpy.packbits`
Args:
a: N-dimensional array of bits to pack.
axis: optional axis along which to pack bits. If not specified, ``a`` will
be flattened.
bitorder: ``"big"`` (default) or ``"little"``: specify whether the bit order
is big-endian or little-endian.
Returns:
A uint8 array of packed values.
See also:
- :func:`jax.numpy.unpackbits`: inverse of ``packbits``.
Examples:
Packing bits in one dimension:
>>> bits = jnp.array([0, 0, 0, 0, 0, 1, 1, 1])
>>> jnp.packbits(bits)
Array([7], dtype=uint8)
>>> 0b00000111 # equivalent bit-wise representation:
7
Optionally specifying little-endian convention:
>>> jnp.packbits(bits, bitorder="little")
Array([224], dtype=uint8)
>>> 0b11100000 # equivalent bit-wise representation
224
If the number of bits is not a multiple of 8, it will be right-padded
with zeros:
>>> jnp.packbits(jnp.array([1, 0, 1]))
Array([160], dtype=uint8)
>>> jnp.packbits(jnp.array([1, 0, 1, 0, 0, 0, 0, 0]))
Array([160], dtype=uint8)
For a multi-dimensional input, bits may be packed along a specified axis:
>>> a = jnp.array([[1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
... [0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1]])
>>> vals = jnp.packbits(a, axis=1)
>>> vals
Array([[212, 150],
[ 69, 207]], dtype=uint8)
The inverse of ``packbits`` is provided by :func:`~jax.numpy.unpackbits`:
>>> jnp.unpackbits(vals, axis=1)
Array([[1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1]], dtype=uint8)
"""
arr = util.ensure_arraylike("packbits", a)
if not (issubdtype(arr.dtype, np.integer) or issubdtype(arr.dtype, np.bool_)):
raise TypeError('Expected an input array of integer or boolean data type')
if bitorder not in ['little', 'big']:
raise ValueError("'order' must be either 'little' or 'big'")
arr = lax.ne(arr, lax._const(arr, 0)).astype('uint8')
bits = arange(8, dtype='uint8')
if bitorder == 'big':
bits = bits[::-1]
if axis is None:
arr = ravel(arr)
axis = 0
arr = swapaxes(arr, axis, -1)
remainder = arr.shape[-1] % 8
if remainder:
arr = lax.pad(arr, np.uint8(0),
(arr.ndim - 1) * [(0, 0, 0)] + [(0, 8 - remainder, 0)])
arr = arr.reshape(arr.shape[:-1] + (arr.shape[-1] // 8, 8))
bits = expand_dims(bits, tuple(range(arr.ndim - 1)))
packed = (arr << bits).sum(-1).astype('uint8')
return swapaxes(packed, axis, -1)
@export
@api.jit(static_argnames=('axis', 'count', 'bitorder'))
def unpackbits(
a: ArrayLike,
axis: int | None = None,
count: int | None = None,
bitorder: str = "big",
) -> Array:
"""Unpack the bits in a uint8 array.
JAX implementation of :func:`numpy.unpackbits`.
Args:
a: N-dimensional array of type ``uint8``.
axis: optional axis along which to unpack. If not specified, ``a`` will
be flattened
count: specify the number of bits to unpack (if positive) or the number
of bits to trim from the end (if negative).
bitorder: ``"big"`` (default) or ``"little"``: specify whether the bit order
is big-endian or little-endian.
Returns:
a uint8 array of unpacked bits.
See also:
- :func:`jax.numpy.packbits`: this inverse of ``unpackbits``.
Examples:
Unpacking bits from a scalar:
>>> jnp.unpackbits(jnp.uint8(27)) # big-endian by default
Array([0, 0, 0, 1, 1, 0, 1, 1], dtype=uint8)
>>> jnp.unpackbits(jnp.uint8(27), bitorder="little")
Array([1, 1, 0, 1, 1, 0, 0, 0], dtype=uint8)
Compare this to the Python binary representation:
>>> 0b00011011
27
Unpacking bits along an axis:
>>> vals = jnp.array([[154],
... [ 49]], dtype='uint8')
>>> bits = jnp.unpackbits(vals, axis=1)
>>> bits
Array([[1, 0, 0, 1, 1, 0, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1]], dtype=uint8)
Using :func:`~jax.numpy.packbits` to invert this:
>>> jnp.packbits(bits, axis=1)
Array([[154],
[ 49]], dtype=uint8)
The ``count`` keyword lets ``unpackbits`` serve as an inverse of ``packbits``
in cases where not all bits are present:
>>> bits = jnp.array([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1]) # 11 bits
>>> vals = jnp.packbits(bits)
>>> vals
Array([219, 96], dtype=uint8)
>>> jnp.unpackbits(vals) # 16 zero-padded bits
Array([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0], dtype=uint8)
>>> jnp.unpackbits(vals, count=11) # specify 11 output bits
Array([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1], dtype=uint8)
>>> jnp.unpackbits(vals, count=-5) # specify 5 bits to be trimmed
Array([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1], dtype=uint8)
"""
arr = util.ensure_arraylike("unpackbits", a)
if arr.dtype != np.uint8:
raise TypeError("Expected an input array of unsigned byte data type")
if bitorder not in ['little', 'big']:
raise ValueError("'order' must be either 'little' or 'big'")
bits = asarray(1) << arange(8, dtype='uint8')
if bitorder == 'big':
bits = bits[::-1]
if axis is None:
arr = ravel(arr)
axis = 0
arr = swapaxes(arr, axis, -1)
unpacked = ((arr[..., None] & expand_dims(bits, tuple(range(arr.ndim)))) > 0).astype('uint8')
unpacked = unpacked.reshape(unpacked.shape[:-2] + (-1,))
if count is not None:
if count > unpacked.shape[-1]:
unpacked = pad(unpacked, [(0, 0)] * (unpacked.ndim - 1) + [(0, count - unpacked.shape[-1])])
else:
unpacked = unpacked[..., :count]
return swapaxes(unpacked, axis, -1)
def _gcd_cond_fn(xs: tuple[Array, Array]) -> Array:
x1, x2 = xs
return reductions.any(x2 != 0)
def _gcd_body_fn(xs: tuple[Array, Array]) -> tuple[Array, Array]:
x1, x2 = xs
x1, x2 = (where(x2 != 0, x2, x1),
where(x2 != 0, lax.rem(x1, x2), lax._const(x2, 0)))
return (where(x1 < x2, x2, x1), where(x1 < x2, x1, x2))
@export
@api.jit
def gcd(x1: ArrayLike, x2: ArrayLike) -> Array:
"""Compute the greatest common divisor of two arrays.
JAX implementation of :func:`numpy.gcd`.
Args:
x1: First input array. The elements must have integer dtype.
x2: Second input array. The elements must have integer dtype.
Returns:
An array containing the greatest common divisors of the corresponding
elements from the absolute values of `x1` and `x2`.
See also:
- :func:`jax.numpy.lcm`: compute the least common multiple of two arrays.
Examples:
Scalar inputs:
>>> jnp.gcd(12, 18)
Array(6, dtype=int32, weak_type=True)
Array inputs:
>>> x1 = jnp.array([12, 18, 24])
>>> x2 = jnp.array([5, 10, 15])
>>> jnp.gcd(x1, x2)
Array([1, 2, 3], dtype=int32)
Broadcasting:
>>> x1 = jnp.array([12])
>>> x2 = jnp.array([6, 9, 12])
>>> jnp.gcd(x1, x2)
Array([ 6, 3, 12], dtype=int32)
"""
x1, x2 = util.ensure_arraylike("gcd", x1, x2)
x1, x2 = util.promote_dtypes(x1, x2)
if not issubdtype(x1.dtype, np.integer):
raise ValueError("Arguments to jax.numpy.gcd must be integers.")
x1, x2 = broadcast_arrays(x1, x2)
gcd, _ = control_flow.while_loop(_gcd_cond_fn, _gcd_body_fn, (ufuncs.abs(x1), ufuncs.abs(x2)))
return gcd
@export
@api.jit
def lcm(x1: ArrayLike, x2: ArrayLike) -> Array:
"""Compute the least common multiple of two arrays.
JAX implementation of :func:`numpy.lcm`.
Args:
x1: First input array. The elements must have integer dtype.
x2: Second input array. The elements must have integer dtype.
Returns:
An array containing the least common multiple of the corresponding
elements from the absolute values of `x1` and `x2`.
See also:
- :func:`jax.numpy.gcd`: compute the greatest common divisor of two arrays.
Examples:
Scalar inputs:
>>> jnp.lcm(12, 18)
Array(36, dtype=int32, weak_type=True)
Array inputs:
>>> x1 = jnp.array([12, 18, 24])
>>> x2 = jnp.array([5, 10, 15])
>>> jnp.lcm(x1, x2)
Array([ 60, 90, 120], dtype=int32)
Broadcasting:
>>> x1 = jnp.array([12])
>>> x2 = jnp.array([6, 9, 12])
>>> jnp.lcm(x1, x2)
Array([12, 36, 12], dtype=int32)
"""
x1, x2 = util.ensure_arraylike("lcm", x1, x2)
x1, x2 = util.promote_dtypes(x1, x2)
x1, x2 = ufuncs.abs(x1), ufuncs.abs(x2)
if not issubdtype(x1.dtype, np.integer):
raise ValueError("Arguments to jax.numpy.lcm must be integers.")
d = gcd(x1, x2)
return where(d == 0, lax._const(d, 0),
ufuncs.multiply(x1, ufuncs.floor_divide(x2, d)))
@export
def extract(condition: ArrayLike, arr: ArrayLike,
*, size: int | None = None, fill_value: ArrayLike = 0) -> Array:
"""Return the elements of an array that satisfy a condition.
JAX implementation of :func:`numpy.extract`.
Args:
condition: array of conditions. Will be converted to boolean and flattened to 1D.
arr: array of values to extract. Will be flattened to 1D.
size: optional static size for output. Must be specified in order for ``extract``
to be compatible with JAX transformations like :func:`~jax.jit` or :func:`~jax.vmap`.
fill_value: if ``size`` is specified, fill padded entries with this value (default: 0).
Returns:
1D array of extracted entries . If ``size`` is specified, the result will have shape
``(size,)`` and be right-padded with ``fill_value``. If ``size`` is not specified,
the output shape will depend on the number of True entries in ``condition``.
Notes:
This function does not require strict shape agreement between ``condition`` and ``arr``.
If ``condition.size > arr.size``, then ``condition`` will be truncated, and if
``arr.size > condition.size``, then ``arr`` will be truncated.
See also:
:func:`jax.numpy.compress`: multi-dimensional version of ``extract``.
Examples:
Extract values from a 1D array:
>>> x = jnp.array([1, 2, 3, 4, 5, 6])
>>> mask = (x % 2 == 0)
>>> jnp.extract(mask, x)
Array([2, 4, 6], dtype=int32)
In the simplest case, this is equivalent to boolean indexing:
>>> x[mask]
Array([2, 4, 6], dtype=int32)
For use with JAX transformations, you can pass the ``size`` argument to
specify a static shape for the output, along with an optional ``fill_value``
that defaults to zero:
>>> jnp.extract(mask, x, size=len(x), fill_value=0)
Array([2, 4, 6, 0, 0, 0], dtype=int32)
Notice that unlike with boolean indexing, ``extract`` does not require strict
agreement between the sizes of the array and condition, and will effectively
truncate both to the minimum size:
>>> short_mask = jnp.array([False, True])
>>> jnp.extract(short_mask, x)
Array([2], dtype=int32)
>>> long_mask = jnp.array([True, False, True, False, False, False, False, False])
>>> jnp.extract(long_mask, x)
Array([1, 3], dtype=int32)
"""
util.check_arraylike("extreact", condition, arr, fill_value)
return compress(ravel(condition), ravel(arr), size=size, fill_value=fill_value)
@export
def compress(condition: ArrayLike, a: ArrayLike, axis: int | None = None,
*, size: int | None = None, fill_value: ArrayLike = 0, out: None = None) -> Array:
"""Compress an array along a given axis using a boolean condition.
JAX implementation of :func:`numpy.compress`.
Args:
condition: 1-dimensional array of conditions. Will be converted to boolean.
a: N-dimensional array of values.
axis: axis along which to compress. If None (default) then ``a`` will be
flattened, and axis will be set to 0.
size: optional static size for output. Must be specified in order for ``compress``
to be compatible with JAX transformations like :func:`~jax.jit` or :func:`~jax.vmap`.
fill_value: if ``size`` is specified, fill padded entries with this value (default: 0).
out: not implemented by JAX.
Returns:
An array of dimension ``a.ndim``, compressed along the specified axis.
See also:
- :func:`jax.numpy.extract`: 1D version of ``compress``.
- :meth:`jax.Array.compress`: equivalent functionality as an array method.
Notes:
This function does not require strict shape agreement between ``condition`` and ``a``.
If ``condition.size > a.shape[axis]``, then ``condition`` will be truncated, and if
``a.shape[axis] > condition.size``, then ``a`` will be truncated.
Examples:
Compressing along the rows of a 2D array:
>>> a = jnp.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]])
>>> condition = jnp.array([True, False, True])
>>> jnp.compress(condition, a, axis=0)
Array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]], dtype=int32)
For convenience, you can equivalently use the :meth:`~jax.Array.compress`
method of JAX arrays:
>>> a.compress(condition, axis=0)
Array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]], dtype=int32)
Note that the condition need not match the shape of the specified axis;
here we compress the columns with the length-3 condition. Values beyond
the size of the condition are ignored:
>>> jnp.compress(condition, a, axis=1)
Array([[ 1, 3],
[ 5, 7],
[ 9, 11]], dtype=int32)
The optional ``size`` argument lets you specify a static output size so
that the output is statically-shaped, and so this function can be used
with transformations like :func:`~jax.jit` and :func:`~jax.vmap`:
>>> f = lambda c, a: jnp.extract(c, a, size=len(a), fill_value=0)
>>> mask = (a % 3 == 0)
>>> jax.vmap(f)(mask, a)
Array([[ 3, 0, 0, 0],
[ 6, 0, 0, 0],
[ 9, 12, 0, 0]], dtype=int32)
"""
condition_arr, arr, fill_value = util.ensure_arraylike("compress", condition, a, fill_value)
condition_arr = condition_arr.astype(bool)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.compress is not supported.")
if condition_arr.ndim != 1:
raise ValueError("condition must be a 1D array")
if axis is None:
axis = 0
arr = ravel(arr)
else:
arr = moveaxis(arr, axis, 0)
condition_arr, extra = condition_arr[:arr.shape[0]], condition_arr[arr.shape[0]:]
arr = arr[:condition_arr.shape[0]]
if size is None:
if reductions.any(extra):
raise ValueError("condition contains entries that are out of bounds")
result = arr[condition_arr]
elif not 0 <= size <= arr.shape[0]:
raise ValueError("size must be positive and not greater than the size of the array axis;"
f" got {size=} for a.shape[axis]={arr.shape[0]}")
else:
mask = expand_dims(condition_arr, range(1, arr.ndim))
arr = where(mask, arr, array(fill_value, dtype=arr.dtype))
result = arr[argsort(condition_arr, stable=True, descending=True)][:size]
return moveaxis(result, 0, axis)
@export
@api.jit(static_argnames=('rowvar', 'bias', 'ddof', 'dtype'))
def cov(m: ArrayLike, y: ArrayLike | None = None, rowvar: bool = True,
bias: bool = False, ddof: int | None = None,
fweights: ArrayLike | None = None,
aweights: ArrayLike | None = None,
dtype: DTypeLike | None = None) -> Array:
r"""Estimate the weighted sample covariance.
JAX implementation of :func:`numpy.cov`.
The covariance :math:`C_{ij}` between variable *i* and variable *j* is defined
as
.. math::
cov[X_i, X_j] = E[(X_i - E[X_i])(X_j - E[X_j])]
Given an array of *N* observations of the variables :math:`X_i` and :math:`X_j`,
this can be estimated via the sample covariance:
.. math::
C_{ij} = \frac{1}{N - 1} \sum_{n=1}^N (X_{in} - \overline{X_i})(X_{jn} - \overline{X_j})
Where :math:`\overline{X_i} = \frac{1}{N} \sum_{k=1}^N X_{ik}` is the mean of the
observations.
Args:
m: array of shape ``(M, N)`` (if ``rowvar`` is True), or ``(N, M)``
(if ``rowvar`` is False) representing ``N`` observations of ``M`` variables.
``m`` may also be one-dimensional, representing ``N`` observations of a
single variable.
y: optional set of additional observations, with the same form as ``m``. If
specified, then ``y`` is combined with ``m``, i.e. for the default
``rowvar = True`` case, ``m`` becomes ``jnp.vstack([m, y])``.
rowvar: if True (default) then each row of ``m`` represents a variable. If
False, then each column represents a variable.
bias: if False (default) then normalize the covariance by ``N - 1``. If True,
then normalize the covariance by ``N``
ddof: specify the degrees of freedom. Defaults to ``1`` if ``bias`` is False,
or to ``0`` if ``bias`` is True.
fweights: optional array of integer frequency weights of shape ``(N,)``. This
is an absolute weight specifying the number of times each observation is
included in the computation.
aweights: optional array of observation weights of shape ``(N,)``. This is
a relative weight specifying the "importance" of each observation. In the
``ddof=0`` case, it is equivalent to assigning probabilities to each
observation.
dtype: optional data type of the result. Must be a float or complex type;
if not specified, it will be determined based on the dtype of the input.
Returns:
A covariance matrix of shape ``(M, M)``, or a scalar with shape ``()`` if ``M = 1``.
See also:
- :func:`jax.numpy.corrcoef`: compute the correlation coefficient, a normalized
version of the covariance matrix.
Examples:
Consider these observations of two variables that correlate perfectly.
The covariance matrix in this case is a 2x2 matrix of ones:
>>> x = jnp.array([[0, 1, 2],
... [0, 1, 2]])
>>> jnp.cov(x)
Array([[1., 1.],
[1., 1.]], dtype=float32)
Now consider these observations of two variables that are perfectly
anti-correlated. The covariance matrix in this case has ``-1`` in the
off-diagonal:
>>> x = jnp.array([[-1, 0, 1],
... [ 1, 0, -1]])
>>> jnp.cov(x)
Array([[ 1., -1.],
[-1., 1.]], dtype=float32)
Equivalently, these sequences can be specified as separate arguments,
in which case they are stacked before continuing the computation.
>>> x = jnp.array([-1, 0, 1])
>>> y = jnp.array([1, 0, -1])
>>> jnp.cov(x, y)
Array([[ 1., -1.],
[-1., 1.]], dtype=float32)
In general, the entries of the covariance matrix may be any positive
or negative real value. For example, here is the covariance of 100
points drawn from a 3-dimensional standard normal distribution:
>>> key = jax.random.key(0)
>>> x = jax.random.normal(key, shape=(3, 100))
>>> with jnp.printoptions(precision=2):
... print(jnp.cov(x))
[[0.9 0.03 0.1 ]
[0.03 1. 0.01]
[0.1 0.01 0.85]]
"""
if y is not None:
m, y = util.promote_args_inexact("cov", m, y)
if y.ndim > 2:
raise ValueError("y has more than 2 dimensions")
else:
m, = util.promote_args_inexact("cov", m)
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions") # same as numpy error
if dtype is not None and not dtypes.issubdtype(dtype, np.inexact):
raise ValueError(f"cov: dtype must be a subclass of float or complex; got {dtype=}")
X = atleast_2d(m)
if not rowvar and m.ndim != 1:
X = X.T
if X.shape[0] == 0:
return array([]).reshape(0, 0)
if y is not None:
y_arr = atleast_2d(y)
if not rowvar and y_arr.shape[0] != 1:
y_arr = y_arr.T
X = concatenate((X, y_arr), axis=0)
if X.shape[1] == 0:
cov_shape = () if X.shape[0] == 1 else (X.shape[0], X.shape[0])
return array_creation.full(cov_shape, np.nan, dtype=X.dtype)
if ddof is None:
ddof = 1 if bias == 0 else 0
w: Array | None = None
if fweights is not None:
fweights = util.ensure_arraylike("cov", fweights)
if np.ndim(fweights) > 1:
raise RuntimeError("cannot handle multidimensional fweights")
if np.shape(fweights)[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and fweights")
if not issubdtype(fweights.dtype, np.integer):
raise TypeError("fweights must be integer.")
# Ensure positive fweights; note that numpy raises an error on negative fweights.
w = abs(fweights)
if aweights is not None:
aweights = util.ensure_arraylike("cov", aweights)
if np.ndim(aweights) > 1:
raise RuntimeError("cannot handle multidimensional aweights")
if np.shape(aweights)[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and aweights")
# Ensure positive aweights: note that numpy raises an error for negative aweights.
aweights = abs(aweights)
w = aweights if w is None else w * aweights
if dtype is not None:
X = X.astype(dtype)
w = w.astype(dtype) if w is not None else w
avg, w_sum = reductions.average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
if w is None:
f = X.shape[1] - ddof
elif ddof == 0:
f = w_sum
elif aweights is None:
f = w_sum - ddof
else:
f = w_sum - ddof * reductions.sum(w * aweights) / w_sum
X = X - avg[:, None]
X_T = X.T if w is None else (X * lax.broadcast_to_rank(w, X.ndim)).T
return ufuncs.true_divide(tensor_contractions.dot(X, X_T.conj()), f).squeeze()
@export
@api.jit(static_argnames=('rowvar', 'dtype'))
def corrcoef(x: ArrayLike, y: ArrayLike | None = None, rowvar: bool = True,
             dtype: DTypeLike | None = None) -> Array:
  r"""Compute the Pearson correlation coefficients.

  JAX implementation of :func:`numpy.corrcoef`.

  This is a normalized version of the sample covariance computed by :func:`jax.numpy.cov`.
  For a sample covariance :math:`C_{ij}`, the correlation coefficients are

  .. math::

     R_{ij} = \frac{C_{ij}}{\sqrt{C_{ii}C_{jj}}}

  they are constructed such that the values satisfy :math:`-1 \le R_{ij} \le 1`.

  Args:
    x: array of shape ``(M, N)`` (if ``rowvar`` is True), or ``(N, M)``
      (if ``rowvar`` is False) representing ``N`` observations of ``M`` variables.
      ``x`` may also be one-dimensional, representing ``N`` observations of a
      single variable.
    y: optional set of additional observations, with the same form as ``x``. If
      specified, then ``y`` is combined with ``x``, i.e. for the default
      ``rowvar = True`` case, ``x`` becomes ``jnp.vstack([x, y])``.
    rowvar: if True (default) then each row of ``x`` represents a variable. If
      False, then each column represents a variable.
    dtype: optional data type of the result. Must be a float or complex type;
      if not specified, it will be determined based on the dtype of the input.

  Returns:
    The correlation matrix, of shape ``(M, M)``.

  See also:
    - :func:`jax.numpy.cov`: compute the covariance matrix.

  Examples:
    Consider these observations of two variables that correlate perfectly.
    The correlation matrix in this case is a 2x2 matrix of ones:

    >>> x = jnp.array([[0, 1, 2],
    ...                [0, 1, 2]])
    >>> jnp.corrcoef(x)
    Array([[1., 1.],
           [1., 1.]], dtype=float32)

    Now consider these observations of two variables that are perfectly
    anti-correlated. The correlation matrix in this case has ``-1`` in the
    off-diagonal:

    >>> x = jnp.array([[-1, 0, 1],
    ...                [ 1, 0, -1]])
    >>> jnp.corrcoef(x)
    Array([[ 1., -1.],
           [-1.,  1.]], dtype=float32)

    Equivalently, these sequences can be specified as separate arguments,
    in which case they are stacked before continuing the computation.

    >>> x = jnp.array([-1, 0, 1])
    >>> y = jnp.array([1, 0, -1])
    >>> jnp.corrcoef(x, y)
    Array([[ 1., -1.],
           [-1.,  1.]], dtype=float32)

    The entries of the correlation matrix are normalized such that they
    lie within the range -1 to +1, where +1 indicates perfect correlation
    and -1 indicates perfect anti-correlation. For example, here is the
    correlation of 100 points drawn from a 3-dimensional standard normal
    distribution:

    >>> key = jax.random.key(0)
    >>> x = jax.random.normal(key, shape=(3, 100))
    >>> with jnp.printoptions(precision=2):
    ...   print(jnp.corrcoef(x))
    [[1.   0.03 0.12]
     [0.03 1.   0.01]
     [0.12 0.01 1.  ]]
  """
  util.check_arraylike("corrcoef", x)
  if dtype is not None and not dtypes.issubdtype(dtype, np.inexact):
    raise ValueError(f"corrcoef: dtype must be a subclass of float or complex; got {dtype=}")
  c = cov(x, y, rowvar, dtype=dtype)
  if len(np.shape(c)) == 0:
    # scalar - this should yield nan for values (nan/nan, inf/inf, 0/0), 1 otherwise
    return ufuncs.divide(c, c)
  d = diag(c)
  # Normalize by the per-variable standard deviations: the square roots of the
  # (real-valued) diagonal of the covariance matrix.
  stddev = ufuncs.sqrt(ufuncs.real(d)).astype(c.dtype)
  c = c / stddev[:, None] / stddev[None, :]
  # Clip to [-1, 1] so floating-point rounding cannot push coefficients
  # outside the mathematically valid range.
  real_part = clip(ufuncs.real(c), -1, 1)
  if iscomplexobj(c):
    complex_part = clip(ufuncs.imag(c), -1, 1)
    c = lax.complex(real_part, complex_part)
  else:
    c = real_part
  return c
@partial(vectorize, excluded={0, 1, 3, 4})
def _searchsorted_via_scan(unrolled: bool, sorted_arr: Array, query: Array,
                           side: str, dtype: type) -> Array:
  """Find the insertion index of a single query value by scanned bisection.

  Vectorized over ``query`` (all other arguments are excluded from
  vectorization). Runs a fixed number of bisection steps via ``scan`` so the
  loop is traceable under JIT.
  """
  # 'left' breaks ties toward the lower index; 'right' toward the higher one.
  op = lax._sort_le_comparator if side == 'left' else lax._sort_lt_comparator
  # Compute the midpoint in an unsigned type of the same width so that
  # low + high cannot overflow to a negative value.
  unsigned_dtype = np.uint32 if dtype == np.int32 else np.uint64
  def body_fun(state, _):
    low, high = state
    mid = low.astype(unsigned_dtype) + high.astype(unsigned_dtype)
    mid = lax.div(mid, array(2, dtype=unsigned_dtype)).astype(dtype)
    go_left = op(query, sorted_arr[mid])
    return (where(go_left, low, mid), where(go_left, mid, high)), ()
  # ceil(log2(n + 1)) halvings suffice to narrow [0, n] down to one index.
  n_levels = int(np.ceil(np.log2(len(sorted_arr) + 1)))
  init = (array(0, dtype=dtype), array(len(sorted_arr), dtype=dtype))
  # NOTE(review): pvary appears to align the carry's varying-axes (vma)
  # metadata with that of `sorted_arr` so the scan type-checks under
  # sharding — confirm against core.pvary's contract.
  vma = core.typeof(sorted_arr).vma
  init = tuple(core.pvary(i, tuple(vma)) for i in init)
  carry, _ = control_flow.scan(body_fun, init, (), length=n_levels,
                               unroll=n_levels if unrolled else 1)
  return carry[1]
def _searchsorted_via_sort(sorted_arr: Array, query: Array, side: str, dtype: type) -> Array:
  """Compute insertion indices by ranking a concatenation of both arrays."""
  n_total = sorted_arr.size + query.size
  working_dtype = lax_utils.int_dtype_for_dim(n_total, signed=False)

  def _rank(values):
    # Position of each element within the sorted order of `values`
    # (stable, since argsort is stable).
    positions = lax.iota(working_dtype, values.shape[0])
    return array_creation.zeros_like(positions).at[argsort(values)].set(positions)

  flat_query = query.ravel()
  if side == 'left':
    # Queries placed first so ties rank them before equal sorted entries.
    combined_ranks = _rank(lax.concatenate([flat_query, sorted_arr], 0))
    query_ranks = combined_ranks[:query.size]
  else:
    # Queries placed last so ties rank them after equal sorted entries.
    combined_ranks = _rank(lax.concatenate([sorted_arr, flat_query], 0))
    query_ranks = combined_ranks[sorted_arr.size:]
  # Subtracting each query's rank among the queries themselves leaves only
  # the count of sorted-array entries that precede it.
  result = lax.sub(query_ranks, _rank(flat_query))
  return lax.reshape(result, np.shape(query)).astype(dtype)
def _searchsorted_via_compare_all(sorted_arr: Array, query: Array, side: str, dtype: type) -> Array:
  """Count, per query point, how many sorted entries must precede it."""
  # 'left' counts strictly-smaller entries; 'right' also counts equal ones.
  cmp = lax._sort_lt_comparator if side == 'left' else lax._sort_le_comparator
  per_entry = api.vmap(cmp, in_axes=(0, None))(sorted_arr, query)
  return per_entry.sum(dtype=dtype, axis=0)
@export
@api.jit(static_argnames=('side', 'method'))
def searchsorted(a: ArrayLike, v: ArrayLike, side: str = 'left',
                 sorter: ArrayLike | None = None, *, method: str = 'scan') -> Array:
  """Perform a binary search within a sorted array.

  JAX implementation of :func:`numpy.searchsorted`.

  Returns, for each value in ``v``, the index at which it could be inserted
  into the sorted array ``a`` while keeping ``a`` in sorted order.

  Args:
    a: one-dimensional array, assumed to be sorted unless ``sorter`` is specified.
    v: N-dimensional array of query values
    side: ``'left'`` (default) or ``'right'``; chooses whether, in case of ties,
      insertion indices fall to the left or to the right.
    sorter: optional array of indices giving the sort order of ``a``. When
      provided, the algorithm assumes ``a[sorter]`` is sorted.
    method: one of ``'scan'`` (default), ``'scan_unrolled'``, ``'sort'`` or
      ``'compare_all'``. See *Note* below.

  Returns:
    Array of insertion indices of shape ``v.shape``.

  Note:
    The ``method`` argument selects the algorithm used to compute the
    insertion indices.

    - ``'scan'`` (the default) tends to be more performant on CPU, particularly
      when ``a`` is very large.
    - ``'scan_unrolled'`` is more performant on GPU at the expense of additional
      compile time.
    - ``'sort'`` is often more performant on accelerator backends like GPU and
      TPU, particularly when ``v`` is very large.
    - ``'compare_all'`` tends to be the most performant when ``a`` is very small.

  Examples:
    Searching for a single value:

    >>> a = jnp.array([1, 2, 2, 3, 4, 5, 5])
    >>> jnp.searchsorted(a, 2)
    Array(1, dtype=int32)
    >>> jnp.searchsorted(a, 2, side='right')
    Array(3, dtype=int32)

    Searching for a batch of values:

    >>> vals = jnp.array([0, 3, 8, 1.5, 2])
    >>> jnp.searchsorted(a, vals)
    Array([0, 3, 7, 1, 1], dtype=int32)

    Optionally, the ``sorter`` argument can be used to find insertion indices into
    an array sorted via :func:`jax.numpy.argsort`:

    >>> a = jnp.array([4, 3, 5, 1, 2])
    >>> sorter = jnp.argsort(a)
    >>> jnp.searchsorted(a, vals, sorter=sorter)
    Array([0, 2, 5, 1, 1], dtype=int32)

    The result is equivalent to passing the sorted array:

    >>> jnp.searchsorted(jnp.sort(a), vals)
    Array([0, 2, 5, 1, 1], dtype=int32)
  """
  if sorter is None:
    a, v = util.ensure_arraylike("searchsorted", a, v)
  else:
    a, v, sorter = util.ensure_arraylike("searchsorted", a, v, sorter)
  if side not in ('left', 'right'):
    raise ValueError(f"{side!r} is an invalid value for keyword 'side'. "
                     "Expected one of ['left', 'right'].")
  if method not in ('scan', 'scan_unrolled', 'sort', 'compare_all'):
    raise ValueError(
        f"{method!r} is an invalid value for keyword 'method'. "
        "Expected one of ['sort', 'scan', 'scan_unrolled', 'compare_all'].")
  if np.ndim(a) != 1:
    raise ValueError("a should be 1-dimensional")
  a, v = util.promote_dtypes(a, v)
  if sorter is not None:
    a = a[sorter]
  # Result dtype just wide enough to index into `a`.
  dtype = lax_utils.int_dtype_for_dim(a.shape[0], signed=True)
  if a.shape[0] == 0:
    # Every query inserts at position 0 in an empty array.
    return array_creation.zeros_like(v, dtype=dtype)
  a, v = core.standard_insert_pvary(a, v)
  if method == 'scan':
    return _searchsorted_via_scan(False, a, v, side, dtype)  # type: ignore
  if method == 'scan_unrolled':
    return _searchsorted_via_scan(True, a, v, side, dtype)  # type: ignore
  if method == 'sort':
    return _searchsorted_via_sort(a, v, side, dtype)  # type: ignore
  return _searchsorted_via_compare_all(a, v, side, dtype)  # type: ignore
@export
@api.jit(static_argnames=('right', 'method'))
def digitize(x: ArrayLike, bins: ArrayLike, right: bool = False,
             *, method: str | None = None) -> Array:
  """Convert an array to bin indices.

  JAX implementation of :func:`numpy.digitize`.

  Args:
    x: array of values to digitize.
    bins: 1D array of bin edges; must be monotonically increasing or decreasing.
    right: if true, the intervals include the right bin edges. If false (default)
      the intervals include the left bin edges.
    method: optional method argument forwarded to :func:`~jax.numpy.searchsorted`;
      see that function for the available options.

  Returns:
    An integer array of the same shape as ``x`` giving, for each value, the
    index of the bin it falls into.

  See also:
    - :func:`jax.numpy.searchsorted`: find insertion indices for values in a
      sorted array.
    - :func:`jax.numpy.histogram`: compute frequency of array values within
      specified bins.

  Examples:
    >>> x = jnp.array([1.0, 2.0, 2.5, 1.5, 3.0, 3.5])
    >>> bins = jnp.array([1, 2, 3])
    >>> jnp.digitize(x, bins)
    Array([1, 2, 2, 1, 3, 3], dtype=int32)
    >>> jnp.digitize(x, bins, right=True)
    Array([0, 1, 2, 1, 2, 3], dtype=int32)

    ``digitize`` supports reverse-ordered bins as well:

    >>> bins = jnp.array([3, 2, 1])
    >>> jnp.digitize(x, bins)
    Array([2, 1, 1, 2, 0, 0], dtype=int32)
  """
  x, bins_arr = util.ensure_arraylike("digitize", x, bins)
  right = core.concrete_or_error(bool, right, "right argument of jnp.digitize()")
  if bins_arr.ndim != 1:
    raise ValueError(f"digitize: bins must be a 1-dimensional array; got {bins=}")
  if bins_arr.shape[0] == 0:
    # With no bin edges, every value lands in bin 0.
    return array_creation.zeros_like(x, dtype=np.int32)
  # Closed right edges map to a 'left' search and vice versa.
  side = 'left' if right else 'right'
  kwds: dict[str, str] = {'method': method} if method is not None else {}
  ascending = searchsorted(bins_arr, x, side=side, **kwds)
  # For decreasing bins, search the reversed edges and flip the index.
  descending = bins_arr.shape[0] - searchsorted(bins_arr[::-1], x, side=side, **kwds)
  return where(bins_arr[-1] >= bins_arr[0], ascending, descending)
@export
def piecewise(x: ArrayLike, condlist: Array | Sequence[ArrayLike],
              funclist: list[ArrayLike | Callable[..., Array]],
              *args, **kw) -> Array:
  """Evaluate a function defined piecewise across the domain.

  JAX implementation of :func:`numpy.piecewise`, in terms of :func:`jax.lax.switch`.

  Note:
    Unlike :func:`numpy.piecewise`, :func:`jax.numpy.piecewise` requires the
    functions in ``funclist`` to be traceable by JAX, because it is implemented
    via :func:`jax.lax.switch`.

  Args:
    x: array of input values.
    condlist: boolean array or sequence of boolean arrays corresponding to the
      functions in ``funclist``. If a sequence of arrays, the length of each
      array must match the length of ``x``
    funclist: list of arrays or functions; must either be the same length as
      ``condlist``, or have length ``len(condlist) + 1``, in which case the
      last entry is the default applied when none of the conditions are True.
      Alternatively, entries of ``funclist`` may be numerical values, in which
      case they indicate a constant function.
    args, kwargs: additional arguments are passed to each function in
      ``funclist``.

  Returns:
    An array which is the result of evaluating the functions on ``x`` at
    the specified conditions.

  See also:
    - :func:`jax.lax.switch`: choose between *N* functions based on an index.
    - :func:`jax.lax.cond`: choose between two functions based on a boolean condition.
    - :func:`jax.numpy.where`: choose between two results based on a boolean mask.
    - :func:`jax.lax.select`: choose between two results based on a boolean mask.
    - :func:`jax.lax.select_n`: choose between *N* results based on a boolean mask.

  Examples:
    Here's an example of a function which is zero for negative values, and linear
    for positive values:

    >>> x = jnp.array([-4, -3, -2, -1, 0, 1, 2, 3, 4])

    >>> condlist = [x < 0, x >= 0]
    >>> funclist = [lambda x: 0 * x, lambda x: x]
    >>> jnp.piecewise(x, condlist, funclist)
    Array([0, 0, 0, 0, 0, 1, 2, 3, 4], dtype=int32)

    ``funclist`` can also contain a simple scalar value for constant functions:

    >>> condlist = [x < 0, x >= 0]
    >>> funclist = [0, lambda x: x]
    >>> jnp.piecewise(x, condlist, funclist)
    Array([0, 0, 0, 0, 0, 1, 2, 3, 4], dtype=int32)

    You can specify a default value by appending an extra condition to ``funclist``:

    >>> condlist = [x < -1, x > 1]
    >>> funclist = [lambda x: 1 + x, lambda x: x - 1, 0]
    >>> jnp.piecewise(x, condlist, funclist)
    Array([-3, -2, -1,  0,  0,  0,  1,  2,  3], dtype=int32)

    ``condlist`` may also be a simple array of scalar conditions, in which case
    the associated function applies to the whole range

    >>> condlist = jnp.array([False, True, False])
    >>> funclist = [lambda x: x * 0, lambda x: x * 10, lambda x: x * 100]
    >>> jnp.piecewise(x, condlist, funclist)
    Array([-40, -30, -20, -10,   0,  10,  20,  30,  40], dtype=int32)
  """
  x_arr = util.ensure_arraylike("piecewise", x)
  nc = len(condlist)
  nf = len(funclist)
  if nf == nc + 1:
    # The trailing entry is the default; rotate it to slot 0, which is where
    # _piecewise looks when no condition holds.
    funclist = funclist[-1:] + funclist[:-1]
  elif nf == nc:
    # No default supplied: use a constant zero.
    funclist = [0] + list(funclist)
  else:
    raise ValueError(f"with {nc} condition(s), either {nc} or {nc+1} functions are expected; got {nf}")
  # Partition entries into constants and callables so the callables can be
  # passed as a static (hashable) argument.
  consts: dict[int, ArrayLike] = {}
  funcs: dict[int, Callable[..., Array]] = {}
  for i, entry in enumerate(funclist):
    if callable(entry):
      funcs[i] = entry
    else:
      consts[i] = entry
  return _piecewise(x_arr, asarray(condlist, dtype=bool), consts,
                    frozenset(funcs.items()),  # dict is not hashable.
                    *args, **kw)
@api.jit(static_argnames=['funcs'])
def _piecewise(x: Array, condlist: Array, consts: dict[int, ArrayLike],
               funcs: frozenset[tuple[int, Callable[..., Array]]],
               *args, **kw) -> Array:
  """Dispatch each element of ``x`` to its matching piece via ``switch``."""
  callables = dict(funcs)
  n_pieces = len(condlist) + 1
  # Slot 0 is the default; slots 1..n correspond to the conditions.
  pieces = [consts.get(i, callables.get(i)) for i in range(n_pieces)]
  # Prepend an all-zero row and take a running count of satisfied conditions;
  # argmax of that count lands on the row of the last condition that holds,
  # or on row 0 (the default) when none do.
  indices = argmax(reductions.cumsum(concatenate(
      [array_creation.zeros_like(condlist[:1]), condlist], 0), 0), 0)
  out_dtype = x.dtype
  def _as_branch(piece):
    # Bind `piece` now to avoid late-binding closure bugs in the list below.
    if callable(piece):
      return lambda xi: piece(xi, *args, **kw).astype(out_dtype)
    return lambda xi: array(piece, dtype=out_dtype)
  branches = [_as_branch(piece) for piece in pieces]
  return vectorize(control_flow.switch, excluded=(1,))(indices, branches, x)
def _tile_to_size(arr: Array, size: int) -> Array:
assert arr.ndim == 1
if arr.size < size:
arr = tile(arr, int(np.ceil(size / arr.size)))
assert arr.size >= size
return arr[:size] if arr.size > size else arr
| PadStatFunc |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 10671,
"end": 14412
} | class ____(Processor):
"""
When the cursor is on or right after a bracket, it highlights the matching
bracket.
:param max_cursor_distance: Only highlight matching brackets when the
cursor is within this distance. (From inside a `Processor`, we can't
know which lines will be visible on the screen. But we also don't want
to scan the whole document for matching brackets on each key press, so
we limit to this value.)
"""
_closing_braces = "])}>"
def __init__(
self, chars: str = "[](){}<>", max_cursor_distance: int = 1000
) -> None:
self.chars = chars
self.max_cursor_distance = max_cursor_distance
self._positions_cache: SimpleCache[Hashable, list[tuple[int, int]]] = (
SimpleCache(maxsize=8)
)
def _get_positions_to_highlight(self, document: Document) -> list[tuple[int, int]]:
"""
Return a list of (row, col) tuples that need to be highlighted.
"""
pos: int | None
# Try for the character under the cursor.
if document.current_char and document.current_char in self.chars:
pos = document.find_matching_bracket_position(
start_pos=document.cursor_position - self.max_cursor_distance,
end_pos=document.cursor_position + self.max_cursor_distance,
)
# Try for the character before the cursor.
elif (
document.char_before_cursor
and document.char_before_cursor in self._closing_braces
and document.char_before_cursor in self.chars
):
document = Document(document.text, document.cursor_position - 1)
pos = document.find_matching_bracket_position(
start_pos=document.cursor_position - self.max_cursor_distance,
end_pos=document.cursor_position + self.max_cursor_distance,
)
else:
pos = None
# Return a list of (row, col) tuples that need to be highlighted.
if pos:
pos += document.cursor_position # pos is relative.
row, col = document.translate_index_to_position(pos)
return [
(row, col),
(document.cursor_position_row, document.cursor_position_col),
]
else:
return []
def apply_transformation(
self, transformation_input: TransformationInput
) -> Transformation:
(
buffer_control,
document,
lineno,
source_to_display,
fragments,
_,
_,
) = transformation_input.unpack()
# When the application is in the 'done' state, don't highlight.
if get_app().is_done:
return Transformation(fragments)
# Get the highlight positions.
key = (get_app().render_counter, document.text, document.cursor_position)
positions = self._positions_cache.get(
key, lambda: self._get_positions_to_highlight(document)
)
# Apply if positions were found at this line.
if positions:
for row, col in positions:
if row == lineno:
col = source_to_display(col)
fragments = explode_text_fragments(fragments)
style, text, *_ = fragments[col]
if col == document.cursor_position_col:
style += " class:matching-bracket.cursor "
else:
style += " class:matching-bracket.other "
fragments[col] = (style, text)
return Transformation(fragments)
| HighlightMatchingBracketProcessor |
python | joke2k__faker | tests/providers/test_barcode.py | {
"start": 3177,
"end": 5256
} | class ____:
ean8_pattern: Pattern = re.compile(r"\d{8}")
ean13_pattern: Pattern = re.compile(r"\d{13}")
@staticmethod
def assert_prefix(barcode_digits, prefixes):
for prefix in prefixes:
if all(a == b for a, b in zip(barcode_digits, map(int, prefix))):
return
str_barc = "".join(str(x) for x in barcode_digits)
str_pref = ", ".join(map(lambda _prefix: "".join(str(x) for x in _prefix)), prefixes)
raise AssertionError(f"{str_barc} doesn't match any of the prefixes: {str_pref}")
def test_localized_ean(self, faker, num_samples, provider):
for _ in range(num_samples):
ean8 = faker.localized_ean(8)
ean13 = faker.localized_ean(13)
assert self.ean8_pattern.match(ean8)
assert self.ean13_pattern.match(ean13)
ean8_digits = [int(digit) for digit in ean8]
ean13_digits = [int(digit) for digit in ean13]
assert (sum(ean8_digits) + 2 * sum(ean8_digits[::2])) % 10 == 0
assert (sum(ean13_digits) + 2 * sum(ean13_digits[1::2])) % 10 == 0
self.assert_prefix(ean8_digits, provider.local_prefixes)
self.assert_prefix(ean13_digits, provider.local_prefixes)
def test_localized_ean8(self, faker, num_samples, provider):
for _ in range(num_samples):
ean8 = faker.localized_ean8()
assert self.ean8_pattern.match(ean8)
ean8_digits = [int(digit) for digit in ean8]
assert (sum(ean8_digits) + 2 * sum(ean8_digits[::2])) % 10 == 0
self.assert_prefix(ean8_digits, provider.local_prefixes)
def test_localized_ean13(self, faker, num_samples, provider):
for _ in range(num_samples):
ean13 = faker.localized_ean13()
assert self.ean13_pattern.match(ean13)
ean13_digits = [int(digit) for digit in ean13]
assert (sum(ean13_digits) + 2 * sum(ean13_digits[1::2])) % 10 == 0
self.assert_prefix(ean13_digits, provider.local_prefixes)
| _LocaleCommonMixin |
python | getsentry__sentry | src/sentry/incidents/endpoints/serializers/incident.py | {
"start": 3948,
"end": 4925
} | class ____(IncidentSerializer):
def __init__(self, expand=None):
if expand is None:
expand = []
if "original_alert_rule" not in expand:
expand.append("original_alert_rule")
super().__init__(expand=expand)
def serialize(self, obj, attrs, user, **kwargs) -> DetailedIncidentSerializerResponse:
base_context = super().serialize(obj, attrs, user)
# The query we should use to get accurate results in Discover.
context = DetailedIncidentSerializerResponse(
**base_context, discoverQuery=self._build_discover_query(obj)
)
return context
def _build_discover_query(self, incident) -> str:
return apply_dataset_query_conditions(
SnubaQuery.Type(incident.alert_rule.snuba_query.type),
incident.alert_rule.snuba_query.query,
incident.alert_rule.snuba_query.event_types,
discover=True,
)
| DetailedIncidentSerializer |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/auth_manager/user.py | {
"start": 1299,
"end": 2043
} | class ____(BaseUser):
"""
User model for users managed by the AWS Auth Manager.
:param user_id: The user ID.
:param groups: The groups the user belongs to.
:param username: The username of the user.
:param email: The email of the user.
"""
def __init__(
self, *, user_id: str, groups: list[str], username: str | None = None, email: str | None = None
) -> None:
self.user_id = user_id
self.groups = groups
self.username = username
self.email = email
def get_id(self) -> str:
return self.user_id
def get_name(self) -> str:
return self.username or self.email or self.user_id
def get_groups(self):
return self.groups
| AwsAuthManagerUser |
python | doocs__leetcode | lcci/17.09.Get Kth Magic Number/Solution.py | {
"start": 0,
"end": 328
} | class ____:
def getKthMagicNumber(self, k: int) -> int:
h = [1]
vis = {1}
for _ in range(k - 1):
cur = heappop(h)
for f in (3, 5, 7):
if (nxt := cur * f) not in vis:
vis.add(nxt)
heappush(h, nxt)
return h[0]
| Solution |
python | PyCQA__pylint | pylint/testutils/pyreverse.py | {
"start": 2773,
"end": 4403
} | class ____(NamedTuple):
"""Named tuple containing the test file and the expected output."""
source: Path
options: TestFileOptions
def get_functional_test_files(
root_directory: Path,
) -> list[FunctionalPyreverseTestfile]:
"""Get all functional test files from the given directory."""
test_files = []
for path in root_directory.rglob("*.py"):
if path.stem.startswith("_"):
continue
config_file = path.with_suffix(".rc")
if config_file.exists():
test_files.append(
FunctionalPyreverseTestfile(
source=path, options=_read_config(config_file)
)
)
else:
test_files.append(
FunctionalPyreverseTestfile(
source=path,
options={
"source_roots": [],
"output_formats": ["mmd"],
"command_line_args": [],
},
)
)
return test_files
def _read_config(config_file: Path) -> TestFileOptions:
config = configparser.ConfigParser()
config.read(str(config_file))
source_roots = config.get("testoptions", "source_roots", fallback=None)
return {
"source_roots": source_roots.split(",") if source_roots else [],
"output_formats": config.get(
"testoptions", "output_formats", fallback="mmd"
).split(","),
"command_line_args": shlex.split(
config.get("testoptions", "command_line_args", fallback="")
),
}
| FunctionalPyreverseTestfile |
python | networkx__networkx | networkx/algorithms/components/tests/test_strongly_connected.py | {
"start": 83,
"end": 6021
} | class ____:
@classmethod
def setup_class(cls):
cls.gc = []
G = nx.DiGraph()
G.add_edges_from(
[
(1, 2),
(2, 3),
(2, 8),
(3, 4),
(3, 7),
(4, 5),
(5, 3),
(5, 6),
(7, 4),
(7, 6),
(8, 1),
(8, 7),
]
)
C = {frozenset([3, 4, 5, 7]), frozenset([1, 2, 8]), frozenset([6])}
cls.gc.append((G, C))
G = nx.DiGraph()
G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)])
C = {frozenset([2, 3, 4]), frozenset([1])}
cls.gc.append((G, C))
G = nx.DiGraph()
G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)])
C = {frozenset([1, 2, 3])}
cls.gc.append((G, C))
# Eppstein's tests
G = nx.DiGraph({0: [1], 1: [2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []})
C = {
frozenset([0]),
frozenset([1]),
frozenset([2]),
frozenset([3]),
frozenset([4]),
frozenset([5]),
frozenset([6]),
}
cls.gc.append((G, C))
G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 3: [4], 4: [3]})
C = {frozenset([0, 1, 2]), frozenset([3, 4])}
cls.gc.append((G, C))
def test_tarjan(self):
scc = nx.strongly_connected_components
for G, C in self.gc:
assert {frozenset(g) for g in scc(G)} == C
def test_kosaraju(self):
scc = nx.kosaraju_strongly_connected_components
for G, C in self.gc:
assert {frozenset(g) for g in scc(G)} == C
def test_number_strongly_connected_components(self):
ncc = nx.number_strongly_connected_components
for G, C in self.gc:
assert ncc(G) == len(C)
def test_is_strongly_connected(self):
for G, C in self.gc:
if len(C) == 1:
assert nx.is_strongly_connected(G)
else:
assert not nx.is_strongly_connected(G)
def test_contract_scc1(self):
G = nx.DiGraph()
G.add_edges_from(
[
(1, 2),
(2, 3),
(2, 11),
(2, 12),
(3, 4),
(4, 3),
(4, 5),
(5, 6),
(6, 5),
(6, 7),
(7, 8),
(7, 9),
(7, 10),
(8, 9),
(9, 7),
(10, 6),
(11, 2),
(11, 4),
(11, 6),
(12, 6),
(12, 11),
]
)
scc = list(nx.strongly_connected_components(G))
cG = nx.condensation(G, scc)
# DAG
assert nx.is_directed_acyclic_graph(cG)
# nodes
assert sorted(cG.nodes()) == [0, 1, 2, 3]
# edges
mapping = {}
for i, component in enumerate(scc):
for n in component:
mapping[n] = i
edge = (mapping[2], mapping[3])
assert cG.has_edge(*edge)
edge = (mapping[2], mapping[5])
assert cG.has_edge(*edge)
edge = (mapping[3], mapping[5])
assert cG.has_edge(*edge)
def test_contract_scc_isolate(self):
# Bug found and fixed in [1687].
G = nx.DiGraph()
G.add_edge(1, 2)
G.add_edge(2, 1)
scc = list(nx.strongly_connected_components(G))
cG = nx.condensation(G, scc)
assert list(cG.nodes()) == [0]
assert list(cG.edges()) == []
def test_contract_scc_edge(self):
G = nx.DiGraph()
G.add_edge(1, 2)
G.add_edge(2, 1)
G.add_edge(2, 3)
G.add_edge(3, 4)
G.add_edge(4, 3)
scc = list(nx.strongly_connected_components(G))
cG = nx.condensation(G, scc)
assert sorted(cG.nodes()) == [0, 1]
if 1 in scc[0]:
edge = (0, 1)
else:
edge = (1, 0)
assert list(cG.edges()) == [edge]
def test_condensation_mapping_and_members(self):
G, C = self.gc[1]
C = sorted(C, key=len, reverse=True)
cG = nx.condensation(G)
mapping = cG.graph["mapping"]
assert all(n in G for n in mapping)
assert all(0 == cN for n, cN in mapping.items() if n in C[0])
assert all(1 == cN for n, cN in mapping.items() if n in C[1])
for n, d in cG.nodes(data=True):
assert set(C[n]) == cG.nodes[n]["members"]
def test_null_graph(self):
G = nx.DiGraph()
assert list(nx.strongly_connected_components(G)) == []
assert list(nx.kosaraju_strongly_connected_components(G)) == []
assert len(nx.condensation(G)) == 0
pytest.raises(
nx.NetworkXPointlessConcept, nx.is_strongly_connected, nx.DiGraph()
)
def test_connected_raise(self):
G = nx.Graph()
with pytest.raises(NetworkXNotImplemented):
next(nx.strongly_connected_components(G))
with pytest.raises(NetworkXNotImplemented):
next(nx.kosaraju_strongly_connected_components(G))
pytest.raises(NetworkXNotImplemented, nx.is_strongly_connected, G)
pytest.raises(NetworkXNotImplemented, nx.condensation, G)
strong_cc_methods = (
nx.strongly_connected_components,
nx.kosaraju_strongly_connected_components,
)
@pytest.mark.parametrize("get_components", strong_cc_methods)
def test_connected_mutability(self, get_components):
DG = nx.path_graph(5, create_using=nx.DiGraph)
G = nx.disjoint_union(DG, DG)
seen = set()
for component in get_components(G):
assert len(seen & component) == 0
seen.update(component)
component.clear()
| TestStronglyConnected |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 58190,
"end": 58794
} | class ____(TestCase):
validator = None
def application(self, environ, start_response):
self.assertEqual(environ['HTTP_COOKIE'], 'name1="value1"; name2="value2"')
self.assertEqual(environ['HTTP_COOKIE2'], 'nameA="valueA"; nameB="valueB"')
start_response('200 OK', [])
return []
def test(self):
with self.makefile() as fd:
fd.write('''GET / HTTP/1.1
Host: localhost
Cookie: name1="value1"
Cookie2: nameA="valueA"
Cookie2: nameB="valueB"
Cookie: name2="value2"\n\n'''.replace('\n', '\r\n'))
read_http(fd)
| MultipleCookieHeadersTest |
python | django__django | tests/queries/tests.py | {
"start": 179731,
"end": 182006
} | class ____(TestCase):
@skipUnlessDBFeature("can_distinct_on_fields")
def test_ticket_23622(self):
"""
Make sure __pk__in and __in work the same for related fields when
using a distinct on subquery.
"""
a1 = Ticket23605A.objects.create()
a2 = Ticket23605A.objects.create()
c1 = Ticket23605C.objects.create(field_c0=0.0)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=123,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=23,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=234,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a1,
field_b0=12,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=567,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=76,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=7,
field_b1=True,
modelc_fk=c1,
)
Ticket23605B.objects.create(
modela_fk=a2,
field_b0=56,
field_b1=True,
modelc_fk=c1,
)
qx = Q(
ticket23605b__pk__in=Ticket23605B.objects.order_by(
"modela_fk", "-field_b1"
).distinct("modela_fk")
) & Q(ticket23605b__field_b0__gte=300)
qy = Q(
ticket23605b__in=Ticket23605B.objects.order_by(
"modela_fk", "-field_b1"
).distinct("modela_fk")
) & Q(ticket23605b__field_b0__gte=300)
self.assertEqual(
set(Ticket23605A.objects.filter(qx).values_list("pk", flat=True)),
set(Ticket23605A.objects.filter(qy).values_list("pk", flat=True)),
)
self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
| Ticket23622Tests |
python | keras-team__keras | keras/src/layers/preprocessing/feature_space.py | {
"start": 529,
"end": 1436
} | class ____(KerasSaveable):
def __init__(self, feature_names, crossing_dim, output_mode="one_hot"):
if output_mode not in {"int", "one_hot"}:
raise ValueError(
"Invalid value for argument `output_mode`. "
"Expected one of {'int', 'one_hot'}. "
f"Received: output_mode={output_mode}"
)
self.feature_names = tuple(feature_names)
self.crossing_dim = crossing_dim
self.output_mode = output_mode
def _obj_type(self):
return "Cross"
@property
def name(self):
return "_X_".join(self.feature_names)
def get_config(self):
return {
"feature_names": self.feature_names,
"crossing_dim": self.crossing_dim,
"output_mode": self.output_mode,
}
@classmethod
def from_config(cls, config):
return cls(**config)
| Cross |
python | sympy__sympy | sympy/physics/biomechanics/tests/test_curve.py | {
"start": 71618,
"end": 75800
} | class ____:
@staticmethod
def test_valid_constructor():
curves = CharacteristicCurveCollection(
tendon_force_length=TendonForceLengthDeGroote2016,
tendon_force_length_inverse=TendonForceLengthInverseDeGroote2016,
fiber_force_length_passive=FiberForceLengthPassiveDeGroote2016,
fiber_force_length_passive_inverse=FiberForceLengthPassiveInverseDeGroote2016,
fiber_force_length_active=FiberForceLengthActiveDeGroote2016,
fiber_force_velocity=FiberForceVelocityDeGroote2016,
fiber_force_velocity_inverse=FiberForceVelocityInverseDeGroote2016,
)
assert curves.tendon_force_length is TendonForceLengthDeGroote2016
assert curves.tendon_force_length_inverse is TendonForceLengthInverseDeGroote2016
assert curves.fiber_force_length_passive is FiberForceLengthPassiveDeGroote2016
assert curves.fiber_force_length_passive_inverse is FiberForceLengthPassiveInverseDeGroote2016
assert curves.fiber_force_length_active is FiberForceLengthActiveDeGroote2016
assert curves.fiber_force_velocity is FiberForceVelocityDeGroote2016
assert curves.fiber_force_velocity_inverse is FiberForceVelocityInverseDeGroote2016
@staticmethod
@pytest.mark.skip(reason='kw_only dataclasses only valid in Python >3.10')
def test_invalid_constructor_keyword_only():
with pytest.raises(TypeError):
_ = CharacteristicCurveCollection(
TendonForceLengthDeGroote2016,
TendonForceLengthInverseDeGroote2016,
FiberForceLengthPassiveDeGroote2016,
FiberForceLengthPassiveInverseDeGroote2016,
FiberForceLengthActiveDeGroote2016,
FiberForceVelocityDeGroote2016,
FiberForceVelocityInverseDeGroote2016,
)
@staticmethod
@pytest.mark.parametrize(
'kwargs',
[
{'tendon_force_length': TendonForceLengthDeGroote2016},
{
'tendon_force_length': TendonForceLengthDeGroote2016,
'tendon_force_length_inverse': TendonForceLengthInverseDeGroote2016,
'fiber_force_length_passive': FiberForceLengthPassiveDeGroote2016,
'fiber_force_length_passive_inverse': FiberForceLengthPassiveInverseDeGroote2016,
'fiber_force_length_active': FiberForceLengthActiveDeGroote2016,
'fiber_force_velocity': FiberForceVelocityDeGroote2016,
'fiber_force_velocity_inverse': FiberForceVelocityInverseDeGroote2016,
'extra_kwarg': None,
},
]
)
def test_invalid_constructor_wrong_number_args(kwargs):
with pytest.raises(TypeError):
_ = CharacteristicCurveCollection(**kwargs)
@staticmethod
def test_instance_is_immutable():
curves = CharacteristicCurveCollection(
tendon_force_length=TendonForceLengthDeGroote2016,
tendon_force_length_inverse=TendonForceLengthInverseDeGroote2016,
fiber_force_length_passive=FiberForceLengthPassiveDeGroote2016,
fiber_force_length_passive_inverse=FiberForceLengthPassiveInverseDeGroote2016,
fiber_force_length_active=FiberForceLengthActiveDeGroote2016,
fiber_force_velocity=FiberForceVelocityDeGroote2016,
fiber_force_velocity_inverse=FiberForceVelocityInverseDeGroote2016,
)
with pytest.raises(AttributeError):
curves.tendon_force_length = None
with pytest.raises(AttributeError):
curves.tendon_force_length_inverse = None
with pytest.raises(AttributeError):
curves.fiber_force_length_passive = None
with pytest.raises(AttributeError):
curves.fiber_force_length_passive_inverse = None
with pytest.raises(AttributeError):
curves.fiber_force_length_active = None
with pytest.raises(AttributeError):
curves.fiber_force_velocity = None
with pytest.raises(AttributeError):
curves.fiber_force_velocity_inverse = None
| TestCharacteristicCurveCollection |
python | walkccc__LeetCode | solutions/1643. Kth Smallest Instructions/1643.py | {
"start": 0,
"end": 477
} | class ____:
def kthSmallestPath(self, destination: list[int], k: int) -> str:
ans = []
v, h = destination
for _ in range(h + v):
# If pick 'H', then we're able to reack 1, 2, ..., availableRank.
availableRank = math.comb(h + v - 1, v)
if availableRank >= k: # Should pick 'H'.
ans.append('H')
h -= 1
else: # Should pick 'V'.
k -= availableRank
ans.append('V')
v -= 1
return ''.join(ans)
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/isort/lines_after_imports_class_after.py | {
"start": 177,
"end": 264
} | class ____(object):
name: str
def __init__(self, name: str):
self.name = name
| Thing |
python | huggingface__transformers | src/transformers/models/switch_transformers/modeling_switch_transformers.py | {
"start": 22681,
"end": 24095
} | class ____(nn.Module):
def __init__(self, config, layer_idx: Optional[int] = None):
super().__init__()
self.EncDecAttention = SwitchTransformersAttention(
config, has_relative_attention_bias=False, layer_idx=layer_idx
)
self.layer_norm = SwitchTransformersLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
past_key_values=None,
use_cache=False,
query_length=None,
output_attentions=False,
cache_position=None,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
past_key_values=past_key_values,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
cache_position=cache_position,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
| SwitchTransformersLayerCrossAttention |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 15229,
"end": 15348
} | class ____(SendMessageToScheduler):
op = "reschedule"
__slots__ = ("key",)
key: Key
@dataclass
| RescheduleMsg |
python | EpistasisLab__tpot | tpot/search_spaces/nodes/fss_node.py | {
"start": 1861,
"end": 5399
} | class ____(SklearnIndividual):
def __init__( self,
subsets,
rng=None,
):
"""
An individual for representing a specific FeatureSetSelector.
The FeatureSetSelector selects a feature list of list of predefined feature subsets.
This instance will select one set initially. Mutation and crossover can swap the selected subset with another.
Parameters
----------
subsets : str or list, default=None
Sets the subsets that the FeatureSetSeletor will select from if set as an option in one of the configuration dictionaries.
Features are defined by column names if using a Pandas data frame, or ints corresponding to indexes if using numpy arrays.
- str : If a string, it is assumed to be a path to a csv file with the subsets.
The first column is assumed to be the name of the subset and the remaining columns are the features in the subset.
- list or np.ndarray : If a list or np.ndarray, it is assumed to be a list of subsets (i.e a list of lists).
- dict : A dictionary where keys are the names of the subsets and the values are the list of features.
- int : If an int, it is assumed to be the number of subsets to generate. Each subset will contain one feature.
- None : If None, each column will be treated as a subset. One column will be selected per subset.
rng : int, np.random.Generator, optional
The random number generator. The default is None.
Only used to select the first subset.
Returns
-------
None
"""
subsets = subsets
rng = np.random.default_rng(rng)
if isinstance(subsets, str):
df = pd.read_csv(subsets,header=None,index_col=0)
df['features'] = df.apply(lambda x: list([x[c] for c in df.columns]),axis=1)
self.subset_dict = {}
for row in df.index:
self.subset_dict[row] = df.loc[row]['features']
elif isinstance(subsets, dict):
self.subset_dict = subsets
elif isinstance(subsets, list) or isinstance(subsets, np.ndarray):
self.subset_dict = {str(i):subsets[i] for i in range(len(subsets))}
elif isinstance(subsets, int):
self.subset_dict = {"{0}".format(i):i for i in range(subsets)}
else:
raise ValueError("Subsets must be a string, dictionary, list, int, or numpy array")
self.names_list = list(self.subset_dict.keys())
self.selected_subset_name = rng.choice(self.names_list)
self.sel_subset = self.subset_dict[self.selected_subset_name]
def mutate(self, rng=None):
rng = np.random.default_rng(rng)
#get list of names not including the current one
names = [name for name in self.names_list if name != self.selected_subset_name]
self.selected_subset_name = rng.choice(names)
self.sel_subset = self.subset_dict[self.selected_subset_name]
def crossover(self, other, rng=None):
self.selected_subset_name = other.selected_subset_name
self.sel_subset = other.sel_subset
def export_pipeline(self, **kwargs):
return FeatureSetSelector(sel_subset=self.sel_subset, name=self.selected_subset_name)
def unique_id(self):
id_str = "FeatureSetSelector({0})".format(self.selected_subset_name)
return id_str
| FSSIndividual |
python | sympy__sympy | sympy/simplify/epathtools.py | {
"start": 90,
"end": 10122
} | class ____:
r"""
Manipulate expressions using paths.
EPath grammar in EBNF notation::
literal ::= /[A-Za-z_][A-Za-z_0-9]*/
number ::= /-?\d+/
type ::= literal
attribute ::= literal "?"
all ::= "*"
slice ::= "[" number? (":" number? (":" number?)?)? "]"
range ::= all | slice
query ::= (type | attribute) ("|" (type | attribute))*
selector ::= range | query range?
path ::= "/" selector ("/" selector)*
See the docstring of the epath() function.
"""
__slots__ = ("_path", "_epath")
def __new__(cls, path):
"""Construct new EPath. """
if isinstance(path, EPath):
return path
if not path:
raise ValueError("empty EPath")
_path = path
if path[0] == '/':
path = path[1:]
else:
raise NotImplementedError("non-root EPath")
epath = []
for selector in path.split('/'):
selector = selector.strip()
if not selector:
raise ValueError("empty selector")
index = 0
for c in selector:
if c.isalnum() or c in ('_', '|', '?'):
index += 1
else:
break
attrs = []
types = []
if index:
elements = selector[:index]
selector = selector[index:]
for element in elements.split('|'):
element = element.strip()
if not element:
raise ValueError("empty element")
if element.endswith('?'):
attrs.append(element[:-1])
else:
types.append(element)
span = None
if selector == '*':
pass
else:
if selector.startswith('['):
try:
i = selector.index(']')
except ValueError:
raise ValueError("expected ']', got EOL")
_span, span = selector[1:i], []
if ':' not in _span:
span = int(_span)
else:
for elt in _span.split(':', 3):
if not elt:
span.append(None)
else:
span.append(int(elt))
span = slice(*span)
selector = selector[i + 1:]
if selector:
raise ValueError("trailing characters in selector")
epath.append((attrs, types, span))
obj = object.__new__(cls)
obj._path = _path
obj._epath = epath
return obj
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._path)
def _get_ordered_args(self, expr):
"""Sort ``expr.args`` using printing order. """
if expr.is_Add:
return expr.as_ordered_terms()
elif expr.is_Mul:
return expr.as_ordered_factors()
else:
return expr.args
def _hasattrs(self, expr, attrs) -> bool:
"""Check if ``expr`` has any of ``attrs``. """
return all(hasattr(expr, attr) for attr in attrs)
def _hastypes(self, expr, types):
"""Check if ``expr`` is any of ``types``. """
_types = [ cls.__name__ for cls in expr.__class__.mro() ]
return bool(set(_types).intersection(types))
def _has(self, expr, attrs, types):
"""Apply ``_hasattrs`` and ``_hastypes`` to ``expr``. """
if not (attrs or types):
return True
if attrs and self._hasattrs(expr, attrs):
return True
if types and self._hastypes(expr, types):
return True
return False
def apply(self, expr, func, args=None, kwargs=None):
"""
Modify parts of an expression selected by a path.
Examples
========
>>> from sympy.simplify.epathtools import EPath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = EPath("/*/[0]/Symbol")
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> path.apply(expr, lambda expr: expr**2)
[((x**2, 1), 2), ((3, y**2), z)]
>>> path = EPath("/*/*/Symbol")
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> path.apply(expr, lambda expr: 2*expr)
t + sin(2*x + 1) + cos(2*x + 2*y + E)
"""
def _apply(path, expr, func):
if not path:
return func(expr)
else:
selector, path = path[0], path[1:]
attrs, types, span = selector
if isinstance(expr, Basic):
if not expr.is_Atom:
args, basic = self._get_ordered_args(expr), True
else:
return expr
elif hasattr(expr, '__iter__'):
args, basic = expr, False
else:
return expr
args = list(args)
if span is not None:
if isinstance(span, slice):
indices = range(*span.indices(len(args)))
else:
indices = [span]
else:
indices = range(len(args))
for i in indices:
try:
arg = args[i]
except IndexError:
continue
if self._has(arg, attrs, types):
args[i] = _apply(path, arg, func)
if basic:
return expr.func(*args)
else:
return expr.__class__(args)
_args, _kwargs = args or (), kwargs or {}
_func = lambda expr: func(expr, *_args, **_kwargs)
return _apply(self._epath, expr, _func)
def select(self, expr):
"""
Retrieve parts of an expression selected by a path.
Examples
========
>>> from sympy.simplify.epathtools import EPath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = EPath("/*/[0]/Symbol")
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> path.select(expr)
[x, y]
>>> path = EPath("/*/*/Symbol")
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> path.select(expr)
[x, x, y]
"""
result = []
def _select(path, expr):
if not path:
result.append(expr)
else:
selector, path = path[0], path[1:]
attrs, types, span = selector
if isinstance(expr, Basic):
args = self._get_ordered_args(expr)
elif hasattr(expr, '__iter__'):
args = expr
else:
return
if span is not None:
if isinstance(span, slice):
args = args[span]
else:
try:
args = [args[span]]
except IndexError:
return
for arg in args:
if self._has(arg, attrs, types):
_select(path, arg)
_select(self._epath, expr)
return result
def epath(path, expr=None, func=None, args=None, kwargs=None):
r"""
Manipulate parts of an expression selected by a path.
Explanation
===========
This function allows to manipulate large nested expressions in single
line of code, utilizing techniques to those applied in XML processing
standards (e.g. XPath).
If ``func`` is ``None``, :func:`epath` retrieves elements selected by
the ``path``. Otherwise it applies ``func`` to each matching element.
Note that it is more efficient to create an EPath object and use the select
and apply methods of that object, since this will compile the path string
only once. This function should only be used as a convenient shortcut for
interactive use.
This is the supported syntax:
* select all: ``/*``
Equivalent of ``for arg in args:``.
* select slice: ``/[0]`` or ``/[1:5]`` or ``/[1:5:2]``
Supports standard Python's slice syntax.
* select by type: ``/list`` or ``/list|tuple``
Emulates ``isinstance()``.
* select by attribute: ``/__iter__?``
Emulates ``hasattr()``.
Parameters
==========
path : str | EPath
A path as a string or a compiled EPath.
expr : Basic | iterable
An expression or a container of expressions.
func : callable (optional)
A callable that will be applied to matching parts.
args : tuple (optional)
Additional positional arguments to ``func``.
kwargs : dict (optional)
Additional keyword arguments to ``func``.
Examples
========
>>> from sympy.simplify.epathtools import epath
>>> from sympy import sin, cos, E
>>> from sympy.abc import x, y, z, t
>>> path = "/*/[0]/Symbol"
>>> expr = [((x, 1), 2), ((3, y), z)]
>>> epath(path, expr)
[x, y]
>>> epath(path, expr, lambda expr: expr**2)
[((x**2, 1), 2), ((3, y**2), z)]
>>> path = "/*/*/Symbol"
>>> expr = t + sin(x + 1) + cos(x + y + E)
>>> epath(path, expr)
[x, x, y]
>>> epath(path, expr, lambda expr: 2*expr)
t + sin(2*x + 1) + cos(2*x + 2*y + E)
"""
_epath = EPath(path)
if expr is None:
return _epath
if func is None:
return _epath.select(expr)
else:
return _epath.apply(expr, func, args, kwargs)
| EPath |
python | sympy__sympy | sympy/functions/special/error_functions.py | {
"start": 66109,
"end": 71214
} | class ____(FresnelIntegral):
r"""
Fresnel integral S.
Explanation
===========
This function is defined by
.. math:: \operatorname{S}(z) = \int_0^z \sin{\frac{\pi}{2} t^2} \mathrm{d}t.
It is an entire function.
Examples
========
>>> from sympy import I, oo, fresnels
>>> from sympy.abc import z
Several special values are known:
>>> fresnels(0)
0
>>> fresnels(oo)
1/2
>>> fresnels(-oo)
-1/2
>>> fresnels(I*oo)
-I/2
>>> fresnels(-I*oo)
I/2
In general one can pull out factors of -1 and $i$ from the argument:
>>> fresnels(-z)
-fresnels(z)
>>> fresnels(I*z)
-I*fresnels(z)
The Fresnel S integral obeys the mirror symmetry
$\overline{S(z)} = S(\bar{z})$:
>>> from sympy import conjugate
>>> conjugate(fresnels(z))
fresnels(conjugate(z))
Differentiation with respect to $z$ is supported:
>>> from sympy import diff
>>> diff(fresnels(z), z)
sin(pi*z**2/2)
Defining the Fresnel functions via an integral:
>>> from sympy import integrate, pi, sin, expand_func
>>> integrate(sin(pi*z**2/2), z)
3*fresnels(z)*gamma(3/4)/(4*gamma(7/4))
>>> expand_func(integrate(sin(pi*z**2/2), z))
fresnels(z)
We can numerically evaluate the Fresnel integral to arbitrary precision
on the whole complex plane:
>>> fresnels(2).evalf(30)
0.343415678363698242195300815958
>>> fresnels(-2*I).evalf(30)
0.343415678363698242195300815958*I
See Also
========
fresnelc: Fresnel cosine integral.
References
==========
.. [1] https://en.wikipedia.org/wiki/Fresnel_integral
.. [2] https://dlmf.nist.gov/7
.. [3] https://mathworld.wolfram.com/FresnelIntegrals.html
.. [4] https://functions.wolfram.com/GammaBetaErf/FresnelS
.. [5] The converging factors for the fresnel integrals
by John W. Wrench Jr. and Vicki Alley
"""
_trigfunc = sin
_sign = -S.One
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 1:
p = previous_terms[-1]
return (-pi**2*x**4*(4*n - 1)/(8*n*(2*n + 1)*(4*n + 3))) * p
else:
return x**3 * (-x**4)**n * (S(2)**(-2*n - 1)*pi**(2*n + 1)) / ((4*n + 3)*factorial(2*n + 1))
def _eval_rewrite_as_erf(self, z, **kwargs):
return (S.One + I)/4 * (erf((S.One + I)/2*sqrt(pi)*z) - I*erf((S.One - I)/2*sqrt(pi)*z))
def _eval_rewrite_as_hyper(self, z, **kwargs):
return pi*z**3/6 * hyper([Rational(3, 4)], [Rational(3, 2), Rational(7, 4)], -pi**2*z**4/16)
def _eval_rewrite_as_meijerg(self, z, **kwargs):
return (pi*z**Rational(9, 4) / (sqrt(2)*(z**2)**Rational(3, 4)*(-z)**Rational(3, 4))
* meijerg([], [1], [Rational(3, 4)], [Rational(1, 4), 0], -pi**2*z**4/16))
def _eval_rewrite_as_Integral(self, z, **kwargs):
from sympy.integrals.integrals import Integral
t = Dummy(uniquely_named_symbol('t', [z]).name)
return Integral(sin(pi*t**2/2), (t, 0, z))
def _eval_as_leading_term(self, x, logx, cdir):
from sympy.series.order import Order
arg = self.args[0].as_leading_term(x, logx=logx, cdir=cdir)
arg0 = arg.subs(x, 0)
if arg0 is S.ComplexInfinity:
arg0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+')
if arg0.is_zero:
return pi*arg**3/6
elif arg0 in [S.Infinity, S.NegativeInfinity]:
s = 1 if arg0 is S.Infinity else -1
return s*S.Half + Order(x, x)
else:
return self.func(arg0)
def _eval_aseries(self, n, args0, x, logx):
from sympy.series.order import Order
point = args0[0]
# Expansion at oo and -oo
if point in [S.Infinity, -S.Infinity]:
z = self.args[0]
# expansion of S(x) = S1(x*sqrt(pi/2)), see reference[5] page 1-8
# as only real infinities are dealt with, sin and cos are O(1)
p = [S.NegativeOne**k * factorial(4*k + 1) /
(2**(2*k + 2) * z**(4*k + 3) * 2**(2*k)*factorial(2*k))
for k in range(0, n) if 4*k + 3 < n]
q = [1/(2*z)] + [S.NegativeOne**k * factorial(4*k - 1) /
(2**(2*k + 1) * z**(4*k + 1) * 2**(2*k - 1)*factorial(2*k - 1))
for k in range(1, n) if 4*k + 1 < n]
p = [-sqrt(2/pi)*t for t in p]
q = [-sqrt(2/pi)*t for t in q]
s = 1 if point is S.Infinity else -1
# The expansion at oo is 1/2 + some odd powers of z
# To get the expansion at -oo, replace z by -z and flip the sign
# The result -1/2 + the same odd powers of z as before.
return s*S.Half + (sin(z**2)*Add(*p) + cos(z**2)*Add(*q)
).subs(x, sqrt(2/pi)*x) + Order(1/z**n, x)
# All other points are not handled
return super()._eval_aseries(n, args0, x, logx)
| fresnels |
python | geekcomputers__Python | classicIndianCardMatch.py | {
"start": 305,
"end": 740
} | class ____:
def __init__(self, suit, rank):
if (suit in SUITS) and (rank in RANKS):
self.suit = suit
self.rank = rank
else:
self.suit = None
self.rank = None
print("Invalid card: ", suit, rank)
def __str__(self):
return self.suit + self.rank
def getRank(self):
return self.rank
def getSuit(self):
return self.suit
| card |
python | mlflow__mlflow | examples/crewai/tracing.py | {
"start": 4077,
"end": 5313
} | class ____:
def __init__(self, origin, cities, date_range, interests):
self.cities = cities
self.origin = origin
self.interests = interests
self.date_range = date_range
def run(self):
agents = TripAgents()
tasks = TripTasks()
city_selector_agent = agents.city_selection_agent()
local_expert_agent = agents.local_expert()
identify_task = tasks.identify_task(
city_selector_agent, self.origin, self.cities, self.interests, self.date_range
)
gather_task = tasks.gather_task(
local_expert_agent, self.origin, self.interests, self.date_range
)
crew = Crew(
agents=[city_selector_agent, local_expert_agent],
tasks=[identify_task, gather_task],
verbose=True,
memory=True,
knowledge={"sources": [string_source], "metadata": {"preference": "personal"}},
)
result = crew.kickoff()
return result
trip_crew = TripCrew("California", "Tokyo", "Dec 12 - Dec 20", "sports")
result = trip_crew.run()
print("\n\n########################")
print("## Here is you Trip Plan")
print("########################\n")
print(result)
| TripCrew |
python | getlogbook__logbook | src/logbook/queues.py | {
"start": 10565,
"end": 12736
} | class ____(SubscriberBase):
"""A helper that acts as a message queue subscriber and will dispatch
received log records to the active handler setup. There are multiple ways
to use this class.
It can be used to receive log records from a queue::
subscriber = MessageQueueSubscriber("mongodb://localhost:27017/logging")
record = subscriber.recv()
But it can also be used to receive and dispatch these in one go::
with target_handler:
subscriber = MessageQueueSubscriber("mongodb://localhost:27017/logging")
subscriber.dispatch_forever()
This will take all the log records from that queue and dispatch them
over to `target_handler`. If you want you can also do that in the
background::
subscriber = MessageQueueSubscriber("mongodb://localhost:27017/logging")
controller = subscriber.dispatch_in_background(target_handler)
The controller returned can be used to shut down the background
thread::
controller.stop()
"""
def __init__(self, uri=None, queue="logging"):
try:
import kombu
except ImportError:
raise RuntimeError("The kombu library is required.")
if uri:
connection = kombu.Connection(uri)
self.queue = connection.SimpleQueue(queue)
def __del__(self):
try:
self.close()
except AttributeError:
# subscriber partially created
pass
def close(self):
self.queue.close()
def recv(self, timeout=None):
"""Receives a single record from the socket. Timeout of 0 means
nonblocking, `None` means blocking and otherwise it's a timeout in
seconds after which the function just returns with `None`.
"""
if timeout == 0:
try:
rv = self.queue.get(block=False)
except Exception:
return
else:
rv = self.queue.get(timeout=timeout)
log_record = rv.payload
rv.ack()
return LogRecord.from_dict(log_record)
RabbitMQSubscriber = MessageQueueSubscriber
| MessageQueueSubscriber |
python | patrick-kidger__equinox | equinox/nn/_pool.py | {
"start": 19803,
"end": 20156
} | class ____(AdaptivePool):
"""Adaptive two-dimensional downsampling using maximum for the target shape."""
def __init__(self, target_shape: int | Sequence[int]):
"""**Arguments:**
- `target_shape`: The target output shape.
"""
super().__init__(target_shape, num_spatial_dims=2, operation=jnp.max)
| AdaptiveMaxPool2d |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run_v2.py | {
"start": 2595,
"end": 15648
} | class ____(BaseJobConfiguration):
"""
The configuration for the Cloud Run worker V2.
The schema for this class is used to populate the `job_body` section of the
default base job template.
"""
credentials: GcpCredentials = Field(
title="GCP Credentials",
default_factory=GcpCredentials,
description=(
"The GCP Credentials used to connect to Cloud Run. "
"If not provided credentials will be inferred from "
"the local environment."
),
)
env_from_secrets: Dict[str, SecretKeySelector] = Field(
default_factory=dict,
title="Environment Variables from Secrets",
description="Environment variables to set from GCP secrets when starting a flow run.",
)
prefect_api_key_secret: Optional[SecretKeySelector] = Field(
default=None,
title="Prefect API Key Secret",
description="The GCP secret to use for the Prefect API key. When provided, the secret will be used instead of the PREFECT_API_KEY environment variable.",
)
prefect_api_auth_string_secret: Optional[SecretKeySelector] = Field(
default=None,
title="Prefect API Auth String Secret",
description="The GCP secret to use for the Prefect API auth string. When provided, the secret will be used instead of the PREFECT_API_AUTH_STRING environment variable.",
)
cloudsql_instances: Optional[List[str]] = Field(
default_factory=list,
title="Cloud SQL Instances",
description="List of Cloud SQL instance connection names to connect to. Format: {project}:{location}:{instance}",
)
job_body: Dict[str, Any] = Field(
json_schema_extra=dict(template=_get_default_job_body_template()),
)
keep_job: bool = Field(
default=False,
title="Keep Job After Completion",
description="Keep the completed Cloud run job on Google Cloud Platform.",
)
region: str = Field(
default="us-central1",
description="The region in which to run the Cloud Run job",
)
timeout: int = Field(
default=600,
gt=0,
le=604800,
description=(
"Max allowed duration the Job may be active before Cloud Run will "
"actively try to mark it failed and kill associated containers (maximum of 604800 seconds, 7 days)."
),
)
_job_name: str = PrivateAttr(default=None)
@property
def project(self) -> str:
"""
Returns the GCP project associated with the credentials.
Returns:
str: The GCP project associated with the credentials.
"""
return self.credentials.project
@property
def job_name(self) -> str:
"""
Returns the name of the job.
Returns:
str: The name of the job.
"""
if self._job_name is None:
base_job_name = slugify_name(self.name)
job_name = f"{base_job_name}-{uuid4().hex}"
self._job_name = job_name
return self._job_name
def _get_flow_run_logger(
self,
flow_run: "FlowRun",
work_pool: "WorkPool | None" = None,
worker_name: str | None = None,
) -> PrefectLogAdapter:
extra = {
"work_pool_name": (work_pool.name if work_pool else "<unknown>"),
"worker_name": worker_name if worker_name else "<unknown>",
"work_pool_id": str(work_pool.id if work_pool else "unknown"),
}
return flow_run_logger(flow_run=flow_run).getChild(
"worker",
extra=extra,
)
def prepare_for_flow_run(
self,
flow_run: "FlowRun",
deployment: Optional["DeploymentResponse"] = None,
flow: Optional["Flow"] = None,
work_pool: Optional["WorkPool"] = None,
worker_name: Optional[str] = None,
):
"""
Prepares the job configuration for a flow run.
Ensures that necessary values are present in the job body and that the
job body is valid.
Args:
flow_run: The flow run to prepare the job configuration for
deployment: The deployment associated with the flow run used for
preparation.
flow: The flow associated with the flow run used for preparation.
work_pool: The work pool associated with the flow run used for preparation.
worker_name: The worker name associated with the flow run used for preparation.
"""
super().prepare_for_flow_run(
flow_run=flow_run,
deployment=deployment,
flow=flow,
work_pool=work_pool,
worker_name=worker_name,
)
self._populate_env()
self._warn_about_plaintext_credentials(
flow_run=flow_run,
worker_name=worker_name,
work_pool=work_pool,
)
self._configure_cloudsql_volumes()
self._populate_or_format_command()
self._format_args_if_present()
self._populate_image_if_not_present()
self._populate_timeout()
self._remove_vpc_access_if_unset()
def _populate_timeout(self):
"""
Populates the job body with the timeout.
"""
self.job_body["template"]["template"]["timeout"] = f"{self.timeout}s"
def _warn_about_plaintext_credentials(
self,
flow_run: "FlowRun",
worker_name: str | None = None,
work_pool: "WorkPool | None" = None,
):
"""
Warns about plaintext credentials when no secrets are configured.
"""
if (
"PREFECT_API_KEY" in self.env
and not self.prefect_api_key_secret
and "PREFECT_API_KEY" not in self.env_from_secrets
):
self._get_flow_run_logger(
flow_run=flow_run,
worker_name=worker_name,
work_pool=work_pool,
).warning(
"PREFECT_API_KEY is provided as a plaintext environment variable. "
"For better security, consider providing it as a secret using "
"'prefect_api_key_secret' or 'env_from_secrets' in your base job template."
)
if (
"PREFECT_API_AUTH_STRING" in self.env
and not self.prefect_api_auth_string_secret
and "PREFECT_API_AUTH_STRING" not in self.env_from_secrets
):
self._get_flow_run_logger(
flow_run=flow_run,
worker_name=worker_name,
work_pool=work_pool,
).warning(
"PREFECT_API_AUTH_STRING is provided as a plaintext environment variable. "
"For better security, consider providing it as a secret using "
"'prefect_api_auth_string_secret' or 'env_from_secrets' in your base job template."
)
def _populate_env(self):
"""
Populates the job body with environment variables.
"""
# Filter out plaintext Prefect API key/auth string if secrets are configured
filtered_env = {}
for k, v in self.env.items():
if k == "PREFECT_API_KEY" and (
self.prefect_api_key_secret
or "PREFECT_API_KEY" in self.env_from_secrets
):
continue # Skip plaintext API key if secret is configured
if k == "PREFECT_API_AUTH_STRING" and (
self.prefect_api_auth_string_secret
or "PREFECT_API_AUTH_STRING" in self.env_from_secrets
):
continue # Skip plaintext auth string if secret is configured
filtered_env[k] = v
envs = [{"name": k, "value": v} for k, v in filtered_env.items()]
envs_from_secrets = [
{
"name": k,
"valueSource": {"secretKeyRef": v.model_dump()},
}
for k, v in self.env_from_secrets.items()
]
envs.extend(envs_from_secrets)
# Add Prefect API key from secret if configured
if self.prefect_api_key_secret:
envs.append(
{
"name": "PREFECT_API_KEY",
"valueSource": {
"secretKeyRef": self.prefect_api_key_secret.model_dump()
},
}
)
# Add Prefect API auth string from secret if configured
if self.prefect_api_auth_string_secret:
envs.append(
{
"name": "PREFECT_API_AUTH_STRING",
"valueSource": {
"secretKeyRef": self.prefect_api_auth_string_secret.model_dump()
},
}
)
self.job_body["template"]["template"]["containers"][0]["env"].extend(envs)
def _configure_cloudsql_volumes(self):
"""
Populates volumes and volume mounts for cloudsql instances
"""
if not self.cloudsql_instances:
return
template = self.job_body["template"]["template"]
containers = template["containers"]
if "volumes" not in template:
template["volumes"] = []
template["volumes"].append(
{
"name": "cloudsql",
"cloudSqlInstance": {"instances": self.cloudsql_instances},
}
)
if "volumeMounts" not in containers[0]:
containers[0]["volumeMounts"] = []
containers[0]["volumeMounts"].append(
{"name": "cloudsql", "mountPath": "/cloudsql"}
)
def _populate_image_if_not_present(self):
"""
Populates the job body with the image if not present.
"""
if "image" not in self.job_body["template"]["template"]["containers"][0]:
self.job_body["template"]["template"]["containers"][0]["image"] = (
f"docker.io/{get_prefect_image_name()}"
)
def _populate_or_format_command(self):
"""
Populates the job body with the command if not present.
"""
command = self.job_body["template"]["template"]["containers"][0].get("command")
if command is None:
self.job_body["template"]["template"]["containers"][0]["command"] = (
shlex.split(self._base_flow_run_command())
)
elif isinstance(command, str):
self.job_body["template"]["template"]["containers"][0]["command"] = (
shlex.split(command)
)
def _format_args_if_present(self):
"""
Formats the job body args if present.
"""
args = self.job_body["template"]["template"]["containers"][0].get("args")
if args is not None and isinstance(args, str):
self.job_body["template"]["template"]["containers"][0]["args"] = (
shlex.split(args)
)
def _remove_vpc_access_if_unset(self):
"""
Removes vpcAccess if unset.
"""
if "vpcAccess" not in self.job_body["template"]["template"]:
return
vpc_access = self.job_body["template"]["template"]["vpcAccess"]
# if vpcAccess is unset or connector is unset, remove the entire vpcAccess block
# otherwise leave the user provided value.
if not vpc_access or (
len(vpc_access) == 1
and "connector" in vpc_access
and vpc_access["connector"] is None
):
self.job_body["template"]["template"].pop("vpcAccess")
# noinspection PyMethodParameters
@field_validator("job_body")
@classmethod
def _ensure_job_includes_all_required_components(cls, value: Dict[str, Any]):
"""
Ensures that the job body includes all required components.
Args:
value: The job body to validate.
Returns:
The validated job body.
"""
patch = JsonPatch.from_diff(value, _get_base_job_body())
missing_paths = sorted([op["path"] for op in patch if op["op"] == "add"])
if missing_paths:
raise ValueError(
f"Job body is missing required components: {', '.join(missing_paths)}"
)
return value
# noinspection PyMethodParameters
@field_validator("job_body")
@classmethod
def _ensure_job_has_compatible_values(cls, value: Dict[str, Any]):
"""Ensure that the job body has compatible values."""
patch = JsonPatch.from_diff(value, _get_base_job_body())
incompatible = sorted(
[
f"{op['path']} must have value {op['value']!r}"
for op in patch
if op["op"] == "replace"
]
)
if incompatible:
raise ValueError(
"Job has incompatible values for the following attributes: "
f"{', '.join(incompatible)}"
)
return value
| CloudRunWorkerJobV2Configuration |
python | keras-team__keras | keras/src/legacy/layers.py | {
"start": 7406,
"end": 8396
class ____(Layer):
    """DEPRECATED."""

    def __init__(self, theta=1.0, **kwargs):
        super().__init__(**kwargs)
        # Reject None up front: the `theta < 0` comparison below would
        # otherwise raise a confusing TypeError instead of this message.
        if theta is None:
            raise ValueError(
                "Theta of a Thresholded ReLU layer cannot be None, expecting a "
                f"float. Received: {theta}"
            )
        if theta < 0:
            raise ValueError(
                "The theta value of a Thresholded ReLU layer "
                f"should be >=0. Received: {theta}"
            )
        self.supports_masking = True
        self.theta = tf.convert_to_tensor(theta, dtype=self.compute_dtype)

    def call(self, inputs):
        # Keep values strictly above the threshold; zero out the rest.
        mask = tf.cast(tf.greater(inputs, self.theta), self.compute_dtype)
        return inputs * mask

    def get_config(self):
        cfg = super().get_config()
        cfg.update({"theta": float(self.theta)})
        return cfg

    def compute_output_shape(self, input_shape):
        # Element-wise operation: output shape matches the input shape.
        return input_shape
| ThresholdedReLU |
python | pytorch__pytorch | torch/_subclasses/functional_tensor.py | {
"start": 31991,
"end": 32788
class ____(ABC):
    """Abstract interface over a functionalization backend.

    Implementations wrap and unwrap tensors into a functional form, run
    callables under functionalization, and expose mutation-tracking hooks
    (``replace``/``commit_update``/``sync``). Semantics of each hook are
    implementation-defined; see concrete subclasses.
    """

    @abstractmethod
    def wrap_tensors(self, args: tuple[Any]) -> tuple[Any]:
        """Wrap the tensors in ``args`` into their functionalized form."""
        pass

    @abstractmethod
    def unwrap_tensors(
        self, args: Union[torch.Tensor, tuple[torch.Tensor, ...]]
    ) -> Any:
        """Unwrap functionalized tensor(s) back to their inner tensor(s)."""
        pass

    @abstractmethod
    def functionalize(self, inner_f: Callable) -> Callable:
        """Return a functionalized version of the callable ``inner_f``."""
        pass

    @abstractmethod
    def redispatch_to_next(self) -> AbstractContextManager:
        """Context manager that redispatches below this functionalize layer."""
        pass

    @abstractmethod
    def replace(self, input_tensor, output_tensor) -> None:
        """Record that ``input_tensor``'s value is replaced by ``output_tensor``."""
        pass

    @abstractmethod
    def commit_update(self, tensor) -> None:
        """Commit any pending mutation recorded for ``tensor``."""
        pass

    @abstractmethod
    def sync(self, tensor) -> None:
        """Synchronize ``tensor`` with its functionalized counterpart."""
        pass

    @abstractmethod
    def mark_mutation_hidden_from_autograd(self, tensor) -> None:
        """Mark ``tensor``'s mutation as hidden from autograd."""
        pass
| BaseFunctionalizeAPI |
python | matplotlib__matplotlib | lib/matplotlib/legend_handler.py | {
"start": 11263,
"end": 13037
class ____(HandlerBase):
    """
    Handler for `.Patch` instances.
    """

    def __init__(self, patch_func=None, **kwargs):
        """
        Parameters
        ----------
        patch_func : callable, optional
            The function that creates the legend key artist.
            *patch_func* should have the signature::

                def patch_func(legend=legend, orig_handle=orig_handle,
                               xdescent=xdescent, ydescent=ydescent,
                               width=width, height=height, fontsize=fontsize)

            Subsequently, the created artist will have its ``update_prop``
            method called and the appropriate transform will be applied.

        **kwargs
            Keyword arguments forwarded to `.HandlerBase`.
        """
        super().__init__(**kwargs)
        self._patch_func = patch_func

    def _create_patch(self, legend, orig_handle,
                      xdescent, ydescent, width, height, fontsize):
        # Delegate to the user-supplied factory when given; otherwise fall
        # back to a plain rectangle spanning the legend-key box.
        if self._patch_func is not None:
            return self._patch_func(legend=legend, orig_handle=orig_handle,
                                    xdescent=xdescent, ydescent=ydescent,
                                    width=width, height=height,
                                    fontsize=fontsize)
        return Rectangle(xy=(-xdescent, -ydescent),
                         width=width, height=height)

    def create_artists(self, legend, orig_handle,
                       xdescent, ydescent, width, height, fontsize, trans):
        # docstring inherited
        patch = self._create_patch(legend, orig_handle,
                                   xdescent, ydescent, width, height,
                                   fontsize)
        self.update_prop(patch, orig_handle, legend)
        patch.set_transform(trans)
        return [patch]
| HandlerPatch |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.