language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/runnables/fallbacks.py
|
{
"start": 1013,
"end": 24463
}
|
class ____(RunnableSerializable[Input, Output]):
"""`Runnable` that can fallback to other `Runnable`s if it fails.
External APIs (e.g., APIs for a language model) may at times experience
degraded performance or even downtime.
In these cases, it can be useful to have a fallback `Runnable` that can be
used in place of the original `Runnable` (e.g., fallback to another LLM provider).
Fallbacks can be defined at the level of a single `Runnable`, or at the level
of a chain of `Runnable`s. Fallbacks are tried in order until one succeeds or
all fail.
While you can instantiate a `RunnableWithFallbacks` directly, it is usually
more convenient to use the `with_fallbacks` method on a `Runnable`.
Example:
```python
from langchain_core.chat_models.openai import ChatOpenAI
from langchain_core.chat_models.anthropic import ChatAnthropic
model = ChatAnthropic(model="claude-3-haiku-20240307").with_fallbacks(
[ChatOpenAI(model="gpt-3.5-turbo-0125")]
)
# Will usually use ChatAnthropic, but fallback to ChatOpenAI
# if ChatAnthropic fails.
model.invoke("hello")
# And you can also use fallbacks at the level of a chain.
# Here if both LLM providers fail, we'll fallback to a good hardcoded
# response.
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parser import StrOutputParser
from langchain_core.runnables import RunnableLambda
def when_all_is_lost(inputs):
return (
"Looks like our LLM providers are down. "
"Here's a nice 🦜️ emoji for you instead."
)
chain_with_fallback = (
PromptTemplate.from_template("Tell me a joke about {topic}")
| model
| StrOutputParser()
).with_fallbacks([RunnableLambda(when_all_is_lost)])
```
"""
runnable: Runnable[Input, Output]
"""The `Runnable` to run first."""
fallbacks: Sequence[Runnable[Input, Output]]
"""A sequence of fallbacks to try."""
exceptions_to_handle: tuple[type[BaseException], ...] = (Exception,)
"""The exceptions on which fallbacks should be tried.
Any exception that is not a subclass of these exceptions will be raised immediately.
"""
exception_key: str | None = None
"""If `string` is specified then handled exceptions will be passed to fallbacks as
part of the input under the specified key.
If `None`, exceptions will not be passed to fallbacks.
If used, the base `Runnable` and its fallbacks must accept a dictionary as input.
"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@property
@override
def InputType(self) -> type[Input]:
return self.runnable.InputType
@property
@override
def OutputType(self) -> type[Output]:
return self.runnable.OutputType
@override
def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
return self.runnable.get_input_schema(config)
@override
def get_output_schema(
self, config: RunnableConfig | None = None
) -> type[BaseModel]:
return self.runnable.get_output_schema(config)
@property
@override
def config_specs(self) -> list[ConfigurableFieldSpec]:
return get_unique_config_specs(
spec
for step in [self.runnable, *self.fallbacks]
for spec in step.config_specs
)
@classmethod
@override
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
return True
@classmethod
@override
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "schema", "runnable"]`
"""
return ["langchain", "schema", "runnable"]
@property
def runnables(self) -> Iterator[Runnable[Input, Output]]:
"""Iterator over the `Runnable` and its fallbacks.
Yields:
The `Runnable` then its fallbacks.
"""
yield self.runnable
yield from self.fallbacks
@override
def invoke(
self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
) -> Output:
if self.exception_key is not None and not isinstance(input, dict):
msg = (
"If 'exception_key' is specified then input must be a dictionary."
f"However found a type of {type(input)} for input"
)
raise ValueError(msg)
# setup callbacks
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
# start the root run
run_manager = callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
first_error = None
last_error = None
for runnable in self.runnables:
try:
if self.exception_key and last_error is not None:
input[self.exception_key] = last_error # type: ignore[index]
child_config = patch_config(config, callbacks=run_manager.get_child())
with set_config_context(child_config) as context:
output = context.run(
runnable.invoke,
input,
config,
**kwargs,
)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
last_error = e
except BaseException as e:
run_manager.on_chain_error(e)
raise
else:
run_manager.on_chain_end(output)
return output
if first_error is None:
msg = "No error stored at end of fallbacks."
raise ValueError(msg)
run_manager.on_chain_error(first_error)
raise first_error
@override
async def ainvoke(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Output:
if self.exception_key is not None and not isinstance(input, dict):
msg = (
"If 'exception_key' is specified then input must be a dictionary."
f"However found a type of {type(input)} for input"
)
raise ValueError(msg)
# setup callbacks
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
first_error = None
last_error = None
for runnable in self.runnables:
try:
if self.exception_key and last_error is not None:
input[self.exception_key] = last_error # type: ignore[index]
child_config = patch_config(config, callbacks=run_manager.get_child())
with set_config_context(child_config) as context:
coro = context.run(runnable.ainvoke, input, config, **kwargs)
output = await coro_with_context(coro, context)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
last_error = e
except BaseException as e:
await run_manager.on_chain_error(e)
raise
else:
await run_manager.on_chain_end(output)
return output
if first_error is None:
msg = "No error stored at end of fallbacks."
raise ValueError(msg)
await run_manager.on_chain_error(first_error)
raise first_error
@override
def batch(
self,
inputs: list[Input],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Output]:
if self.exception_key is not None and not all(
isinstance(input_, dict) for input_ in inputs
):
msg = (
"If 'exception_key' is specified then inputs must be dictionaries."
f"However found a type of {type(inputs[0])} for input"
)
raise ValueError(msg)
if not inputs:
return []
# setup callbacks
configs = get_config_list(config, len(inputs))
callback_managers = [
CallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers = [
cm.on_chain_start(
None,
input_ if isinstance(input_, dict) else {"input": input_},
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
for cm, input_, config in zip(
callback_managers, inputs, configs, strict=False
)
]
to_return: dict[int, Any] = {}
run_again = dict(enumerate(inputs))
handled_exceptions: dict[int, BaseException] = {}
first_to_raise = None
for runnable in self.runnables:
outputs = runnable.batch(
[input_ for _, input_ in sorted(run_again.items())],
[
# each step a child run of the corresponding root run
patch_config(configs[i], callbacks=run_managers[i].get_child())
for i in sorted(run_again)
],
return_exceptions=True,
**kwargs,
)
for (i, input_), output in zip(
sorted(run_again.copy().items()), outputs, strict=False
):
if isinstance(output, BaseException) and not isinstance(
output, self.exceptions_to_handle
):
if not return_exceptions:
first_to_raise = first_to_raise or output
else:
handled_exceptions[i] = output
run_again.pop(i)
elif isinstance(output, self.exceptions_to_handle):
if self.exception_key:
input_[self.exception_key] = output # type: ignore[index]
handled_exceptions[i] = output
else:
run_managers[i].on_chain_end(output)
to_return[i] = output
run_again.pop(i)
handled_exceptions.pop(i, None)
if first_to_raise:
raise first_to_raise
if not run_again:
break
sorted_handled_exceptions = sorted(handled_exceptions.items())
for i, error in sorted_handled_exceptions:
run_managers[i].on_chain_error(error)
if not return_exceptions and sorted_handled_exceptions:
raise sorted_handled_exceptions[0][1]
to_return.update(handled_exceptions)
return [output for _, output in sorted(to_return.items())]
@override
async def abatch(
self,
inputs: list[Input],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Output]:
if self.exception_key is not None and not all(
isinstance(input_, dict) for input_ in inputs
):
msg = (
"If 'exception_key' is specified then inputs must be dictionaries."
f"However found a type of {type(inputs[0])} for input"
)
raise ValueError(msg)
if not inputs:
return []
# setup callbacks
configs = get_config_list(config, len(inputs))
callback_managers = [
AsyncCallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers: list[AsyncCallbackManagerForChainRun] = await asyncio.gather(
*(
cm.on_chain_start(
None,
input_,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
for cm, input_, config in zip(
callback_managers, inputs, configs, strict=False
)
)
)
to_return: dict[int, Output | BaseException] = {}
run_again = dict(enumerate(inputs))
handled_exceptions: dict[int, BaseException] = {}
first_to_raise = None
for runnable in self.runnables:
outputs = await runnable.abatch(
[input_ for _, input_ in sorted(run_again.items())],
[
# each step a child run of the corresponding root run
patch_config(configs[i], callbacks=run_managers[i].get_child())
for i in sorted(run_again)
],
return_exceptions=True,
**kwargs,
)
for (i, input_), output in zip(
sorted(run_again.copy().items()), outputs, strict=False
):
if isinstance(output, BaseException) and not isinstance(
output, self.exceptions_to_handle
):
if not return_exceptions:
first_to_raise = first_to_raise or output
else:
handled_exceptions[i] = output
run_again.pop(i)
elif isinstance(output, self.exceptions_to_handle):
if self.exception_key:
input_[self.exception_key] = output # type: ignore[index]
handled_exceptions[i] = output
else:
to_return[i] = output
await run_managers[i].on_chain_end(output)
run_again.pop(i)
handled_exceptions.pop(i, None)
if first_to_raise:
raise first_to_raise
if not run_again:
break
sorted_handled_exceptions = sorted(handled_exceptions.items())
await asyncio.gather(
*(
run_managers[i].on_chain_error(error)
for i, error in sorted_handled_exceptions
)
)
if not return_exceptions and sorted_handled_exceptions:
raise sorted_handled_exceptions[0][1]
to_return.update(handled_exceptions)
return [cast("Output", output) for _, output in sorted(to_return.items())]
@override
def stream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Output]:
if self.exception_key is not None and not isinstance(input, dict):
msg = (
"If 'exception_key' is specified then input must be a dictionary."
f"However found a type of {type(input)} for input"
)
raise ValueError(msg)
# setup callbacks
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
# start the root run
run_manager = callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
first_error = None
last_error = None
for runnable in self.runnables:
try:
if self.exception_key and last_error is not None:
input[self.exception_key] = last_error # type: ignore[index]
child_config = patch_config(config, callbacks=run_manager.get_child())
with set_config_context(child_config) as context:
stream = context.run(
runnable.stream,
input,
**kwargs,
)
chunk: Output = context.run(next, stream)
except self.exceptions_to_handle as e:
first_error = e if first_error is None else first_error
last_error = e
except BaseException as e:
run_manager.on_chain_error(e)
raise
else:
first_error = None
break
if first_error:
run_manager.on_chain_error(first_error)
raise first_error
yield chunk
output: Output | None = chunk
try:
for chunk in stream:
yield chunk
try:
output = output + chunk # type: ignore[operator]
except TypeError:
output = None
except BaseException as e:
run_manager.on_chain_error(e)
raise
run_manager.on_chain_end(output)
@override
async def astream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Output]:
if self.exception_key is not None and not isinstance(input, dict):
msg = (
"If 'exception_key' is specified then input must be a dictionary."
f"However found a type of {type(input)} for input"
)
raise ValueError(msg)
# setup callbacks
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
first_error = None
last_error = None
for runnable in self.runnables:
try:
if self.exception_key and last_error is not None:
input[self.exception_key] = last_error # type: ignore[index]
child_config = patch_config(config, callbacks=run_manager.get_child())
with set_config_context(child_config) as context:
stream = runnable.astream(
input,
child_config,
**kwargs,
)
chunk = await coro_with_context(py_anext(stream), context)
except self.exceptions_to_handle as e:
first_error = e if first_error is None else first_error
last_error = e
except BaseException as e:
await run_manager.on_chain_error(e)
raise
else:
first_error = None
break
if first_error:
await run_manager.on_chain_error(first_error)
raise first_error
yield chunk
output: Output | None = chunk
try:
async for chunk in stream:
yield chunk
try:
output = output + chunk # type: ignore[operator]
except TypeError:
output = None
except BaseException as e:
await run_manager.on_chain_error(e)
raise
await run_manager.on_chain_end(output)
def __getattr__(self, name: str) -> Any:
"""Get an attribute from the wrapped `Runnable` and its fallbacks.
Returns:
If the attribute is anything other than a method that outputs a `Runnable`,
returns `getattr(self.runnable, name)`. If the attribute is a method that
does return a new `Runnable` (e.g. `model.bind_tools([...])` outputs a new
`RunnableBinding`) then `self.runnable` and each of the runnables in
`self.fallbacks` is replaced with `getattr(x, name)`.
Example:
```python
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
gpt_4o = ChatOpenAI(model="gpt-4o")
claude_3_sonnet = ChatAnthropic(model="claude-sonnet-4-5-20250929")
model = gpt_4o.with_fallbacks([claude_3_sonnet])
model.model_name
# -> "gpt-4o"
# .bind_tools() is called on both ChatOpenAI and ChatAnthropic
# Equivalent to:
# gpt_4o.bind_tools([...]).with_fallbacks([claude_3_sonnet.bind_tools([...])])
model.bind_tools([...])
# -> RunnableWithFallbacks(
runnable=RunnableBinding(bound=ChatOpenAI(...), kwargs={"tools": [...]}),
fallbacks=[RunnableBinding(bound=ChatAnthropic(...), kwargs={"tools": [...]})],
)
```
""" # noqa: E501
attr = getattr(self.runnable, name)
if _returns_runnable(attr):
@wraps(attr)
def wrapped(*args: Any, **kwargs: Any) -> Any:
new_runnable = attr(*args, **kwargs)
new_fallbacks = []
for fallback in self.fallbacks:
fallback_attr = getattr(fallback, name)
new_fallbacks.append(fallback_attr(*args, **kwargs))
return self.__class__(
**{
**self.model_dump(),
"runnable": new_runnable,
"fallbacks": new_fallbacks,
}
)
return wrapped
return attr
def _returns_runnable(attr: Any) -> bool:
if not callable(attr):
return False
return_type = typing.get_type_hints(attr).get("return")
return bool(return_type and _is_runnable_type(return_type))
def _is_runnable_type(type_: Any) -> bool:
if inspect.isclass(type_):
return issubclass(type_, Runnable)
origin = getattr(type_, "__origin__", None)
if inspect.isclass(origin):
return issubclass(origin, Runnable)
if origin is typing.Union:
return all(_is_runnable_type(t) for t in type_.__args__)
return False
|
RunnableWithFallbacks
|
python
|
oauthlib__oauthlib
|
tests/openid/connect/core/test_server.py
|
{
"start": 609,
"end": 5198
}
|
class ____(TestCase):
def setUp(self):
self.mock_validator = mock.MagicMock()
self.mock_validator.get_code_challenge.return_value = None
self.addCleanup(setattr, self, 'mock_validator', mock.MagicMock())
auth_code = AuthorizationCodeGrant(request_validator=self.mock_validator)
auth_code.save_authorization_code = mock.MagicMock()
implicit = ImplicitGrant(
request_validator=self.mock_validator)
implicit.save_token = mock.MagicMock()
hybrid = HybridGrant(self.mock_validator)
response_types = {
'code': auth_code,
'token': implicit,
'id_token': implicit,
'id_token token': implicit,
'code token': hybrid,
'code id_token': hybrid,
'code token id_token': hybrid,
'none': auth_code
}
self.expires_in = 1800
token = BearerToken(
self.mock_validator,
expires_in=self.expires_in
)
self.endpoint = AuthorizationEndpoint(
default_response_type='code',
default_token_type=token,
response_types=response_types
)
# TODO: Add hybrid grant test
@mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
def test_authorization_grant(self):
uri = 'http://i.b/l?response_type=code&client_id=me&scope=all+of+them&state=xyz'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me?code=abc&state=xyz')
@mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
def test_implicit_grant(self):
uri = 'http://i.b/l?response_type=token&client_id=me&scope=all+of+them&state=xyz'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me#access_token=abc&expires_in=' + str(self.expires_in) + '&token_type=Bearer&state=xyz&scope=all+of+them', parse_fragment=True)
def test_none_grant(self):
uri = 'http://i.b/l?response_type=none&client_id=me&scope=all+of+them&state=xyz'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me?state=xyz', parse_fragment=True)
self.assertIsNone(body)
self.assertEqual(status_code, 302)
# and without the state parameter
uri = 'http://i.b/l?response_type=none&client_id=me&scope=all+of+them'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me', parse_fragment=True)
self.assertIsNone(body)
self.assertEqual(status_code, 302)
def test_missing_type(self):
uri = 'http://i.b/l?client_id=me&scope=all+of+them'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
self.mock_validator.validate_request = mock.MagicMock(
side_effect=errors.InvalidRequestError())
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me?error=invalid_request&error_description=Missing+response_type+parameter.')
def test_invalid_type(self):
uri = 'http://i.b/l?response_type=invalid&client_id=me&scope=all+of+them'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
self.mock_validator.validate_request = mock.MagicMock(
side_effect=errors.UnsupportedResponseTypeError())
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me?error=unsupported_response_type')
|
AuthorizationEndpointTest
|
python
|
patrick-kidger__equinox
|
equinox/_module/_module.py
|
{
"start": 3468,
"end": 3743
}
|
class ____(eqx.Module):
linear: Callable
def __init__(self, ...):
self.linear = eqx.nn.Linear(...)
def __call__(self, ...):
... = jax.vmap(self.linear)(...)
```
or by using `eqx.filter_vmap` instead (which *does* return a PyTree):
```python
|
MyModule
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/origin_info.py
|
{
"start": 1675,
"end": 5152
}
|
class ____(
collections.namedtuple(
'OriginInfo',
('loc', 'function_name', 'source_code_line', 'comment'))):
"""Container for information about the source code before conversion.
Attributes:
loc: Location
function_name: Optional[Text]
source_code_line: Text
comment: Optional[Text]
"""
def as_frame(self):
"""Returns a 4-tuple consistent with the return of traceback.extract_tb."""
return (self.loc.filename, self.loc.lineno, self.function_name,
self.source_code_line)
def __repr__(self):
if self.loc.filename:
return '{}:{}:{}'.format(
os.path.split(self.loc.filename)[1], self.loc.lineno,
self.loc.col_offset)
return '<no file>:{}:{}'.format(self.loc.lineno, self.loc.col_offset)
# TODO(mdan): This source map should be a class - easier to refer to.
def create_source_map(nodes, code, filepath):
"""Creates a source map between an annotated AST and the code it compiles to.
Note: this function assumes nodes nodes, code and filepath correspond to the
same code.
Args:
nodes: Iterable[ast.AST, ...], one or more AST modes.
code: Text, the source code in which nodes are found.
filepath: Text
Returns:
Dict[LineLocation, OriginInfo], mapping locations in code to locations
indicated by origin annotations in node.
"""
reparsed_nodes = parser.parse(code, preamble_len=0, single_node=False)
for node in reparsed_nodes:
resolve(node, code, filepath, node.lineno, node.col_offset)
source_map = {}
try:
for before, after in ast_util.parallel_walk(nodes, reparsed_nodes):
# Note: generated code might not be mapped back to its origin.
# TODO(mdan): Generated code should always be mapped to something.
origin_info = anno.getanno(before, anno.Basic.ORIGIN, default=None)
final_info = anno.getanno(after, anno.Basic.ORIGIN, default=None)
if origin_info is None or final_info is None:
continue
# Note: the keys are by line only, excluding the column offset.
line_loc = LineLocation(final_info.loc.filename, final_info.loc.lineno)
existing_origin = source_map.get(line_loc)
if existing_origin is not None:
# Overlaps may exist because of child nodes, but almost never to
# different line locations. Exception make decorated functions, where
# both lines are mapped to the same line in the AST.
# Line overlaps: keep bottom node.
if existing_origin.loc.line_loc == origin_info.loc.line_loc:
if existing_origin.loc.lineno >= origin_info.loc.lineno:
continue
# In case of column overlaps, keep the leftmost node.
if existing_origin.loc.col_offset <= origin_info.loc.col_offset:
continue
source_map[line_loc] = origin_info
except ValueError as err:
new_msg = 'Inconsistent ASTs detected. This is a bug. Cause: \n'
new_msg += str(err)
new_msg += 'Diff:\n'
for n, rn in zip(nodes, reparsed_nodes):
nodes_str = pretty_printer.fmt(n, color=False, noanno=True)
reparsed_nodes_str = pretty_printer.fmt(rn, color=False, noanno=True)
diff = difflib.context_diff(
nodes_str.split('\n'),
reparsed_nodes_str.split('\n'),
fromfile='Original nodes',
tofile='Reparsed nodes',
n=7)
diff = '\n'.join(diff)
new_msg += diff + '\n'
raise ValueError(new_msg)
return source_map
|
OriginInfo
|
python
|
sphinx-doc__sphinx
|
sphinx/ext/autosummary/generate.py
|
{
"start": 3192,
"end": 5834
}
|
class ____:
"""A helper class for rendering."""
def __init__(self, app: Sphinx) -> None:
if isinstance(app, Builder):
msg = 'Expected a Sphinx application object!'
raise TypeError(msg)
system_templates_path = [
package_dir.joinpath('ext', 'autosummary', 'templates')
]
loader = SphinxTemplateLoader(
app.srcdir, app.config.templates_path, system_templates_path
)
self.env = SandboxedEnvironment(loader=loader)
self.env.filters['escape'] = rst.escape
self.env.filters['e'] = rst.escape
self.env.filters['underline'] = _underline
if app.translator:
self.env.add_extension('jinja2.ext.i18n')
# ``install_gettext_translations`` is injected by the ``jinja2.ext.i18n`` extension
self.env.install_gettext_translations(app.translator) # type: ignore[attr-defined]
def render(self, template_name: str, context: dict[str, Any]) -> str:
"""Render a template file."""
try:
template = self.env.get_template(template_name)
except TemplateNotFound:
try:
# objtype is given as template_name
template = self.env.get_template('autosummary/%s.rst' % template_name)
except TemplateNotFound:
# fallback to base.rst
template = self.env.get_template('autosummary/base.rst')
return template.render(context)
def _split_full_qualified_name(name: str) -> tuple[str | None, str]:
"""Split full qualified name to a pair of modname and qualname.
A qualname is an abbreviation for "Qualified name" introduced at PEP-3155
(https://peps.python.org/pep-3155/). It is a dotted path name
from the module top-level.
A "full" qualified name means a string containing both module name and
qualified name.
.. note:: This function actually imports the module to check its existence.
Therefore you need to mock 3rd party modules if needed before
calling this function.
"""
parts = name.split('.')
for i, _part in enumerate(parts, 1):
try:
modname = '.'.join(parts[:i])
importlib.import_module(modname)
except ImportError:
if parts[: i - 1]:
return '.'.join(parts[: i - 1]), '.'.join(parts[i - 1 :])
else:
return None, '.'.join(parts)
except IndexError:
pass
return name, ''
# -- Generating output ---------------------------------------------------------
|
AutosummaryRenderer
|
python
|
google__pytype
|
pytype/blocks/process_blocks.py
|
{
"start": 774,
"end": 3517
}
|
class ____(pyc.CodeVisitor):
"""Collect opcodes that might have annotations attached."""
def __init__(self):
super().__init__()
# A mutable map of line: opcode for STORE_* opcodes. This is modified as the
# visitor runs, and contains the last opcode for each line.
self.store_ops = {}
# A mutable map of start: (end, opcode) for MAKE_FUNCTION opcodes. This is
# modified as the visitor runs, and contains the range of lines that could
# contain function type comments.
self.make_function_ops = {}
def visit_code(self, code):
"""Find STORE_* and MAKE_FUNCTION opcodes for attaching annotations."""
# Offset between function code and MAKE_FUNCTION
# [LOAD_CONST <code>, LOAD_CONST <function name>, MAKE_FUNCTION]
# In 3.11, the LOAD_CONST <function name> opcode is removed.
offset = 1 if code.python_version >= (3, 11) else 2
co_code = list(code.code_iter)
for i, op in enumerate(co_code):
if isinstance(op, opcodes.MAKE_FUNCTION):
code_op = co_code[i - offset]
assert isinstance(code_op, CODE_LOADING_OPCODES), code_op.__class__
fn_code = code.consts[code_op.arg]
if not _is_function_def(fn_code):
continue
# First line of code in body.
end_line = min(
op.line
for op in fn_code.code_iter
if not isinstance(op, opcodes.RESUME)
)
self.make_function_ops[op.line] = (end_line, op)
elif (
isinstance(op, blocks.STORE_OPCODES)
and op.line not in self.make_function_ops
):
# For type comments attached to multi-opcode lines, we want to mark the
# latest 'store' opcode and attach the type comment to it.
# Except that in 3.12+ list/dict/set comprehensions are inlined and end
# with a STORE_FAST opcode, which set the iteration variable to NULL.
# E.g. `foo = [x for x in y]` ends with:
# END_FOR
# STORE_FAST foo
# STORE_FAST x
# In this case we want to attach the comment to the 2nd to last opcode.
#
# Brittleness alert:
# Taking the last opcode in a line is possibly confusing, e.g. for:
# a = ''; b = 1 # type: int
# Matching comprehensions based on the 3 opcodes could probably also
# fail. Feel free to adjust as necessary.
if (
code.python_version >= (3, 12)
and i >= 2
and isinstance(op, opcodes.STORE_FAST)
and isinstance(co_code[i - 1], opcodes.STORE_FAST)
and isinstance(co_code[i - 2], opcodes.END_FOR)
):
continue
self.store_ops[op.line] = op
return code
|
CollectAnnotationTargetsVisitor
|
python
|
PyCQA__pylint
|
pylint/pyreverse/inspector.py
|
{
"start": 12078,
"end": 14205
}
|
class ____(AbstractRelationshipHandler):
"""Handle composition relationships where parent creates child objects."""
def handle(
self, node: nodes.AssignAttr | nodes.AssignName, parent: nodes.ClassDef
) -> None:
# If the node is not part of an assignment, pass to next handler
if not isinstance(node.parent, (nodes.AnnAssign, nodes.Assign)):
super().handle(node, parent)
return
value = node.parent.value
# Extract the name to handle both AssignAttr and AssignName nodes
name = node.attrname if isinstance(node, nodes.AssignAttr) else node.name
# Composition: direct object creation (self.x = P())
if isinstance(value, nodes.Call):
inferred_types = utils.infer_node(node)
element_types = extract_element_types(inferred_types)
# Resolve nodes to actual class definitions
resolved_types = resolve_to_class_def(element_types)
current = set(parent.compositions_type[name])
parent.compositions_type[name] = list(current | resolved_types)
return
# Composition: comprehensions with object creation (self.x = [P() for ...])
if isinstance(
value, (nodes.ListComp, nodes.DictComp, nodes.SetComp, nodes.GeneratorExp)
):
if isinstance(value, nodes.DictComp):
element = value.value
else:
element = value.elt
# If the element is a Call (object creation), it's composition
if isinstance(element, nodes.Call):
inferred_types = utils.infer_node(node)
element_types = extract_element_types(inferred_types)
# Resolve nodes to actual class definitions
resolved_types = resolve_to_class_def(element_types)
current = set(parent.compositions_type[name])
parent.compositions_type[name] = list(current | resolved_types)
return
# Not a composition, pass to next handler
super().handle(node, parent)
|
CompositionsHandler
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 360806,
"end": 361135
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("IssueTimelineItem", graphql_name="node")
|
IssueTimelineItemEdge
|
python
|
python-attrs__attrs
|
typing-examples/mypy.py
|
{
"start": 523,
"end": 587
}
|
class ____:
a: int = attr.ib()
cc = CC(1)
CC(a=1)
@attr.s
|
CC
|
python
|
openai__openai-python
|
src/openai/types/vector_store_search_response.py
|
{
"start": 259,
"end": 408
}
|
class ____(BaseModel):
text: str
"""The text content returned from search."""
type: Literal["text"]
"""The type of content."""
|
Content
|
python
|
pydantic__pydantic
|
pydantic/v1/errors.py
|
{
"start": 6530,
"end": 6663
}
|
class ____(_PathValueError):
code = 'path.not_a_file'
msg_template = 'path "{path}" does not point to a file'
|
PathNotAFileError
|
python
|
django__django
|
tests/admin_views/test_forms.py
|
{
"start": 948,
"end": 1575
}
|
class ____(SimpleTestCase):
def test_repr(self):
fieldsets = (
(
"My fields",
{
"classes": ["collapse"],
"fields": ("url", "title", "content", "sites"),
},
),
)
form = ArticleForm()
admin_form = AdminForm(form, fieldsets, {})
self.assertEqual(
repr(admin_form),
"<AdminForm: form=ArticleForm fieldsets=(('My fields', "
"{'classes': ['collapse'], "
"'fields': ('url', 'title', 'content', 'sites')}),)>",
)
|
AdminFormTests
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatterternary/_textfont.py
|
{
"start": 233,
"end": 17139
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterternary"
_path_str = "scatterternary.textfont"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterternary.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterternary.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Textfont
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_annotations/allow_nested_overload.py
|
{
"start": 0,
"end": 146
}
|
class ____:
from typing import overload
@overload
def f(self, x: int, y: int) -> None:
...
def f(self, x, y):
pass
|
C
|
python
|
more-itertools__more-itertools
|
tests/test_more.py
|
{
"start": 139421,
"end": 143369
}
|
class ____(TestCase):
@staticmethod
def _normalize_partition(p):
"""
Return a normalized, hashable, version of a partition using
_FrozenMultiset
"""
return _FrozenMultiset(_FrozenMultiset(g) for g in p)
@staticmethod
def _normalize_partitions(ps):
"""
Return a normalized set of all normalized partitions using
_FrozenMultiset
"""
return _FrozenMultiset(
SetPartitionsTests._normalize_partition(p) for p in ps
)
def test_repeated(self):
it = 'aaa'
actual = mi.set_partitions(it, 2)
expected = [['a', 'aa'], ['a', 'aa'], ['a', 'aa']]
self.assertEqual(
self._normalize_partitions(expected),
self._normalize_partitions(actual),
)
def test_each_correct(self):
a = set(range(6))
for p in mi.set_partitions(a):
total = {e for g in p for e in g}
self.assertEqual(a, total)
def test_duplicates(self):
a = set(range(6))
for p in mi.set_partitions(a):
self.assertFalse(self._normalize_partition(p).has_duplicates())
def test_found_all(self):
"""small example, hand-checked"""
expected = [
[[0], [1], [2, 3, 4]],
[[0], [1, 2], [3, 4]],
[[0], [2], [1, 3, 4]],
[[0], [3], [1, 2, 4]],
[[0], [4], [1, 2, 3]],
[[0], [1, 3], [2, 4]],
[[0], [1, 4], [2, 3]],
[[1], [2], [0, 3, 4]],
[[1], [3], [0, 2, 4]],
[[1], [4], [0, 2, 3]],
[[1], [0, 2], [3, 4]],
[[1], [0, 3], [2, 4]],
[[1], [0, 4], [2, 3]],
[[2], [3], [0, 1, 4]],
[[2], [4], [0, 1, 3]],
[[2], [0, 1], [3, 4]],
[[2], [0, 3], [1, 4]],
[[2], [0, 4], [1, 3]],
[[3], [4], [0, 1, 2]],
[[3], [0, 1], [2, 4]],
[[3], [0, 2], [1, 4]],
[[3], [0, 4], [1, 2]],
[[4], [0, 1], [2, 3]],
[[4], [0, 2], [1, 3]],
[[4], [0, 3], [1, 2]],
]
actual = mi.set_partitions(range(5), 3)
self.assertEqual(
self._normalize_partitions(expected),
self._normalize_partitions(actual),
)
def test_stirling_numbers(self):
"""Check against https://en.wikipedia.org/wiki/
Stirling_numbers_of_the_second_kind#Table_of_values"""
cardinality_by_k_by_n = [
[1],
[1, 1],
[1, 3, 1],
[1, 7, 6, 1],
[1, 15, 25, 10, 1],
[1, 31, 90, 65, 15, 1],
]
for n, cardinality_by_k in enumerate(cardinality_by_k_by_n, 1):
for k, cardinality in enumerate(cardinality_by_k, 1):
self.assertEqual(
cardinality, len(list(mi.set_partitions(range(n), k)))
)
def test_no_group(self):
def helper():
list(mi.set_partitions(range(4), -1))
self.assertRaises(ValueError, helper)
def test_to_many_groups(self):
self.assertEqual([], list(mi.set_partitions(range(4), 5)))
def test_min_size(self):
it = 'abc'
actual = mi.set_partitions(it, min_size=2)
expected = [['abc']]
self.assertEqual(
self._normalize_partitions(expected),
self._normalize_partitions(actual),
)
def test_max_size(self):
it = 'abc'
actual = mi.set_partitions(it, max_size=2)
expected = [['a', 'bc'], ['ab', 'c'], ['b', 'ac'], ['a', 'b', 'c']]
self.assertEqual(
self._normalize_partitions(expected),
self._normalize_partitions(actual),
)
def test_min_max(self):
it = 'abcdefg'
self.assertEqual(
list(mi.set_partitions(it, min_size=4, max_size=3)), []
)
|
SetPartitionsTests
|
python
|
getsentry__sentry
|
src/sentry/dynamic_sampling/tasks/common.py
|
{
"start": 6224,
"end": 6695
}
|
class ____:
"""
Represents the total and indexed number of transactions received by an organization
(in a particular interval of time).
"""
# organization id
org_id: int
# total number of transactions
total: int
# number of transactions indexed (i.e. stored)
indexed: int | None
def is_valid_for_recalibration(self) -> bool:
return self.total > 0 and self.indexed is not None and self.indexed > 0
|
OrganizationDataVolume
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/__init__.py
|
{
"start": 7140,
"end": 11354
}
|
class ____(HubspotTestCase):
def _ms(self, dt) -> int:
return int(dt.timestamp() * 1000)
def request(self, page_token: Optional[Dict[str, str]] = None):
start = self.start_date()
end = self.now()
builder = (
CRMSearchRequestBuilder()
.for_entity(self.OBJECT_TYPE)
.with_properties(list(self.PROPERTIES.keys()))
.with_cursor_range_ms(
cursor_field="hs_lastmodifieddate",
start_ms=self._ms(start),
end_ms=self._ms(end),
)
)
if page_token:
builder = builder.with_page_token(page_token)
return builder.build()
@property
def response_builder(self):
return HubspotStreamResponseBuilder.for_stream(self.STREAM_NAME)
def response(self, id: Optional[str] = None, with_pagination: bool = False):
record = (
self.record_builder(self.STREAM_NAME, FieldPath(self.CURSOR_FIELD))
.with_field(FieldPath(self.CURSOR_FIELD), self.dt_str(self.updated_at()))
.with_field(FieldPath("id"), id if id else self.OBJECT_ID)
)
response = self.response_builder.with_record(record)
if with_pagination:
response = response.with_pagination()
return response.build()
def _set_up_oauth(self, http_mocker: HttpMocker):
self.mock_oauth(http_mocker, self.ACCESS_TOKEN)
def _set_up_requests(
self, http_mocker: HttpMocker, with_oauth: bool = False, with_dynamic_schemas: bool = True, entities: Optional[List[str]] = None
):
if with_oauth:
self._set_up_oauth(http_mocker)
self.mock_custom_objects(http_mocker)
self.mock_properties(http_mocker, self.OBJECT_TYPE, self.MOCK_PROPERTIES_FOR_SCHEMA_LOADER)
if with_dynamic_schemas:
self.mock_dynamic_schema_requests(http_mocker, entities)
def _mock_associations_with_stream_builder(
self,
http_mocker,
parent_entity: str, # e.g. "calls" / "meetings"
association_name: str, # e.g. "contacts"
record_ids: List[str], # primary ids from the page, as strings
to_ids_per_record: Dict[str, List[int]], # map primary id -> list of associated ids
):
"""
Mocks:
POST https://api.hubapi.com/crm/v4/associations/{parent_entity}/{association_name}/batch/read
Response body mirrors HubSpot's shape but only includes:
{ "status": "COMPLETE", "results": [ { "from": {"id": ...}, "to": [ { "toObjectId": ..., "associationTypes": [...] }, ... ] }, ... ] }
We intentionally skip `errors` / `numErrors`.
"""
req = (
AssociationsBatchReadRequestBuilder()
.for_parent(parent_entity)
.for_association(association_name)
.with_ids(record_ids)
.build()
)
results = []
for rid in record_ids:
to_list = [
{
"toObjectId": int(x),
"associationTypes": [
{"category": "HUBSPOT_DEFINED", "typeId": 200, "label": None}
],
}
for x in to_ids_per_record.get(rid, [])
]
results.append({"from": {"id": str(rid)}, "to": to_list})
body = json.dumps({"status": "COMPLETE", "results": results})
self.mock_response(http_mocker, req, HttpResponse(status_code=200, body=body), method="post")
def _mock_all_associations_for_ids(self, http_mocker: HttpMocker, parent_entity: str, record_ids: List[str]):
"""
Convenience wrapper: for each association, create two deterministic associated IDs per record.
"""
to_map = {rid: [int(rid) + 1, int(rid) + 2] for rid in record_ids if rid.isdigit()}
for assoc in self.ASSOCIATIONS:
self._mock_associations_with_stream_builder(
http_mocker,
parent_entity=parent_entity,
association_name=assoc,
record_ids=record_ids,
to_ids_per_record=to_map,
)
|
HubspotCRMSearchStream
|
python
|
GoogleCloudPlatform__python-docs-samples
|
monitoring/snippets/v3/uptime-check-client/snippets.py
|
{
"start": 7172,
"end": 11138
}
|
class ____(Exception):
pass
def project_id() -> str:
"""Retrieves the project id from the environment variable.
Raises:
MissingProjectIdError -- When not set.
Returns:
str -- the project name
"""
project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
if not project_id:
raise MissingProjectIdError(
"Set the environment variable "
+ "GCLOUD_PROJECT to your Google Cloud Project Id."
)
return project_id
def project_name() -> str:
return "projects/" + project_id()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Demonstrates Uptime Check API operations."
)
subparsers = parser.add_subparsers(dest="command")
list_uptime_check_configs_parser = subparsers.add_parser(
"list-uptime-check-configs", help=list_uptime_check_configs.__doc__
)
list_uptime_check_ips_parser = subparsers.add_parser(
"list-uptime-check-ips", help=list_uptime_check_ips.__doc__
)
create_uptime_check_config_get_parser = subparsers.add_parser(
"create-uptime-check-get", help=create_uptime_check_config_get.__doc__
)
create_uptime_check_config_get_parser.add_argument(
"-d",
"--display_name",
required=False,
)
create_uptime_check_config_get_parser.add_argument(
"-o",
"--host_name",
required=False,
)
create_uptime_check_config_post_parser = subparsers.add_parser(
"create-uptime-check-post", help=create_uptime_check_config_post.__doc__
)
create_uptime_check_config_post_parser.add_argument(
"-d",
"--display_name",
required=False,
)
create_uptime_check_config_post_parser.add_argument(
"-o",
"--host_name",
required=False,
)
get_uptime_check_config_parser = subparsers.add_parser(
"get-uptime-check-config", help=get_uptime_check_config.__doc__
)
get_uptime_check_config_parser.add_argument(
"-m",
"--name",
required=True,
)
delete_uptime_check_config_parser = subparsers.add_parser(
"delete-uptime-check-config", help=delete_uptime_check_config.__doc__
)
delete_uptime_check_config_parser.add_argument(
"-m",
"--name",
required=True,
)
update_uptime_check_config_parser = subparsers.add_parser(
"update-uptime-check-config", help=update_uptime_check_config.__doc__
)
update_uptime_check_config_parser.add_argument(
"-m",
"--name",
required=True,
)
update_uptime_check_config_parser.add_argument(
"-d",
"--display_name",
required=False,
)
update_uptime_check_config_parser.add_argument(
"-p",
"--uptime_check_path",
required=False,
)
args = parser.parse_args()
if args.command == "list-uptime-check-configs":
list_uptime_check_configs(project_name())
elif args.command == "list-uptime-check-ips":
list_uptime_check_ips()
elif args.command == "create-uptime-check-get":
create_uptime_check_config_get(
project_name(), args.host_name, args.display_name
)
elif args.command == "create-uptime-check-post":
create_uptime_check_config_post(
project_name(), args.host_name, args.display_name
)
elif args.command == "get-uptime-check-config":
get_uptime_check_config(args.name)
elif args.command == "delete-uptime-check-config":
delete_uptime_check_config(args.name)
elif args.command == "update-uptime-check-config":
if not args.display_name and not args.uptime_check_path:
print("Nothing to update. Pass --display_name or " "--uptime_check_path.")
else:
update_uptime_check_config(
args.name, args.display_name, args.uptime_check_path
)
|
MissingProjectIdError
|
python
|
ethereum__web3.py
|
web3/datastructures.py
|
{
"start": 657,
"end": 2471
}
|
class ____(Mapping[TKey, TValue]):
"""
The read attributes for the AttributeDict types
"""
def __init__(
self, dictionary: dict[TKey, TValue], *args: Any, **kwargs: Any
) -> None:
# type ignored on 46/50 b/c dict() expects str index type not TKey
self.__dict__ = dict(dictionary) # type: ignore
self.__dict__.update(dict(*args, **kwargs))
def __getitem__(self, key: TKey) -> TValue:
return self.__dict__[key] # type: ignore
def __iter__(self) -> Iterator[Any]:
return iter(self.__dict__)
def __len__(self) -> int:
return len(self.__dict__)
def __repr__(self) -> str:
return self.__class__.__name__ + f"({self.__dict__!r})"
def _repr_pretty_(self, builder: Any, cycle: bool) -> None:
"""
Custom pretty output for the IPython console
https://ipython.readthedocs.io/en/stable/api/generated/IPython.lib.pretty.html#extending # noqa: E501
"""
builder.text(self.__class__.__name__ + "(")
if cycle:
builder.text("<cycle>")
else:
builder.pretty(self.__dict__)
builder.text(")")
@classmethod
def recursive(cls, value: TValue) -> Any:
"""
Recursively convert mappings to ReadableAttributeDict instances and
process nested collections (e.g., lists, sets, and dictionaries).
"""
if isinstance(value, Mapping):
return cls({k: cls.recursive(v) for k, v in value.items()})
elif isinstance(value, Sequence) and not isinstance(value, (str, bytes)):
return type(value)([cls.recursive(v) for v in value]) # type: ignore
elif isinstance(value, set):
return {cls.recursive(v) for v in value}
return value
|
ReadableAttributeDict
|
python
|
realpython__materials
|
python-maze-solver/source_code_final/src/maze_solver/models/solution.py
|
{
"start": 204,
"end": 904
}
|
class ____:
squares: tuple[Square, ...]
def __post_init__(self) -> None:
assert self.squares[0].role is Role.ENTRANCE
assert self.squares[-1].role is Role.EXIT
reduce(validate_corridor, self.squares)
def __iter__(self) -> Iterator[Square]:
return iter(self.squares)
def __getitem__(self, index: int) -> Square:
return self.squares[index]
def __len__(self) -> int:
return len(self.squares)
def validate_corridor(current: Square, following: Square) -> Square:
assert any(
[current.row == following.row, current.column == following.column]
), "Squares must lie in the same row or column"
return following
|
Solution
|
python
|
django__django
|
django/conf/__init__.py
|
{
"start": 7664,
"end": 9216
}
|
class ____:
"""Holder for user configured settings."""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__["_deleted"] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if not name.isupper() or name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
super().__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super().__delattr__(name)
def __dir__(self):
return sorted(
s
for s in [*self.__dict__, *dir(self.default_settings)]
if s not in self._deleted
)
def is_overridden(self, setting):
deleted = setting in self._deleted
set_locally = setting in self.__dict__
set_on_default = getattr(
self.default_settings, "is_overridden", lambda s: False
)(setting)
return deleted or set_locally or set_on_default
def __repr__(self):
return "<%(cls)s>" % {
"cls": self.__class__.__name__,
}
settings = LazySettings()
|
UserSettingsHolder
|
python
|
cython__cython
|
Cython/Compiler/Nodes.py
|
{
"start": 383229,
"end": 384036
}
|
class ____(StatNode):
"""
Used as the 'finally' block in a GILStatNode
state string 'gil' or 'nogil'
# scope_gil_state_known bool For nogil functions this can be False, since they can also be run with gil
# set to False by GilCheck transform
"""
child_attrs = []
state_temp = None
scope_gil_state_known = True
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
if self.state_temp:
variable = self.state_temp.result()
else:
variable = None
if self.state == 'gil':
code.put_release_ensured_gil(variable)
else:
code.put_acquire_gil(variable, unknown_gil_state=not self.scope_gil_state_known)
|
GILExitNode
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/package_base_extendee/package.py
|
{
"start": 149,
"end": 352
}
|
class ____(PackageBase):
"""Simple package with one optional dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("1.0")
|
PackageBaseExtendee
|
python
|
ray-project__ray
|
doc/source/ray-overview/examples/mcp-ray-serve/translator_mcp_ray.py
|
{
"start": 1481,
"end": 1650
}
|
class ____:
def __init__(self):
pass
# Ray Serve entry point.
app = TranslatorMCP.bind()
## Run in terminal.
# serve run translator_mcp_ray:app
|
TranslatorMCP
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/events/__init__.py
|
{
"start": 63164,
"end": 63552
}
|
class ____(
NamedTuple("_AssetObservation", [("asset_observation", AssetObservation)])
):
def __new__(cls, asset_observation: AssetObservation):
return super().__new__(
cls,
asset_observation=check.inst_param(
asset_observation, "asset_observation", AssetObservation
),
)
@whitelist_for_serdes
|
AssetObservationData
|
python
|
celery__celery
|
t/unit/concurrency/test_prefork.py
|
{
"start": 1209,
"end": 3597
}
|
class ____:
@staticmethod
def Loader(*args, **kwargs):
loader = Mock(*args, **kwargs)
loader.conf = {}
loader.override_backends = {}
return loader
@patch('celery.platforms.signals')
def test_process_initializer(self, _signals, set_mp_process_title, restore_logging):
from celery import signals
from celery._state import _tls
from celery.concurrency.prefork import WORKER_SIGIGNORE, WORKER_SIGRESET, process_initializer
on_worker_process_init = Mock()
signals.worker_process_init.connect(on_worker_process_init)
with self.Celery(loader=self.Loader) as app:
app.conf = AttributeDict(DEFAULTS)
process_initializer(app, 'awesome.worker.com')
_signals.ignore.assert_any_call(*WORKER_SIGIGNORE)
_signals.reset.assert_any_call(*WORKER_SIGRESET)
assert app.loader.init_worker.call_count
on_worker_process_init.assert_called()
assert _tls.current_app is app
set_mp_process_title.assert_called_with(
'celeryd', hostname='awesome.worker.com',
)
with patch('celery.app.trace.setup_worker_optimizations') as S:
os.environ['FORKED_BY_MULTIPROCESSING'] = '1'
try:
process_initializer(app, 'luke.worker.com')
S.assert_called_with(app, 'luke.worker.com')
finally:
os.environ.pop('FORKED_BY_MULTIPROCESSING', None)
os.environ['CELERY_LOG_FILE'] = 'worker%I.log'
app.log.setup = Mock(name='log_setup')
try:
process_initializer(app, 'luke.worker.com')
finally:
os.environ.pop('CELERY_LOG_FILE', None)
@patch('celery.platforms.set_pdeathsig')
def test_pdeath_sig(self, _set_pdeathsig, set_mp_process_title, restore_logging):
from celery import signals
on_worker_process_init = Mock()
signals.worker_process_init.connect(on_worker_process_init)
from celery.concurrency.prefork import process_initializer
with self.Celery(loader=self.Loader) as app:
app.conf = AttributeDict(DEFAULTS)
process_initializer(app, 'awesome.worker.com')
_set_pdeathsig.assert_called_once_with('SIGKILL')
|
test_process_initializer
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/checkpointing/test_model_checkpoint.py
|
{
"start": 34456,
"end": 34609
}
|
class ____(BoringModel):
def backward(self, loss):
if self.current_epoch == 1:
raise RuntimeError("Trouble!")
|
TroubledModelBackward
|
python
|
qdrant__qdrant-client
|
qdrant_client/local/local_collection.py
|
{
"start": 3193,
"end": 113459
}
|
class ____:
"""
LocalCollection is a class that represents a collection of vectors in the local storage.
"""
LARGE_DATA_THRESHOLD = 20_000
def __init__(
    self,
    config: models.CreateCollection,
    location: Optional[str] = None,
    force_disable_check_same_thread: bool = False,
) -> None:
    """
    Create or load a collection from the local storage.

    Args:
        config: collection definition (dense/sparse/multi vector params).
        location: path to the collection directory. If None, the collection will be created in memory.
        force_disable_check_same_thread: force disable check_same_thread for sqlite3 connection. default: False
    """
    # Split configured vector spaces into plain dense vs multivector ones.
    self.vectors_config, self.multivectors_config = self._resolve_vectors_config(
        config.vectors
    )
    sparse_vectors_config = config.sparse_vectors
    # Dense vectors: one (num_points, dim) float32 matrix per named space.
    self.vectors: dict[str, types.NumpyArray] = {
        name: np.zeros((0, params.size), dtype=np.float32)
        for name, params in self.vectors_config.items()
    }
    # Sparse vectors: one python list of SparseVector per named space.
    self.sparse_vectors: dict[str, list[SparseVector]] = (
        {name: [] for name, params in sparse_vectors_config.items()}
        if sparse_vectors_config is not None
        else {}
    )
    self.sparse_vectors_idf: dict[
        str, dict[int, int]
    ] = {}  # vector_name: {idx_in_vocab: doc frequency}
    # Multivectors: one list of 2D arrays per named space.
    self.multivectors: dict[str, list[types.NumpyArray]] = {
        name: [] for name in self.multivectors_config
    }
    self.payload: list[models.Payload] = []
    # Point-level tombstones; per-vector tombstones tracked separately below.
    self.deleted: types.NumpyArray = np.zeros(0, dtype=bool)
    self._all_vectors_keys = (
        list(self.vectors.keys())
        + list(self.sparse_vectors.keys())
        + list(self.multivectors.keys())
    )
    self.deleted_per_vector: dict[str, types.NumpyArray] = {
        name: np.zeros(0, dtype=bool) for name in self._all_vectors_keys
    }
    self.ids: dict[models.ExtendedPointId, int] = {}  # Mapping from external id to internal id
    self.ids_inv: list[models.ExtendedPointId] = []  # Mapping from internal id to external id
    self.persistent = location is not None
    self.storage = None
    self.config = config
    if location is not None:
        # Persistent mode: open sqlite-backed storage and hydrate memory state.
        self.storage = CollectionPersistence(location, force_disable_check_same_thread)
        self.load_vectors()
@staticmethod
def _resolve_vectors_config(
    vectors: Union[dict[str, models.VectorParams], models.VectorParams],
) -> tuple[dict[str, models.VectorParams], dict[str, models.VectorParams]]:
    """Split a vectors config into (dense, multivector) name->params maps.

    Accepts either a single anonymous ``VectorParams`` (registered under the
    default vector name) or a mapping of named params.  A params entry with
    ``multivector_config`` set goes into the multivector map, otherwise into
    the dense map.
    """
    vectors_config = {}
    multivectors_config = {}
    if isinstance(vectors, models.VectorParams):
        # Anonymous single-vector config: register under the default name.
        if vectors.multivector_config is not None:
            multivectors_config = {DEFAULT_VECTOR_NAME: vectors}
        else:
            vectors_config = {DEFAULT_VECTOR_NAME: vectors}
        return vectors_config, multivectors_config
    for name, params in vectors.items():
        if params.multivector_config is not None:
            multivectors_config[name] = params
        else:
            vectors_config[name] = params
    return vectors_config, multivectors_config
def close(self) -> None:
    """Release the persistent storage handle, if one is open."""
    storage = self.storage
    if storage is not None:
        storage.close()
def _update_idf_append(self, vector: SparseVector, vector_name: str) -> None:
    """Increment the document-frequency counter of every index in *vector*."""
    doc_freqs = self.sparse_vectors_idf.setdefault(vector_name, defaultdict(int))
    for index in vector.indices:
        doc_freqs[index] += 1
def _update_idf_remove(self, vector: SparseVector, vector_name: str) -> None:
    """Decrement the document-frequency counter of every index in *vector*."""
    doc_freqs = self.sparse_vectors_idf[vector_name]
    for index in vector.indices:
        doc_freqs[index] -= 1
@classmethod
def _compute_idf(cls, df: int, n: int) -> float:
    """BM25-style inverse document frequency: ln(1 + (n - df + 0.5) / (df + 0.5))."""
    numerator = n - df + 0.5
    denominator = df + 0.5
    return math.log(numerator / denominator + 1)
def _rescore_idf(self, vector: SparseVector, vector_name: str) -> SparseVector:
    """Scale each sparse value by its term's IDF; no-op when no IDF stats exist."""
    num_docs = self.count(count_filter=None).count
    doc_freqs = self.sparse_vectors_idf.get(vector_name)
    if doc_freqs is None:
        # No frequency statistics collected for this vector space.
        return vector
    rescored = [
        value * self._compute_idf(doc_freqs.get(index, 0), num_docs)
        for index, value in zip(vector.indices, vector.values)
    ]
    return SparseVector(indices=vector.indices, values=rescored)
def load_vectors(self) -> None:
    """Hydrate in-memory state from persistent storage.

    Iterates over all persisted points, assigning dense internal ids in load
    order, and rebuilds payloads, dense/sparse/multi vectors, IDF statistics
    and per-vector deletion masks.  A point missing a configured named vector
    gets a placeholder value and is marked deleted for that vector only.
    """
    if self.storage is not None:
        vectors = defaultdict(list)
        sparse_vectors = defaultdict(list)
        multivectors = defaultdict(list)
        # (internal_id, vector_name) pairs for missing named vectors.
        deleted_ids = []
        for idx, point in enumerate(self.storage.load()):
            # id tracker
            self.ids[point.id] = idx
            # no gaps in idx
            self.ids_inv.append(point.id)
            # payload tracker
            self.payload.append(to_jsonable_python(point.payload) or {})
            # persisted named vectors
            loaded_vector = point.vector
            # add default name to anonymous dense or multivector
            if isinstance(point.vector, list):
                loaded_vector = {DEFAULT_VECTOR_NAME: point.vector}
            # handle dense vectors
            all_dense_vector_names = list(self.vectors.keys())
            for name in all_dense_vector_names:
                v = loaded_vector.get(name)
                if v is not None:
                    vectors[name].append(v)
                else:
                    # Placeholder keeps the matrix rectangular; the point is
                    # marked deleted for this vector below.
                    vectors[name].append(
                        np.ones(self.vectors_config[name].size, dtype=np.float32)
                    )
                    deleted_ids.append((idx, name))
            # handle sparse vectors
            all_sparse_vector_names = list(self.sparse_vectors.keys())
            for name in all_sparse_vector_names:
                v = loaded_vector.get(name)
                if v is not None:
                    sparse_vectors[name].append(v)
                else:
                    sparse_vectors[name].append(empty_sparse_vector())
                    deleted_ids.append((idx, name))
            # handle multivectors
            all_multivector_names = list(self.multivectors.keys())
            for name in all_multivector_names:
                v = loaded_vector.get(name)
                if v is not None:
                    multivectors[name].append(v)
                else:
                    multivectors[name].append(np.array([]))
                    deleted_ids.append((idx, name))
        # setup dense vectors by name
        for name, named_vectors in vectors.items():
            self.vectors[name] = np.array(named_vectors)
            self.deleted_per_vector[name] = np.zeros(len(self.payload), dtype=bool)
        # setup sparse vectors by name; also rebuild IDF statistics
        for name, named_vectors in sparse_vectors.items():
            self.sparse_vectors[name] = named_vectors
            self.deleted_per_vector[name] = np.zeros(len(self.payload), dtype=bool)
            for vector in named_vectors:
                self._update_idf_append(vector, name)
        # setup multivectors by name
        for name, named_vectors in multivectors.items():
            self.multivectors[name] = [np.array(vector) for vector in named_vectors]
            self.deleted_per_vector[name] = np.zeros(len(self.payload), dtype=bool)
        # track deleted points by named vector
        for idx, name in deleted_ids:
            self.deleted_per_vector[name][idx] = 1
        # No point-level deletions survive a reload.
        self.deleted = np.zeros(len(self.payload), dtype=bool)
@classmethod
def _resolve_query_vector_name(
    cls,
    query_vector: Union[
        list[float],
        tuple[str, list[float]],
        list[list[float]],
        tuple[str, list[list[float]]],
        types.NamedVector,
        types.NamedSparseVector,
        DenseQueryVector,
        tuple[str, DenseQueryVector],
        tuple[str, SparseQueryVector],
        MultiQueryVector,
        tuple[str, MultiQueryVector],
        types.NumpyArray,
    ],
) -> tuple[
    str, Union[DenseQueryVector, SparseQueryVector, MultiQueryVector, types.NumpyArray]
]:
    """Normalize any accepted query-vector form into a (name, vector) pair.

    Unnamed inputs resolve to the default vector name; plain lists are
    converted to numpy arrays.  NOTE: the isinstance chain is order-
    sensitive (e.g. ndarray must be tested before the generic list case).
    """
    # SparseQueryVector is not in the method's signature, because sparse vectors can only be used as named vectors,
    # and there is no default name for them
    vector: Union[DenseQueryVector, SparseQueryVector, MultiQueryVector, types.NumpyArray]
    if isinstance(query_vector, tuple):
        # Explicitly named: ("name", vector-like).
        name, query = query_vector
        if isinstance(query, list):
            vector = np.array(query)
        else:
            vector = query
    elif isinstance(query_vector, np.ndarray):
        name = DEFAULT_VECTOR_NAME
        vector = query_vector
    elif isinstance(query_vector, types.NamedVector):
        name = query_vector.name
        vector = np.array(query_vector.vector)
    elif isinstance(query_vector, types.NamedSparseVector):
        name = query_vector.name
        vector = query_vector.vector
    elif isinstance(query_vector, list):
        name = DEFAULT_VECTOR_NAME
        vector = np.array(query_vector)
    elif isinstance(query_vector, get_args(DenseQueryVector)):
        name = DEFAULT_VECTOR_NAME
        vector = query_vector
    elif isinstance(query_vector, get_args(MultiQueryVector)):
        name = DEFAULT_VECTOR_NAME
        vector = query_vector
    else:
        raise ValueError(f"Unsupported vector type {type(query_vector)}")
    return name, vector
def get_vector_params(self, name: str) -> models.VectorParams:
    """Return the dense vector params for *name*.

    Raises:
        ValueError: when the name is unknown or the config is malformed.
    """
    configured = self.config.vectors
    if isinstance(configured, dict):
        if name not in configured:
            raise ValueError(f"Vector {name} is not found in the collection")
        return configured[name]
    if isinstance(configured, models.VectorParams):
        # Anonymous single-vector config only answers for the default name.
        if name != DEFAULT_VECTOR_NAME:
            raise ValueError(f"Vector {name} is not found in the collection")
        return configured
    raise ValueError(f"Malformed config.vectors: {configured}")
@classmethod
def _check_include_pattern(cls, pattern: str, key: str) -> bool:
    """Segment-wise prefix match between an include *pattern* and a payload *key*.

    >>> LocalCollection._check_include_pattern('a', 'a')
    True
    >>> LocalCollection._check_include_pattern('a.b', 'b')
    False
    >>> LocalCollection._check_include_pattern('a.b', 'a.b')
    True
    >>> LocalCollection._check_include_pattern('a.b', 'a.b.c')
    True
    >>> LocalCollection._check_include_pattern('a.b[]', 'a.b[].c')
    True
    >>> LocalCollection._check_include_pattern('a.b[]', 'a.b.c')
    False
    >>> LocalCollection._check_include_pattern('a', 'a.b')
    True
    >>> LocalCollection._check_include_pattern('a.b', 'a')
    True
    >>> LocalCollection._check_include_pattern('a', 'aa.b.c')
    False
    >>> LocalCollection._check_include_pattern('a_b', 'a')
    False
    """
    # Segment on '.' and '[' while keeping the delimiter attached, so that
    # 'a.b[]' and 'a.b' produce different segment lists.
    pattern_segments = pattern.replace(".", "[.").split("[")
    key_segments = key.replace(".", "[.").split("[")
    for pattern_segment, key_segment in zip(pattern_segments, key_segments):
        if pattern_segment != key_segment:
            return False
    return True
@classmethod
def _check_exclude_pattern(cls, pattern: str, key: str) -> bool:
    """Return True when exclude *pattern* covers *key*.

    Unlike the include check, a pattern longer than the key never matches.
    """
    if len(pattern) > len(key):
        return False
    pattern_segments = pattern.replace(".", "[.").split("[")
    key_segments = key.replace(".", "[.").split("[")
    for pattern_segment, key_segment in zip(pattern_segments, key_segments):
        if pattern_segment != key_segment:
            return False
    return True
@classmethod
def _filter_payload(
    cls, payload: Any, predicate: Callable[[str], bool], path: str = ""
) -> Any:
    """Recursively project *payload* through *predicate* on dotted key paths.

    Dict keys extend the path with ``.key``; list elements extend it with
    ``[]``.  Keys/elements whose path fails *predicate* are dropped; scalar
    leaves are returned unchanged.

    Args:
        payload: arbitrary JSON-like structure (dict / list / scalar).
        predicate: callable deciding whether a given key path is kept.
        path: dotted path of the current position (internal recursion state).

    Returns:
        A filtered copy of *payload* (scalars are returned as-is).
    """
    if isinstance(payload, dict):
        res = {}
        if path != "":
            new_path = path + "."
        else:
            new_path = path
        for key, value in payload.items():
            if predicate(new_path + key):
                res[key] = cls._filter_payload(value, predicate, new_path + key)
        return res
    elif isinstance(payload, list):
        res_array = []
        path = path + "[]"
        # Fix: the original iterated with enumerate() but never used the
        # index; iterate values directly.
        for value in payload:
            if predicate(path):
                res_array.append(cls._filter_payload(value, predicate, path))
        return res_array
    else:
        return payload
@classmethod
def _process_payload(
    cls,
    payload: dict,
    with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
) -> Optional[dict]:
    """Apply a payload selector to *payload*.

    Returns None when payload is disabled, the full payload for ``True``,
    and a filtered projection for a list of keys or an include/exclude
    selector.  Unknown selector types fall through to the full payload.
    """
    if not with_payload:
        return None
    if isinstance(with_payload, bool):
        return payload
    if isinstance(with_payload, list):
        # Plain list of key patterns behaves like an include selector.
        return cls._filter_payload(
            payload,
            lambda key: any(
                map(lambda pattern: cls._check_include_pattern(pattern, key), with_payload)  # type: ignore
            ),
        )
    if isinstance(with_payload, models.PayloadSelectorInclude):
        return cls._filter_payload(
            payload,
            lambda key: any(
                map(
                    lambda pattern: cls._check_include_pattern(pattern, key),
                    with_payload.include,  # type: ignore
                )
            ),
        )
    if isinstance(with_payload, models.PayloadSelectorExclude):
        # Keep a key only if NO exclude pattern covers it.
        return cls._filter_payload(
            payload,
            lambda key: all(
                map(
                    lambda pattern: not cls._check_exclude_pattern(pattern, key),
                    with_payload.exclude,  # type: ignore
                )
            ),
        )
    return payload
def _get_payload(
    self,
    idx: int,
    with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
    return_copy: bool = True,
) -> Optional[models.Payload]:
    """Return the (optionally filtered, optionally deep-copied) payload at *idx*."""
    filtered = self._process_payload(self.payload[idx], with_payload)
    if return_copy:
        # Copy so callers cannot mutate the stored payload in place.
        return deepcopy(filtered)
    return filtered
def _get_vectors(
    self, idx: int, with_vectors: Union[bool, Sequence[str], None] = False
) -> Optional[models.VectorStruct]:
    """Collect all live named vectors of point *idx*.

    Returns None when vectors are disabled; a bare vector when only the
    default-named one exists; otherwise a name->vector dict.  Vectors
    deleted for this point are omitted; *with_vectors* may narrow to a
    subset of names.
    """
    if with_vectors is False or with_vectors is None:
        return None
    merged: dict = {}
    for name in self.vectors:
        if not self.deleted_per_vector[name][idx]:
            merged[name] = self.vectors[name][idx].tolist()
    for name in self.sparse_vectors:
        if not self.deleted_per_vector[name][idx]:
            merged[name] = self.sparse_vectors[name][idx]
    for name in self.multivectors:
        if not self.deleted_per_vector[name][idx]:
            merged[name] = self.multivectors[name][idx].tolist()
    if isinstance(with_vectors, list):
        merged = {name: merged[name] for name in with_vectors if name in merged}
    # Anonymous collections return the bare vector instead of a dict.
    if len(merged) == 1 and DEFAULT_VECTOR_NAME in merged:
        return merged[DEFAULT_VECTOR_NAME]
    return merged
def _payload_and_non_deleted_mask(
    self,
    payload_filter: Optional[models.Filter],
    vector_name: Optional[str] = None,
) -> np.ndarray:
    """
    Calculate mask for filtered payload and non-deleted points. True - accepted, False - rejected
    """
    accepted = calculate_payload_mask(
        payloads=self.payload,
        payload_filter=payload_filter,
        ids_inv=self.ids_inv,
        deleted_per_vector=self.deleted_per_vector,
    )
    # Reject points deleted at the collection level (deleted flag is 1).
    accepted = accepted & ~self.deleted
    if vector_name is not None:
        # Also reject points whose specific named vector was deleted.
        accepted = accepted & ~self.deleted_per_vector[vector_name]
    return accepted
def _calculate_has_vector(self, internal_id: int) -> dict[str, bool]:
    """Map each vector name to True if the point still has that vector.

    Names whose vector was deleted for this point are omitted entirely.
    """
    return {
        name: True
        for name, deleted in self.deleted_per_vector.items()
        if not deleted[internal_id]
    }
def search(
    self,
    query_vector: Union[
        list[float],
        tuple[str, list[float]],
        list[list[float]],
        tuple[str, list[list[float]]],
        types.NamedVector,
        types.NamedSparseVector,
        DenseQueryVector,
        tuple[str, DenseQueryVector],
        SparseQueryVector,
        tuple[str, SparseQueryVector],
        MultiQueryVector,
        tuple[str, MultiQueryVector],
        types.NumpyArray,
    ],
    query_filter: Optional[types.Filter] = None,
    limit: int = 10,
    offset: Optional[int] = None,
    with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
    with_vectors: Union[bool, Sequence[str]] = False,
    score_threshold: Optional[float] = None,
) -> list[models.ScoredPoint]:
    """Brute-force similarity search over the collection.

    Resolves the query-vector form, scores every stored vector of the
    resolved named space, then walks candidates in score order applying the
    payload/deletion mask, optional score threshold and limit/offset.

    Raises:
        ValueError: unknown vector name or unsupported query-vector type.
    """
    name, query_vector = self._resolve_query_vector_name(query_vector)
    result: list[models.ScoredPoint] = []
    sparse_scoring = False
    rescore_idf = False
    # --- Select the vector storage and distance for the named space ---
    # early exit if the named vector does not exist
    if isinstance(query_vector, get_args(SparseQueryVector)):
        if name not in self.sparse_vectors:
            raise ValueError(f"Sparse vector {name} is not found in the collection")
        vectors = self.sparse_vectors[name]
        if self.config.sparse_vectors[name].modifier == models.Modifier.IDF:
            rescore_idf = True
        # Sparse scoring always uses DOT.
        distance = Distance.DOT
        sparse_scoring = True
    elif isinstance(query_vector, get_args(MultiQueryVector)) or (
        isinstance(query_vector, np.ndarray) and len(query_vector.shape) == 2
    ):
        if name not in self.multivectors:
            raise ValueError(f"Multivector {name} is not found in the collection")
        vectors = self.multivectors[name]
        distance = self.get_vector_params(name).distance
    else:
        if name not in self.vectors:
            raise ValueError(f"Dense vector {name} is not found in the collection")
        vectors = self.vectors[name]
        distance = self.get_vector_params(name).distance
    # Dense matrices may be over-allocated; trim to the live point count.
    vectors = vectors[: len(self.payload)]
    # --- Compute raw scores, dispatching on the query-vector type ---
    if isinstance(query_vector, np.ndarray):
        if len(query_vector.shape) == 1:
            scores = calculate_distance(query_vector, vectors, distance)
        else:
            scores = calculate_multi_distance(query_vector, vectors, distance)
    elif isinstance(query_vector, RecoQuery):
        if query_vector.strategy == models.RecommendStrategy.BEST_SCORE:
            scores = calculate_recommend_best_scores(query_vector, vectors, distance)
        elif query_vector.strategy == models.RecommendStrategy.SUM_SCORES:
            scores = calculate_recommend_sum_scores(query_vector, vectors, distance)
        else:
            raise TypeError(
                f"Recommend strategy is expected to be either "
                f"BEST_SCORE or SUM_SCORES, got: {query_vector.strategy}"
            )
    elif isinstance(query_vector, SparseRecoQuery):
        if rescore_idf:
            query_vector = query_vector.transform_sparse(lambda x: self._rescore_idf(x, name))
        if query_vector.strategy == models.RecommendStrategy.BEST_SCORE:
            scores = calculate_sparse_recommend_best_scores(query_vector, vectors)
        elif query_vector.strategy == models.RecommendStrategy.SUM_SCORES:
            scores = calculate_sparse_recommend_sum_scores(query_vector, vectors)
        else:
            raise TypeError(
                f"Recommend strategy is expected to be either "
                f"BEST_SCORE or SUM_SCORES, got: {query_vector.strategy}"
            )
    elif isinstance(query_vector, MultiRecoQuery):
        if query_vector.strategy == models.RecommendStrategy.BEST_SCORE:
            scores = calculate_multi_recommend_best_scores(query_vector, vectors, distance)
        elif query_vector.strategy == models.RecommendStrategy.SUM_SCORES:
            scores = calculate_multi_recommend_sum_scores(query_vector, vectors, distance)
        else:
            raise TypeError(
                f"Recommend strategy is expected to be either "
                f"BEST_SCORE or SUM_SCORES, got: {query_vector.strategy}"
            )
    elif isinstance(query_vector, DiscoveryQuery):
        scores = calculate_discovery_scores(query_vector, vectors, distance)
    elif isinstance(query_vector, SparseDiscoveryQuery):
        if rescore_idf:
            query_vector = query_vector.transform_sparse(lambda x: self._rescore_idf(x, name))
        scores = calculate_sparse_discovery_scores(query_vector, vectors)
    elif isinstance(query_vector, MultiDiscoveryQuery):
        scores = calculate_multi_discovery_scores(query_vector, vectors, distance)
    elif isinstance(query_vector, ContextQuery):
        scores = calculate_context_scores(query_vector, vectors, distance)
    elif isinstance(query_vector, SparseContextQuery):
        if rescore_idf:
            query_vector = query_vector.transform_sparse(lambda x: self._rescore_idf(x, name))
        scores = calculate_sparse_context_scores(query_vector, vectors)
    elif isinstance(query_vector, MultiContextQuery):
        scores = calculate_multi_context_scores(query_vector, vectors, distance)
    elif isinstance(query_vector, SparseVector):
        validate_sparse_vector(query_vector)
        if rescore_idf:
            query_vector = self._rescore_idf(query_vector, name)
        # sparse vector query must be sorted by indices for dot product to work with persisted vectors
        query_vector = sort_sparse_vector(query_vector)
        scores = calculate_distance_sparse(query_vector, vectors)
    else:
        raise (ValueError(f"Unsupported query vector type {type(query_vector)}"))
    # --- Rank candidates and collect results ---
    mask = self._payload_and_non_deleted_mask(query_filter, vector_name=name)
    required_order = distance_to_order(distance)
    if required_order == DistanceOrder.BIGGER_IS_BETTER or isinstance(
        query_vector,
        (
            DiscoveryQuery,
            ContextQuery,
            RecoQuery,
            MultiDiscoveryQuery,
            MultiContextQuery,
            MultiRecoQuery,
        ),  # sparse structures are not required, sparse always uses DOT
    ):
        order = np.argsort(scores)[::-1]
    else:
        order = np.argsort(scores)
    offset = offset if offset is not None else 0
    for idx in order:
        # Collect limit+offset hits, then slice the offset off at the end.
        if len(result) >= limit + offset:
            break
        if not mask[idx]:
            continue
        score = scores[idx]
        # skip undefined scores from sparse vectors
        if sparse_scoring and score == -np.inf:
            continue
        point_id = self.ids_inv[idx]
        if score_threshold is not None:
            # Scores are visited in order, so the first failing score ends
            # the scan entirely.
            if required_order == DistanceOrder.BIGGER_IS_BETTER:
                if score < score_threshold:
                    break
            else:
                if score > score_threshold:
                    break
        scored_point = construct(
            models.ScoredPoint,
            id=point_id,
            score=float(score),
            version=0,
            payload=self._get_payload(idx, with_payload),
            vector=self._get_vectors(idx, with_vectors),
        )
        result.append(scored_point)
    return result[offset:]
def query_points(
    self,
    query: Optional[types.Query] = None,
    prefetch: Optional[list[types.Prefetch]] = None,
    query_filter: Optional[types.Filter] = None,
    limit: int = 10,
    offset: int = 0,
    with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
    with_vectors: Union[bool, Sequence[str]] = False,
    score_threshold: Optional[float] = None,
    using: Optional[str] = None,
    **kwargs: Any,
) -> types.QueryResponse:
    """
    Queries points in the local collection, resolving any prefetches first.

    Assumes all vectors have been homogenized so that there are no ids in the inputs.
    With prefetches, each prefetch is resolved recursively and the results are
    merged/re-scored according to *query*; without prefetches, the query is
    executed directly against the collection.
    """
    prefetches = []
    if prefetch is not None:
        # Accept a single Prefetch or a list of them.
        prefetches = prefetch if isinstance(prefetch, list) else [prefetch]
    if len(prefetches) > 0:
        # It is a hybrid/re-scoring query
        sources = [self._prefetch(prefetch) for prefetch in prefetches]
        if query is None:
            raise ValueError("Query is required for merging prefetches")
        # Merge sources
        scored_points = self._merge_sources(
            sources=sources,
            query=query,
            limit=limit,
            offset=offset,
            using=using,
            query_filter=query_filter,
            with_payload=with_payload,
            with_vectors=with_vectors,
            score_threshold=score_threshold,
        )
    else:
        # It is a base query
        scored_points = self._query_collection(
            query=query,
            using=using,
            query_filter=query_filter,
            limit=limit,
            offset=offset,
            with_payload=with_payload,
            with_vectors=with_vectors,
            score_threshold=score_threshold,
        )
    return types.QueryResponse(points=scored_points)
def _prefetch(self, prefetch: types.Prefetch) -> list[types.ScoredPoint]:
    """Resolve one prefetch (recursively) into scored points.

    A prefetch with inner prefetches resolves those first and merges them
    with the prefetch's own query; a leaf prefetch queries the collection
    directly.  Payloads/vectors are never fetched at this stage.
    """
    inner_prefetches = []
    if prefetch.prefetch is not None:
        inner_prefetches = (
            prefetch.prefetch if isinstance(prefetch.prefetch, list) else [prefetch.prefetch]
        )
    if len(inner_prefetches) > 0:
        sources = [self._prefetch(inner_prefetch) for inner_prefetch in inner_prefetches]
        if prefetch.query is None:
            raise ValueError("Query is required for merging prefetches")
        # Merge sources
        return self._merge_sources(
            sources=sources,
            query=prefetch.query,
            limit=prefetch.limit if prefetch.limit is not None else 10,
            offset=0,
            using=prefetch.using,
            query_filter=prefetch.filter,
            with_payload=False,
            with_vectors=False,
            score_threshold=prefetch.score_threshold,
        )
    else:
        # Base case: fetch from collection
        return self._query_collection(
            query=prefetch.query,
            using=prefetch.using,
            query_filter=prefetch.filter,
            limit=prefetch.limit,
            offset=0,
            with_payload=False,
            with_vectors=False,
            score_threshold=prefetch.score_threshold,
        )
def _merge_sources(
    self,
    sources: list[list[types.ScoredPoint]],
    query: types.Query,
    limit: int,
    offset: int,
    using: Optional[str] = None,
    query_filter: Optional[types.Filter] = None,
    score_threshold: Optional[float] = None,
    with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
    with_vectors: Union[bool, Sequence[str]] = False,
) -> list[types.ScoredPoint]:
    """Combine prefetch result lists according to *query*.

    Fusion queries (RRF/DBSF) rank-merge the sources; formula queries
    re-score them; any other query re-scores by restricting a fresh
    collection query to the ids found in the sources.
    """
    if isinstance(query, (models.FusionQuery, models.RrfQuery)):
        # Fuse results
        if isinstance(query, models.RrfQuery):
            fused = reciprocal_rank_fusion(
                responses=sources, limit=limit + offset, ranking_constant_k=query.rrf.k
            )
        else:
            if query.fusion == models.Fusion.RRF:
                # RRF: Reciprocal Rank Fusion
                fused = reciprocal_rank_fusion(responses=sources, limit=limit + offset)
            elif query.fusion == models.Fusion.DBSF:
                # DBSF: Distribution-Based Score Fusion
                fused = distribution_based_score_fusion(
                    responses=sources, limit=limit + offset
                )
            else:
                raise ValueError(f"Fusion method {query.fusion} does not exist")
        # Fetch payload and vectors, since prefetches skipped them.
        ids = [point.id for point in fused]
        fetched_points = self.retrieve(
            ids, with_payload=with_payload, with_vectors=with_vectors
        )
        for fetched, scored in zip(fetched_points, fused):
            scored.payload = fetched.payload
            scored.vector = fetched.vector
        return fused[offset:]
    elif isinstance(query, models.FormulaQuery):
        # Re-score with formula
        rescored = self._rescore_with_formula(
            query=query,
            prefetches_results=sources,
            limit=limit + offset,
            with_payload=with_payload,
            with_vectors=with_vectors,
        )
        return rescored[offset:]
    else:
        # Re-score with vector
        sources_ids = set()
        for source in sources:
            for point in source:
                sources_ids.add(point.id)
        if len(sources_ids) == 0:
            # no need to perform a query if there are no matches for the sources
            return []
        else:
            # Restrict the re-scoring query to the prefetched ids only.
            filter_with_sources = _include_ids_in_filter(query_filter, list(sources_ids))
            return self._query_collection(
                query=query,
                using=using,
                query_filter=filter_with_sources,
                limit=limit,
                offset=offset,
                with_payload=with_payload,
                with_vectors=with_vectors,
                score_threshold=score_threshold,
            )
def _query_collection(
    self,
    query: Optional[types.Query] = None,
    using: Optional[str] = None,
    query_filter: Optional[types.Filter] = None,
    limit: Optional[int] = None,
    offset: Optional[int] = None,
    with_payload: Union[bool, Sequence[str], types.PayloadSelector] = False,
    with_vectors: Union[bool, Sequence[str]] = False,
    score_threshold: Optional[float] = None,
) -> list[types.ScoredPoint]:
    """
    Performs the query on the collection, assuming it didn't have any prefetches.

    Dispatches on the query type: scroll (no query / order-by), nearest
    (with optional MMR), recommend, discover, context, random sample, or a
    bare vector.  Fusion/formula/RRF queries are invalid here because they
    require prefetches.
    """
    using = using or DEFAULT_VECTOR_NAME
    limit = limit or 10
    offset = offset or 0
    if query is None:
        # No query at all: plain scroll in id order.
        records, _ = self.scroll(
            scroll_filter=query_filter,
            limit=limit + offset,
            with_payload=with_payload,
            with_vectors=with_vectors,
        )
        return [record_to_scored_point(record) for record in records[offset:]]
    elif isinstance(query, models.NearestQuery):
        if query.mmr is not None:
            # Maximal-marginal-relevance variant of nearest search.
            return self._search_with_mmr(
                query_vector=query.nearest,
                mmr=query.mmr,
                query_filter=query_filter,
                limit=limit,
                offset=offset,
                using=using,
                with_payload=with_payload,
                with_vectors=with_vectors,
                score_threshold=score_threshold,
            )
        return self.search(
            query_vector=(using, query.nearest),
            query_filter=query_filter,
            limit=limit,
            offset=offset,
            with_payload=with_payload,
            with_vectors=with_vectors,
            score_threshold=score_threshold,
        )
    elif isinstance(query, models.RecommendQuery):
        return self.recommend(
            positive=query.recommend.positive,
            negative=query.recommend.negative,
            strategy=query.recommend.strategy,
            using=using,
            query_filter=query_filter,
            limit=limit,
            offset=offset,
            with_payload=with_payload,
            with_vectors=with_vectors,
            score_threshold=score_threshold,
        )
    elif isinstance(query, models.DiscoverQuery):
        return self.discover(
            target=query.discover.target,
            context=query.discover.context,
            using=using,
            query_filter=query_filter,
            limit=limit,
            offset=offset,
            with_payload=with_payload,
            with_vectors=with_vectors,
            score_threshold=score_threshold,
        )
    elif isinstance(query, models.ContextQuery):
        # Context-only discovery (no target).
        return self.discover(
            context=query.context,
            using=using,
            query_filter=query_filter,
            limit=limit,
            offset=offset,
            with_payload=with_payload,
            with_vectors=with_vectors,
            score_threshold=score_threshold,
        )
    elif isinstance(query, models.OrderByQuery):
        records, _ = self.scroll(
            scroll_filter=query_filter,
            order_by=query.order_by,
            limit=limit + offset,
            with_payload=with_payload,
            with_vectors=with_vectors,
        )
        return [record_to_scored_point(record) for record in records[offset:]]
    elif isinstance(query, models.SampleQuery):
        if query.sample == models.Sample.RANDOM:
            return self._sample_randomly(
                limit=limit,
                query_filter=query_filter,
                with_payload=with_payload,
                with_vectors=with_vectors,
            )
        else:
            raise ValueError(f"Unknown Sample variant: {query.sample}")
    elif isinstance(query, models.FusionQuery):
        raise ValueError("Cannot perform fusion without prefetches")
    elif isinstance(query, models.FormulaQuery):
        raise ValueError("Cannot perform formula without prefetches")
    elif isinstance(query, models.RrfQuery):
        raise ValueError("Cannot perform RRF query without prefetches")
    else:
        # most likely a VectorInput, delegate to search
        return self.search(
            query_vector=(using, query),
            query_filter=query_filter,
            limit=limit,
            offset=offset,
            with_payload=with_payload,
            with_vectors=with_vectors,
            score_threshold=score_threshold,
        )
def query_groups(
    self,
    group_by: str,
    query: Union[
        types.PointId,
        list[float],
        list[list[float]],
        types.SparseVector,
        types.Query,
        types.NumpyArray,
        types.Document,
        types.Image,
        types.InferenceObject,
        None,
    ] = None,
    using: Optional[str] = None,
    prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None,
    query_filter: Optional[types.Filter] = None,
    limit: int = 10,
    group_size: int = 3,
    with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
    with_vectors: Union[bool, Sequence[str]] = False,
    score_threshold: Optional[float] = None,
    with_lookup: Optional[types.WithLookupInterface] = None,
    with_lookup_collection: Optional["LocalCollection"] = None,
) -> models.GroupsResult:
    """Run a query and bucket the hits by the payload value under *group_by*.

    Queries the whole collection (prefetch limits are widened so grouping
    sees every candidate), keeps at most *group_size* hits per group and at
    most *limit* groups, and optionally attaches a looked-up record per
    group id from another collection.
    """
    max_limit = len(self.ids_inv)
    # rewrite prefetch with larger limit
    if prefetch is not None:
        prefetch = deepcopy(
            prefetch
        )  # we're modifying Prefetch inplace, but we don't want to modify
        # the original object, if users want to reuse it somehow
        set_prefetch_limit_iteratively(prefetch, max_limit)
    # Fetch everything with payloads so grouping keys are available.
    points = self.query_points(
        query=query,
        query_filter=query_filter,
        prefetch=prefetch,
        using=using,
        limit=len(self.ids_inv),
        with_payload=True,
        with_vectors=with_vectors,
        score_threshold=score_threshold,
    )
    groups = OrderedDict()
    for point in points.points:
        if not isinstance(point.payload, dict):
            continue
        group_values = value_by_key(point.payload, group_by)
        if group_values is None:
            continue
        # Only str/int values can act as group keys; deduplicate per point.
        group_values = list(set(v for v in group_values if isinstance(v, (str, int))))
        # Apply the caller's payload selector only after grouping.
        point.payload = self._process_payload(point.payload, with_payload)
        for group_value in group_values:
            if group_value not in groups:
                groups[group_value] = models.PointGroup(id=group_value, hits=[])
            if len(groups[group_value].hits) >= group_size:
                continue
            groups[group_value].hits.append(point)
    groups_result: list[models.PointGroup] = list(groups.values())[:limit]
    if isinstance(with_lookup, str):
        # Bare collection name acts as a lookup with default selectors.
        with_lookup = models.WithLookup(
            collection=with_lookup,
            with_payload=None,
            with_vectors=None,
        )
    if with_lookup is not None and with_lookup_collection is not None:
        for group in groups_result:
            lookup = with_lookup_collection.retrieve(
                ids=[group.id],
                with_payload=with_lookup.with_payload,
                with_vectors=with_lookup.with_vectors,
            )
            group.lookup = next(iter(lookup), None)
    return models.GroupsResult(groups=groups_result)
def search_groups(
    self,
    query_vector: Union[
        Sequence[float],
        list[list[float]],
        tuple[
            str,
            Union[
                models.Vector,
                RecoQuery,
                SparseRecoQuery,
                MultiRecoQuery,
                types.NumpyArray,
            ],
        ],
        types.NamedVector,
        types.NamedSparseVector,
        RecoQuery,
        SparseRecoQuery,
        MultiRecoQuery,
        types.NumpyArray,
    ],
    group_by: str,
    query_filter: Optional[models.Filter] = None,
    limit: int = 10,
    group_size: int = 1,
    with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True,
    with_vectors: Union[bool, Sequence[str]] = False,
    score_threshold: Optional[float] = None,
    with_lookup: Optional[types.WithLookupInterface] = None,
    with_lookup_collection: Optional["LocalCollection"] = None,
) -> models.GroupsResult:
    """Vector search whose hits are bucketed by the payload value under *group_by*.

    Same grouping/lookup behaviour as ``query_groups``, but driven by the
    legacy ``search`` entry point instead of ``query_points``.
    """
    # Search over all points with payloads so grouping keys are available.
    points = self.search(
        query_vector=query_vector,
        query_filter=query_filter,
        limit=len(self.ids_inv),
        with_payload=True,
        with_vectors=with_vectors,
        score_threshold=score_threshold,
    )
    groups = OrderedDict()
    for point in points:
        if not isinstance(point.payload, dict):
            continue
        group_values = value_by_key(point.payload, group_by)
        if group_values is None:
            continue
        # Only str/int values can act as group keys; deduplicate per point.
        group_values = list(set(v for v in group_values if isinstance(v, (str, int))))
        # Apply the caller's payload selector only after grouping.
        point.payload = self._process_payload(point.payload, with_payload)
        for group_value in group_values:
            if group_value not in groups:
                groups[group_value] = models.PointGroup(id=group_value, hits=[])
            if len(groups[group_value].hits) >= group_size:
                continue
            groups[group_value].hits.append(point)
    groups_result: list[models.PointGroup] = list(groups.values())[:limit]
    if isinstance(with_lookup, str):
        # Bare collection name acts as a lookup with default selectors.
        with_lookup = models.WithLookup(
            collection=with_lookup,
            with_payload=None,
            with_vectors=None,
        )
    if with_lookup is not None and with_lookup_collection is not None:
        for group in groups_result:
            lookup = with_lookup_collection.retrieve(
                ids=[group.id],
                with_payload=with_lookup.with_payload,
                with_vectors=with_lookup.with_vectors,
            )
            group.lookup = next(iter(lookup), None)
    return models.GroupsResult(groups=groups_result)
def facet(
    self,
    key: str,
    facet_filter: Optional[types.Filter] = None,
    limit: int = 10,
) -> types.FacetResponse:
    """Count distinct payload values under *key* across matching points.

    Each point contributes at most once per distinct value; UUID values are
    canonicalised to hyphenated form.  Hits are ordered by count descending,
    then value ascending, truncated to *limit*.
    """
    counts: dict[types.FacetValue, int] = defaultdict(int)
    mask = self._payload_and_non_deleted_mask(facet_filter)
    for idx, payload in enumerate(self.payload):
        if not mask[idx]:
            continue
        if not isinstance(payload, dict):
            continue
        values = value_by_key(payload, key)
        if values is None:
            continue
        # Deduplicate per point and keep only valid facet-value types.
        distinct: set[types.FacetValue] = set()
        for value in values:
            if type(value) not in get_args_subscribed(types.FacetValue):
                continue
            canonical_uuid = parse_uuid(value)
            if canonical_uuid:
                # Normalise UUIDs to the hyphenated string form.
                value = str(canonical_uuid)
            distinct.add(value)
        for value in distinct:
            counts[value] += 1
    ranked = sorted(counts.items(), key=lambda item: (-item[1], item[0]))[:limit]
    return types.FacetResponse(
        hits=[models.FacetValueHit(value=value, count=count) for value, count in ranked]
    )
def retrieve(
    self,
    ids: Sequence[types.PointId],
    with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
    with_vectors: Union[bool, Sequence[str]] = False,
) -> list[models.Record]:
    """Fetch live points by external id; unknown or deleted ids are skipped."""
    # UUID objects are stored under their string form.
    normalized_ids = [str(id_) if isinstance(id_, uuid.UUID) else id_ for id_ in ids]
    records: list[models.Record] = []
    for point_id in normalized_ids:
        internal_idx = self.ids.get(point_id)
        if internal_idx is None:
            continue
        if self.deleted[internal_idx] == 1:
            continue
        records.append(
            models.Record(
                id=point_id,
                payload=self._get_payload(internal_idx, with_payload),
                vector=self._get_vectors(internal_idx, with_vectors),
            )
        )
    return records
    def _preprocess_recommend_input(
        self,
        positive: Optional[Sequence[models.VectorInput]] = None,
        negative: Optional[Sequence[models.VectorInput]] = None,
        strategy: Optional[types.RecommendStrategy] = None,
        query_filter: Optional[types.Filter] = None,
        using: Optional[str] = None,
        lookup_from_collection: Optional["LocalCollection"] = None,
        lookup_from_vector_name: Optional[str] = None,
    ) -> tuple[
        list[list[float]],
        list[list[float]],
        list[models.SparseVector],
        list[models.SparseVector],
        list[list[list[float]]],
        list[list[list[float]]],
        types.Filter,
    ]:
        """Resolve recommend examples into raw vectors grouped by vector kind.

        Examples given as point ids are dereferenced in
        ``lookup_from_collection`` (this collection by default) under
        ``lookup_from_vector_name`` (the search vector name by default). Only
        one of the dense/sparse/multi output pairs is populated, depending on
        which kind of storage the resolved vector name belongs to. Ids taken
        from *this* collection are folded into the returned filter so the
        examples themselves are excluded from results.

        Raises:
            ValueError: if an example id is unknown, or the examples do not
                satisfy the chosen strategy's minimum requirements.
        """

        def examples_into_vectors(
            examples: Sequence[models.VectorInput],
            acc: Union[list[list[float]], list[models.SparseVector], list[list[list[float]]]],
        ) -> None:
            # Resolve each example into `acc`: ids are dereferenced into the
            # stored vector; raw vectors pass through unchanged.
            for example in examples:
                if isinstance(example, get_args(types.PointId)):
                    if example not in collection.ids:
                        raise ValueError(f"Point {example} is not found in the collection")
                    idx = collection.ids[example]
                    vec = collection_vectors[vector_name][idx]
                    if isinstance(vec, np.ndarray):
                        vec = vec.tolist()
                    acc.append(vec)
                    # Only ids from this very collection get excluded later.
                    if collection == self:
                        mentioned_ids.append(example)
                else:
                    acc.append(example)

        collection = lookup_from_collection if lookup_from_collection is not None else self
        search_in_vector_name = using if using is not None else DEFAULT_VECTOR_NAME
        vector_name = (
            lookup_from_vector_name
            if lookup_from_vector_name is not None
            else search_in_vector_name
        )
        positive = positive if positive is not None else []
        negative = negative if negative is not None else []
        # Validate input depending on strategy
        if strategy == types.RecommendStrategy.AVERAGE_VECTOR:
            if len(positive) == 0:
                raise ValueError("Positive list is empty")
        elif (
            strategy == types.RecommendStrategy.BEST_SCORE
            or strategy == types.RecommendStrategy.SUM_SCORES
        ):
            if len(positive) == 0 and len(negative) == 0:
                raise ValueError("No positive or negative examples given")
        # Turn every example into vectors
        positive_vectors: list[list[float]] = []
        negative_vectors: list[list[float]] = []
        sparse_positive_vectors: list[models.SparseVector] = []
        sparse_negative_vectors: list[models.SparseVector] = []
        positive_multivectors: list[list[list[float]]] = []
        negative_multivectors: list[list[list[float]]] = []
        mentioned_ids: list[ExtendedPointId] = []
        # The kind of the looked-up vector name decides which group is filled.
        sparse = vector_name in collection.sparse_vectors
        multi = vector_name in collection.multivectors
        if sparse:
            collection_vectors = collection.sparse_vectors
            examples_into_vectors(positive, sparse_positive_vectors)
            examples_into_vectors(negative, sparse_negative_vectors)
        elif multi:
            collection_vectors = collection.multivectors
            examples_into_vectors(positive, positive_multivectors)
            examples_into_vectors(negative, negative_multivectors)
        else:
            collection_vectors = collection.vectors
            examples_into_vectors(positive, positive_vectors)
            examples_into_vectors(negative, negative_vectors)
        # Edit query filter
        query_filter = ignore_mentioned_ids_filter(query_filter, mentioned_ids)
        return (
            positive_vectors,
            negative_vectors,
            sparse_positive_vectors,
            sparse_negative_vectors,
            positive_multivectors,
            negative_multivectors,
            query_filter,
        )
@staticmethod
def _recommend_average_dense(
positive_vectors: list[list[float]], negative_vectors: list[list[float]]
) -> types.NumpyArray:
positive_vectors_np = np.stack(positive_vectors)
negative_vectors_np = np.stack(negative_vectors) if len(negative_vectors) > 0 else None
mean_positive_vector = np.mean(positive_vectors_np, axis=0)
if negative_vectors_np is not None:
vector = (
mean_positive_vector + mean_positive_vector - np.mean(negative_vectors_np, axis=0)
)
else:
vector = mean_positive_vector
return vector
@staticmethod
def _recommend_average_sparse(
positive_vectors: list[models.SparseVector],
negative_vectors: list[models.SparseVector],
) -> models.SparseVector:
for i, vector in enumerate(positive_vectors):
validate_sparse_vector(vector)
positive_vectors[i] = sort_sparse_vector(vector)
for i, vector in enumerate(negative_vectors):
validate_sparse_vector(vector)
negative_vectors[i] = sort_sparse_vector(vector)
mean_positive_vector = sparse_avg(positive_vectors)
if negative_vectors:
mean_negative_vector = sparse_avg(negative_vectors)
vector = merge_positive_and_negative_avg(mean_positive_vector, mean_negative_vector)
else:
vector = mean_positive_vector
return vector
@staticmethod
def _recommend_average_multi(
positive_vectors: list[list[list[float]]], negative_vectors: list[list[list[float]]]
) -> list[list[float]]:
recommend_vector = [vector for multi_vector in positive_vectors for vector in multi_vector]
if len(negative_vectors) > 0:
for multi_vector in negative_vectors:
for vector in multi_vector:
recommend_vector.append([-value for value in vector])
return recommend_vector
    def _construct_recommend_query(
        self,
        positive: Optional[Sequence[models.VectorInput]] = None,
        negative: Optional[Sequence[models.VectorInput]] = None,
        query_filter: Optional[types.Filter] = None,
        using: Optional[str] = None,
        lookup_from_collection: Optional["LocalCollection"] = None,
        lookup_from_vector_name: Optional[str] = None,
        strategy: Optional[types.RecommendStrategy] = None,
    ) -> tuple[
        Union[RecoQuery, SparseRecoQuery, MultiRecoQuery, models.SparseVector, types.NumpyArray],
        types.Filter,
    ]:
        """Build the query object for a recommend request.

        With the (default) ``average_vector`` strategy the examples are
        collapsed into a single query vector; with ``best_score`` /
        ``sum_scores`` they are wrapped into a ``*RecoQuery`` that keeps
        positives and negatives separate. Also returns the query filter
        extended to exclude example ids taken from this collection.

        Raises:
            ValueError: if the examples do not satisfy the strategy's
                requirements, or the strategy itself is unknown.
        """
        strategy = strategy if strategy is not None else types.RecommendStrategy.AVERAGE_VECTOR
        (
            positive_vectors,
            negative_vectors,
            sparse_positive_vectors,
            sparse_negative_vectors,
            multi_positive_vectors,
            multi_negative_vectors,
            edited_query_filter,
        ) = self._preprocess_recommend_input(
            positive,
            negative,
            strategy,
            query_filter,
            using,
            lookup_from_collection,
            lookup_from_vector_name,
        )
        if strategy == types.RecommendStrategy.AVERAGE_VECTOR:
            # Validate input
            # At most one of the dense/sparse/multi groups is non-empty,
            # depending on the kind of the resolved vector name.
            if positive_vectors:
                query_vector = self._recommend_average_dense(
                    positive_vectors,
                    negative_vectors,
                )
            elif sparse_positive_vectors:
                query_vector = self._recommend_average_sparse(
                    sparse_positive_vectors,
                    sparse_negative_vectors,
                )
            elif multi_positive_vectors:
                query_vector = self._recommend_average_multi(
                    multi_positive_vectors, multi_negative_vectors
                )
            else:
                raise ValueError("No positive examples given with 'average_vector' strategy")
        elif (
            strategy == types.RecommendStrategy.BEST_SCORE
            or strategy == types.RecommendStrategy.SUM_SCORES
        ):
            # Score-based strategies keep the raw examples in the query.
            if positive_vectors or negative_vectors:
                query_vector = RecoQuery(
                    positive=positive_vectors,
                    negative=negative_vectors,
                    strategy=strategy,
                )
            elif sparse_positive_vectors or sparse_negative_vectors:
                query_vector = SparseRecoQuery(
                    positive=sparse_positive_vectors,
                    negative=sparse_negative_vectors,
                    strategy=strategy,
                )
            elif multi_positive_vectors or multi_negative_vectors:
                query_vector = MultiRecoQuery(
                    positive=multi_positive_vectors,
                    negative=multi_negative_vectors,
                    strategy=strategy,
                )
            else:
                raise ValueError(
                    f"No positive or negative examples given with '{strategy}' strategy"
                )
        else:
            raise ValueError(
                f"strategy `{strategy}` is not a valid strategy, choose one from {types.RecommendStrategy}"
            )
        return query_vector, edited_query_filter
def recommend(
self,
positive: Optional[Sequence[models.VectorInput]] = None,
negative: Optional[Sequence[models.VectorInput]] = None,
query_filter: Optional[types.Filter] = None,
limit: int = 10,
offset: int = 0,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
score_threshold: Optional[float] = None,
using: Optional[str] = None,
lookup_from_collection: Optional["LocalCollection"] = None,
lookup_from_vector_name: Optional[str] = None,
strategy: Optional[types.RecommendStrategy] = None,
) -> list[models.ScoredPoint]:
query_vector, edited_query_filter = self._construct_recommend_query(
positive,
negative,
query_filter,
using,
lookup_from_collection,
lookup_from_vector_name,
strategy,
)
search_in_vector_name = using if using is not None else DEFAULT_VECTOR_NAME
return self.search(
query_vector=(search_in_vector_name, query_vector),
query_filter=edited_query_filter,
limit=limit,
offset=offset,
with_payload=with_payload,
with_vectors=with_vectors,
score_threshold=score_threshold,
)
def recommend_groups(
self,
group_by: str,
positive: Optional[Sequence[models.VectorInput]] = None,
negative: Optional[Sequence[models.VectorInput]] = None,
query_filter: Optional[models.Filter] = None,
limit: int = 10,
group_size: int = 1,
score_threshold: Optional[float] = None,
with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
using: Optional[str] = None,
lookup_from_collection: Optional["LocalCollection"] = None,
lookup_from_vector_name: Optional[str] = None,
with_lookup: Optional[types.WithLookupInterface] = None,
with_lookup_collection: Optional["LocalCollection"] = None,
strategy: Optional[types.RecommendStrategy] = None,
) -> types.GroupsResult:
strategy = strategy if strategy is not None else types.RecommendStrategy.AVERAGE_VECTOR
query_vector, edited_query_filter = self._construct_recommend_query(
positive,
negative,
query_filter,
using,
lookup_from_collection,
lookup_from_vector_name,
strategy,
)
search_in_vector_name = using if using is not None else DEFAULT_VECTOR_NAME
return self.search_groups(
query_vector=(search_in_vector_name, query_vector),
query_filter=edited_query_filter,
group_by=group_by,
group_size=group_size,
limit=limit,
with_payload=with_payload,
with_vectors=with_vectors,
score_threshold=score_threshold,
with_lookup=with_lookup,
with_lookup_collection=with_lookup_collection,
)
def search_matrix_offsets(
self,
query_filter: Optional[types.Filter] = None,
limit: int = 3,
sample: int = 10,
using: Optional[str] = None,
) -> types.SearchMatrixOffsetsResponse:
ids, all_scores = self._search_distance_matrix(
query_filter=query_filter, limit=limit, sample=sample, using=using
)
offsets_row = []
offsets_col = []
offset_by_id = {point_id: idx for idx, point_id in enumerate(ids)}
for row_offset, scored_points in enumerate(all_scores):
for scored_point in scored_points:
offsets_row.append(row_offset)
offsets_col.append(offset_by_id[scored_point.id])
# flatten the scores
scores = []
for sample_scores in all_scores:
for score in sample_scores:
scores.append(score.score)
return types.SearchMatrixOffsetsResponse(
offsets_row=offsets_row,
offsets_col=offsets_col,
scores=scores,
ids=ids,
)
def search_matrix_pairs(
self,
query_filter: Optional[types.Filter] = None,
limit: int = 3,
sample: int = 10,
using: Optional[str] = None,
) -> types.SearchMatrixPairsResponse:
ids, all_scores = self._search_distance_matrix(
query_filter=query_filter, limit=limit, sample=sample, using=using
)
pairs = []
for sample_id, sample_scores in list(zip(ids, all_scores)):
for sample_score in sample_scores:
pairs.append(
types.SearchMatrixPair(
a=sample_id, b=sample_score.id, score=sample_score.score
)
)
return types.SearchMatrixPairsResponse(
pairs=pairs,
)
    def _search_distance_matrix(
        self,
        query_filter: Optional[types.Filter] = None,
        limit: int = 3,
        sample: int = 10,
        using: Optional[str] = None,
    ) -> tuple[list[ExtendedPointId], list[list[ScoredPoint]]]:
        """Sample points and score each against the other sampled points.

        Returns the id-sorted sampled ids and, per sample, its up to
        ``limit`` nearest neighbors among the *other* samples. Returns two
        empty lists when fewer than two usable samples are found.
        """
        samples: list[ScoredPoint] = []
        search_in_vector_name = using if using is not None else DEFAULT_VECTOR_NAME
        # Sample random points from the whole collection to filter out the ones without vectors
        # TODO: use search_filter once with have an HasVector like condition
        # NOTE(review): `with_vectors` is passed the vector *name* (a bare
        # str), not a list of names — presumably `_get_vectors` accepts a
        # string selector; confirm it is not iterated char-by-char.
        candidates = self._sample_randomly(
            len(self.ids), query_filter, False, search_in_vector_name
        )
        for candidate in candidates:
            # check if enough samples are collected
            if len(samples) == sample:
                break
            # check if the candidate has a vector
            if candidate.vector is not None:
                samples.append(candidate)
        # can't build a matrix with less than 2 results
        if len(samples) < 2:
            return [], []
        # sort samples by id
        samples = sorted(samples, key=lambda x: x.id)
        # extract the ids
        ids = [sample.id for sample in samples]
        scores: list[list[ScoredPoint]] = []
        # Query `limit` neighbors for each sample
        for sampled_id_index, sampled in enumerate(samples):
            # restrict each search to the *other* sampled ids only
            ids_to_includes = [x for (i, x) in enumerate(ids) if i != sampled_id_index]
            sampling_filter = _include_ids_in_filter(query_filter, ids_to_includes)
            sampled_vector = sampled.vector
            # named-vector responses come back as a dict keyed by vector name
            search_vector = (
                sampled_vector[search_in_vector_name]
                if isinstance(sampled_vector, dict)
                else sampled_vector
            )
            samples_scores = self.search(
                query_vector=(search_in_vector_name, search_vector),
                query_filter=sampling_filter,
                limit=limit,
                with_payload=False,
                with_vectors=False,
            )
            scores.append(samples_scores)
        return ids, scores
@staticmethod
def _preprocess_target(
target: Optional[models.VectorInput], collection: "LocalCollection", vector_name: str
) -> tuple[models.Vector, Optional[types.PointId]]:
if isinstance(target, get_args(types.PointId)):
if target not in collection.ids:
raise ValueError(f"Point {target} is not found in the collection")
idx = collection.ids[target]
if vector_name in collection.vectors:
target_vector = collection.vectors[vector_name][idx].tolist()
elif vector_name in collection.sparse_vectors:
target_vector = collection.sparse_vectors[vector_name][idx]
else:
target_vector = collection.multivectors[vector_name][idx].tolist()
return target_vector, target
return target, None
    def _preprocess_context(
        self, context: list[models.ContextPair], collection: "LocalCollection", vector_name: str
    ) -> tuple[
        list[ContextPair], list[SparseContextPair], list[MultiContextPair], list[types.PointId]
    ]:
        """Resolve context pairs (ids -> stored vectors) and group them by kind.

        Each pair's positive/negative side may be a point id or a raw vector;
        ids are looked up in ``collection`` under ``vector_name``. At most one
        of the dense/sparse/multi result lists ends up non-empty. Ids resolved
        from *this* collection are returned as ``mentioned_ids`` so callers
        can exclude them from search results.

        Raises:
            ValueError: if an id is unknown, if a pair mixes vector kinds, or
                if different pairs use different vector kinds.
        """
        mentioned_ids = []
        dense_context_vectors = []
        sparse_context_vectors = []
        multi_context_vectors = []
        for pair in context:
            pair_vectors = []
            # Resolve both sides of the pair: positive first, negative second.
            for example in [pair.positive, pair.negative]:
                if isinstance(example, get_args(types.PointId)):
                    if example not in collection.ids:
                        raise ValueError(f"Point {example} is not found in the collection")
                    idx = collection.ids[example]
                    if vector_name in collection.vectors:
                        vector = collection.vectors[vector_name][idx].tolist()
                    elif vector_name in collection.sparse_vectors:
                        vector = collection.sparse_vectors[vector_name][idx]
                    else:
                        vector = collection.multivectors[vector_name][idx].tolist()
                    pair_vectors.append(vector)
                    if collection == self:
                        mentioned_ids.append(example)
                else:
                    pair_vectors.append(example)
            # Classify the pair by resolved vector kind; both sides must match.
            if isinstance(pair_vectors[0], SparseVector) and isinstance(
                pair_vectors[1], SparseVector
            ):
                sparse_context_vectors.append(
                    SparseContextPair(positive=pair_vectors[0], negative=pair_vectors[1])
                )
            elif isinstance(pair_vectors[0], list) and isinstance(pair_vectors[1], list):
                # A list of floats is a dense vector; a list of lists is a multivector.
                if isinstance(pair_vectors[0][0], float) and isinstance(pair_vectors[1][0], float):
                    dense_context_vectors.append(
                        ContextPair(positive=pair_vectors[0], negative=pair_vectors[1])
                    )
                elif isinstance(pair_vectors[0][0], list) and isinstance(pair_vectors[1][0], list):
                    multi_context_vectors.append(
                        MultiContextPair(positive=pair_vectors[0], negative=pair_vectors[1])
                    )
                else:
                    raise ValueError(
                        "Context example pair must be of the same type: dense, sparse or multi vectors"
                    )
            else:
                raise ValueError(
                    "Context example pair must be of the same type: dense, sparse or multi vectors"
                )
        # All pairs together must also agree on a single vector kind.
        if (
            sum(
                [
                    bool(sparse_context_vectors),
                    bool(dense_context_vectors),
                    bool(multi_context_vectors),
                ]
            )
            > 1
        ):
            raise ValueError(
                "All context example pairs must be either dense or sparse or multi vectors"
            )
        return dense_context_vectors, sparse_context_vectors, multi_context_vectors, mentioned_ids
def _preprocess_discover(
self,
target: Optional[models.VectorInput] = None,
context: Optional[Sequence[models.ContextPair]] = None,
query_filter: Optional[types.Filter] = None,
using: Optional[str] = None,
lookup_from_collection: Optional["LocalCollection"] = None,
lookup_from_vector_name: Optional[str] = None,
) -> tuple[
Optional[models.Vector],
list[ContextPair],
list[SparseContextPair],
list[MultiContextPair],
types.Filter,
]:
if target is None and not context:
raise ValueError("No target or context given")
collection = lookup_from_collection if lookup_from_collection is not None else self
search_in_vector_name = using if using is not None else DEFAULT_VECTOR_NAME
vector_name = (
lookup_from_vector_name
if lookup_from_vector_name is not None
else search_in_vector_name
)
target_vector, target_id = self._preprocess_target(target, collection, vector_name)
context = list(context) if context is not None else []
dense_context_vectors, sparse_context_vectors, multi_context_vectors, mentioned_ids = (
self._preprocess_context(context, collection, vector_name)
)
if target_id is not None and collection == self:
mentioned_ids.append(target_id)
# Edit query filter
query_filter = ignore_mentioned_ids_filter(query_filter, mentioned_ids)
return (
target_vector,
dense_context_vectors,
sparse_context_vectors,
multi_context_vectors,
query_filter,
) # type: ignore
def discover(
self,
target: Optional[models.VectorInput] = None,
context: Optional[Sequence[models.ContextPair]] = None,
query_filter: Optional[types.Filter] = None,
limit: int = 10,
offset: int = 0,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
using: Optional[str] = None,
lookup_from_collection: Optional["LocalCollection"] = None,
lookup_from_vector_name: Optional[str] = None,
score_threshold: Optional[float] = None,
) -> list[models.ScoredPoint]:
(
target_vector,
dense_context_vectors,
sparse_context_vectors,
multi_context_vectors,
edited_query_filter,
) = self._preprocess_discover(
target,
context,
query_filter,
using,
lookup_from_collection,
lookup_from_vector_name,
)
query_vector: Union[DenseQueryVector, SparseQueryVector, MultiQueryVector]
# Discovery search
if target_vector is not None:
if isinstance(target_vector, list):
if isinstance(target_vector[0], float):
query_vector = DiscoveryQuery(target_vector, dense_context_vectors)
else:
query_vector = MultiDiscoveryQuery(target_vector, multi_context_vectors)
elif isinstance(target_vector, SparseVector):
query_vector = SparseDiscoveryQuery(target_vector, sparse_context_vectors)
else:
raise ValueError("Unsupported target vector type")
# Context search
elif target_vector is None and dense_context_vectors:
query_vector = ContextQuery(dense_context_vectors)
elif target_vector is None and sparse_context_vectors:
query_vector = SparseContextQuery(sparse_context_vectors)
elif target_vector is None and multi_context_vectors:
query_vector = MultiContextQuery(multi_context_vectors)
else:
raise ValueError("No target or context given")
search_in_vector_name = using if using is not None else DEFAULT_VECTOR_NAME
return self.search(
query_vector=(search_in_vector_name, query_vector),
query_filter=edited_query_filter,
limit=limit,
offset=offset,
with_payload=with_payload,
with_vectors=with_vectors,
score_threshold=score_threshold,
)
@classmethod
def _universal_id(cls, point_id: models.ExtendedPointId) -> tuple[str, int]:
if isinstance(point_id, str):
return point_id, 0
elif isinstance(point_id, int):
return "", point_id
raise TypeError(f"Incompatible point id type: {type(point_id)}")
def scroll(
self,
scroll_filter: Optional[types.Filter] = None,
limit: int = 10,
order_by: Optional[types.OrderBy] = None,
offset: Optional[types.PointId] = None,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
) -> tuple[list[types.Record], Optional[types.PointId]]:
if len(self.ids) == 0:
return [], None
if order_by is None:
# order by id (default)
return self._scroll_by_id(
scroll_filter=scroll_filter,
limit=limit,
offset=offset,
with_payload=with_payload,
with_vectors=with_vectors,
)
# order by value
if offset is not None:
raise ValueError(
"Offset is not supported in conjunction with `order_by` scroll parameter"
)
return self._scroll_by_value(
order_by=order_by,
scroll_filter=scroll_filter,
limit=limit,
with_payload=with_payload,
with_vectors=with_vectors,
)
def count(self, count_filter: Optional[types.Filter] = None) -> models.CountResult:
mask = self._payload_and_non_deleted_mask(count_filter)
return models.CountResult(count=np.count_nonzero(mask))
def _scroll_by_id(
self,
scroll_filter: Optional[types.Filter] = None,
limit: int = 10,
offset: Optional[types.PointId] = None,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
) -> tuple[list[types.Record], Optional[types.PointId]]:
sorted_ids = sorted(self.ids.items(), key=lambda x: self._universal_id(x[0]))
result: list[types.Record] = []
mask = self._payload_and_non_deleted_mask(scroll_filter)
for point_id, idx in sorted_ids:
if offset is not None and self._universal_id(point_id) < self._universal_id(offset):
continue
if len(result) >= limit + 1:
break
if not mask[idx]:
continue
result.append(
models.Record(
id=point_id,
payload=self._get_payload(idx, with_payload),
vector=self._get_vectors(idx, with_vectors),
)
)
if len(result) > limit:
return result[:limit], result[limit].id
else:
return result, None
    def _scroll_by_value(
        self,
        order_by: types.OrderBy,
        scroll_filter: Optional[types.Filter] = None,
        limit: int = 10,
        with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
        with_vectors: Union[bool, Sequence[str]] = False,
    ) -> tuple[list[types.Record], Optional[types.PointId]]:
        """Scroll points ordered by the payload value under ``order_by.key``.

        A point appears once per distinct value it holds under the key.
        There is no next-page offset for value ordering — the second tuple
        element is always None; continuation is driven via
        ``order_by.start_from`` (an inclusive bound).
        """
        # Normalize grpc / plain-string forms into a models.OrderBy.
        if isinstance(order_by, grpc.OrderBy):
            order_by = GrpcToRest.convert_order_by(order_by)
        if isinstance(order_by, str):
            order_by = models.OrderBy(key=order_by)
        value_and_ids: list[tuple[OrderValue, ExtendedPointId, int]] = []
        for external_id, internal_id in self.ids.items():
            # get order-by values for id
            payload_values = value_by_key(self.payload[internal_id], order_by.key)
            if payload_values is None:
                continue
            # replicate id for each value it has
            for value in payload_values:
                ordering_value = to_order_value(value)
                if ordering_value is not None:
                    value_and_ids.append((ordering_value, external_id, internal_id))
        direction = order_by.direction if order_by.direction is not None else models.Direction.ASC
        should_reverse = direction == models.Direction.DESC
        # sort by value only
        value_and_ids.sort(key=lambda x: x[0], reverse=should_reverse)
        mask = self._payload_and_non_deleted_mask(scroll_filter)
        result: list[types.Record] = []
        start_from = to_order_value(order_by.start_from)
        # dedup by (value, external_id)
        seen_tuples: set[tuple[OrderValue, ExtendedPointId]] = set()
        for value, external_id, internal_id in value_and_ids:
            # skip entries before the requested starting value
            if start_from is not None:
                if direction == models.Direction.ASC:
                    if value < start_from:
                        continue
                elif direction == models.Direction.DESC:
                    if value > start_from:
                        continue
            if len(result) >= limit:
                break
            if not mask[internal_id]:
                continue
            if (value, external_id) in seen_tuples:
                continue
            seen_tuples.add((value, external_id))
            result.append(
                models.Record(
                    id=external_id,
                    payload=self._get_payload(internal_id, with_payload),
                    vector=self._get_vectors(internal_id, with_vectors),
                    order_value=value,
                )
            )
        return result, None
def _sample_randomly(
self,
limit: int,
query_filter: Optional[types.Filter],
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
) -> list[types.ScoredPoint]:
mask = self._payload_and_non_deleted_mask(query_filter)
random_scores = np.random.rand(len(self.ids))
random_order = np.argsort(random_scores)
result: list[types.ScoredPoint] = []
for idx in random_order:
if len(result) >= limit:
break
if not mask[idx]:
continue
point_id = self.ids_inv[idx]
scored_point = construct(
models.ScoredPoint,
id=point_id,
score=float(1.0),
version=0,
payload=self._get_payload(idx, with_payload),
vector=self._get_vectors(idx, with_vectors),
)
result.append(scored_point)
return result
def _search_with_mmr(
self,
query_vector: Union[
list[float],
SparseVector,
list[list[float]],
],
mmr: types.Mmr,
using: Optional[str],
query_filter: Optional[types.Filter] = None,
limit: int = 10,
offset: Optional[int] = None,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
score_threshold: Optional[float] = None,
) -> list[models.ScoredPoint]:
search_limit = mmr.candidates_limit if mmr.candidates_limit is not None else limit
using = using or DEFAULT_VECTOR_NAME
search_results = self.search(
query_vector=(using, query_vector),
query_filter=query_filter,
limit=search_limit,
offset=offset,
with_payload=with_payload,
with_vectors=with_vectors,
score_threshold=score_threshold,
)
diversity = mmr.diversity if mmr.diversity is not None else 0.5
lambda_ = 1.0 - diversity
return self._mmr(search_results, query_vector, using, lambda_, limit)
    def _mmr(
        self,
        search_results: list[models.ScoredPoint],
        query_vector: Union[list[float], SparseVector, list[list[float]]],
        using: str,
        lambda_: float,
        limit: int,
    ) -> list[models.ScoredPoint]:
        """Re-rank candidates with Maximal Marginal Relevance.

        ``lambda_`` weighs relevance against diversity: 1.0 ranks purely by
        similarity to the query, 0.0 purely by dissimilarity to the already
        selected points. The top search result is always selected first.

        Raises:
            ValueError: if ``lambda_`` is outside [0.0, 1.0].
        """
        if lambda_ < 0.0 or lambda_ > 1.0:
            raise ValueError("MMR lambda must be between 0.0 and 1.0")
        if not search_results:
            return []
        # distance matrix between candidates
        candidate_distance_matrix: dict[
            tuple[models.ExtendedPointId, models.ExtendedPointId], float
        ] = {}
        candidate_vectors = []
        candidate_ids = []
        candidates: list[models.ScoredPoint] = []
        # `with_vectors` might be different from `using`, thus we need to retrieve vectors explicitly
        for point in search_results:
            idx = self.ids[point.id]
            candidate_vector = self._get_vectors(idx, [using])
            if isinstance(candidate_vector, dict):
                candidate_vector = candidate_vector.get(using)
            candidate_vectors.append(candidate_vector)
            candidate_ids.append(point.id)
            candidates.append(point)
        query_raw_similarities: dict[models.ExtendedPointId, float] = {}
        id_to_point = {point.id: point for point in search_results}
        # Sparse vectors have no configured distance parameter.
        distance = (
            self.get_vector_params(using).distance if using not in self.sparse_vectors else None
        )
        query_vector = (
            np.array(query_vector)
            if not isinstance(query_vector, SparseVector)
            else sort_sparse_vector(query_vector)
        )
        # Fill one row of the pairwise similarity matrix per candidate and
        # record each candidate's similarity to the query. The branch taken
        # depends on the candidate's vector kind: dense, sparse, multivector.
        for candidate_id, candidate_vector in zip(candidate_ids, candidate_vectors):
            if isinstance(candidate_vector, list) and isinstance(candidate_vector[0], float):
                candidate_vector_np = np.array(candidate_vector)
                # lazily convert the whole candidate batch once
                if not isinstance(candidate_vectors, np.ndarray):
                    candidate_vectors = np.array(candidate_vectors)
                nearest_candidates = calculate_distance_core(
                    candidate_vector_np, candidate_vectors, distance
                ).tolist()
                query_raw_similarities[candidate_id] = calculate_distance_core(
                    query_vector, candidate_vector_np[np.newaxis, :], distance
                ).tolist()[0]
            elif isinstance(candidate_vector, SparseVector):
                nearest_candidates = calculate_distance_sparse(
                    candidate_vector,
                    candidate_vectors,
                    empty_is_zero=True,
                ).tolist()
                query_raw_similarities[candidate_id] = calculate_distance_sparse(
                    query_vector,
                    [candidate_vector],
                    empty_is_zero=True,
                ).tolist()[0]
            else:
                candidate_vector_np = np.array(candidate_vector)
                if not isinstance(candidate_vectors[0], np.ndarray):
                    candidate_vectors = [np.array(multivec) for multivec in candidate_vectors]
                nearest_candidates = calculate_multi_distance_core(
                    candidate_vector_np,
                    candidate_vectors,
                    distance,
                ).tolist()
                query_raw_similarities[candidate_id] = calculate_multi_distance_core(
                    query_vector, [candidate_vector_np], distance
                ).tolist()[0]
            for i in range(len(candidate_ids)):
                candidate_distance_matrix[(candidate_id, candidate_ids[i])] = nearest_candidates[i]
        # Greedy MMR selection, seeded with the top search result.
        selected = [candidate_ids[0]]
        pending = candidate_ids[1:]
        while len(selected) < limit and len(pending) > 0:
            mmr_scores = []
            for pending_id in pending:
                relevance_score = query_raw_similarities[pending_id]
                similarities_to_selected = []
                for selected_id in selected:
                    similarities_to_selected.append(
                        candidate_distance_matrix[(pending_id, selected_id)]
                    )
                max_similarity_to_selected = max(similarities_to_selected)
                mmr_score = (
                    lambda_ * relevance_score - (1.0 - lambda_) * max_similarity_to_selected
                )
                mmr_scores.append(mmr_score)
            if all(
                [np.isneginf(sim) for sim in mmr_scores]
            ):  # no points left passing score threshold
                break
            best_candidate_index = np.argmax(mmr_scores).item()
            selected.append(pending.pop(best_candidate_index))
        return [id_to_point[candidate_id] for candidate_id in selected]
def _rescore_with_formula(
self,
query: models.FormulaQuery,
prefetches_results: list[list[models.ScoredPoint]],
limit: int,
with_payload: Union[bool, Sequence[str], types.PayloadSelector],
with_vectors: Union[bool, Sequence[str]],
) -> list[models.ScoredPoint]:
# collect prefetches in vec of dicts for faster lookup
prefetches_scores = [
dict((point.id, point.score) for point in prefetch) for prefetch in prefetches_results
]
defaults = query.defaults or {}
points_to_rescore: set[models.ExtendedPointId] = set()
for prefetch in prefetches_results:
for point in prefetch:
points_to_rescore.add(point.id)
# Evaluate formula for each point
rescored: list[models.ScoredPoint] = []
for point_id in points_to_rescore:
internal_id = self.ids[point_id]
payload = self._get_payload(internal_id, True) or {}
has_vector = self._calculate_has_vector(internal_id)
score = evaluate_expression(
expression=query.formula,
point_id=point_id,
scores=prefetches_scores,
payload=payload,
has_vector=has_vector,
defaults=defaults,
)
# Turn score into np.float32 to match core
with warnings.catch_warnings():
warnings.simplefilter("ignore")
score_f32 = np.float32(score)
if not np.isfinite(score_f32):
raise_non_finite_error(f"{score} as f32 = {score_f32}")
point = construct(
models.ScoredPoint,
id=point_id,
score=float(score_f32),
version=0,
payload=self._get_payload(internal_id, with_payload),
vector=self._get_vectors(internal_id, with_vectors),
)
rescored.append(point)
rescored.sort(key=lambda x: x.score, reverse=True)
return rescored[:limit]
    def _update_point(self, point: models.PointStruct) -> None:
        """Overwrite an existing point's payload and vectors in place.

        Vector names missing from ``point.vector`` are marked deleted for
        this point. Dense and multi vectors are normalized when the distance
        is cosine; sparse vector changes update per-name IDF statistics.
        """
        # ids are stored internally in string form
        if isinstance(point.id, uuid.UUID):
            point.id = str(point.id)
        idx = self.ids[point.id]
        self.payload[idx] = deepcopy(
            to_jsonable_python(point.payload) if point.payload is not None else {}
        )
        # A bare list is shorthand for the default (unnamed) vector.
        if isinstance(point.vector, list):
            vectors = {DEFAULT_VECTOR_NAME: point.vector}
        else:
            vectors = point.vector
        # dense vectors
        for vector_name, _named_vectors in self.vectors.items():
            vector = vectors.get(vector_name)
            if vector is not None:
                params = self.get_vector_params(vector_name)
                assert not np.isnan(vector).any(), "Vector contains NaN values"
                if params.distance == models.Distance.COSINE:
                    # Store unit-length vectors so dot product equals cosine.
                    norm = np.linalg.norm(vector)
                    vector = np.array(vector) / norm if norm > EPSILON else vector
                self.vectors[vector_name][idx] = vector
                self.deleted_per_vector[vector_name][idx] = 0
            else:
                self.deleted_per_vector[vector_name][idx] = 1
        # sparse vectors
        for vector_name, _named_vectors in self.sparse_vectors.items():
            vector = vectors.get(vector_name)
            was_deleted = self.deleted_per_vector[vector_name][idx]
            if not was_deleted:
                # Remove the old vector's contribution to the IDF stats.
                previous_vector = self.sparse_vectors[vector_name][idx]
                self._update_idf_remove(previous_vector, vector_name)
            if vector is not None:
                self.sparse_vectors[vector_name][idx] = vector
                self.deleted_per_vector[vector_name][idx] = 0
                self._update_idf_append(vector, vector_name)
            else:
                self.deleted_per_vector[vector_name][idx] = 1
        # multivectors
        for vector_name, _named_vector in self.multivectors.items():
            vector = vectors.get(vector_name)
            if vector is not None:
                params = self.get_vector_params(vector_name)
                assert not np.isnan(vector).any(), "Vector contains NaN values"
                if params.distance == models.Distance.COSINE:
                    # Normalize each row of the multivector independently.
                    vector_norm = np.linalg.norm(vector, axis=-1)[:, np.newaxis]
                    vector /= np.where(vector_norm != 0.0, vector_norm, EPSILON)
                self.multivectors[vector_name][idx] = np.array(vector)
                self.deleted_per_vector[vector_name][idx] = 0
            else:
                self.deleted_per_vector[vector_name][idx] = 1
        self.deleted[idx] = 0
    def _add_point(self, point: models.PointStruct) -> None:
        """Append a brand-new point at the next free index.

        Payloads are deep-copied; dense/multi vectors are normalized for
        COSINE distance. Vector names absent from the point get a placeholder
        value that is immediately marked deleted for that name.
        """
        idx = len(self.ids)
        if isinstance(point.id, uuid.UUID):
            point.id = str(point.id)
        self.ids[point.id] = idx
        self.ids_inv.append(point.id)
        self.payload.append(
            deepcopy(to_jsonable_python(point.payload) if point.payload is not None else {})
        )
        assert len(self.payload) == len(self.ids_inv), "Payload and ids_inv must be the same size"
        self.deleted = np.append(self.deleted, 0)
        if isinstance(point.vector, list):
            # Unnamed vector -> store under the default vector name.
            vectors = {DEFAULT_VECTOR_NAME: point.vector}
        else:
            vectors = point.vector
        # dense vectors
        for vector_name, named_vectors in self.vectors.items():
            vector = vectors.get(vector_name)
            if named_vectors.shape[0] <= idx:
                # Grow the matrix geometrically to amortize future appends.
                named_vectors = np.resize(named_vectors, (idx * 2 + 1, named_vectors.shape[1]))
            if vector is None:
                # Add fake vector and mark as removed
                fake_vector = np.ones(named_vectors.shape[1])
                named_vectors[idx] = fake_vector
                self.deleted_per_vector[vector_name] = np.append(
                    self.deleted_per_vector[vector_name], 1
                )
            else:
                vector_np = np.array(vector, dtype=np.float32)
                assert not np.isnan(vector_np).any(), "Vector contains NaN values"
                params = self.get_vector_params(vector_name)
                if params.distance == models.Distance.COSINE:
                    # Pre-normalize so scoring can use a plain dot product.
                    norm = np.linalg.norm(vector_np)
                    vector_np = vector_np / norm if norm > EPSILON else vector_np
                named_vectors[idx] = vector_np
                self.deleted_per_vector[vector_name] = np.append(
                    self.deleted_per_vector[vector_name], 0
                )
            self.vectors[vector_name] = named_vectors
        # sparse vectors
        for vector_name, named_vectors in self.sparse_vectors.items():
            vector = vectors.get(vector_name)
            if len(named_vectors) <= idx:
                # Pad list storage up to the new index.
                diff = idx - len(named_vectors) + 1
                for _ in range(diff):
                    named_vectors.append(empty_sparse_vector())
            if vector is None:
                # Add fake vector and mark as removed
                fake_vector = empty_sparse_vector()
                named_vectors[idx] = fake_vector
                self.deleted_per_vector[vector_name] = np.append(
                    self.deleted_per_vector[vector_name], 1
                )
            else:
                named_vectors[idx] = vector
                # Keep IDF statistics in sync with the stored sparse vectors.
                self._update_idf_append(vector, vector_name)
                self.deleted_per_vector[vector_name] = np.append(
                    self.deleted_per_vector[vector_name], 0
                )
            self.sparse_vectors[vector_name] = named_vectors
        # multi vectors
        for vector_name, named_vectors in self.multivectors.items():
            vector = vectors.get(vector_name)
            if len(named_vectors) <= idx:
                diff = idx - len(named_vectors) + 1
                for _ in range(diff):
                    named_vectors.append(np.array([]))
            if vector is None:
                # Add fake vector and mark as removed
                named_vectors[idx] = np.array([])
                self.deleted_per_vector[vector_name] = np.append(
                    self.deleted_per_vector[vector_name], 1
                )
            else:
                vector_np = np.array(vector, dtype=np.float32)
                assert not np.isnan(vector_np).any(), "Vector contains NaN values"
                params = self.get_vector_params(vector_name)
                if params.distance == models.Distance.COSINE:
                    # Row-wise normalization; EPSILON guards zero-norm rows.
                    vector_norm = np.linalg.norm(vector_np, axis=-1)[:, np.newaxis]
                    vector_np /= np.where(vector_norm != 0.0, vector_norm, EPSILON)
                named_vectors[idx] = vector_np
                self.deleted_per_vector[vector_name] = np.append(
                    self.deleted_per_vector[vector_name], 0
                )
            self.multivectors[vector_name] = named_vectors
    def _upsert_point(
        self,
        point: models.PointStruct,
        update_filter: Optional[types.Filter] = None,
    ) -> None:
        """Validate and insert-or-update a single point.

        Raises:
            ValueError: on a non-UUID string id, an unknown vector name, or
                an unnamed vector when the collection uses named vectors.
        """
        if isinstance(point.id, str):
            # try to parse as UUID
            try:
                _uuid = uuid.UUID(point.id)
            except ValueError as e:
                raise ValueError(f"Point id {point.id} is not a valid UUID") from e
        if isinstance(point.vector, dict):
            updated_sparse_vectors = {}
            for vector_name, vector in point.vector.items():
                if vector_name not in self._all_vectors_keys:
                    raise ValueError(f"Wrong input: Not existing vector name error: {vector_name}")
                if isinstance(vector, SparseVector):
                    # validate sparse vector
                    validate_sparse_vector(vector)
                    # sort sparse vector by indices before persistence
                    updated_sparse_vectors[vector_name] = sort_sparse_vector(vector)
            # update point.vector with the modified values after iteration
            point.vector.update(updated_sparse_vectors)
        else:
            vector_names = list(self.vectors.keys())
            multivector_names = list(self.multivectors.keys())
            if (vector_names and vector_names != [""]) or (
                multivector_names and multivector_names != [""]
            ):
                raise ValueError(
                    "Wrong input: Unnamed vectors are not allowed when a collection has named vectors or multivectors: "
                    f"{vector_names}, {multivector_names}"
                )
            if not self.vectors and not self.multivectors:
                raise ValueError("Wrong input: Not existing vector name error")
        if isinstance(point.id, uuid.UUID):
            point.id = str(point.id)
        if point.id in self.ids:
            idx = self.ids[point.id]
            if not self.deleted[idx] and update_filter is not None:
                # Existing live point: only overwrite if it matches the filter.
                has_vector = {}
                for vector_name, deleted in self.deleted_per_vector.items():
                    if not deleted[idx]:
                        has_vector[vector_name] = True
                if not check_filter(
                    update_filter, self.payload[idx], self.ids_inv[idx], has_vector
                ):
                    return None
            self._update_point(point)
        else:
            self._add_point(point)
        if self.storage is not None:
            self.storage.persist(point)
def upsert(
self,
points: Union[Sequence[models.PointStruct], models.Batch],
update_filter: Optional[types.Filter] = None,
) -> None:
if isinstance(points, list):
for point in points:
self._upsert_point(point, update_filter=update_filter)
elif isinstance(points, models.Batch):
batch = points
if isinstance(batch.vectors, list):
vectors = {DEFAULT_VECTOR_NAME: batch.vectors}
else:
vectors = batch.vectors
for idx, point_id in enumerate(batch.ids):
payload = None
if batch.payloads is not None:
payload = batch.payloads[idx]
vector = {name: v[idx] for name, v in vectors.items()}
self._upsert_point(
models.PointStruct(
id=point_id,
payload=payload,
vector=vector,
),
update_filter=update_filter,
)
else:
raise ValueError(f"Unsupported type: {type(points)}")
if len(self.ids) > self.LARGE_DATA_THRESHOLD:
show_warning_once(
f"Local mode is not recommended for collections with more than {self.LARGE_DATA_THRESHOLD:,} "
f"points. Current collection contains {len(self.ids)} points. "
"Consider using Qdrant in Docker or Qdrant Cloud for better performance with large datasets.",
category=UserWarning,
idx="large-local-collection",
stacklevel=6,
)
    def _update_named_vectors(
        self, idx: int, vectors: dict[str, Union[list[float], SparseVector, list[list[float]]]]
    ) -> None:
        """Overwrite selected named vectors of the point at storage index ``idx``.

        Only the names present in ``vectors`` are touched; each is also
        un-deleted for that name. Sparse vectors update IDF statistics;
        dense/multi vectors are normalized for COSINE distance.
        """
        for vector_name, vector in vectors.items():
            if vector_name not in self._all_vectors_keys:
                raise ValueError(f"Wrong input: Not existing vector name error: {vector_name}")
            self.deleted_per_vector[vector_name][idx] = 0
            if isinstance(vector, SparseVector):
                validate_sparse_vector(vector)
                # Swap IDF contribution: remove old vector's, add new one's.
                old_vector = self.sparse_vectors[vector_name][idx]
                self._update_idf_remove(old_vector, vector_name)
                new_vector = sort_sparse_vector(vector)
                self.sparse_vectors[vector_name][idx] = new_vector
                self._update_idf_append(new_vector, vector_name)
                continue
            vector_np = np.array(vector, dtype=np.float32)
            assert not np.isnan(vector_np).any(), "Vector contains NaN values"
            params = self.get_vector_params(vector_name)
            if vector_name in self.vectors:
                # Dense vector.
                if params.distance == models.Distance.COSINE:
                    norm = np.linalg.norm(vector_np)
                    vector_np = vector_np / norm if norm > EPSILON else vector_np
                self.vectors[vector_name][idx] = vector_np
            else:
                # Multivector: normalize each row independently.
                if params.distance == models.Distance.COSINE:
                    vector_norm = np.linalg.norm(vector_np, axis=-1)[:, np.newaxis]
                    vector_np /= np.where(vector_norm != 0.0, vector_norm, EPSILON)
                self.multivectors[vector_name][idx] = vector_np
def update_vectors(
self, points: Sequence[types.PointVectors], update_filter: Optional[types.Filter] = None
) -> None:
for point in points:
point_id = str(point.id) if isinstance(point.id, uuid.UUID) else point.id
idx = self.ids[point_id]
vector_struct = point.vector
if isinstance(vector_struct, list):
fixed_vectors = {DEFAULT_VECTOR_NAME: vector_struct}
else:
fixed_vectors = vector_struct
if not self.deleted[idx] and update_filter is not None:
has_vector = {}
for vector_name, deleted in self.deleted_per_vector.items():
if not deleted[idx]:
has_vector[vector_name] = True
if not check_filter(
update_filter, self.payload[idx], self.ids_inv[idx], has_vector
):
return None
self._update_named_vectors(idx, fixed_vectors)
self._persist_by_id(point_id)
    def delete_vectors(
        self,
        vectors: Sequence[str],
        selector: Union[
            models.Filter,
            list[models.ExtendedPointId],
            models.FilterSelector,
            models.PointIdsList,
        ],
    ) -> None:
        """Mark the named vectors as deleted for every point matched by ``selector``.

        The points themselves stay alive; only the per-vector deletion flags
        are set.
        """
        ids = self._selector_to_ids(selector)
        for point_id in ids:
            idx = self.ids[point_id]
            for vector_name in vectors:
                self.deleted_per_vector[vector_name][idx] = 1
            self._persist_by_id(point_id)
def _delete_ids(self, ids: list[types.PointId]) -> None:
for point_id in ids:
if point_id in self.ids:
idx = self.ids[point_id]
self.deleted[idx] = 1
if self.storage is not None:
for point_id in ids:
if point_id in self.ids:
self.storage.delete(point_id)
def _filter_to_ids(self, delete_filter: types.Filter) -> list[models.ExtendedPointId]:
mask = self._payload_and_non_deleted_mask(delete_filter)
ids = [point_id for point_id, idx in self.ids.items() if mask[idx]]
return ids
def _selector_to_ids(
self,
selector: Union[
models.Filter,
list[models.ExtendedPointId],
models.FilterSelector,
models.PointIdsList,
],
) -> list[models.ExtendedPointId]:
if isinstance(selector, list):
return [str(id_) if isinstance(id_, uuid.UUID) else id_ for id_ in selector]
elif isinstance(selector, models.Filter):
return self._filter_to_ids(selector)
elif isinstance(selector, models.PointIdsList):
return [str(id_) if isinstance(id_, uuid.UUID) else id_ for id_ in selector.points]
elif isinstance(selector, models.FilterSelector):
return self._filter_to_ids(selector.filter)
else:
raise ValueError(f"Unsupported selector type: {type(selector)}")
def delete(
self,
selector: Union[
models.Filter,
list[models.ExtendedPointId],
models.FilterSelector,
models.PointIdsList,
],
) -> None:
ids = self._selector_to_ids(selector)
self._delete_ids(ids)
    def _persist_by_id(self, point_id: models.ExtendedPointId) -> None:
        """Re-serialize the point's current in-memory state to storage, if any."""
        if self.storage is not None:
            idx = self.ids[point_id]
            point = models.PointStruct(
                id=point_id,
                # return_copy=False: the struct is written out immediately,
                # so sharing the live payload object is safe here.
                payload=self._get_payload(idx, with_payload=True, return_copy=False),
                vector=self._get_vectors(idx, with_vectors=True),
            )
            self.storage.persist(point)
    def set_payload(
        self,
        payload: models.Payload,
        selector: Union[
            models.Filter,
            list[models.ExtendedPointId],
            models.FilterSelector,
            models.PointIdsList,
        ],
        key: Optional[str] = None,
    ) -> None:
        """Merge ``payload`` into each selected point's payload.

        Without ``key``, top-level keys are merged (new values win). With
        ``key``, the value is written at that JSON path inside the payload.
        """
        ids = self._selector_to_ids(selector)
        jsonable_payload = deepcopy(to_jsonable_python(payload))
        keys: Optional[list[JsonPathItem]] = parse_json_path(key) if key is not None else None
        for point_id in ids:
            idx = self.ids[point_id]
            if keys is None:
                self.payload[idx] = {**self.payload[idx], **jsonable_payload}
            else:
                if self.payload[idx] is not None:
                    # NOTE(review): jsonable_payload is shared across all
                    # selected points; assumes set_value_by_key does not alias
                    # it into each payload — confirm.
                    set_value_by_key(payload=self.payload[idx], value=jsonable_payload, keys=keys)
            self._persist_by_id(point_id)
    def overwrite_payload(
        self,
        payload: models.Payload,
        selector: Union[
            models.Filter,
            list[models.ExtendedPointId],
            models.FilterSelector,
            models.PointIdsList,
        ],
    ) -> None:
        """Replace (not merge) the payload of every selected point."""
        ids = self._selector_to_ids(selector)
        for point_id in ids:
            idx = self.ids[point_id]
            # `or {}` maps a falsy (None/empty) payload to an empty dict.
            self.payload[idx] = deepcopy(to_jsonable_python(payload)) or {}
            self._persist_by_id(point_id)
def delete_payload(
self,
keys: Sequence[str],
selector: Union[
models.Filter,
list[models.ExtendedPointId],
models.FilterSelector,
models.PointIdsList,
],
) -> None:
ids = self._selector_to_ids(selector)
for point_id in ids:
idx = self.ids[point_id]
for key in keys:
if key in self.payload[idx]:
self.payload[idx].pop(key)
self._persist_by_id(point_id)
def clear_payload(
self,
selector: Union[
models.Filter,
list[models.ExtendedPointId],
models.FilterSelector,
models.PointIdsList,
],
) -> None:
ids = self._selector_to_ids(selector)
for point_id in ids:
idx = self.ids[point_id]
self.payload[idx] = {}
self._persist_by_id(point_id)
    def batch_update_points(
        self,
        update_operations: Sequence[types.UpdateOperation],
    ) -> None:
        """Apply a heterogeneous sequence of update operations in order.

        Each operation is dispatched to the corresponding single-purpose
        method (upsert, delete, payload ops, vector ops).

        Raises:
            ValueError: for an operation type that is not recognized.
        """
        for update_op in update_operations:
            if isinstance(update_op, models.UpsertOperation):
                upsert_struct = update_op.upsert
                if isinstance(upsert_struct, models.PointsBatch):
                    self.upsert(upsert_struct.batch, update_filter=upsert_struct.update_filter)
                elif isinstance(upsert_struct, models.PointsList):
                    self.upsert(upsert_struct.points, update_filter=upsert_struct.update_filter)
                else:
                    raise ValueError(f"Unsupported upsert type: {type(update_op.upsert)}")
            elif isinstance(update_op, models.DeleteOperation):
                self.delete(update_op.delete)
            elif isinstance(update_op, models.SetPayloadOperation):
                # Operations carry either explicit ids or a filter selector.
                points_selector = update_op.set_payload.points or update_op.set_payload.filter
                self.set_payload(
                    update_op.set_payload.payload, points_selector, update_op.set_payload.key
                )
            elif isinstance(update_op, models.OverwritePayloadOperation):
                points_selector = (
                    update_op.overwrite_payload.points or update_op.overwrite_payload.filter
                )
                self.overwrite_payload(update_op.overwrite_payload.payload, points_selector)
            elif isinstance(update_op, models.DeletePayloadOperation):
                points_selector = (
                    update_op.delete_payload.points or update_op.delete_payload.filter
                )
                self.delete_payload(update_op.delete_payload.keys, points_selector)
            elif isinstance(update_op, models.ClearPayloadOperation):
                self.clear_payload(update_op.clear_payload)
            elif isinstance(update_op, models.UpdateVectorsOperation):
                update_vectors = update_op.update_vectors
                self.update_vectors(
                    update_vectors.points, update_filter=update_vectors.update_filter
                )
            elif isinstance(update_op, models.DeleteVectorsOperation):
                points_selector = (
                    update_op.delete_vectors.points or update_op.delete_vectors.filter
                )
                self.delete_vectors(update_op.delete_vectors.vector, points_selector)
            else:
                raise ValueError(f"Unsupported update operation: {type(update_op)}")
    def update_sparse_vectors_config(
        self, vector_name: str, new_config: models.SparseVectorParams
    ) -> None:
        """Replace the stored config of an existing sparse vector.

        Raises:
            ValueError: if ``vector_name`` is not a sparse vector of this collection.
        """
        if vector_name not in self.sparse_vectors:
            raise ValueError(f"Vector {vector_name} does not exist in the collection")
        self.config.sparse_vectors[vector_name] = new_config
    def info(self) -> models.CollectionInfo:
        """Return a CollectionInfo snapshot mimicking a server response.

        Status is always GREEN; HNSW/WAL/optimizer numbers are hard-coded
        placeholders because local mode performs no indexing.
        """
        return models.CollectionInfo(
            status=models.CollectionStatus.GREEN,
            optimizer_status=models.OptimizersStatusOneOf.OK,
            indexed_vectors_count=0,  # LocalCollection does not do indexing
            points_count=self.count().count,
            segments_count=1,
            payload_schema={},
            config=models.CollectionConfig(
                params=models.CollectionParams(
                    vectors=self.config.vectors,
                    shard_number=self.config.shard_number,
                    replication_factor=self.config.replication_factor,
                    write_consistency_factor=self.config.write_consistency_factor,
                    on_disk_payload=self.config.on_disk_payload,
                    sparse_vectors=self.config.sparse_vectors,
                ),
                # Placeholder values below mirror common server defaults.
                hnsw_config=models.HnswConfig(
                    m=16,
                    ef_construct=100,
                    full_scan_threshold=10000,
                ),
                wal_config=models.WalConfig(
                    wal_capacity_mb=32,
                    wal_segments_ahead=0,
                ),
                optimizer_config=models.OptimizersConfig(
                    deleted_threshold=0.2,
                    vacuum_min_vector_number=1000,
                    default_segment_number=0,
                    indexing_threshold=20000,
                    flush_interval_sec=5,
                    max_optimization_threads=1,
                ),
                quantization_config=None,
                metadata=self.config.metadata,
            ),
        )
def ignore_mentioned_ids_filter(
    query_filter: Optional[types.Filter], mentioned_ids: list[types.PointId]
) -> Optional[types.Filter]:
    """Extend ``query_filter`` so that ``mentioned_ids`` are excluded.

    Returns the (possibly deep-copied) filter, or ``None`` only when both
    ``mentioned_ids`` is empty and ``query_filter`` is ``None`` — hence the
    ``Optional`` return annotation.
    """
    if len(mentioned_ids) == 0:
        return query_filter
    ignore_mentioned_ids = models.HasIdCondition(has_id=mentioned_ids)
    if query_filter is None:
        query_filter = models.Filter(must_not=[ignore_mentioned_ids])
    else:
        # as of mypy v1.11.0 mypy is complaining on deep-copied structures with None
        query_filter = deepcopy(query_filter)
        assert query_filter is not None  # educating mypy
        if query_filter.must_not is None:
            query_filter.must_not = [ignore_mentioned_ids]
        elif isinstance(query_filter.must_not, list):
            query_filter.must_not.append(ignore_mentioned_ids)
        else:
            # Single condition -> wrap both into a list.
            query_filter.must_not = [query_filter.must_not, ignore_mentioned_ids]
    return query_filter
def _include_ids_in_filter(
    query_filter: Optional[types.Filter], ids: list[types.PointId]
) -> Optional[types.Filter]:
    """Extend ``query_filter`` so that only ``ids`` can match.

    Returns the (possibly deep-copied) filter, or ``None`` only when both
    ``ids`` is empty and ``query_filter`` is ``None`` — hence the ``Optional``
    return annotation.
    """
    if len(ids) == 0:
        return query_filter
    include_ids = models.HasIdCondition(has_id=ids)
    if query_filter is None:
        query_filter = models.Filter(must=[include_ids])
    else:
        # as of mypy v1.11.0 mypy is complaining on deep-copied structures with None
        query_filter = deepcopy(query_filter)
        assert query_filter is not None  # educating mypy
        if query_filter.must is None:
            query_filter.must = [include_ids]
        elif isinstance(query_filter.must, list):
            query_filter.must.append(include_ids)
        else:
            # Single condition -> wrap both into a list.
            query_filter.must = [query_filter.must, include_ids]
    return query_filter
def record_to_scored_point(record: types.Record) -> types.ScoredPoint:
    """Wrap a plain Record as a ScoredPoint with a neutral score of 1.0."""
    return types.ScoredPoint(
        id=record.id,
        version=0,
        score=1.0,
        payload=record.payload,
        vector=record.vector,
        order_value=record.order_value,
    )
def set_prefetch_limit_iteratively(
    prefetch: Union[types.Prefetch, list[types.Prefetch]], limit: int
) -> None:
    """Set .limit on all nested Prefetch objects without recursion.

    Walks the prefetch tree with an explicit work list so arbitrarily deep
    nesting cannot exhaust the call stack.
    """
    pending: list[Union[types.Prefetch, list[types.Prefetch]]] = [prefetch]
    while pending:
        item = pending.pop()
        if isinstance(item, list):
            # Flatten lists of Prefetch into the work list.
            pending.extend(item)
        else:
            item.limit = limit
            nested = item.prefetch
            if isinstance(nested, list):
                pending.extend(nested)
            elif isinstance(nested, types.Prefetch):
                pending.append(nested)
|
LocalCollection
|
python
|
encode__django-rest-framework
|
rest_framework/fields.py
|
{
"start": 1888,
"end": 7671
}
|
class ____(Exception):
    """
    Built-in function signatures are not inspectable. This exception is raised
    so the serializer can raise a helpful error message.
    """
    # Marker exception only; carries no extra state.
    pass
def is_simple_callable(obj):
    """
    True if the object is a callable that takes no arguments.
    """
    if not callable(obj):
        return False
    # Bail early since we cannot inspect built-in function signatures.
    if inspect.isbuiltin(obj):
        raise BuiltinSignatureError(
            'Built-in function signatures are not inspectable. '
            'Wrap the function call in a simple, pure Python function.')
    inspectable = (
        inspect.isfunction(obj)
        or inspect.ismethod(obj)
        or isinstance(obj, functools.partial)
    )
    if not inspectable:
        return False
    # Simple means: every parameter is either variadic or has a default.
    for param in inspect.signature(obj).parameters.values():
        if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
            continue
        if param.default != param.empty:
            continue
        return False
    return True
def get_attribute(instance, attrs):
    """
    Similar to Python's built in `getattr(instance, attr)`,
    but takes a list of nested attributes, instead of a single attribute.

    Also accepts either attribute lookup on objects or dictionary lookups.
    """
    for attr in attrs:
        try:
            if isinstance(instance, Mapping):
                # Dict-like step: use item access.
                instance = instance[attr]
            else:
                instance = getattr(instance, attr)
        except ObjectDoesNotExist:
            # A missing related object terminates the whole chain as None.
            return None
        if is_simple_callable(instance):
            try:
                instance = instance()
            except (AttributeError, KeyError) as exc:
                # If we raised an Attribute or KeyError here it'd get treated
                # as an omitted field in `Field.get_attribute()`. Instead we
                # raise a ValueError to ensure the exception is not masked.
                raise ValueError(f'Exception raised in callable attribute "{attr}"; original exception was: {exc}')
    return instance
def to_choices_dict(choices):
    """
    Convert choices into key/value dicts.

    to_choices_dict([1]) -> {1: 1}
    to_choices_dict([(1, '1st'), (2, '2nd')]) -> {1: '1st', 2: '2nd'}
    to_choices_dict([('Group', ((1, '1st'), 2))]) -> {'Group': {1: '1st', 2: 2}}
    """
    # Supported styles: bare values, (key, display) pairs, and
    # (group label, sub-choices) pairs, which recurse.
    mapping = {}
    for entry in choices:
        if not isinstance(entry, (list, tuple)):
            mapping[entry] = entry
            continue
        key, value = entry
        if isinstance(value, (list, tuple)):
            mapping[key] = to_choices_dict(value)
        else:
            mapping[key] = value
    return mapping
def flatten_choices_dict(choices):
    """
    Convert a group choices dict into a flat dict of choices.

    flatten_choices_dict({1: '1st', 2: '2nd'}) -> {1: '1st', 2: '2nd'}
    flatten_choices_dict({'Group': {1: '1st', 2: '2nd'}}) -> {1: '1st', 2: '2nd'}
    """
    flat = {}
    for key, value in choices.items():
        if isinstance(value, dict):
            # Grouped choices: lift the sub-choices up a level.
            flat.update(value)
        else:
            flat[key] = value
    return flat
def iter_options(grouped_choices, cutoff=None, cutoff_text=None):
    """
    Helper function for options and option groups in templates.

    Yields Option objects, bracketing each group of sub-choices with
    StartOptionGroup/EndOptionGroup markers. When ``cutoff`` is reached,
    iteration stops and an extra disabled Option carrying ``cutoff_text``
    (formatted with the count) is yielded.
    """
    class StartOptionGroup:
        start_option_group = True
        end_option_group = False

        def __init__(self, label):
            self.label = label

    class EndOptionGroup:
        start_option_group = False
        end_option_group = True

    class Option:
        start_option_group = False
        end_option_group = False

        def __init__(self, value, display_text, disabled=False):
            self.value = value
            self.display_text = display_text
            self.disabled = disabled

    # `count` tracks options across ALL groups; the cutoff applies globally.
    count = 0

    for key, value in grouped_choices.items():
        if cutoff and count >= cutoff:
            break

        if isinstance(value, dict):
            yield StartOptionGroup(label=key)
            for sub_key, sub_value in value.items():
                if cutoff and count >= cutoff:
                    break
                yield Option(value=sub_key, display_text=sub_value)
                count += 1
            # The group is always closed, even when cut off mid-group.
            yield EndOptionGroup()
        else:
            yield Option(value=key, display_text=value)
            count += 1

    if cutoff and count >= cutoff and cutoff_text:
        cutoff_text = cutoff_text.format(count=cutoff)
        yield Option(value='n/a', display_text=cutoff_text, disabled=True)
def get_error_detail(exc_info):
    """
    Given a Django ValidationError, return a list of ErrorDetail,
    with the `code` populated.

    Field-level errors (``error_dict``) produce a dict of lists keyed by
    field name; non-field errors produce a flat list.
    """
    code = getattr(exc_info, 'code', None) or 'invalid'

    try:
        error_dict = exc_info.error_dict
    except AttributeError:
        # No per-field breakdown: flatten the error list.
        return [
            ErrorDetail((error.message % error.params) if error.params else error.message,
                        code=error.code if error.code else code)
            for error in exc_info.error_list]
    return {
        k: [
            ErrorDetail((error.message % error.params) if error.params else error.message,
                        code=error.code if error.code else code)
            for error in errors
        ] for k, errors in error_dict.items()
    }
|
BuiltinSignatureError
|
python
|
astropy__astropy
|
astropy/units/tests/test_structured.py
|
{
"start": 1512,
"end": 9031
}
|
class ____(StructuredTestBase):
    """Tests of StructuredUnit construction, keying, equality and formatting."""

    def test_initialization_and_keying(self):
        su = StructuredUnit((self.p_unit, self.v_unit), ("p", "v"))
        assert su["p"] is self.p_unit
        assert su["v"] is self.v_unit
        su2 = StructuredUnit((su, self.t_unit), ("pv", "t"))
        assert isinstance(su2["pv"], StructuredUnit)
        assert su2["pv"]["p"] is self.p_unit
        assert su2["pv"]["v"] is self.v_unit
        assert su2["t"] is self.t_unit
        assert su2["pv"] == su
        su3 = StructuredUnit(("AU", "AU/day"), ("p", "v"))
        assert isinstance(su3["p"], UnitBase)
        assert isinstance(su3["v"], UnitBase)
        su4 = StructuredUnit("AU, AU/day", ("p", "v"))
        assert su4["p"] == u.AU
        assert su4["v"] == u.AU / u.day
        # Without names, fields are auto-named f0, f1, ...
        su5 = StructuredUnit(("AU", "AU/day"))
        assert su5.field_names == ("f0", "f1")
        assert su5["f0"] == u.AU
        assert su5["f1"] == u.AU / u.day

    def test_recursive_initialization(self):
        su = StructuredUnit(
            ((self.p_unit, self.v_unit), self.t_unit), (("p", "v"), "t")
        )
        assert isinstance(su["pv"], StructuredUnit)
        assert su["pv"]["p"] is self.p_unit
        assert su["pv"]["v"] is self.v_unit
        assert su["t"] is self.t_unit
        # A [name, (subnames)] list lets the caller name the sub-structure.
        su2 = StructuredUnit(
            ((self.p_unit, self.v_unit), self.t_unit), (["p_v", ("p", "v")], "t")
        )
        assert isinstance(su2["p_v"], StructuredUnit)
        assert su2["p_v"]["p"] is self.p_unit
        assert su2["p_v"]["v"] is self.v_unit
        assert su2["t"] is self.t_unit
        su3 = StructuredUnit((("AU", "AU/day"), "yr"), (["p_v", ("p", "v")], "t"))
        assert isinstance(su3["p_v"], StructuredUnit)
        assert su3["p_v"]["p"] == u.AU
        assert su3["p_v"]["v"] == u.AU / u.day
        assert su3["t"] == u.yr
        su4 = StructuredUnit("(AU, AU/day), yr", (("p", "v"), "t"))
        assert isinstance(su4["pv"], StructuredUnit)
        assert su4["pv"]["p"] == u.AU
        assert su4["pv"]["v"] == u.AU / u.day
        assert su4["t"] == u.yr

    def test_extreme_recursive_initialization(self):
        su = StructuredUnit(
            "(yr,(AU,AU/day,(km,(day,day))),m)",
            ("t", ("p", "v", ("h", ("d1", "d2"))), "l"),
        )
        # Unnamed sub-structures get concatenated auto-names.
        assert su.field_names == (
            't', ['pvhd1d2',
                  ('p', 'v',
                   ['hd1d2',
                    ('h',
                     ['d1d2',
                      ('d1', 'd2')])])],
            'l',
        )  # fmt: skip
        dt = np.dtype(
            [("t", "f8"),
             ("pvhd1d2",
              ([("p", "f8"), ("v", "f8"), ("hd1d2",
                 [("h", "f8"), ("d1d2",
                   [("d1", "f8"), ("d2", "f8")]),
                 ]),
               ], (5, 5))),  # Note: structured subarray to improve test!
             ("l", "f8")
            ])  # fmt: skip
        su2 = StructuredUnit("(yr,(AU,AU/day,(km,(day,day))),m)", dt)
        assert su2.field_names == su.field_names
        assert su2 == su

    @pytest.mark.parametrize(
        "names, invalid",
        [
            [("t", ["p", "v"]), "['p', 'v']"],
            [("t", ["pv", "p", "v"]), "['pv', 'p', 'v']"],
            [("t", ["pv", ["p", "v"]]), "['pv', ['p', 'v']"],
            [("t", ()), "()"],
            [("t", ("p", None)), "None"],
            [("t", ["pv", ("p", "")]), "''"],
        ],
    )
    def test_initialization_names_invalid_list_errors(self, names, invalid):
        with pytest.raises(ValueError) as exc:
            StructuredUnit("yr,(AU,AU/day)", names)
        assert f"invalid entry {invalid}" in str(exc)

    def test_looks_like_unit(self):
        su = StructuredUnit((self.p_unit, self.v_unit), ("p", "v"))
        # Unit() passes an existing StructuredUnit through unchanged.
        assert Unit(su) is su

    def test_initialize_with_float_dtype(self):
        su = StructuredUnit(("AU", "AU/d"), self.pv_dtype)
        assert isinstance(su["p"], UnitBase)
        assert isinstance(su["v"], UnitBase)
        assert su["p"] == u.AU
        assert su["v"] == u.AU / u.day
        su = StructuredUnit((("km", "km/s"), "yr"), self.pv_t_dtype)
        assert isinstance(su["pv"], StructuredUnit)
        assert isinstance(su["pv"]["p"], UnitBase)
        assert isinstance(su["t"], UnitBase)
        assert su["pv"]["v"] == u.km / u.s
        su = StructuredUnit("(km, km/s), yr", self.pv_t_dtype)
        assert isinstance(su["pv"], StructuredUnit)
        assert isinstance(su["pv"]["p"], UnitBase)
        assert isinstance(su["t"], UnitBase)
        assert su["pv"]["v"] == u.km / u.s

    def test_initialize_with_structured_unit_for_names(self):
        su = StructuredUnit(("AU", "AU/d"), names=("p", "v"))
        su2 = StructuredUnit(("km", "km/s"), names=su)
        assert su2.field_names == ("p", "v")
        assert su2["p"] == u.km
        assert su2["v"] == u.km / u.s

    def test_initialize_single_field(self):
        su = StructuredUnit("AU", "p")
        assert isinstance(su, StructuredUnit)
        assert isinstance(su["p"], UnitBase)
        assert su["p"] == u.AU
        su = StructuredUnit("AU")
        assert isinstance(su, StructuredUnit)
        assert isinstance(su["f0"], UnitBase)
        assert su["f0"] == u.AU

    def test_equality(self):
        su = StructuredUnit(("AU", "AU/d"), self.pv_dtype)
        assert su == StructuredUnit(("AU", "AU/d"), self.pv_dtype)
        assert su != StructuredUnit(("m", "AU/d"), self.pv_dtype)
        # Names should be ignored.
        assert su == StructuredUnit(("AU", "AU/d"))
        assert su == StructuredUnit(("AU", "AU/d"), names=("q", "w"))
        assert su != StructuredUnit(("m", "m/s"))

    def test_parsing(self):
        su = Unit("AU, AU/d")
        assert isinstance(su, StructuredUnit)
        assert isinstance(su["f0"], UnitBase)
        assert isinstance(su["f1"], UnitBase)
        assert su["f0"] == u.AU
        assert su["f1"] == u.AU / u.day
        su2 = Unit("AU, AU/d, yr")
        assert isinstance(su2, StructuredUnit)
        assert su2 == StructuredUnit(("AU", "AU/d", "yr"))
        su2a = Unit("(AU, AU/d, yr)")
        assert isinstance(su2a, StructuredUnit)
        assert su2a == su2
        su3 = Unit("(km, km/s), yr")
        assert isinstance(su3, StructuredUnit)
        assert su3 == StructuredUnit((("km", "km/s"), "yr"))
        # Trailing comma -> single-field structured unit.
        su4 = Unit("km,")
        assert isinstance(su4, StructuredUnit)
        assert su4 == StructuredUnit((u.km,))
        su5 = Unit("(m,s),")
        assert isinstance(su5, StructuredUnit)
        assert su5 == StructuredUnit(((u.m, u.s),))
        ldbody_unit = Unit("Msun, 0.5rad^2, (au, au/day)")
        assert ldbody_unit == StructuredUnit(
            (u.Msun, Unit(u.rad**2 / 2), (u.AU, u.AU / u.day))
        )

    def test_to_string(self):
        su = StructuredUnit((u.km, u.km / u.s))
        latex_str = r"$(\mathrm{km}, \mathrm{\frac{km}{s}})$"
        assert su.to_string(format="latex") == latex_str
        latex_str = r"$(\mathrm{km}, \mathrm{km\,s^{-1}})$"
        assert su.to_string(format="latex_inline") == latex_str

    def test_str(self):
        su = StructuredUnit(((u.km, u.km / u.s), u.yr))
        assert str(su) == "((km, km / s), yr)"
        # Round-trip: the string form parses back to the same unit.
        assert Unit(str(su)) == su

    def test_repr(self):
        su = StructuredUnit(((u.km, u.km / u.s), u.yr))
        assert repr(su) == 'Unit("((km, km / s), yr)")'
        assert eval(repr(su)) == su
|
TestStructuredUnitBasics
|
python
|
huggingface__transformers
|
src/transformers/models/edgetam_video/modeling_edgetam_video.py
|
{
"start": 65494,
"end": 66228
}
|
class ____(ModelOutput):
    r"""
    object_ids (`list[int]`, *optional*):
        List of object IDs being tracked in the current frame.
    pred_masks (`torch.FloatTensor` of shape `(batch_size, num_masks, height, width)`):
        The predicted masks stored at the model's resolution.
    object_score_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*):
        Logits for the object scores, indicating if objects are present.
    frame_idx (`int`):
        The frame index of the video.
    """

    # All fields default to None so the output can be built incrementally.
    object_ids: Optional[list[int]] = None
    pred_masks: Optional[torch.FloatTensor] = None
    object_score_logits: Optional[torch.FloatTensor] = None
    frame_idx: Optional[int] = None
|
EdgeTamVideoSegmentationOutput
|
python
|
tiangolo__fastapi
|
tests/test_dependency_class.py
|
{
"start": 250,
"end": 367
}
|
class ____:
    """Dependency implemented as a callable instance whose __call__ is a generator."""

    def __call__(self, value: str) -> Generator[str, None, None]:
        # Yield the value unchanged; teardown code would go after the yield.
        yield value
|
CallableGenDependency
|
python
|
vyperlang__vyper
|
vyper/exceptions.py
|
{
"start": 8826,
"end": 8904
}
|
# Raised by type checking when a value's type does not fit the operation.
class ____(VyperException):
    """Type is invalid for an action."""
|
InvalidType
|
python
|
spack__spack
|
lib/spack/spack/vendor/jinja2/compiler.py
|
{
"start": 8312,
"end": 9047
}
|
class ____(NodeVisitor):
    """A visitor that checks if a name is accessed without being
    declared.  This is different from the frame visitor as it will
    not stop at closure frames.
    """

    def __init__(self, names: t.Iterable[str]) -> None:
        # Names still being watched; shrinks as declarations are found.
        self.names = set(names)
        self.undeclared: t.Set[str] = set()

    def visit_Name(self, node: nodes.Name) -> None:
        if node.ctx == "load" and node.name in self.names:
            # A load before any store means the name is undeclared here.
            self.undeclared.add(node.name)
            if self.undeclared == self.names:
                # Every watched name is known undeclared: abort traversal early.
                raise VisitorExit()
        else:
            self.names.discard(node.name)

    def visit_Block(self, node: nodes.Block) -> None:
        """Stop visiting a blocks."""
|
UndeclaredNameVisitor
|
python
|
pytorch__pytorch
|
torch/nn/modules/distance.py
|
{
"start": 140,
"end": 2038
}
|
class ____(Module):
    r"""
    Computes the pairwise distance between input vectors, or between columns of input matrices.

    Distances are computed using ``p``-norm, with constant ``eps`` added to avoid division by zero
    if ``p`` is negative, i.e.:

    .. math ::
        \mathrm{dist}\left(x, y\right) = \left\Vert x-y + \epsilon e \right\Vert_p,

    where :math:`e` is the vector of ones and the ``p``-norm is given by.

    .. math ::
        \Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}.

    Args:
        p (real, optional): the norm degree. Can be negative. Default: 2
        eps (float, optional): Small value to avoid division by zero.
            Default: 1e-6
        keepdim (bool, optional): Determines whether or not to keep the vector dimension.
            Default: False
    Shape:
        - Input1: :math:`(N, D)` or :math:`(D)` where `N = batch dimension` and `D = vector dimension`
        - Input2: :math:`(N, D)` or :math:`(D)`, same shape as the Input1
        - Output: :math:`(N)` or :math:`()` based on input dimension.
          If :attr:`keepdim` is ``True``, then :math:`(N, 1)` or :math:`(1)` based on input dimension.

    Examples:
        >>> pdist = nn.PairwiseDistance(p=2)
        >>> input1 = torch.randn(100, 128)
        >>> input2 = torch.randn(100, 128)
        >>> output = pdist(input1, input2)
    """

    __constants__ = ["norm", "eps", "keepdim"]
    # Configuration captured at construction; all are TorchScript constants.
    norm: float
    eps: float
    keepdim: bool

    def __init__(
        self, p: float = 2.0, eps: float = 1e-6, keepdim: bool = False
    ) -> None:
        super().__init__()
        self.norm = p
        self.eps = eps
        self.keepdim = keepdim

    def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
        """
        Runs the forward pass.
        """
        # Delegates to the functional implementation.
        return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)
|
PairwiseDistance
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_syntax_extensions.py
|
{
"start": 2984,
"end": 4439
}
|
class ____(SyntaxExtension, ClauseElement):
    """Syntax extension holding extra column expressions; installs itself at
    the ``post_select`` extension point of a SELECT statement."""

    # Make the wrapped expressions visible to SQLAlchemy's traversal/caching.
    _traverse_internals: _TraverseInternalsType = [
        ("_exprs", InternalTraversal.dp_clauseelement_tuple),
    ]

    def __init__(self, *exprs):
        self._exprs = tuple(
            coercions.expect(roles.ByOfRole, e, apply_propagate_attrs=self)
            for e in exprs
        )

    def apply_to_select(self, select_stmt):
        # Append (not replace) this extension at the post_select point.
        select_stmt.apply_syntax_extension_point(
            lambda existing: [*existing, self],
            "post_select",
        )
@compiles(PostSelectClause)
def _compile_psk(element, compiler, **kw):
return "POST SELECT KEYWORD"
@compiles(PreColumnsClause)
def _compile_pcc(element, compiler, **kw):
return "PRE COLUMNS"
@compiles(PostCriteriaClause)
def _compile_psc(element, compiler, **kw):
return "POST CRITERIA"
@compiles(PostCriteriaClause2)
def _compile_psc2(element, compiler, **kw):
return "2 POST CRITERIA 2"
@compiles(PostCriteriaClause3)
def _compile_psc3(element, compiler, **kw):
return "3 POST CRITERIA 3"
@compiles(PostBodyClause)
def _compile_psb(element, compiler, **kw):
return "POST SELECT BODY"
@compiles(PostValuesClause)
def _compile_pvc(element, compiler, **kw):
return "POST VALUES"
@compiles(ColumnExpressionExt)
def _compile_cee(element, compiler, **kw):
inner = ", ".join(compiler.process(elem, **kw) for elem in element._exprs)
return f"COLUMN EXPRESSIONS ({inner})"
|
ColumnExpressionExt
|
python
|
spyder-ide__spyder
|
spyder/plugins/outlineexplorer/main_widget.py
|
{
"start": 1146,
"end": 9607
}
|
class ____(PluginMainWidget):
"""Class browser"""
edit_goto = Signal(str, int, str)
edit = Signal(str)
is_visible = Signal()
sig_update_configuration = Signal()
ENABLE_SPINNER = True
CONF_SECTION = 'outline_explorer'
def __init__(self, name, plugin, parent=None, context=None):
if context is not None:
self.CONTEXT_NAME = context
super().__init__(name, plugin, parent)
self.in_maximized_editor = False
self.treewidget = OutlineExplorerTreeWidget(self)
self.treewidget.sig_display_spinner.connect(self.start_spinner)
self.treewidget.sig_hide_spinner.connect(self.stop_spinner)
self.treewidget.sig_update_configuration.connect(
self.sig_update_configuration)
self.treewidget.header().hide()
layout = QHBoxLayout()
layout.addWidget(self.treewidget)
self.setLayout(layout)
# ---- PluginMainWidget API
# -------------------------------------------------------------------------
def get_focus_widget(self):
"""Define the widget to focus."""
return self.treewidget
def get_title(self):
"""Return the title of the plugin tab."""
return _("Outline")
def setup(self):
"""Performs the setup of plugin's menu and actions."""
# Toolbar buttons
toolbar = self.get_main_toolbar()
fromcursor_btn = self.create_toolbutton(
OutlineExplorerToolbuttons.GoToCursor,
icon=self.create_icon('fromcursor'),
tip=_('Go to cursor position'),
triggered=self.treewidget.go_to_cursor_position)
for item in [fromcursor_btn,
self.treewidget.collapse_all_action,
self.treewidget.expand_all_action,
self.treewidget.restore_action,
self.treewidget.collapse_selection_action,
self.treewidget.expand_selection_action]:
self.add_item_to_toolbar(item, toolbar=toolbar,
section=OutlineExplorerSections.Main)
# Actions
fromcursor_act = self.create_action(
OutlineExplorerActions.GoToCursor,
text=_('Go to cursor position'),
icon=self.create_icon('fromcursor'),
triggered=self.treewidget.go_to_cursor_position)
fullpath_act = self.create_action(
OutlineExplorerActions.ShowFullPath,
text=_('Show absolute path'),
toggled=True,
option='show_fullpath')
allfiles_act = self.create_action(
OutlineExplorerActions.ShowAllFiles,
text=_('Show all files'),
toggled=True,
option='show_all_files')
comment_act = self.create_action(
OutlineExplorerActions.ShowSpecialComments,
text=_('Show special comments'),
toggled=True,
option='show_comments')
group_cells_act = self.create_action(
OutlineExplorerActions.GroupCodeCells,
text=_('Group code cells'),
toggled=True,
option='group_cells')
display_variables_act = self.create_action(
OutlineExplorerActions.DisplayVariables,
text=_('Display variables and attributes'),
toggled=True,
option='display_variables'
)
follow_cursor_act = self.create_action(
OutlineExplorerActions.FollowCursor,
text=_('Follow cursor position'),
toggled=True,
option='follow_cursor'
)
sort_files_alphabetically_act = self.create_action(
OutlineExplorerActions.SortFiles,
text=_('Sort files alphabetically'),
toggled=True,
option='sort_files_alphabetically'
)
actions = [fullpath_act, allfiles_act, group_cells_act,
display_variables_act, follow_cursor_act, comment_act,
sort_files_alphabetically_act, fromcursor_act]
option_menu = self.get_options_menu()
for action in actions:
self.add_item_to_menu(
action,
option_menu,
section=OutlineExplorerSections.DisplayOptions,
)
def update_actions(self):
if self.in_maximized_editor or self.windowwidget:
for action in [self.undock_action, self.lock_unlock_action]:
if action.isVisible():
action.setVisible(False)
else:
# Avoid error at startup because these actions are not available
# at that time.
try:
for action in [self.undock_action, self.lock_unlock_action]:
if not action.isVisible():
action.setVisible(True)
except AttributeError:
pass
def change_visibility(self, enable, force_focus=None):
"""Reimplemented to tell treewidget what the visibility state is."""
super().change_visibility(enable, force_focus)
if self.windowwidget is not None:
# When the plugin is undocked Qt changes its visibility to False,
# probably because it's not part of the main window anymore. So, we
# need to set the treewidget visibility to True for it to be
# updated after writing new content in the editor.
# Fixes spyder-ide/spyder#16634
self.change_tree_visibility(True)
else:
self.change_tree_visibility(self.is_visible)
def create_window(self):
"""
Reimplemented to tell treewidget what the visibility of the undocked
plugin is.
"""
super().create_window()
self.windowwidget.sig_window_state_changed.connect(
self._handle_undocked_window_state
)
@Slot()
def close_dock(self):
"""
Reimplemented to preserve the widget's visible state when editor is
maximized.
"""
if self.in_maximized_editor:
self.set_conf('show_with_maximized_editor', False)
super().close_dock()
def toggle_view(self, checked):
"""Reimplemented to handle the case when the editor is maximized."""
if self.in_maximized_editor:
self.set_conf('show_with_maximized_editor', checked)
if checked:
self._plugin.dock_with_maximized_editor()
return
super().toggle_view(checked)
# ---- Public API
# -------------------------------------------------------------------------
def set_current_editor(self, editor, update, clear):
if clear:
self.remove_editor(editor)
if editor is not None:
self.treewidget.set_current_editor(editor, update)
def remove_editor(self, editor):
self.treewidget.remove_editor(editor)
def register_editor(self, editor):
self.treewidget.register_editor(editor)
def file_renamed(self, editor, new_filename):
self.treewidget.file_renamed(editor, new_filename)
def start_symbol_services(self, language):
"""Enable LSP symbols functionality."""
self.treewidget.start_symbol_services(language)
def stop_symbol_services(self, language):
"""Disable LSP symbols functionality."""
self.treewidget.stop_symbol_services(language)
def update_all_editors(self):
"""Update all editors with an associated LSP server."""
self.treewidget.update_all_editors()
def get_supported_languages(self):
"""List of languages with symbols support."""
return self.treewidget._languages
def change_tree_visibility(self, is_visible):
"Change treewidget's visibility."
self.treewidget.change_visibility(is_visible)
# ---- Private API
# -------------------------------------------------------------------------
@Slot(object)
def _handle_undocked_window_state(self, window_state):
"""
Change treewidget visibility when the plugin is undocked and its
window state changes.
"""
if window_state == Qt.WindowMinimized:
# There's no need to update the treewidget when the plugin is
# minimized.
self.change_tree_visibility(False)
else:
self.change_tree_visibility(True)
|
OutlineExplorerWidget
|
python
|
pytorch__pytorch
|
test/dynamo/test_modules.py
|
{
"start": 6781,
"end": 7029
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(10, 10)
def forward(self, x):
return test_functions.constant3(torch.sigmoid(self.linear1(x)), x)
|
ViaModuleCall
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/testing/compare.py
|
{
"start": 1553,
"end": 2639
}
|
class ____:
def __init__(self):
self._proc = None
# Explicitly register deletion from an atexit handler because if we
# wait until the object is GC'd (which occurs later), then some module
# globals (e.g. signal.SIGKILL) has already been set to None, and
# kill() doesn't work anymore...
atexit.register(self.__del__)
def __del__(self):
if self._proc:
self._proc.kill()
self._proc.wait()
for stream in filter(None, [self._proc.stdin,
self._proc.stdout,
self._proc.stderr]):
stream.close()
self._proc = None
def _read_until(self, terminator):
"""Read until the prompt is reached."""
buf = bytearray()
while True:
c = self._proc.stdout.read(1)
if not c:
raise _ConverterError(os.fsdecode(bytes(buf)))
buf.extend(c)
if buf.endswith(terminator):
return bytes(buf)
|
_Converter
|
python
|
networkx__networkx
|
networkx/algorithms/isomorphism/tests/test_temporalisomorphvf2.py
|
{
"start": 4942,
"end": 7343
}
|
class ____:
"""
A test class for the directed time-respecting graph matcher.
"""
def provide_g1_topology(self):
G1 = nx.DiGraph()
G1.add_edges_from(provide_g1_edgelist())
return G1
def provide_g2_path_3edges(self):
G2 = nx.DiGraph()
G2.add_edges_from([(0, 1), (1, 2), (2, 3)])
return G2
def test_timdelta_zero_same_dates_returns_true(self):
G1 = self.provide_g1_topology()
temporal_name = "date"
G1 = put_same_time(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta()
gm = iso.TimeRespectingDiGraphMatcher(G1, G2, temporal_name, d)
assert gm.subgraph_is_isomorphic()
def test_attNameStrange_timdelta_zero_same_dates_returns_true(self):
G1 = self.provide_g1_topology()
temporal_name = "strange"
G1 = put_same_time(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta()
gm = iso.TimeRespectingDiGraphMatcher(G1, G2, temporal_name, d)
assert gm.subgraph_is_isomorphic()
def test_timdelta_one_config0_returns_no_embeddings(self):
G1 = self.provide_g1_topology()
temporal_name = "date"
G1 = put_time_config_0(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta(days=1)
gm = iso.TimeRespectingDiGraphMatcher(G1, G2, temporal_name, d)
count_match = len(list(gm.subgraph_isomorphisms_iter()))
assert count_match == 0
def test_timdelta_one_config1_returns_one_embedding(self):
G1 = self.provide_g1_topology()
temporal_name = "date"
G1 = put_time_config_1(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta(days=1)
gm = iso.TimeRespectingDiGraphMatcher(G1, G2, temporal_name, d)
count_match = len(list(gm.subgraph_isomorphisms_iter()))
assert count_match == 1
def test_timdelta_one_config2_returns_two_embeddings(self):
G1 = self.provide_g1_topology()
temporal_name = "date"
G1 = put_time_config_2(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta(days=1)
gm = iso.TimeRespectingDiGraphMatcher(G1, G2, temporal_name, d)
count_match = len(list(gm.subgraph_isomorphisms_iter()))
assert count_match == 2
|
TestDiTimeRespectingGraphMatcher
|
python
|
keon__algorithms
|
algorithms/tree/bst/array_to_bst.py
|
{
"start": 108,
"end": 449
}
|
class ____(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def array_to_bst(nums):
if not nums:
return None
mid = len(nums)//2
node = TreeNode(nums[mid])
node.left = array_to_bst(nums[:mid])
node.right = array_to_bst(nums[mid+1:])
return node
|
TreeNode
|
python
|
mlflow__mlflow
|
mlflow/models/resources.py
|
{
"start": 10346,
"end": 12224
}
|
class ____:
"""
Private builder class to build the resources dictionary.
"""
@staticmethod
def from_resources(
resources: list[Resource], api_version: str = DEFAULT_API_VERSION
) -> dict[str, dict[ResourceType, list[dict[str, Any]]]]:
resource_dict = {}
for resource in resources:
resource_data = resource.to_dict()
for resource_type, values in resource_data.items():
target_dict = resource_dict.setdefault(resource.target_uri, {})
target_list = target_dict.setdefault(resource_type, [])
target_list.extend(values)
resource_dict["api_version"] = api_version
return resource_dict
@staticmethod
def from_dict(data) -> dict[str, dict[ResourceType, list[dict[str, Any]]]]:
resources = []
api_version = data.pop("api_version")
if api_version == "1":
for target_uri, config in data.items():
for resource_type, values in config.items():
if resource_class := _get_resource_class_by_type(target_uri, resource_type):
resources.extend(resource_class.from_dict(value) for value in values)
else:
raise ValueError(f"Unsupported resource type: {resource_type}")
else:
raise ValueError(f"Unsupported API version: {api_version}")
return _ResourceBuilder.from_resources(resources, api_version)
@staticmethod
def from_yaml_file(path: str) -> dict[str, dict[ResourceType, list[dict[str, Any]]]]:
if not os.path.exists(path):
raise OSError(f"No such file or directory: '{path}'")
path = os.path.abspath(path)
with open(path) as file:
data = yaml.safe_load(file)
return _ResourceBuilder.from_dict(data)
|
_ResourceBuilder
|
python
|
optuna__optuna
|
optuna/search_space/group_decomposed.py
|
{
"start": 225,
"end": 1163
}
|
class ____:
def __init__(self) -> None:
self._search_spaces: list[dict[str, BaseDistribution]] = []
@property
def search_spaces(self) -> list[dict[str, BaseDistribution]]:
return self._search_spaces
def add_distributions(self, distributions: dict[str, BaseDistribution]) -> None:
dist_keys = set(distributions.keys())
next_search_spaces = []
for search_space in self._search_spaces:
keys = set(search_space.keys())
next_search_spaces.append({name: search_space[name] for name in keys & dist_keys})
next_search_spaces.append({name: search_space[name] for name in keys - dist_keys})
dist_keys -= keys
next_search_spaces.append({name: distributions[name] for name in dist_keys})
self._search_spaces = list(
filter(lambda search_space: len(search_space) > 0, next_search_spaces)
)
|
_SearchSpaceGroup
|
python
|
huggingface__transformers
|
src/transformers/models/biogpt/modeling_biogpt.py
|
{
"start": 10280,
"end": 14195
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: BioGptConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = BioGptAttention(
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
dropout=config.attention_probs_dropout_prob,
is_decoder=True,
is_causal=True,
config=config,
layer_idx=layer_idx,
)
self.dropout = config.hidden_dropout_prob
self.activation_fn = ACT2FN[config.hidden_act]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
position_ids: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
position_ids=position_ids,
cache_position=cache_position,
**kwargs,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
@auto_docstring
|
BioGptDecoderLayer
|
python
|
scikit-learn__scikit-learn
|
sklearn/linear_model/_bayes.py
|
{
"start": 16243,
"end": 28995
}
|
class ____(RegressorMixin, LinearModel):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedures (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
max_iter : int, default=300
Maximum number of iterations.
.. versionchanged:: 1.3
tol : float, default=1e-3
Stop the algorithm if w has converged.
alpha_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter.
alpha_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
lambda_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter.
lambda_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
compute_score : bool, default=False
If True, compute the objective function at each step of the model.
threshold_lambda : float, default=10 000
Threshold for removing (pruning) weights with high precision from
the computation.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
verbose : bool, default=False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array-like of shape (n_features,)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array-like of shape (n_features,)
estimated precisions of the weights.
sigma_ : array-like of shape (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
.. versionadded:: 1.3
intercept_ : float
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
X_offset_ : float
If `fit_intercept=True`, offset subtracted for centering data to a
zero mean. Set to np.zeros(n_features) otherwise.
X_scale_ : float
Set to np.ones(n_features).
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
BayesianRidge : Bayesian ridge regression.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our ``self.alpha_``
Their alpha is our ``self.lambda_``
ARD is a little different than the slide: only dimensions/features for
which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are
discarded.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
ARDRegression()
>>> clf.predict([[1, 1]])
array([1.])
- :ref:`sphx_glr_auto_examples_linear_model_plot_ard.py` demonstrates ARD
Regression.
- :ref:`sphx_glr_auto_examples_linear_model_plot_lasso_and_elasticnet.py`
showcases ARD Regression alongside Lasso and Elastic-Net for sparse,
correlated signals, in the presence of noise.
"""
_parameter_constraints: dict = {
"max_iter": [Interval(Integral, 1, None, closed="left")],
"tol": [Interval(Real, 0, None, closed="left")],
"alpha_1": [Interval(Real, 0, None, closed="left")],
"alpha_2": [Interval(Real, 0, None, closed="left")],
"lambda_1": [Interval(Real, 0, None, closed="left")],
"lambda_2": [Interval(Real, 0, None, closed="left")],
"compute_score": ["boolean"],
"threshold_lambda": [Interval(Real, 0, None, closed="left")],
"fit_intercept": ["boolean"],
"copy_X": ["boolean"],
"verbose": ["verbose"],
}
def __init__(
self,
*,
max_iter=300,
tol=1.0e-3,
alpha_1=1.0e-6,
alpha_2=1.0e-6,
lambda_1=1.0e-6,
lambda_2=1.0e-6,
compute_score=False,
threshold_lambda=1.0e4,
fit_intercept=True,
copy_X=True,
verbose=False,
):
self.max_iter = max_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values (integers). Will be cast to X's dtype if necessary.
Returns
-------
self : object
Fitted estimator.
"""
X, y = validate_data(
self,
X,
y,
dtype=[np.float64, np.float32],
force_writeable=True,
y_numeric=True,
ensure_min_samples=2,
)
dtype = X.dtype
n_samples, n_features = X.shape
coef_ = np.zeros(n_features, dtype=dtype)
X, y, X_offset_, y_offset_, X_scale_, _ = _preprocess_data(
X, y, fit_intercept=self.fit_intercept, copy=self.copy_X
)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
eps = np.finfo(np.float64).eps
# Add `eps` in the denominator to omit division by zero if `np.var(y)`
# is zero.
# Explicitly set dtype to avoid unintended type promotion with numpy 2.
alpha_ = np.asarray(1.0 / (np.var(y) + eps), dtype=dtype)
lambda_ = np.ones(n_features, dtype=dtype)
self.scores_ = list()
coef_old_ = None
def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
coef_[keep_lambda] = alpha_ * np.linalg.multi_dot(
[sigma_, X[:, keep_lambda].T, y]
)
return coef_
update_sigma = (
self._update_sigma
if n_samples >= n_features
else self._update_sigma_woodbury
)
# Iterative procedure of ARDRegression
for iter_ in range(self.max_iter):
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
# Update alpha and lambda
sse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / (
(coef_[keep_lambda]) ** 2 + 2.0 * lambda_2
)
alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / (sse_ + 2.0 * alpha_2)
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (
fast_logdet(sigma_)
+ n_samples * log(alpha_)
+ np.sum(np.log(lambda_))
)
s -= 0.5 * (alpha_ * sse_ + (lambda_ * coef_**2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
if not keep_lambda.any():
break
self.n_iter_ = iter_ + 1
if keep_lambda.any():
# update sigma and mu using updated params from the last iteration
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
else:
sigma_ = np.array([]).reshape(0, 0)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda):
# See slides as referenced in the docstring note
# this function is used when n_samples < n_features and will invert
# a matrix of shape (n_samples, n_samples) making use of the
# woodbury formula:
# https://en.wikipedia.org/wiki/Woodbury_matrix_identity
n_samples = X.shape[0]
X_keep = X[:, keep_lambda]
inv_lambda = 1 / lambda_[keep_lambda].reshape(1, -1)
sigma_ = pinvh(
np.eye(n_samples, dtype=X.dtype) / alpha_
+ np.dot(X_keep * inv_lambda, X_keep.T)
)
sigma_ = np.dot(sigma_, X_keep * inv_lambda)
sigma_ = -np.dot(inv_lambda.reshape(-1, 1) * X_keep.T, sigma_)
sigma_[np.diag_indices(sigma_.shape[1])] += 1.0 / lambda_[keep_lambda]
return sigma_
def _update_sigma(self, X, alpha_, lambda_, keep_lambda):
# See slides as referenced in the docstring note
# this function is used when n_samples >= n_features and will
# invert a matrix of shape (n_features, n_features)
X_keep = X[:, keep_lambda]
gram = np.dot(X_keep.T, X_keep)
eye = np.eye(gram.shape[0], dtype=X.dtype)
sigma_inv = lambda_[keep_lambda] * eye + alpha_ * gram
sigma_ = pinvh(sigma_inv)
return sigma_
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array-like of shape (n_samples,)
Mean of predictive distribution of query points.
y_std : array-like of shape (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
col_index = self.lambda_ < self.threshold_lambda
X = _safe_indexing(X, indices=col_index, axis=1)
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_))
return y_mean, y_std
|
ARDRegression
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/virtual/freebsd.py
|
{
"start": 862,
"end": 3360
}
|
class ____(Virtual, VirtualSysctlDetectionMixin):
"""
This is a FreeBSD-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'FreeBSD'
def get_virtual_facts(self):
virtual_facts = {}
host_tech = set()
guest_tech = set()
# Set empty values as default
virtual_facts['virtualization_type'] = ''
virtual_facts['virtualization_role'] = ''
if os.path.exists('/dev/xen/xenstore'):
guest_tech.add('xen')
virtual_facts['virtualization_type'] = 'xen'
virtual_facts['virtualization_role'] = 'guest'
kern_vm_guest = self.detect_virt_product('kern.vm_guest')
guest_tech.update(kern_vm_guest['virtualization_tech_guest'])
host_tech.update(kern_vm_guest['virtualization_tech_host'])
hw_hv_vendor = self.detect_virt_product('hw.hv_vendor')
guest_tech.update(hw_hv_vendor['virtualization_tech_guest'])
host_tech.update(hw_hv_vendor['virtualization_tech_host'])
sec_jail_jailed = self.detect_virt_product('security.jail.jailed')
guest_tech.update(sec_jail_jailed['virtualization_tech_guest'])
host_tech.update(sec_jail_jailed['virtualization_tech_host'])
if virtual_facts['virtualization_type'] == '':
# We call update here, then re-set virtualization_tech_host/guest
# later.
for sysctl in [kern_vm_guest, hw_hv_vendor, sec_jail_jailed]:
if sysctl:
virtual_facts.update(sysctl)
virtual_vendor_facts = self.detect_virt_vendor('hw.model')
guest_tech.update(virtual_vendor_facts['virtualization_tech_guest'])
host_tech.update(virtual_vendor_facts['virtualization_tech_host'])
if virtual_facts['virtualization_type'] == '':
virtual_facts.update(virtual_vendor_facts)
# if vmm.ko kernel module is loaded
kldstat_bin = self.module.get_bin_path('kldstat')
if kldstat_bin is not None:
(rc, out, err) = self.module.run_command('%s -q -m vmm' % kldstat_bin)
if rc == 0:
host_tech.add('bhyve')
virtual_facts['virtualization_type'] = 'bhyve'
virtual_facts['virtualization_role'] = 'host'
virtual_facts['virtualization_tech_guest'] = guest_tech
virtual_facts['virtualization_tech_host'] = host_tech
return virtual_facts
|
FreeBSDVirtual
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-braintree/source_braintree/schemas/subscription.py
|
{
"start": 261,
"end": 1091
}
|
class ____(CatalogModel):
add_ons: List[AddOn]
balance: Decimal
billing_day_of_month: Decimal
billing_period_start_date: date
billing_period_end_date: date
created_at: datetime
current_billing_cycle: Decimal
days_past_due: Decimal
description: str
discounts: List[Discount]
failure_count: Decimal
first_billing_date: date
id: str
merchant_account_id: str
never_expires: bool
next_bill_amount: Decimal
next_billing_date: date
next_billing_period_amount: Decimal
number_of_billing_cycles: Decimal
paid_through_date: date
payment_method_token: str
plan_id: str
price: Decimal
status: str
transactions: List[Transaction]
trial_duration: Decimal
trial_duration_unit: str
trial_period: bool
updated_at: datetime
|
Subscription
|
python
|
scipy__scipy
|
scipy/optimize/tests/test_regression.py
|
{
"start": 172,
"end": 1077
}
|
class ____:
def test_newton_x0_is_0(self):
# Regression test for gh-1601
tgt = 1
res = scipy.optimize.newton(lambda x: x - 1, 0)
assert_almost_equal(res, tgt)
def test_newton_integers(self):
# Regression test for gh-1741
root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2,
fprime=lambda x: 2*x)
assert_almost_equal(root, 1.0)
def test_lmdif_errmsg(self):
# This shouldn't cause a crash on Python 3
class SomeError(Exception):
pass
counter = [0]
def func(x):
counter[0] += 1
if counter[0] < 3:
return x**2 - np.array([9, 10, 11])
else:
raise SomeError()
assert_raises(SomeError,
scipy.optimize.leastsq,
func, [1, 2, 3])
|
TestRegression
|
python
|
dask__distributed
|
distributed/shuffle/_rechunk.py
|
{
"start": 7905,
"end": 28882
}
|
class ____(Layer):
name: str
token: str
chunks: ChunkedAxes
chunks_input: ChunkedAxes
name_input: str
disk: bool
keepmap: np.ndarray
_cached_dict: _T_LowLevelGraph | None
def __init__(
self,
name: str,
token: str,
chunks: ChunkedAxes,
chunks_input: ChunkedAxes,
name_input: str,
disk: bool,
keepmap: np.ndarray | None = None,
annotations: Mapping[str, Any] | None = None,
):
import numpy as np
self.name = name
self.token = token
self.chunks = chunks
self.chunks_input = chunks_input
self.name_input = name_input
self.disk = disk
if keepmap is not None:
self.keepmap = keepmap
else:
shape = tuple(len(axis) for axis in chunks)
self.keepmap = np.ones(shape, dtype=bool)
self._cached_dict = None
super().__init__(annotations=annotations)
def __repr__(self) -> str:
return f"{type(self).__name__}<name='{self.name}', chunks={self.chunks}>"
def get_output_keys(self) -> set[Key]:
import numpy as np
return {
(self.name,) + nindex
for nindex in np.ndindex(tuple(len(axis) for axis in self.chunks))
if self.keepmap[nindex]
}
def is_materialized(self) -> bool:
return self._cached_dict is not None
@property
def _dict(self) -> _T_LowLevelGraph:
"""Materialize full dict representation"""
dsk: _T_LowLevelGraph
if self._cached_dict is not None:
return self._cached_dict
else:
dsk = self._construct_graph()
self._cached_dict = dsk
return self._cached_dict
def __getitem__(self, key: Key) -> tuple:
return self._dict[key]
def __iter__(self) -> Iterator[Key]:
return iter(self._dict)
def __len__(self) -> int:
return len(self._dict)
def _cull(self, keepmap: np.ndarray) -> P2PRechunkLayer:
return P2PRechunkLayer(
name=self.name,
token=self.token,
chunks=self.chunks,
chunks_input=self.chunks_input,
name_input=self.name_input,
disk=self.disk,
keepmap=keepmap,
annotations=self.annotations,
)
def _keys_to_indices(self, keys: Iterable[Key]) -> set[tuple[int, ...]]:
"""Simple utility to convert keys to chunk indices."""
chunks = set()
for key in keys:
if not isinstance(key, tuple) or len(key) < 2 or key[0] != self.name:
continue
chunk = cast(tuple[int, ...], key[1:])
assert all(isinstance(index, int) for index in chunk)
chunks.add(chunk)
return chunks
def cull(
self, keys: set[Key], all_keys: Collection[Key]
) -> tuple[P2PRechunkLayer, dict]:
"""Cull a P2PRechunkLayer HighLevelGraph layer.
The underlying graph will only include the necessary
tasks to produce the keys (indices) included in `keepmap`.
Therefore, "culling" the layer only requires us to reset this
parameter.
"""
import numpy as np
from dask.array.rechunk import old_to_new
keepmap = np.zeros_like(self.keepmap, dtype=bool)
indices_to_keep = self._keys_to_indices(keys)
_old_to_new = old_to_new(self.chunks_input, self.chunks)
for ndindex in indices_to_keep:
keepmap[ndindex] = True
culled_deps = {}
# Identify the individual partial rechunks
for ndpartial in _split_partials(_old_to_new):
# Cull partials for which we do not keep any output tasks
if not np.any(keepmap[ndpartial.new]):
continue
# Within partials, we have all-to-all communication.
# Thus, all output tasks share the same input tasks.
deps = frozenset(
(self.name_input,) + ndindex
for ndindex in _ndindices_of_slice(ndpartial.old)
)
for ndindex in _ndindices_of_slice(ndpartial.new):
culled_deps[(self.name,) + ndindex] = deps
if np.array_equal(keepmap, self.keepmap):
return self, culled_deps
else:
culled_layer = self._cull(keepmap)
return culled_layer, culled_deps
def _construct_graph(self) -> _T_LowLevelGraph:
import numpy as np
from dask.array.rechunk import old_to_new
dsk: _T_LowLevelGraph = {}
_old_to_new = old_to_new(self.chunks_input, self.chunks)
for ndpartial in _split_partials(_old_to_new):
partial_keepmap = self.keepmap[ndpartial.new]
output_count = np.sum(partial_keepmap)
if output_count == 0:
continue
elif output_count == 1:
# Single output chunk
dsk.update(
partial_concatenate(
input_name=self.name_input,
input_chunks=self.chunks_input,
ndpartial=ndpartial,
token=self.token,
keepmap=self.keepmap,
old_to_new=_old_to_new,
)
)
else:
dsk.update(
partial_rechunk(
input_name=self.name_input,
input_chunks=self.chunks_input,
chunks=self.chunks,
ndpartial=ndpartial,
token=self.token,
disk=self.disk,
keepmap=self.keepmap,
)
)
return dsk
def _calculate_prechunking(
old_chunks: ChunkedAxes,
new_chunks: ChunkedAxes,
dtype: np.dtype,
block_size_limit: int | None,
) -> ChunkedAxes:
"""Calculate how to perform the pre-rechunking step
During the pre-rechunking step, we
1. Split input chunks along partial boundaries to make partials completely independent of one another
2. Merge small chunks within partials to reduce the number of transfer tasks and corresponding overhead
"""
split_axes = _split_chunks_along_partial_boundaries(old_chunks, new_chunks)
# We can only determine how to concatenate chunks if we can calculate block sizes.
has_nans = (any(math.isnan(y) for y in x) for x in old_chunks)
if len(new_chunks) <= 1 or not all(new_chunks) or any(has_nans):
return tuple(tuple(chain(*axis)) for axis in split_axes)
if dtype is None or dtype.hasobject or dtype.itemsize == 0:
return tuple(tuple(chain(*axis)) for axis in split_axes)
# We made sure that there are no NaNs in split_axes above
return _concatenate_small_chunks(
split_axes, old_chunks, new_chunks, dtype, block_size_limit # type: ignore[arg-type]
)
def _concatenate_small_chunks(
split_axes: list[list[list[int]]],
old_chunks: ChunkedAxes,
new_chunks: ChunkedAxes,
dtype: np.dtype,
block_size_limit: int | None,
) -> ChunkedAxes:
"""Concatenate small chunks within partials.
By concatenating chunks within partials, we reduce the number of P2P transfer tasks and their
corresponding overhead.
The algorithm used in this function is very similar to :func:`dask.array.rechunk.find_merge_rechunk`,
the main difference is that we have to make sure only to merge chunks within partials.
"""
import numpy as np
block_size_limit = block_size_limit or dask.config.get("array.chunk-size")
if isinstance(block_size_limit, str):
block_size_limit = parse_bytes(block_size_limit)
# Make it a number of elements
block_size_limit //= dtype.itemsize
# We verified earlier that we do not have any NaNs
largest_old_block = _largest_block_size(old_chunks) # type: ignore[arg-type]
largest_new_block = _largest_block_size(new_chunks) # type: ignore[arg-type]
block_size_limit = max([block_size_limit, largest_old_block, largest_new_block])
old_largest_width = [max(chain(*axis)) for axis in split_axes]
new_largest_width = [max(c) for c in new_chunks]
# This represents how much each dimension increases (>1) or reduces (<1)
# the graph size during rechunking
graph_size_effect = {
dim: len(new_axis) / sum(map(len, split_axis))
for dim, (split_axis, new_axis) in enumerate(zip(split_axes, new_chunks))
}
ndim = len(old_chunks)
# This represents how much each dimension increases (>1) or reduces (<1) the
# largest block size during rechunking
block_size_effect = {
dim: new_largest_width[dim] / (old_largest_width[dim] or 1)
for dim in range(ndim)
}
# Our goal is to reduce the number of nodes in the rechunk graph
# by concatenating some adjacent chunks, so consider dimensions where we can
# reduce the # of chunks
candidates = [dim for dim in range(ndim) if graph_size_effect[dim] <= 1.0]
# Concatenating along each dimension reduces the graph size by a certain factor
# and increases memory largest block size by a certain factor.
# We want to optimize the graph size while staying below the given
# block_size_limit. This is in effect a knapsack problem, except with
# multiplicative values and weights. Just use a greedy algorithm
# by trying dimensions in decreasing value / weight order.
def key(k: int) -> float:
gse = graph_size_effect[k]
bse = block_size_effect[k]
if bse == 1:
bse = 1 + 1e-9
return (np.log(gse) / np.log(bse)) if bse > 0 else 0
sorted_candidates = sorted(candidates, key=key)
concatenated_axes: list[list[int]] = [[] for i in range(ndim)]
# Sim all the axes that are no candidates
for i in range(ndim):
if i in candidates:
continue
concatenated_axes[i] = list(chain(*split_axes[i]))
# We want to concatenate chunks
for axis_index in sorted_candidates:
concatenated_axis = concatenated_axes[axis_index]
multiplier = math.prod(
old_largest_width[:axis_index] + old_largest_width[axis_index + 1 :]
)
axis_limit = block_size_limit // multiplier
for partial in split_axes[axis_index]:
current = partial[0]
for chunk in partial[1:]:
if (current + chunk) > axis_limit:
concatenated_axis.append(current)
current = chunk
else:
current += chunk
concatenated_axis.append(current)
old_largest_width[axis_index] = max(concatenated_axis)
return tuple(tuple(axis) for axis in concatenated_axes)
def _split_chunks_along_partial_boundaries(
old_chunks: ChunkedAxes, new_chunks: ChunkedAxes
) -> list[list[list[float]]]:
"""Split the old chunks along the boundaries of partials, i.e., groups of new chunks that share the same inputs.
By splitting along the boundaries before rechunkin their input tasks become disjunct and each partial conceptually
operates on an independent sub-array.
"""
from dask.array.rechunk import old_to_new
_old_to_new = old_to_new(old_chunks, new_chunks)
partials = _slice_new_chunks_into_partials(_old_to_new)
split_axes = []
# Along each axis, we want to figure out how we have to split input chunks in order to make
# partials disjunct. We then group the resulting input chunks per partial before returning.
for axis_index, slices in enumerate(partials):
old_to_new_axis = _old_to_new[axis_index]
old_axis = old_chunks[axis_index]
split_axis = []
partial_chunks = []
for slice_ in slices:
first_new_chunk = slice_.start
first_old_chunk, first_old_slice = old_to_new_axis[first_new_chunk][0]
last_new_chunk = slice_.stop - 1
last_old_chunk, last_old_slice = old_to_new_axis[last_new_chunk][-1]
first_chunk_size = old_axis[first_old_chunk]
last_chunk_size = old_axis[last_old_chunk]
if first_old_chunk == last_old_chunk:
chunk_size = first_chunk_size
if (
last_old_slice.stop is not None
and last_old_slice.stop != last_chunk_size
):
chunk_size = last_old_slice.stop
if first_old_slice.start != 0:
chunk_size -= first_old_slice.start
partial_chunks.append(chunk_size)
else:
partial_chunks.append(first_chunk_size - first_old_slice.start)
partial_chunks.extend(old_axis[first_old_chunk + 1 : last_old_chunk])
if last_old_slice.stop is not None:
chunk_size = last_old_slice.stop
else:
chunk_size = last_chunk_size
partial_chunks.append(chunk_size)
split_axis.append(partial_chunks)
partial_chunks = []
if partial_chunks:
split_axis.append(partial_chunks)
split_axes.append(split_axis)
return split_axes
def _largest_block_size(chunks: tuple[tuple[int, ...], ...]) -> int:
return math.prod(map(max, chunks))
def _split_partials(
old_to_new: list[Any],
) -> Generator[_NDPartial]:
"""Split the rechunking into partials that can be performed separately"""
partials_per_axis = _split_partials_per_axis(old_to_new)
indices_per_axis = (range(len(partials)) for partials in partials_per_axis)
for nindex, partial_per_axis in zip(
product(*indices_per_axis), product(*partials_per_axis)
):
old, new = zip(*partial_per_axis)
yield _NDPartial(old, new, nindex)
def _split_partials_per_axis(old_to_new: list[Any]) -> tuple[tuple[_Partial, ...], ...]:
"""Split the rechunking into partials that can be performed separately
on each axis"""
sliced_axes = _slice_new_chunks_into_partials(old_to_new)
partial_axes = []
for axis_index, slices in enumerate(sliced_axes):
partials = []
for slice_ in slices:
last_old_chunk: int
first_old_chunk, _ = old_to_new[axis_index][slice_.start][0]
last_old_chunk, _ = old_to_new[axis_index][slice_.stop - 1][-1]
partials.append(
_Partial(
old=slice(first_old_chunk, last_old_chunk + 1),
new=slice_,
)
)
partial_axes.append(tuple(partials))
return tuple(partial_axes)
def _slice_new_chunks_into_partials(
old_to_new: list[list[list[tuple[int, slice]]]],
) -> SlicedAxes:
"""Slice the new chunks into partials that can be computed separately"""
sliced_axes = []
chunk_shape = tuple(len(axis) for axis in old_to_new)
for axis_index, old_to_new_axis in enumerate(old_to_new):
# Two consecutive output chunks A and B belong to the same partial rechunk
# if A and B share the same input chunks, i.e., separating A and B would not
# allow us to cull more input tasks.
# Index of the last input chunk of this partial rechunk
first_old_chunk: int | None = None
partial_splits = [0]
recipe: list[tuple[int, slice]]
for new_chunk_index, recipe in enumerate(old_to_new_axis):
if len(recipe) == 0:
continue
current_first_old_chunk, _ = recipe[0]
current_last_old_chunk, _ = recipe[-1]
if first_old_chunk is None:
first_old_chunk = current_first_old_chunk
elif first_old_chunk != current_last_old_chunk:
partial_splits.append(new_chunk_index)
first_old_chunk = current_first_old_chunk
partial_splits.append(chunk_shape[axis_index])
sliced_axes.append(
tuple(slice(a, b) for a, b in toolz.sliding_window(2, partial_splits))
)
return tuple(sliced_axes)
def _ndindices_of_slice(ndslice: NDSlice) -> Iterator[NDIndex]:
return product(*(range(slc.start, slc.stop) for slc in ndslice))
def _partial_index(global_index: NDIndex, partial_offset: NDIndex) -> NDIndex:
return tuple(index - offset for index, offset in zip(global_index, partial_offset))
def partial_concatenate(
input_name: str,
input_chunks: ChunkedAxes,
ndpartial: _NDPartial,
token: str,
keepmap: np.ndarray,
old_to_new: list[Any],
) -> dict[Key, Any]:
import numpy as np
from dask.array.chunk import getitem
from dask.array.core import concatenate3
dsk: dict[Key, Any] = {}
slice_group = f"rechunk-slice-{token}"
partial_keepmap = keepmap[ndpartial.new]
assert np.sum(partial_keepmap) == 1
partial_new_index = np.argwhere(partial_keepmap)[0]
global_new_index = tuple(
int(ix) + slc.start for ix, slc in zip(partial_new_index, ndpartial.new)
)
inputs = tuple(
old_to_new_axis[ix] for ix, old_to_new_axis in zip(global_new_index, old_to_new)
)
shape = tuple(len(axis) for axis in inputs)
rec_cat_arg = np.empty(shape, dtype="O")
for old_partial_index in np.ndindex(shape):
old_global_index, old_slice = zip(
*(input_axis[index] for index, input_axis in zip(old_partial_index, inputs))
)
original_shape = tuple(
old_axis[index] for index, old_axis in zip(old_global_index, input_chunks)
)
if _slicing_is_necessary(old_slice, original_shape):
key = (slice_group,) + ndpartial.ix + old_global_index
dsk[key] = t = Task(
key,
getitem,
TaskRef((input_name,) + old_global_index),
old_slice,
)
rec_cat_arg[old_partial_index] = t.ref()
else:
rec_cat_arg[old_partial_index] = TaskRef((input_name,) + old_global_index)
concat_task = Task(
(rechunk_name(token),) + global_new_index,
concatenate3,
parse_input(rec_cat_arg.tolist()),
)
dsk[concat_task.key] = concat_task
return dsk
def _slicing_is_necessary(slice: NDSlice, shape: tuple[int | None, ...]) -> bool:
"""Return True if applying the slice alters the shape, False otherwise."""
return not all(
slc.start == 0 and (size is None and slc.stop is None or slc.stop == size)
for slc, size in zip(slice, shape)
)
def partial_rechunk(
input_name: str,
input_chunks: ChunkedAxes,
chunks: ChunkedAxes,
ndpartial: _NDPartial,
token: str,
disk: bool,
keepmap: np.ndarray,
) -> dict[Key, Any]:
dsk: dict[Key, Any] = {}
old_partial_offset = tuple(slice_.start for slice_ in ndpartial.old)
partial_token = tokenize(token, ndpartial.ix)
# Use `token` to generate a canonical group for the entire rechunk
transfer_group = f"rechunk-transfer-{token}"
unpack_group = rechunk_name(token)
# We can use `partial_token` here because the barrier task share their
# group across all P2P shuffle-like operations
# FIXME: Make this group unique per individual P2P shuffle-like operation
_barrier_key = barrier_key(ShuffleId(partial_token))
ndim = len(input_chunks)
partial_old = tuple(
chunk_axis[partial_axis]
for partial_axis, chunk_axis in zip(ndpartial.old, input_chunks)
)
partial_new: ChunkedAxes = tuple(
chunks[axis_index][ndpartial.new[axis_index]] for axis_index in range(ndim)
)
transfer_keys = []
for global_index in _ndindices_of_slice(ndpartial.old):
partial_index = _partial_index(global_index, old_partial_offset)
input_key = TaskRef((input_name,) + global_index)
key = (transfer_group,) + ndpartial.ix + global_index
dsk[key] = t = Task(
key,
rechunk_transfer,
input_key,
ShuffleId(partial_token),
partial_index,
)
transfer_keys.append(t.ref())
dsk[_barrier_key] = barrier = P2PBarrierTask(
_barrier_key,
p2p_barrier,
partial_token,
*transfer_keys,
spec=ArrayRechunkSpec(
id=ShuffleId(partial_token), new=partial_new, old=partial_old, disk=disk
),
)
new_partial_offset = tuple(axis.start for axis in ndpartial.new)
for global_index in _ndindices_of_slice(ndpartial.new):
partial_index = _partial_index(global_index, new_partial_offset)
if keepmap[global_index]:
k = (unpack_group,) + global_index
dsk[k] = Task(
k,
rechunk_unpack,
ShuffleId(partial_token),
partial_index,
barrier.ref(),
)
return dsk
|
P2PRechunkLayer
|
python
|
doocs__leetcode
|
solution/2800-2899/2825.Make String a Subsequence Using Cyclic Increments/Solution.py
|
{
"start": 0,
"end": 275
}
|
class ____:
def canMakeSubsequence(self, str1: str, str2: str) -> bool:
i = 0
for c in str1:
d = "a" if c == "z" else chr(ord(c) + 1)
if i < len(str2) and str2[i] in (c, d):
i += 1
return i == len(str2)
|
Solution
|
python
|
ray-project__ray
|
python/ray/serve/tests/test_config_files/logging_config_test.py
|
{
"start": 200,
"end": 666
}
|
class ____:
def __call__(self):
logger.debug("this_is_debug_info")
logger.info("this_is_access_log", extra={"serve_access_log": True})
log_file = logger.handlers[1].target.baseFilename
return {
"log_file": log_file,
"replica": serve.get_replica_context().replica_id.to_full_id_str(),
"log_level": logger.level,
"num_handlers": len(logger.handlers),
}
@serve.deployment
|
Model
|
python
|
huggingface__transformers
|
src/transformers/models/swin/modeling_swin.py
|
{
"start": 44822,
"end": 47193
}
|
class ____(SwinPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.swin = SwinModel(config)
# Classifier head
self.classifier = (
nn.Linear(self.swin.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, SwinImageClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.swin(
pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
pooled_output = outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SwinImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
reshaped_hidden_states=outputs.reshaped_hidden_states,
)
@auto_docstring(
custom_intro="""
Swin backbone, to be used with frameworks like DETR and MaskFormer.
"""
)
|
SwinForImageClassification
|
python
|
apache__airflow
|
airflow-core/tests/unit/utils/test_db_cleanup.py
|
{
"start": 2549,
"end": 29701
}
|
class ____:
@pytest.fixture(autouse=True)
def clear_airflow_tables(self):
drop_tables_with_prefix("_airflow_")
@pytest.mark.parametrize(
("kwargs", "called"),
[
pytest.param(dict(confirm=True), True, id="true"),
pytest.param(dict(), True, id="not supplied"),
pytest.param(dict(confirm=False), False, id="false"),
],
)
@patch("airflow.utils.db_cleanup._cleanup_table", new=MagicMock())
@patch("airflow.utils.db_cleanup._confirm_delete")
def test_run_cleanup_confirm(self, confirm_delete_mock, kwargs, called):
"""Test that delete confirmation input is called when appropriate"""
run_cleanup(
clean_before_timestamp=None,
table_names=None,
dry_run=None,
verbose=None,
**kwargs,
)
if called:
confirm_delete_mock.assert_called()
else:
confirm_delete_mock.assert_not_called()
@pytest.mark.parametrize(
("kwargs", "should_skip"),
[
pytest.param(dict(skip_archive=True), True, id="true"),
pytest.param(dict(), False, id="not supplied"),
pytest.param(dict(skip_archive=False), False, id="false"),
],
)
@patch("airflow.utils.db_cleanup._cleanup_table")
def test_run_cleanup_skip_archive(self, cleanup_table_mock, kwargs, should_skip):
"""Test that delete confirmation input is called when appropriate"""
run_cleanup(
clean_before_timestamp=None,
table_names=["log"],
dry_run=None,
verbose=None,
confirm=False,
**kwargs,
)
assert cleanup_table_mock.call_args.kwargs["skip_archive"] is should_skip
@patch("airflow.utils.db_cleanup._cleanup_table")
def test_run_cleanup_batch_size_propagation(self, cleanup_table_mock):
"""Ensure batch_size is forwarded from run_cleanup to _cleanup_table."""
run_cleanup(
clean_before_timestamp=None,
table_names=["log"],
dry_run=None,
verbose=None,
confirm=False,
batch_size=1234,
)
cleanup_table_mock.assert_called_once()
assert cleanup_table_mock.call_args.kwargs["batch_size"] == 1234
@pytest.mark.parametrize(
"table_names",
[
["xcom", "log"],
None,
],
)
@patch("airflow.utils.db_cleanup._cleanup_table")
@patch("airflow.utils.db_cleanup._confirm_delete", new=MagicMock())
def test_run_cleanup_tables(self, clean_table_mock, table_names):
"""
``_cleanup_table`` should be called for each table in subset if one
is provided else should be called for all tables.
"""
base_kwargs = dict(
clean_before_timestamp=None,
dry_run=None,
verbose=None,
)
run_cleanup(**base_kwargs, table_names=table_names)
assert clean_table_mock.call_count == len(table_names) if table_names else len(config_dict)
@patch("airflow.utils.db_cleanup._cleanup_table")
@patch("airflow.utils.db_cleanup._confirm_delete")
def test_validate_tables_all_invalid(self, confirm_delete_mock, clean_table_mock):
"""If only invalid tables are provided, don't try cleaning anything"""
base_kwargs = dict(
clean_before_timestamp=None,
dry_run=None,
verbose=None,
)
with pytest.raises(SystemExit) as execinfo:
run_cleanup(**base_kwargs, table_names=["all", "fake"])
assert "No tables selected for db cleanup" in str(execinfo.value)
confirm_delete_mock.assert_not_called()
@pytest.mark.parametrize(
"dry_run",
[None, True, False],
)
@patch("airflow.utils.db_cleanup._build_query", MagicMock())
@patch("airflow.utils.db_cleanup._confirm_delete", MagicMock())
@patch("airflow.utils.db_cleanup._check_for_rows")
@patch("airflow.utils.db_cleanup._do_delete")
def test_run_cleanup_dry_run(self, do_delete, check_rows_mock, dry_run):
"""Delete should only be called when not dry_run"""
check_rows_mock.return_value = 10
base_kwargs = dict(
table_names=["log"],
clean_before_timestamp=None,
dry_run=dry_run,
verbose=None,
)
run_cleanup(
**base_kwargs,
)
if dry_run:
do_delete.assert_not_called()
else:
do_delete.assert_called()
@pytest.mark.parametrize(
("table_name", "date_add_kwargs", "expected_to_delete", "run_type"),
[
pytest.param("task_instance", dict(days=0), 0, DagRunType.SCHEDULED, id="beginning"),
pytest.param("task_instance", dict(days=4), 4, DagRunType.SCHEDULED, id="middle"),
pytest.param("task_instance", dict(days=9), 9, DagRunType.SCHEDULED, id="end_exactly"),
pytest.param(
"task_instance", dict(days=9, microseconds=1), 10, DagRunType.SCHEDULED, id="beyond_end"
),
pytest.param(
"dag_run", dict(days=9, microseconds=1), 9, DagRunType.SCHEDULED, id="beyond_end_dr"
),
pytest.param(
"dag_run", dict(days=9, microseconds=1), 10, DagRunType.MANUAL, id="beyond_end_dr_external"
),
],
)
def test__build_query(self, table_name, date_add_kwargs, expected_to_delete, run_type):
"""
Verify that ``_build_query`` produces a query that would delete the right
task instance records depending on the value of ``clean_before_timestamp``.
DagRun is a special case where we always keep the last dag run even if
the ``clean_before_timestamp`` is in the future, except for
externally-triggered dag runs. That is, only the last non-externally-triggered
dag run is kept.
"""
base_date = pendulum.DateTime(2022, 1, 1, tzinfo=pendulum.timezone("UTC"))
create_tis(
base_date=base_date,
num_tis=10,
run_type=run_type,
)
target_table_name = "_airflow_temp_table_name"
with create_session() as session:
clean_before_date = base_date.add(**date_add_kwargs)
query = _build_query(
**config_dict[table_name].__dict__,
clean_before_timestamp=clean_before_date,
session=session,
)
stmt = CreateTableAs(target_table_name, query.selectable)
session.execute(stmt)
res = session.execute(text(f"SELECT COUNT(1) FROM {target_table_name}"))
for row in res:
assert row[0] == expected_to_delete
@pytest.mark.parametrize(
("table_name", "date_add_kwargs", "expected_to_delete", "run_type"),
[
pytest.param("task_instance", dict(days=0), 0, DagRunType.SCHEDULED, id="beginning"),
pytest.param("task_instance", dict(days=4), 4, DagRunType.SCHEDULED, id="middle"),
pytest.param("task_instance", dict(days=9), 9, DagRunType.SCHEDULED, id="end_exactly"),
pytest.param(
"task_instance", dict(days=9, microseconds=1), 10, DagRunType.SCHEDULED, id="beyond_end"
),
pytest.param(
"dag_run", dict(days=9, microseconds=1), 9, DagRunType.SCHEDULED, id="beyond_end_dr"
),
pytest.param(
"dag_run", dict(days=9, microseconds=1), 10, DagRunType.MANUAL, id="beyond_end_dr_external"
),
],
)
def test__cleanup_table(self, table_name, date_add_kwargs, expected_to_delete, run_type):
"""
Verify that _cleanup_table actually deletes the rows it should.
TaskInstance represents the "normal" case. DagRun is the odd case where we want
to keep the last non-externally-triggered DagRun record even if it should be
deleted according to the provided timestamp.
We also verify that the "on delete cascade" behavior is as expected. Some tables
have foreign keys defined so for example if we delete a dag run, all its associated
task instances should be purged as well. But if we delete task instances the
associated dag runs should remain.
"""
base_date = pendulum.DateTime(2022, 1, 1, tzinfo=pendulum.timezone("UTC"))
num_tis = 10
create_tis(
base_date=base_date,
num_tis=num_tis,
run_type=run_type,
)
with create_session() as session:
clean_before_date = base_date.add(**date_add_kwargs)
_cleanup_table(
**config_dict[table_name].__dict__,
clean_before_timestamp=clean_before_date,
dry_run=False,
session=session,
table_names=["dag_run", "task_instance"],
)
model = config_dict[table_name].orm_model
expected_remaining = num_tis - expected_to_delete
assert len(session.query(model).all()) == expected_remaining
if model.name == "task_instance":
assert len(session.query(DagRun).all()) == num_tis
elif model.name == "dag_run":
assert len(session.query(TaskInstance).all()) == expected_remaining
else:
raise Exception("unexpected")
@pytest.mark.parametrize(
("table_name", "expected_archived"),
[
(
"dag_run",
{"dag_run", "task_instance"}, # Only these are populated
),
],
)
def test_run_cleanup_archival_integration(self, table_name, expected_archived):
"""
Integration test that verifies:
1. Recursive FK-dependent tables are resolved via _effective_table_names().
2. run_cleanup() archives only tables with data.
3. Archive tables are not created for empty dependent tables.
"""
base_date = pendulum.datetime(2022, 1, 1, tz="UTC")
num_tis = 5
# Create test data for DAG Run and TIs
if table_name in {"dag_run", "task_instance"}:
create_tis(base_date=base_date, num_tis=num_tis, run_type=DagRunType.MANUAL)
clean_before_date = base_date.add(days=10)
with create_session() as session:
run_cleanup(
clean_before_timestamp=clean_before_date,
table_names=[table_name],
dry_run=False,
confirm=False,
session=session,
)
# Inspect archive tables created
inspector = inspect(session.bind)
archive_tables = {
name for name in inspector.get_table_names() if name.startswith(ARCHIVE_TABLE_PREFIX)
}
actual_archived = {t.split("__", 1)[-1].split("__")[0] for t in archive_tables}
assert expected_archived <= actual_archived, (
f"Expected archive tables not found: {expected_archived - actual_archived}"
)
@pytest.mark.parametrize(
("skip_archive", "expected_archives"),
[pytest.param(True, 0, id="skip_archive"), pytest.param(False, 1, id="do_archive")],
)
def test__skip_archive(self, skip_archive, expected_archives):
"""
Verify that running cleanup_table with drops the archives when requested.
Archived tables from DB migration should be kept when skip_archive is True.
"""
base_date = pendulum.DateTime(2022, 1, 1, tzinfo=pendulum.timezone("UTC"))
num_tis = 10
create_tis(
base_date=base_date,
num_tis=num_tis,
)
with create_session() as session:
# cleanup any existing archived tables
for name in _get_archived_table_names(["dag_run"], session):
session.execute(text(f"DROP TABLE IF EXISTS {name}"))
clean_before_date = base_date.add(days=5)
_cleanup_table(
**config_dict["dag_run"].__dict__,
clean_before_timestamp=clean_before_date,
dry_run=False,
session=session,
table_names=["dag_run"],
skip_archive=skip_archive,
)
model = config_dict["dag_run"].orm_model
assert len(session.query(model).all()) == 5
assert len(_get_archived_table_names(["dag_run"], session)) == expected_archives
@patch("airflow.utils.db.reflect_tables")
def test_skip_archive_failure_will_remove_table(self, reflect_tables_mock):
"""
Verify that running cleanup_table with skip_archive = True, and failure happens.
The archive table should be removed from db if any exception.
"""
reflect_tables_mock.side_effect = SQLAlchemyError("Deletion failed")
base_date = pendulum.DateTime(2022, 1, 1, tzinfo=pendulum.timezone("UTC"))
num_tis = 10
create_tis(
base_date=base_date,
num_tis=num_tis,
)
try:
with create_session() as session:
# cleanup any existing archived tables
for name in _get_archived_table_names(["dag_run"], session):
session.execute(text(f"DROP TABLE IF EXISTS {name}"))
clean_before_date = base_date.add(days=5)
_cleanup_table(
**config_dict["dag_run"].__dict__,
clean_before_timestamp=clean_before_date,
dry_run=False,
session=session,
table_names=["dag_run"],
skip_archive=True,
)
except SQLAlchemyError:
pass
archived_table_names = _get_archived_table_names(["dag_run"], session)
assert len(archived_table_names) == 0
def test_no_models_missing(self):
"""
1. Verify that for all tables in `airflow.models`, we either have them enabled in db cleanup,
or documented in the exclusion list in this test.
2. Verify that no table is enabled for db cleanup and also in exclusion list.
"""
import pkgutil
proj_root = Path(__file__).parents[2].resolve()
mods = list(
f"airflow.models.{name}"
for _, name, _ in pkgutil.iter_modules([str(proj_root / "airflow/models")])
)
all_models = {}
for mod_name in mods:
mod = import_module(mod_name)
for class_ in mod.__dict__.values():
if isinstance(class_, DeclarativeMeta):
with suppress(AttributeError):
all_models.update({class_.__tablename__: class_})
exclusion_list = {
"backfill", # todo: AIP-78
"backfill_dag_run", # todo: AIP-78
"ab_user",
"variable", # leave alone
"asset_active", # not good way to know if "stale"
"asset", # not good way to know if "stale"
"asset_alias", # not good way to know if "stale"
"task_map", # keys to TI, so no need
"serialized_dag", # handled through FK to Dag
"log_template", # not a significant source of data; age not indicative of staleness
"dag_tag", # not a significant source of data; age not indicative of staleness,
"dag_owner_attributes", # not a significant source of data; age not indicative of staleness,
"dag_code", # self-maintaining
"dag_warning", # self-maintaining
"connection", # leave alone
"slot_pool", # leave alone
"dag_schedule_asset_reference", # leave alone for now
"dag_schedule_asset_alias_reference", # leave alone for now
"dag_schedule_asset_name_reference", # leave alone for now
"dag_schedule_asset_uri_reference", # leave alone for now
"task_outlet_asset_reference", # leave alone for now
"asset_dag_run_queue", # self-managed
"asset_event_dag_run", # foreign keys
"task_instance_note", # foreign keys
"dag_run_note", # foreign keys
"rendered_task_instance_fields", # foreign key with TI
"dag_priority_parsing_request", # Records are purged once per DAG Processing loop, not a
# significant source of data.
"dag_bundle", # leave alone - not appropriate for cleanup
}
from airflow.utils.db_cleanup import config_dict
print(f"all_models={set(all_models)}")
print(f"excl+conf={exclusion_list.union(config_dict)}")
assert set(all_models) - exclusion_list.union(config_dict) == set()
assert exclusion_list.isdisjoint(config_dict)
def test_no_failure_warnings(self, caplog):
"""
Ensure every table we have configured (and that is present in the db) can be cleaned successfully.
For example, this checks that the recency column is actually a column.
"""
run_cleanup(clean_before_timestamp=timezone.utcnow(), dry_run=True)
assert "Encountered error when attempting to clean table" not in caplog.text
# Lets check we have the right error message just in case
caplog.clear()
with patch("airflow.utils.db_cleanup._cleanup_table", side_effect=OperationalError("oops", {}, None)):
run_cleanup(clean_before_timestamp=timezone.utcnow(), table_names=["task_instance"], dry_run=True)
assert "Encountered error when attempting to clean table" in caplog.text
@pytest.mark.parametrize(
"drop_archive",
[True, False],
)
@patch("airflow.utils.db_cleanup._dump_table_to_file")
@patch("airflow.utils.db_cleanup._confirm_drop_archives")
@patch("airflow.utils.db_cleanup.inspect")
def test_confirm_drop_called_when_drop_archives_is_true_and_archive_exists(
self, inspect_mock, confirm_drop_mock, _dump_table_to_file_mock, drop_archive
):
"""Test that drop confirmation input is called when appropriate"""
inspector = inspect_mock.return_value
inspector.get_table_names.return_value = [f"{ARCHIVE_TABLE_PREFIX}dag_run__233"]
export_archived_records(
export_format="csv", output_path="path", drop_archives=drop_archive, session=MagicMock()
)
if drop_archive:
confirm_drop_mock.assert_called()
else:
confirm_drop_mock.assert_not_called()
@pytest.mark.parametrize(
"tables",
[
["table1", "table2"],
["table1", "table2", "table3"],
["table1", "table2", "table3", "table4"],
],
)
@patch("airflow.utils.db_cleanup.ask_yesno")
def test_confirm_drop_archives(self, mock_ask_yesno, tables):
expected = (
f"You have requested that we drop the following archived tables: {', '.join(tables)}.\n"
"This is irreversible. Consider backing up the tables first."
)
if len(tables) > 3:
expected = (
f"You have requested that we drop {len(tables)} archived tables prefixed with "
f"_airflow_deleted__.\n"
"This is irreversible. Consider backing up the tables first.\n"
)
for table in tables:
expected += f"\n {table}"
mock_ask_yesno.return_value = True
with (
patch("sys.stdout", new=StringIO()) as fake_out,
patch("builtins.input", side_effect=["drop archived tables"]),
):
_confirm_drop_archives(tables=tables)
output = fake_out.getvalue().strip()
assert output == expected
def test_user_did_not_confirm(self):
tables = ["table1", "table2"]
with (
pytest.raises(SystemExit) as cm,
patch("builtins.input", side_effect=["not drop archived tables"]),
):
_confirm_drop_archives(tables=tables)
assert str(cm.value) == "User did not confirm; exiting."
@pytest.mark.parametrize("drop_archive", [True, False])
@patch("airflow.utils.db_cleanup._dump_table_to_file")
@patch("airflow.utils.db_cleanup.inspect")
@patch("builtins.input", side_effect=["drop archived tables"])
def test_export_archived_records_only_archived_tables(
self, mock_input, inspect_mock, dump_mock, caplog, drop_archive
):
"""Test export_archived_records and show that only tables with the archive prefix are exported."""
session_mock = MagicMock()
inspector = inspect_mock.return_value
inspector.get_table_names.return_value = [f"{ARCHIVE_TABLE_PREFIX}dag_run__233", "task_instance"]
export_archived_records(
export_format="csv", output_path="path", drop_archives=drop_archive, session=session_mock
)
dump_mock.assert_called_once_with(
target_table=f"{ARCHIVE_TABLE_PREFIX}dag_run__233",
file_path=f"path/{ARCHIVE_TABLE_PREFIX}dag_run__233.csv",
export_format="csv",
session=session_mock,
)
assert f"Exporting table {ARCHIVE_TABLE_PREFIX}dag_run__233" in caplog.text
if drop_archive:
assert "Total exported tables: 1, Total dropped tables: 1" in caplog.text
else:
assert "Total exported tables: 1, Total dropped tables: 0" in caplog.text
@pytest.mark.parametrize("drop_archive", [True, False])
@patch("airflow.utils.db_cleanup._dump_table_to_file")
@patch("airflow.utils.db_cleanup.inspect")
@patch("airflow.utils.db_cleanup._confirm_drop_archives")
@patch("builtins.input", side_effect=["drop archived tables"])
def test_export_archived_no_confirm_if_no_tables(
self, mock_input, mock_confirm, inspect_mock, dump_mock, caplog, drop_archive
):
"""Test no confirmation if no archived tables found"""
session_mock = MagicMock()
inspector = inspect_mock.return_value
# No tables with the archive prefix
inspector.get_table_names.return_value = ["dag_run", "task_instance"]
export_archived_records(
export_format="csv", output_path="path", drop_archives=drop_archive, session=session_mock
)
mock_confirm.assert_not_called()
dump_mock.assert_not_called()
assert "Total exported tables: 0, Total dropped tables: 0" in caplog.text
@patch("airflow.utils.db_cleanup.csv")
def test_dump_table_to_file_function_for_csv(self, mock_csv):
mockopen = mock_open()
mock_cursor = MagicMock()
mock_session = MagicMock()
mock_session.execute.return_value = mock_cursor
mock_cursor.keys.return_value = ["test-col-1", "test-col-2"]
mock_cursor.fetchmany.side_effect = [
[("testval-1.1", "testval-1.2"), ("testval-2.1", "testval-2.2")],
[],
]
with patch("airflow.utils.db_cleanup.open", mockopen, create=True):
_dump_table_to_file(
target_table="mytable", file_path="dags/myfile.csv", export_format="csv", session=mock_session
)
mockopen.assert_called_once_with("dags/myfile.csv", "w")
writer = mock_csv.writer
writer.assert_called_once()
writer.return_value.writerow.assert_called_once_with(["test-col-1", "test-col-2"])
writer.return_value.writerows.assert_called_once_with(
[("testval-1.1", "testval-1.2"), ("testval-2.1", "testval-2.2")]
)
def test_dump_table_to_file_raises_if_format_not_supported(self):
with pytest.raises(AirflowException) as exc_info:
_dump_table_to_file(
target_table="mytable",
file_path="dags/myfile.json",
export_format="json",
session=MagicMock(),
)
assert "Export format json is not supported" in str(exc_info.value)
@pytest.mark.parametrize("tables", [["log", "dag"], ["dag_run", "task_instance"]])
@patch("airflow.utils.db_cleanup._confirm_drop_archives")
@patch("airflow.utils.db_cleanup.inspect")
def test_drop_archived_tables_no_confirm_if_no_archived_tables(
self, inspect_mock, mock_confirm, tables, caplog
):
"""
Test no confirmation if no archived tables found.
Archived tables starts with a prefix defined in ARCHIVE_TABLE_PREFIX.
"""
inspector = inspect_mock.return_value
inspector.get_table_names.return_value = tables
drop_archived_tables(tables, needs_confirm=True, session=MagicMock())
mock_confirm.assert_not_called()
assert "Total dropped tables: 0" in caplog.text
@pytest.mark.parametrize("confirm", [True, False])
@patch("airflow.utils.db_cleanup.inspect")
@patch("airflow.utils.db_cleanup._confirm_drop_archives")
@patch("builtins.input", side_effect=["drop archived tables"])
def test_drop_archived_tables(self, mock_input, confirm_mock, inspect_mock, caplog, confirm):
"""Test drop_archived_tables"""
archived_table = f"{ARCHIVE_TABLE_PREFIX}dag_run__233"
normal_table = "dag_run"
inspector = inspect_mock.return_value
inspector.get_table_names.return_value = [archived_table, normal_table]
drop_archived_tables([normal_table], needs_confirm=confirm, session=MagicMock())
assert f"Dropping archived table {archived_table}" in caplog.text
assert f"Dropping archived table {normal_table}" not in caplog.text
assert "Total dropped tables: 1" in caplog.text
if confirm:
confirm_mock.assert_called()
else:
confirm_mock.assert_not_called()
def create_tis(base_date, num_tis, run_type=DagRunType.SCHEDULED):
with create_session() as session:
bundle_name = "testing"
session.add(DagBundleModel(name=bundle_name))
session.flush()
dag_id = f"test-dag_{uuid4()}"
dag = DAG(dag_id=dag_id)
dm = DagModel(dag_id=dag_id, bundle_name=bundle_name)
session.add(dm)
SerializedDagModel.write_dag(LazyDeserializedDAG.from_dag(dag), bundle_name=bundle_name)
dag_version = DagVersion.get_latest_version(dag.dag_id)
for num in range(num_tis):
start_date = base_date.add(days=num)
dag_run = DagRun(
dag.dag_id,
run_id=f"abc_{num}",
run_type=run_type,
start_date=start_date,
)
ti = TaskInstance(
PythonOperator(task_id="dummy-task", python_callable=print),
run_id=dag_run.run_id,
dag_version_id=dag_version.id,
)
ti.dag_id = dag.dag_id
ti.start_date = start_date
session.add(dag_run)
session.add(ti)
session.commit()
|
TestDBCleanup
|
python
|
psf__requests
|
src/requests/exceptions.py
|
{
"start": 3277,
"end": 3359
}
|
class ____(InvalidURL):
"""The proxy URL provided is invalid."""
|
InvalidProxyURL
|
python
|
doocs__leetcode
|
solution/0200-0299/0202.Happy Number/Solution2.py
|
{
"start": 0,
"end": 340
}
|
class ____:
def isHappy(self, n: int) -> bool:
def next(x):
y = 0
while x:
x, v = divmod(x, 10)
y += v * v
return y
slow, fast = n, next(n)
while slow != fast:
slow, fast = next(slow), next(next(fast))
return slow == 1
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-cost-to-make-at-least-one-valid-path-in-a-grid.py
|
{
"start": 1199,
"end": 2097
}
|
class ____(object):
def minCost(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (0, -1), (1, 0), (-1, 0)]
b, t = (0, 0), (len(grid)-1, len(grid[0])-1)
dq = collections.deque([(b, 0)])
lookup = set()
while dq:
b, d = dq.popleft()
if b in lookup:
continue
lookup.add(b)
if b == t:
return d
for nd, (dr, dc) in enumerate(directions, 1):
nb = (b[0]+dr, b[1]+dc)
if not (0 <= nb[0] < len(grid) and 0 <= nb[1] < len(grid[0]) and nb not in lookup):
continue
if nd == grid[b[0]][b[1]]:
dq.appendleft((nb, d))
else:
dq.append((nb, d+1))
return -1 # never reach here
|
Solution2
|
python
|
PyCQA__pylint
|
doc/data/messages/d/duplicate-code/bad/apple.py
|
{
"start": 0,
"end": 417
}
|
class ____:
def __init__(self):
self.remaining_bites = 3
def take_bite(self):
if self.remaining_bites > 0:
print("You take a bite of the apple.")
self.remaining_bites -= 1
else:
print("The apple is already eaten up!")
def eaten_by_animal(self, animal):
self.remaining_bites = 0
print("The apple has been eaten by an animal.")
|
Apple
|
python
|
allegroai__clearml
|
clearml/automation/optimization.py
|
{
"start": 49447,
"end": 53171
}
|
class ____(SearchStrategy):
"""
Random search strategy controller. Random uniform sampling of hyperparameters.
"""
# Number of already chosen random samples before assuming we covered the entire hyper-parameter space
_hp_space_cover_samples = 42
def __init__(
self,
base_task_id: str,
hyper_parameters: Sequence[Parameter],
objective_metric: Objective,
execution_queue: str,
num_concurrent_workers: int,
pool_period_min: float = 2.0,
time_limit_per_job: Optional[float] = None,
compute_time_limit: Optional[float] = None,
max_iteration_per_job: Optional[int] = None,
total_max_jobs: Optional[int] = None,
**_: Any
) -> ():
"""
Initialize a random search optimizer.
:param str base_task_id: The Task ID.
:param list hyper_parameters: The list of Parameter objects to optimize over.
:param Objective objective_metric: The Objective metric to maximize / minimize.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param int num_concurrent_workers: The maximum umber of concurrent running machines.
:param float pool_period_min: The time between two consecutive pools (minutes).
:param float time_limit_per_job: The maximum execution time per single job in minutes,
when time limit is exceeded job is aborted. (Optional)
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
all jobs aborted. (Optional)
:param int max_iteration_per_job: The maximum iterations (of the Objective metric)
per single job. When exceeded, the job is aborted.
:param int total_max_jobs: The total maximum jobs for the optimization process. The default is ``None``, for
unlimited.
"""
super(RandomSearch, self).__init__(
base_task_id=base_task_id,
hyper_parameters=hyper_parameters,
objective_metric=objective_metric,
execution_queue=execution_queue,
num_concurrent_workers=num_concurrent_workers,
pool_period_min=pool_period_min,
time_limit_per_job=time_limit_per_job,
compute_time_limit=compute_time_limit,
max_iteration_per_job=max_iteration_per_job,
total_max_jobs=total_max_jobs,
**_
)
self._hyper_parameters_collection = set()
def create_job(self) -> Optional[ClearmlJob]:
"""
Create a new job if needed. Return the newly created job. If no job needs to be created, return ``None``.
:return: A newly created ClearmlJob object, or None if no ClearmlJob created
"""
parameters = None
# maximum tries to ge a random set that is not already in the collection
for i in range(self._hp_space_cover_samples):
parameters = {}
for p in self._hyper_parameters:
parameters.update(p.get_value())
# hash the parameters dictionary
param_hash = hash(json.dumps(parameters, sort_keys=True))
# if this is a new set of parameters, use it.
if param_hash not in self._hyper_parameters_collection:
self._hyper_parameters_collection.add(param_hash)
break
# try again
parameters = None
# if we failed to find a random set of parameters, assume we selected all of them
if not parameters:
return None
return self.helper_create_job(base_task_id=self._base_task_id, parameter_override=parameters)
|
RandomSearch
|
python
|
keras-team__keras
|
keras/src/utils/dtype_utils_test.py
|
{
"start": 1137,
"end": 1936
}
|
class ____(test_case.TestCase):
def test_is_float_float16(self):
self.assertTrue(dtype_utils.is_float("float16"))
def test_is_float_float32(self):
self.assertTrue(dtype_utils.is_float("float32"))
def test_is_float_float64(self):
self.assertTrue(dtype_utils.is_float("float64"))
def test_is_float_int32(self):
self.assertFalse(dtype_utils.is_float("int32"))
def test_is_float_bool(self):
self.assertFalse(dtype_utils.is_float("bool"))
def test_is_float_uint8(self):
self.assertFalse(dtype_utils.is_float("uint8"))
def test_is_float_containing_float(self):
self.assertTrue(dtype_utils.is_float("floating"))
def test_is_float_empty_string(self):
self.assertFalse(dtype_utils.is_float(""))
|
IsFloatTests
|
python
|
MongoEngine__mongoengine
|
tests/fields/test_enum_field.py
|
{
"start": 562,
"end": 4831
}
|
class ____(MongoDBTestCase):
def test_storage(self):
model = ModelWithEnum(status=Status.NEW).save()
assert get_as_pymongo(model) == {"_id": model.id, "status": "new"}
def test_set_enum(self):
ModelWithEnum.drop_collection()
ModelWithEnum(status=Status.NEW).save()
assert ModelWithEnum.objects(status=Status.NEW).count() == 1
assert ModelWithEnum.objects.first().status == Status.NEW
def test_set_by_value(self):
ModelWithEnum.drop_collection()
ModelWithEnum(status="new").save()
assert ModelWithEnum.objects.first().status == Status.NEW
def test_filter(self):
ModelWithEnum.drop_collection()
ModelWithEnum(status="new").save()
assert ModelWithEnum.objects(status="new").count() == 1
assert ModelWithEnum.objects(status=Status.NEW).count() == 1
assert ModelWithEnum.objects(status=Status.DONE).count() == 0
def test_change_value(self):
m = ModelWithEnum(status="new")
m.status = Status.DONE
m.save()
assert m.status == Status.DONE
m.status = "wrong"
assert m.status == "wrong"
with pytest.raises(ValidationError):
m.validate()
def test_set_default(self):
class ModelWithDefault(Document):
status = EnumField(Status, default=Status.DONE)
m = ModelWithDefault().save()
assert m.status == Status.DONE
def test_enum_field_can_be_empty(self):
ModelWithEnum.drop_collection()
m = ModelWithEnum().save()
assert m.status is None
assert ModelWithEnum.objects()[0].status is None
assert ModelWithEnum.objects(status=None).count() == 1
def test_set_none_explicitly(self):
ModelWithEnum.drop_collection()
ModelWithEnum(status=None).save()
assert ModelWithEnum.objects.first().status is None
def test_cannot_create_model_with_wrong_enum_value(self):
m = ModelWithEnum(status="wrong_one")
with pytest.raises(ValidationError):
m.validate()
def test_partial_choices(self):
partial = [Status.DONE]
enum_field = EnumField(Status, choices=partial)
assert enum_field.choices == partial
class FancyDoc(Document):
z = enum_field
FancyDoc(z=Status.DONE).validate()
with pytest.raises(
ValidationError, match=r"Value must be one of .*Status.DONE"
):
FancyDoc(z=Status.NEW).validate()
def test_wrong_choices(self):
with pytest.raises(ValueError, match="Invalid choices"):
EnumField(Status, choices=["my", "custom", "options"])
with pytest.raises(ValueError, match="Invalid choices"):
EnumField(Status, choices=[Color.RED])
with pytest.raises(ValueError, match="Invalid choices"):
EnumField(Status, choices=[Status.DONE, Color.RED])
def test_embedding_in_complex_field(self):
ModelComplexEnum.drop_collection()
model = ModelComplexEnum(
status="new", statuses=["new"], color_mapping={"red": 1}
).save()
assert model.status == Status.NEW
assert model.statuses == [Status.NEW]
assert model.color_mapping == {"red": Color.RED}
model.reload()
assert model.status == Status.NEW
assert model.statuses == [Status.NEW]
assert model.color_mapping == {"red": Color.RED}
model.status = "done"
model.color_mapping = {"blue": 2}
model.statuses = ["new", "done"]
model.save()
assert model.status == Status.DONE
assert model.statuses == [Status.NEW, Status.DONE]
assert model.color_mapping == {"blue": Color.BLUE}
model.reload()
assert model.status == Status.DONE
assert model.color_mapping == {"blue": Color.BLUE}
assert model.statuses == [Status.NEW, Status.DONE]
with pytest.raises(ValidationError, match="must be one of ..Status"):
model.statuses = [1]
model.save()
model.statuses = ["done"]
model.color_mapping = {"blue": "done"}
with pytest.raises(ValidationError, match="must be one of ..Color"):
model.save()
|
TestStringEnumField
|
python
|
docker__docker-py
|
docker/credentials/errors.py
|
{
"start": 43,
"end": 93
}
|
class ____(StoreError):
pass
|
CredentialsNotFound
|
python
|
huggingface__transformers
|
src/transformers/models/yolos/modeling_yolos.py
|
{
"start": 20707,
"end": 21375
}
|
class ____(nn.Module):
def __init__(self, config: YolosConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->Yolos
|
YolosPooler
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 485690,
"end": 486283
}
|
class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("PackageEdge"), graphql_name="edges")
nodes = sgqlc.types.Field(sgqlc.types.list_of("Package"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
|
PackageConnection
|
python
|
tiangolo__fastapi
|
docs_src/security/tutorial005_an_py310.py
|
{
"start": 1248,
"end": 1337
}
|
class ____(BaseModel):
username: str | None = None
scopes: list[str] = []
|
TokenData
|
python
|
pypa__pip
|
src/pip/_vendor/pygments/util.py
|
{
"start": 770,
"end": 8330
}
|
class ____(Exception):
"""
This exception will be raised by all option processing functions if
the type or value of the argument is not correct.
"""
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
"""
If the key `optname` from the dictionary is not in the sequence
`allowed`, raise an error, otherwise return it.
"""
string = options.get(optname, default)
if normcase:
string = string.lower()
if string not in allowed:
raise OptionError('Value for option {} must be one of {}'.format(optname, ', '.join(map(str, allowed))))
return string
def get_bool_opt(options, optname, default=None):
"""
Intuitively, this is `options.get(optname, default)`, but restricted to
Boolean value. The Booleans can be represented as string, in order to accept
Boolean value from the command line arguments. If the key `optname` is
present in the dictionary `options` and is not associated with a Boolean,
raise an `OptionError`. If it is absent, `default` is returned instead.
The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off``
(matched case-insensitively).
"""
string = options.get(optname, default)
if isinstance(string, bool):
return string
elif isinstance(string, int):
return bool(string)
elif not isinstance(string, str):
raise OptionError(f'Invalid type {string!r} for option {optname}; use '
'1/0, yes/no, true/false, on/off')
elif string.lower() in ('1', 'yes', 'true', 'on'):
return True
elif string.lower() in ('0', 'no', 'false', 'off'):
return False
else:
raise OptionError(f'Invalid value {string!r} for option {optname}; use '
'1/0, yes/no, true/false, on/off')
def get_int_opt(options, optname, default=None):
"""As :func:`get_bool_opt`, but interpret the value as an integer."""
string = options.get(optname, default)
try:
return int(string)
except TypeError:
raise OptionError(f'Invalid type {string!r} for option {optname}; you '
'must give an integer value')
except ValueError:
raise OptionError(f'Invalid value {string!r} for option {optname}; you '
'must give an integer value')
def get_list_opt(options, optname, default=None):
"""
If the key `optname` from the dictionary `options` is a string,
split it at whitespace and return it. If it is already a list
or a tuple, it is returned as a list.
"""
val = options.get(optname, default)
if isinstance(val, str):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
else:
raise OptionError(f'Invalid type {val!r} for option {optname}; you '
'must give a list value')
def docstring_headline(obj):
if not obj.__doc__:
return ''
res = []
for line in obj.__doc__.strip().splitlines():
if line.strip():
res.append(" " + line.strip())
else:
break
return ''.join(res).lstrip()
def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
def shebang_matches(text, regex):
r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile(rf'^{regex}(\.(exe|cmd|bat|bin))?$', re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
def doctype_matches(text, regex):
"""Check if the doctype matches a regular expression (if present).
Note that this method only checks the first part of a DOCTYPE.
eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
"""
m = doctype_lookup_re.search(text)
if m is None:
return False
doctype = m.group(1)
return re.compile(regex, re.I).match(doctype.strip()) is not None
def html_doctype_matches(text):
"""Check if the file looks like it has a html doctype."""
return doctype_matches(text, r'html')
_looks_like_xml_cache = {}
def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.search(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
def surrogatepair(c):
"""Given a unicode character code with length greater than 16 bits,
return the two 16 bit surrogate pair.
"""
# From example D28 of:
# http://www.unicode.org/book/ch03.pdf
return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines)
def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst
|
OptionError
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 35763,
"end": 35900
}
|
class ____(BaseModel):
error: Optional[str] = Field(default=None, description="Description of the occurred error.")
|
ErrorResponseStatus
|
python
|
django__django
|
tests/modeladmin/test_checks.py
|
{
"start": 45021,
"end": 45847
}
|
class ____(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'max_num' must be an integer.",
"admin.E204",
invalid_obj=ValidationTestInline,
)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(TestModelAdmin, ValidationTestModel)
|
MaxNumCheckTests
|
python
|
scrapy__scrapy
|
tests/test_squeues_request.py
|
{
"start": 4115,
"end": 4383
}
|
class ____(TestRequestQueueBase):
is_fifo = False
@pytest.fixture
def q(self, crawler, tmp_path):
return MarshalLifoDiskQueue.from_crawler(
crawler=crawler, key=str(tmp_path / "marshal" / "lifo")
)
|
TestMarshalLifoDiskQueueRequest
|
python
|
spyder-ide__spyder
|
spyder/plugins/updatemanager/workers.py
|
{
"start": 8229,
"end": 8349
}
|
class ____(Exception):
"""Download for installer to update was cancelled."""
pass
|
UpdateDownloadCancelledException
|
python
|
pytest-dev__pytest
|
src/_pytest/fixtures.py
|
{
"start": 30117,
"end": 33030
}
|
class ____(LookupError):
"""Could not return a requested fixture (missing or invalid)."""
def __init__(
self, argname: str | None, request: FixtureRequest, msg: str | None = None
) -> None:
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self) -> FixtureLookupErrorRepr:
tblines: list[str] = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
# This function currently makes an assumption that a non-None msg means we
# have a non-empty `self.fixturestack`. This is currently true, but if
# somebody at some point want to extend the use of FixtureLookupError to
# new cases it might break.
# Add the assert to make it clearer to developer that this will fail, otherwise
# it crashes because `fspath` does not get set due to `stack` being empty.
assert self.msg is None or self.fixturestack, (
"formatrepr assumptions broken, rewrite it to handle it"
)
if msg is not None:
# The last fixture raise an error, let's present
# it at the requesting side.
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (OSError, IndexError, TypeError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno + 1))
else:
addline(f"file {fspath}, line {lineno + 1}")
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith("def"):
break
if msg is None:
fm = self.request._fixturemanager
available = set()
parent = self.request._pyfuncitem.parent
assert parent is not None
for name, fixturedefs in fm._arg2fixturedefs.items():
faclist = list(fm._matchfactories(fixturedefs, parent))
if faclist:
available.add(name)
if self.argname in available:
msg = (
f" recursive dependency involving fixture '{self.argname}' detected"
)
else:
msg = f"fixture '{self.argname}' not found"
msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
|
FixtureLookupError
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backend_tools.py
|
{
"start": 14230,
"end": 14524
}
|
class ____(AxisScaleBase):
"""Tool to toggle between linear and logarithmic scales on the X axis."""
description = 'Toggle scale X axis'
default_keymap = property(lambda self: mpl.rcParams['keymap.xscale'])
def set_scale(self, ax, scale):
ax.set_xscale(scale)
|
ToolXScale
|
python
|
ray-project__ray
|
rllib/core/rl_module/torch/torch_rl_module.py
|
{
"start": 8490,
"end": 12776
}
|
class ____(RLModule, nn.parallel.DistributedDataParallel):
def __init__(self, *args, **kwargs) -> None:
nn.parallel.DistributedDataParallel.__init__(self, *args, **kwargs)
# We do not want to call RLModule.__init__ here because all we need is
# the interface of that base-class not the actual implementation.
# RLModule.__init__(self, *args, **kwargs)
self.observation_space = self.unwrapped().observation_space
self.action_space = self.unwrapped().action_space
self.inference_only = self.unwrapped().inference_only
self.learner_only = self.unwrapped().learner_only
self.model_config = self.unwrapped().model_config
self.catalog = self.unwrapped().catalog
# Deprecated.
self.config = self.unwrapped().config
@override(RLModule)
def get_inference_action_dist_cls(self, *args, **kwargs) -> Type[TorchDistribution]:
return self.unwrapped().get_inference_action_dist_cls(*args, **kwargs)
@override(RLModule)
def get_exploration_action_dist_cls(
self, *args, **kwargs
) -> Type[TorchDistribution]:
return self.unwrapped().get_exploration_action_dist_cls(*args, **kwargs)
@override(RLModule)
def get_train_action_dist_cls(self, *args, **kwargs) -> Type[TorchDistribution]:
return self.unwrapped().get_train_action_dist_cls(*args, **kwargs)
@override(RLModule)
def get_initial_state(self) -> Any:
return self.unwrapped().get_initial_state()
@override(RLModule)
def is_stateful(self) -> bool:
return self.unwrapped().is_stateful()
@override(RLModule)
def _forward(self, *args, **kwargs):
return self.unwrapped()._forward(*args, **kwargs)
@override(RLModule)
def _forward_inference(self, *args, **kwargs) -> Dict[str, Any]:
return self.unwrapped()._forward_inference(*args, **kwargs)
@override(RLModule)
def _forward_exploration(self, *args, **kwargs) -> Dict[str, Any]:
return self.unwrapped()._forward_exploration(*args, **kwargs)
@override(RLModule)
def _forward_train(self, *args, **kwargs):
return self(*args, **kwargs)
@override(RLModule)
def get_state(self, *args, **kwargs):
return self.unwrapped().get_state(*args, **kwargs)
@override(RLModule)
def set_state(self, *args, **kwargs):
self.unwrapped().set_state(*args, **kwargs)
@override(RLModule)
def save_to_path(self, *args, **kwargs):
self.unwrapped().save_to_path(*args, **kwargs)
@override(RLModule)
def restore_from_path(self, *args, **kwargs):
self.unwrapped().restore_from_path(*args, **kwargs)
@override(RLModule)
def get_metadata(self, *args, **kwargs):
self.unwrapped().get_metadata(*args, **kwargs)
@override(RLModule)
def unwrapped(self) -> "RLModule":
return self.module
def compile_wrapper(rl_module: "TorchRLModule", compile_config: TorchCompileConfig):
"""A wrapper that compiles the forward methods of a TorchRLModule."""
# TODO(Artur): Remove this once our requirements enforce torch >= 2.0.0
# Check if torch framework supports torch.compile.
if (
torch is not None
and version.parse(torch.__version__) < TORCH_COMPILE_REQUIRED_VERSION
):
raise ValueError("torch.compile is only supported from torch 2.0.0")
compiled_forward_train = torch.compile(
rl_module._forward_train,
backend=compile_config.torch_dynamo_backend,
mode=compile_config.torch_dynamo_mode,
**compile_config.kwargs,
)
rl_module._forward_train = compiled_forward_train
compiled_forward_inference = torch.compile(
rl_module._forward_inference,
backend=compile_config.torch_dynamo_backend,
mode=compile_config.torch_dynamo_mode,
**compile_config.kwargs,
)
rl_module._forward_inference = compiled_forward_inference
compiled_forward_exploration = torch.compile(
rl_module._forward_exploration,
backend=compile_config.torch_dynamo_backend,
mode=compile_config.torch_dynamo_mode,
**compile_config.kwargs,
)
rl_module._forward_exploration = compiled_forward_exploration
return rl_module
|
TorchDDPRLModule
|
python
|
ray-project__ray
|
python/ray/dag/tests/experimental/test_torch_tensor_transport.py
|
{
"start": 3632,
"end": 6200
}
|
class ____:
"""Tests driver to worker tensor transport with CPU device."""
def create_and_execute_dag(self, actor, device, tensor_input, is_dict=False):
"""Create a DAG with tensor transport and execute it."""
with InputNode() as inp:
method = actor.echo_dict_device if is_dict else actor.echo_device
dag = method.bind(inp.with_tensor_transport(device=device))
compiled_dag = dag.experimental_compile()
return compiled_dag.execute(tensor_input)
def test_src_cpu_tensor_dst_cpu_node(self, ray_start_regular):
actor = Actor.remote()
ref = run_driver_to_worker_dag(actor, "cpu", torch.tensor([1]))
assert ray.get(ref) == "cpu"
@pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
def test_src_gpu_tensor_dst_cpu_node(self, ray_start_regular):
actor = Actor.remote()
ref = run_driver_to_worker_dag(actor, "cpu", torch.tensor([1], device="cuda"))
assert ray.get(ref) == "cpu"
@pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
def test_src_cpu_tensor_dst_gpu_node(self, ray_start_regular):
actor = Actor.options(num_gpus=1).remote()
ref = run_driver_to_worker_dag(actor, "cpu", torch.tensor([1]))
assert ray.get(ref) == "cpu"
@pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
def test_src_gpu_tensor_dst_gpu_node(self, ray_start_regular):
actor = Actor.options(num_gpus=1).remote()
ref = run_driver_to_worker_dag(actor, "cpu", torch.tensor([1], device="cuda"))
assert ray.get(ref) == "cpu"
@pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
def test_src_mix_tensors_dst_cpu_node(self, ray_start_regular):
actor = Actor.remote()
tensor_dict = {
"cpu_tensor": torch.tensor([1]),
"gpu_tensor": torch.tensor([1], device="cuda"),
}
ref = run_driver_to_worker_dag(actor, "cpu", tensor_dict, is_dict=True)
assert ray.get(ref) == {"cpu_tensor": "cpu", "gpu_tensor": "cpu"}
@pytest.mark.skipif(not USE_GPU, reason="Test requires GPU")
def test_src_mix_tensors_dst_gpu_node(self, ray_start_regular):
actor = Actor.options(num_gpus=1).remote()
tensor_dict = {
"cpu_tensor": torch.tensor([1]),
"gpu_tensor": torch.tensor([1], device="cuda"),
}
ref = run_driver_to_worker_dag(actor, "cpu", tensor_dict, is_dict=True)
assert ray.get(ref) == {"cpu_tensor": "cpu", "gpu_tensor": "cpu"}
|
TestDriverToWorkerDeviceCPU
|
python
|
django-extensions__django-extensions
|
django_extensions/management/commands/print_settings.py
|
{
"start": 310,
"end": 2704
}
|
class ____(BaseCommand):
help = "Print the active Django settings."
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"setting", nargs="*", help="Specifies setting to be printed."
)
parser.add_argument(
"-f",
"--fail",
action="store_true",
dest="fail",
help="Fail if invalid setting name is given.",
)
parser.add_argument(
"--format", default="simple", dest="format", help="Specifies output format."
)
parser.add_argument(
"--indent",
default=4,
dest="indent",
type=int,
help="Specifies indent level for JSON and YAML",
)
@signalcommand
def handle(self, *args, **options):
setting_names = options["setting"]
settings_dct = {k: getattr(settings, k) for k in dir(settings) if k.isupper()}
if setting_names:
settings_dct = {
key: value
for key, value in settings_dct.items()
if any(
fnmatch.fnmatchcase(key, setting_name)
for setting_name in setting_names
)
}
if options["fail"]:
for setting_name in setting_names:
if not any(
fnmatch.fnmatchcase(key, setting_name)
for key in settings_dct.keys()
):
raise CommandError("%s not found in settings." % setting_name)
output_format = options["format"]
indent = options["indent"]
if output_format == "json":
print(json.dumps(settings_dct, indent=indent))
elif output_format == "yaml":
import yaml # requires PyYAML
print(yaml.dump(settings_dct, indent=indent))
elif output_format == "pprint":
from pprint import pprint
pprint(settings_dct)
elif output_format == "text":
for key, value in settings_dct.items():
print("%s = %s" % (key, value))
elif output_format == "value":
for value in settings_dct.values():
print(value)
else:
for key, value in settings_dct.items():
print("%-40s = %r" % (key, value))
|
Command
|
python
|
falconry__falcon
|
tests/test_recipes.py
|
{
"start": 1381,
"end": 2106
}
|
class ____:
@pytest.mark.parametrize(
'recipe,expected_head',
[
('output_csv_text', '"fruit","quantity"\r\n"apples",13\r\n'),
('output_csv_stream', '"n","Fibonacci Fn"\r\n0,0\r\n1,1\r\n'),
],
ids=['simple', 'stream'],
)
def test_csv_output(self, asgi, app_kind, util, recipe, expected_head):
module = util.load_module(
recipe, parent_dir='examples/recipes', suffix=app_kind
)
app = util.create_app(asgi)
app.add_route('/report', module.Report())
result = falcon.testing.simulate_get(app, '/report')
assert result.status_code == 200
assert result.text.startswith(expected_head)
|
TestOutputCSV
|
python
|
modin-project__modin
|
modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py
|
{
"start": 9422,
"end": 12439
}
|
class ____(PandasOnRayDataframeVirtualPartition):
axis = 1
@ray.remote
def _deploy_ray_func(
deployer,
*positional_args,
axis,
f_to_deploy,
f_len_args,
f_kwargs,
extract_metadata=True,
**kwargs,
): # pragma: no cover
"""
Execute a function on an axis partition in a worker process.
This is ALWAYS called on either ``PandasDataframeAxisPartition.deploy_axis_func``
or ``PandasDataframeAxisPartition.deploy_func_between_two_axis_partitions``, which both
serve to deploy another dataframe function on a Ray worker process. The provided `positional_args`
contains positional arguments for both: `deployer` and for `f_to_deploy`, the parameters can be separated
using the `f_len_args` value. The parameters are combined so they will be deserialized by Ray before the
kernel is executed (`f_kwargs` will never contain more Ray objects, and thus does not require deserialization).
Parameters
----------
deployer : callable
A `PandasDataFrameAxisPartition.deploy_*` method that will call ``f_to_deploy``.
*positional_args : list
The first `f_len_args` elements in this list represent positional arguments
to pass to the `f_to_deploy`. The rest are positional arguments that will be
passed to `deployer`.
axis : {0, 1}
The axis to perform the function along. This argument is keyword only.
f_to_deploy : callable or RayObjectID
The function to deploy. This argument is keyword only.
f_len_args : int
Number of positional arguments to pass to ``f_to_deploy``. This argument is keyword only.
f_kwargs : dict
Keyword arguments to pass to ``f_to_deploy``. This argument is keyword only.
extract_metadata : bool, default: True
Whether to return metadata (length, width, ip) of the result. Passing `False` may relax
the load on object storage as the remote function would return 4 times fewer futures.
Passing `False` makes sense for temporary results where you know for sure that the
metadata will never be requested. This argument is keyword only.
**kwargs : dict
Keyword arguments to pass to ``deployer``.
Returns
-------
list : Union[tuple, list]
The result of the function call, and metadata for it.
Notes
-----
Ray functions are not detected by codecov (thus pragma: no cover).
"""
f_args = positional_args[:f_len_args]
deploy_args = positional_args[f_len_args:]
result = deployer(axis, f_to_deploy, f_args, f_kwargs, *deploy_args, **kwargs)
if not extract_metadata:
for item in result:
yield item
else:
ip = get_node_ip_address()
for r in result:
if isinstance(r, pandas.DataFrame):
for item in [r, len(r), len(r.columns), ip]:
yield item
else:
for item in [r, None, None, ip]:
yield item
|
PandasOnRayDataframeRowPartition
|
python
|
apache__airflow
|
providers/amazon/docs/aws/links/ec2.py
|
{
"start": 1198,
"end": 1712
}
|
class ____(BaseAwsLink):
"""
Helper class for constructing Amazon EC2 console links.
This is useful for displaying the list of EC2 instances, rather
than a single instance.
"""
name = "EC2 Instances"
key = "_instance_dashboard"
format_str = BASE_AWS_CONSOLE_LINK + "/ec2/home?region={region_name}#Instances:instanceId=:{instance_ids}"
@staticmethod
def format_instance_id_filter(instance_ids: list[str]) -> str:
return ",:".join(instance_ids)
|
EC2InstanceDashboardLink
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/tryceratops/TRY003.py
|
{
"start": 0,
"end": 464
}
|
class ____(Exception):
pass
def func():
a = 1
if a == 1:
raise CustomException("Long message")
elif a == 2:
raise CustomException("Short") # This is acceptable
elif a == 3:
raise CustomException("its_code_not_message") # This is acceptable
def ignore():
try:
a = 1
except Exception as ex:
# This is another violation, but this specific analyzer shouldn't care
raise ex
|
CustomException
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/nn_ops/embedding_ops_test.py
|
{
"start": 9495,
"end": 27853
}
|
class ____(test.TestCase):
# This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since
# both the ids are in the first shard, one of the resulting lookup
# vector is going to be empty. The subsequent DivOp fails because of that.
# TODO(keveman): Disabling the test until the underlying problem is fixed.
@test_util.run_deprecated_v1
def testSimpleSharded(self):
with self.cached_session():
num_shards = 2
vocab_size = 4
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testMaxNorm(self):
with self.cached_session():
embeddings = constant_op.constant([[2.0]])
ids = constant_op.constant([0], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding, [[1.0]])
@test_util.run_deprecated_v1
def testMaxNormNontrivial(self):
with self.cached_session():
embeddings = constant_op.constant([[2.0, 4.0], [3.0, 1.0]])
ids = constant_op.constant([0, 1], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=2.0)
norms = math_ops.sqrt(
math_ops.reduce_sum(embeddings * embeddings, axis=1))
normalized = embeddings / array_ops_stack.stack([norms, norms], axis=1)
self.assertAllClose(embedding, 2 * self.evaluate(normalized))
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedVariable(self):
with self.cached_session() as sess:
num_shards = 2
vocab_size = 4
p, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
self.evaluate(variables.global_variables_initializer())
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))
# Actual test
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedResourceVariable(self):
with self.cached_session() as sess:
num_shards = 2
vocab_size = 4
p, p_variable, params, _ = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size, use_resource=True)
id_vals = np.array([0, 0])
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
self.evaluate(variables.global_variables_initializer())
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))
# Actual test
print(ops.get_default_graph().as_graph_def())
tf_result = self.evaluate(embedding)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedModPartitioningInt32Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedModPartitioningInt64Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(p, ids)
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt32Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt32IdsPartitionedVariable(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
_, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
self.evaluate(variables.global_variables_initializer())
embedding = embedding_ops.embedding_lookup(
p_variable, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningInt64Ids(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
self.assertShapeEqual(np_result, embedding)
@test_util.run_deprecated_v1
def testShardedDivPartitioningUnknownParamShape(self):
with self.cached_session():
num_shards = 5
vocab_size = 13
# Embedding dimensions is 10. The vocab_size x 10 embedding
# parameters are spread in num_shards matrices, so the first
# 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
# We clear parameter shapes, to test when shape is not statically known.
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, use_shapeless_placeholder=True)
num_vals = 30
# Fetch num_vals embeddings for random word ids. Since
# num_vals > vocab_size, this ought to have repetitions, so
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
embedding = embedding_ops.embedding_lookup(
p, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
np_result, _, _ = _EmbeddingResult(
params, id_vals, num_shards, vocab_size, partition_strategy="div")
self.assertAllEqual(np_result, tf_result)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookup(self):
vocab_size = 9
num_ids = 10
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf_logging.vlog(1, id_vals)
for ids_shape in [(10,), (2, 5)]:
for num_shards in [1, 3]:
with self.cached_session():
ids = constant_op.constant(
id_vals, shape=ids_shape, dtype=dtypes.int32)
x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
y = embedding_ops.embedding_lookup(x, ids)
y_shape = ids_shape + tuple(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradientsEmbeddingLookupWithComputedParams(self):
vocab_size = 9
num_ids = 5
id_vals = list(np.random.randint(vocab_size, size=num_ids))
tf_logging.vlog(1, id_vals)
for num_shards in [1, 3]:
with self.cached_session():
ids = constant_op.constant(id_vals, dtype=dtypes.int32)
x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
# This will force a conversion from IndexedSlices to Tensor.
x_squared = [math_ops.square(elem) for elem in x]
y = embedding_ops.embedding_lookup(x_squared, ids)
y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-3)
def testConstructionNonSharded(self):
with ops.Graph().as_default():
p = variables.Variable(
array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
ids = constant_op.constant([0, 1, 1, 7], dtype=dtypes.int32)
embedding_ops.embedding_lookup([p], ids)
def testConstructionSharded(self):
with ops.Graph().as_default():
p = []
for _ in range(2):
p += [
variables.Variable(
array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
]
ids = constant_op.constant([0, 1, 1, 17], dtype=dtypes.int32)
embedding_ops.embedding_lookup(p, ids)
@test_util.run_deprecated_v1
def testHigherRank(self):
np.random.seed(8)
with self.cached_session():
for params_shape in (12,), (6, 3):
params = np.random.randn(*params_shape)
for ids_shape in (3, 2), (4, 3):
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(params, ids)
self.assertAllEqual(simple, array_ops.gather(params, ids))
# Run a few random sharded versions
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in range(procs)
]
sharded = embedding_ops.embedding_lookup(split_params, ids)
self.assertAllEqual(simple, sharded)
@test_util.run_deprecated_v1
def testHigherRankMaxNorm(self):
np.random.seed(8)
with self.cached_session():
for params_shape in (12,), (6, 3), (6, 2, 3):
# Test embedding rank 0, 1, 2.
# Note: the first dimension must be a common multiple of procs below.
params = 2 * np.ones(params_shape)
params_norm = params / np.sqrt(
np.sum(
params * params, tuple(range(params.ndim)[1:]), keepdims=True))
for ids_shape in (), (3), (4, 3), (2, 3, 4):
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape,
dtype=np.int64)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(params, ids, max_norm=1.0)
# assertAllClose is used here as different implementations of sqrt may
# be used to compute each of the values being compared. For example,
# on AVX512 builds the embedding operation makes use of Eigen's fast
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, array_ops.gather(params_norm, ids))
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in range(procs)
]
sharded = embedding_ops.embedding_lookup(
split_params, ids, max_norm=1.0)
self.assertAllEqual(simple, sharded)
@test_util.run_deprecated_v1
def testTransform(self):
# This tests all combinations of:
# - ids rank 0, 1, >1
# - params sharded/unsharded
# It always applies max_norm.
np.random.seed(8)
l2_norm = 2.
with self.cached_session():
# Param values are in [l2_norm, l2_norm+1) so it will always clip.
params = np.random.rand(6, 3) + l2_norm
params_norm = l2_norm * params / np.sqrt(
np.sum(params * params, axis=1, keepdims=True))
# Compute the norm of each embedding. This will change the embedding
# rank to 0.
params_norm = np.linalg.norm(params_norm, axis=1)
transform = lambda x: linalg_ops.norm(x, axis=1)
for ids_shape in (), (3), (4, 3), (2, 3, 4):
# Test ids rank 0, 1, 2, 3.
ids = np.random.randint(
params.shape[0], size=np.prod(ids_shape,
dtype=np.int64)).reshape(ids_shape)
# Compare nonsharded to gather.
simple = embedding_ops._embedding_lookup_and_transform(
params, ids, max_norm=l2_norm, transform_fn=transform)
self.assertAllClose(simple, array_ops.gather(params_norm, ids))
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
split_params = [
array_ops.gather(params, stride + p) for p in range(procs)
]
sharded = embedding_ops._embedding_lookup_and_transform(
split_params, ids, max_norm=l2_norm, transform_fn=transform)
# assertAllClose is used here as different implementations of sqrt may
# be used to compute each of the values being compared. For example,
# on AVX512 builds the embedding operation makes use of Eigen's fast
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, sharded)
def testRaggedMaxNorm(self):
embeddings = constant_op.constant([[2.0]])
ids = ragged_factory_ops.constant([[0, 0], [0]], dtype=dtypes.int32)
embedding = embedding_ops.embedding_lookup([embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding, [[[1.0], [1.0]], [[1.0]]])
# TODO(philipphack): Consider moving this test to
# tensorflow/python/ops/embedding_ops_test.py
|
EmbeddingLookupTest
|
python
|
pandas-dev__pandas
|
pandas/core/indexing.py
|
{
"start": 89524,
"end": 91209
}
|
class ____(_ScalarAccessIndexer):
_takeable = False
def _convert_key(self, key):
"""
Require they keys to be the same type as the index. (so we don't
fallback)
"""
# GH 26989
# For series, unpacking key needs to result in the label.
# This is already the case for len(key) == 1; e.g. (1,)
if self.ndim == 1 and len(key) > 1:
key = (key,)
return key
@property
def _axes_are_unique(self) -> bool:
# Only relevant for self.ndim == 2
assert self.ndim == 2
return self.obj.index.is_unique and self.obj.columns.is_unique
def __getitem__(self, key):
if self.ndim == 2 and not self._axes_are_unique:
# GH#33041 fall back to .loc
if not isinstance(key, tuple) or not all(is_scalar(x) for x in key):
raise ValueError("Invalid call for scalar access (getting)!")
return self.obj.loc[key]
return super().__getitem__(key)
def __setitem__(self, key, value) -> None:
if not CHAINED_WARNING_DISABLED:
if sys.getrefcount(self.obj) <= REF_COUNT_IDX:
warnings.warn(
_chained_assignment_msg, ChainedAssignmentError, stacklevel=2
)
if self.ndim == 2 and not self._axes_are_unique:
# GH#33041 fall back to .loc
if not isinstance(key, tuple) or not all(is_scalar(x) for x in key):
raise ValueError("Invalid call for scalar access (setting)!")
self.obj.loc[key] = value
return
return super().__setitem__(key, value)
@doc(IndexingMixin.iat)
|
_AtIndexer
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/ruff/RUF023.py
|
{
"start": 1051,
"end": 1452
}
|
class ____:
__slots__ = (
"d0",
"c0", # a comment regarding 'c0'
"b0",
# a comment regarding 'a0':
"a0"
)
__slots__ = [
"d",
"c", # a comment regarding 'c'
"b",
# a comment regarding 'a':
"a"
]
##################################
# Messier multiline definitions...
##################################
|
Klass3
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 308896,
"end": 309599
}
|
class ____(sgqlc.types.Input):
"""Parameters to be used for the tag_name_pattern rule"""
__schema__ = github_schema
__field_names__ = ("name", "negate", "operator", "pattern")
name = sgqlc.types.Field(String, graphql_name="name")
"""How this rule will appear to users."""
negate = sgqlc.types.Field(Boolean, graphql_name="negate")
"""If true, the rule will fail if the pattern matches."""
operator = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="operator")
"""The operator to use for matching."""
pattern = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="pattern")
"""The pattern to match with."""
|
TagNamePatternParametersInput
|
python
|
doocs__leetcode
|
solution/0300-0399/0332.Reconstruct Itinerary/Solution.py
|
{
"start": 0,
"end": 361
}
|
class ____:
def findItinerary(self, tickets: List[List[str]]) -> List[str]:
def dfs(f: str):
while g[f]:
dfs(g[f].pop())
ans.append(f)
g = defaultdict(list)
for f, t in sorted(tickets, reverse=True):
g[f].append(t)
ans = []
dfs("JFK")
return ans[::-1]
|
Solution
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_vendor/distlib/metadata.py
|
{
"start": 958,
"end": 9087
}
|
class ____(DistlibException):
"""A metadata value is invalid"""
# public API of this module
__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# preferred version. Hopefully will be changed
# to 1.2 once PEP 345 is supported everywhere
PKG_INFO_PREFERRED_VERSION = '1.1'
_LINE_PREFIX_1_2 = re.compile('\n \\|')
_LINE_PREFIX_PRE_1_2 = re.compile('\n ')
_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Summary', 'Description', 'Keywords', 'Home-page',
'Author', 'Author-email', 'License')
_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes',
'Provides', 'Requires')
_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', 'Download-URL')
_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External')
_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Obsoletes-Dist', 'Requires-External',
'Maintainer', 'Maintainer-email', 'Project-URL')
_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External', 'Private-Version', 'Obsoleted-By', 'Setup-Requires-Dist',
'Extension', 'Provides-Extra')
_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension')
# See issue #106: Sometimes 'Requires' and 'Provides' occur wrongly in
# the metadata. Include them in the tuple literal below to allow them
# (for now).
# Ditto for Obsoletes - see issue #140.
_566_FIELDS = _426_FIELDS + ('Description-Content-Type', 'Requires', 'Provides', 'Obsoletes')
_566_MARKERS = ('Description-Content-Type', )
_643_MARKERS = ('Dynamic', 'License-File')
_643_FIELDS = _566_FIELDS + _643_MARKERS
_ALL_FIELDS = set()
_ALL_FIELDS.update(_241_FIELDS)
_ALL_FIELDS.update(_314_FIELDS)
_ALL_FIELDS.update(_345_FIELDS)
_ALL_FIELDS.update(_426_FIELDS)
_ALL_FIELDS.update(_566_FIELDS)
_ALL_FIELDS.update(_643_FIELDS)
EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
def _version2fieldlist(version):
    """Return the tuple of header-field names defined for *version*.

    Raises ``ValueError`` for the withdrawn 2.0 spec and
    ``MetadataUnrecognizedVersionError`` for any unknown version string.
    """
    if version == '2.0':
        # Metadata 2.0 (PEP 426) never shipped; refuse it explicitly.
        raise ValueError('Metadata 2.0 is withdrawn and not supported')
    if version in ('1.3', '2.1'):
        # 1.3/2.1 extend the 1.2 base; append only the fields that are
        # not already present to avoid duplicate names.
        additions = tuple(f for f in _566_FIELDS if f not in _345_FIELDS)
        return _345_FIELDS + additions
    direct = {
        '1.0': _241_FIELDS,
        '1.1': _314_FIELDS,
        '1.2': _345_FIELDS,
        '2.2': _643_FIELDS,
    }
    if version in direct:
        return direct[version]
    raise MetadataUnrecognizedVersionError(version)
def _best_version(fields):
    """Detect the best version depending on the fields used."""
    def _has_marker(keys, markers):
        # True when any version-specific marker field is present.
        return any(marker in keys for marker in markers)
    # Only consider fields that are actually set (not empty/UNKNOWN/None).
    keys = [key for key, value in fields.items() if value not in ([], 'UNKNOWN', None)]
    possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.1', '2.2']  # 2.0 removed
    # first let's try to see if a field is not part of one of the version:
    # each unknown-to-a-version field eliminates that version as a candidate.
    for key in keys:
        if key not in _241_FIELDS and '1.0' in possible_versions:
            possible_versions.remove('1.0')
            logger.debug('Removed 1.0 due to %s', key)
        if key not in _314_FIELDS and '1.1' in possible_versions:
            possible_versions.remove('1.1')
            logger.debug('Removed 1.1 due to %s', key)
        if key not in _345_FIELDS and '1.2' in possible_versions:
            possible_versions.remove('1.2')
            logger.debug('Removed 1.2 due to %s', key)
        if key not in _566_FIELDS and '1.3' in possible_versions:
            possible_versions.remove('1.3')
            logger.debug('Removed 1.3 due to %s', key)
        if key not in _566_FIELDS and '2.1' in possible_versions:
            if key != 'Description':  # In 2.1, description allowed after headers
                possible_versions.remove('2.1')
                logger.debug('Removed 2.1 due to %s', key)
        if key not in _643_FIELDS and '2.2' in possible_versions:
            possible_versions.remove('2.2')
            logger.debug('Removed 2.2 due to %s', key)
        # if key not in _426_FIELDS and '2.0' in possible_versions:
        #     possible_versions.remove('2.0')
        #     logger.debug('Removed 2.0 due to %s', key)
    # possible_version contains qualified versions
    if len(possible_versions) == 1:
        return possible_versions[0]  # found !
    elif len(possible_versions) == 0:
        logger.debug('Out of options - unknown metadata set: %s', fields)
        raise MetadataConflictError('Unknown metadata set')
    # let's see if one unique marker is found
    is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
    is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
    is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS)
    # is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
    is_2_2 = '2.2' in possible_versions and _has_marker(keys, _643_MARKERS)
    # More than one marker set matched: the metadata mixes incompatible specs.
    if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_2) > 1:
        raise MetadataConflictError('You used incompatible 1.1/1.2/2.1/2.2 fields')
    # we have the choice, 1.0, or 1.2, 2.1 or 2.2
    #   - 1.0 has a broken Summary field but works with all tools
    #   - 1.1 is to avoid
    #   - 1.2 fixes Summary but has little adoption
    #   - 2.1 adds more features
    #   - 2.2 is the latest
    if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_2:
        # we couldn't find any specific marker
        if PKG_INFO_PREFERRED_VERSION in possible_versions:
            return PKG_INFO_PREFERRED_VERSION
    if is_1_1:
        return '1.1'
    if is_1_2:
        return '1.2'
    if is_2_1:
        return '2.1'
    # if is_2_2:
    #     return '2.2'
    # Fallthrough: either the 2.2 marker matched, or no marker matched and the
    # preferred version was already eliminated — default to the latest spec.
    return '2.2'
# This follows the rules about transforming keys as described in
# https://www.python.org/dev/peps/pep-0566/#id17
_ATTR2FIELD = {name.lower().replace("-", "_"): name for name in _ALL_FIELDS}
_FIELD2ATTR = {field: attr for attr, field in _ATTR2FIELD.items()}
# Field groupings consumed elsewhere in the module (NOTE(review): their exact
# parsing/serialisation behaviour is implemented outside this chunk).
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
_VERSIONS_FIELDS = ('Requires-Python', )
_VERSION_FIELDS = ('Version', )
# Fields that may occur multiple times and are therefore stored as lists.
_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', 'Requires', 'Provides', 'Obsoletes-Dist', 'Provides-Dist',
               'Requires-Dist', 'Requires-External', 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
               'Provides-Extra', 'Extension', 'License-File')
_LISTTUPLEFIELDS = ('Project-URL', )
_ELEMENTSFIELD = ('Keywords', )
_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
# Sentinel used to distinguish "argument not supplied" from an explicit None.
_MISSING = object()
# Runs of characters that are unsafe in a filename (see _get_name_and_version).
_FILESAFE = re.compile('[^A-Za-z0-9.]+')
def _get_name_and_version(name, version, for_filename=False):
    """Combine *name* and *version* into a single ``name-version`` string.

    When *for_filename* is true, both parts are escaped for use in a
    filename: runs of characters outside ``[A-Za-z0-9.]`` collapse to a
    single ``'-'``, and spaces in the version become ``'.'`` first.
    """
    safe_name = name
    safe_version = version
    if for_filename:
        safe_name = _FILESAFE.sub('-', name)
        safe_version = _FILESAFE.sub('-', version.replace(' ', '.'))
    return '{}-{}'.format(safe_name, safe_version)
|
MetadataInvalidError
|
python
|
pytorch__pytorch
|
test/quantization/jit/test_quantize_jit.py
|
{
"start": 129821,
"end": 143773
}
|
class ____(QuantizationTestCase):
    """Cross-checks graph-mode (TorchScript) quantization against eager mode.

    Each test builds an eager-mode annotated model and an equivalent plain
    model, copies the weights so both start identical, quantizes one with
    ``quantize``/``quantize_dynamic`` (eager) and the other with
    ``quantize_jit``/``quantize_dynamic_jit`` (graph), and asserts both
    quantized models produce the same outputs on the calibration data.
    """

    @override_qengines
    def test_single_linear(self):
        r"""Compare the result of quantizing single linear layer in
        eager mode and graph mode
        """
        # eager mode
        annotated_linear_model = AnnotatedSingleLayerLinearModel(
            torch.backends.quantized.engine
        ).eval()
        linear_model = SingleLayerLinearModel().eval()
        # copy the weight from eager mode so that we can
        # compare the result of the two quantized models later
        linear_model.fc1.weight = torch.nn.Parameter(
            annotated_linear_model.fc1.module.weight.detach()
        )
        linear_model.fc1.bias = torch.nn.Parameter(
            annotated_linear_model.fc1.module.bias.detach()
        )
        model_eager = quantize(
            annotated_linear_model, test_only_eval_fn, [self.calib_data]
        )
        qconfig_dict = {"": get_default_qconfig(torch.backends.quantized.engine)}
        model_traced = torch.jit.trace(linear_model, self.calib_data[0][0])
        model_script = torch.jit.script(linear_model)
        result_eager = model_eager(self.calib_data[0][0])
        # Both the traced and the scripted graph must match eager mode.
        for model_under_test in [model_traced, model_script]:
            model_quantized = quantize_jit(
                model_under_test,
                qconfig_dict,
                test_only_eval_fn,
                [self.calib_data],
                inplace=False,
            )
            self.assertEqual(model_quantized(self.calib_data[0][0]), result_eager)

    @skipIfNoFBGEMM
    def test_observer_with_ignored_function(self):
        r"""Test observers with ignored function and make sure it works in
        graph mode
        """
        # eager mode
        annotated_linear_model = AnnotatedSingleLayerLinearModel("fbgemm").eval()
        # Exercise the three common observer combinations.
        for qconfig in [
            QConfig(activation=default_observer, weight=default_weight_observer),
            QConfig(
                activation=default_histogram_observer, weight=default_weight_observer
            ),
            QConfig(
                activation=default_observer, weight=default_per_channel_weight_observer
            ),
        ]:
            annotated_linear_model.qconfig = qconfig
            linear_model = SingleLayerLinearModel().eval()
            # copy the weight from eager mode so that we can
            # compare the result of the two quantized models later
            linear_model.fc1.weight = torch.nn.Parameter(
                annotated_linear_model.fc1.module.weight.detach()
            )
            linear_model.fc1.bias = torch.nn.Parameter(
                annotated_linear_model.fc1.module.bias.detach()
            )
            model_eager = quantize(
                annotated_linear_model, test_only_eval_fn, [self.calib_data]
            )
            qconfig_dict = {"": qconfig}
            model_traced = torch.jit.trace(linear_model, self.calib_data[0][0])
            model_script = torch.jit.script(linear_model)
            result_eager = model_eager(self.calib_data[0][0])
            for model_under_test in [model_traced, model_script]:
                model_quantized = quantize_jit(
                    model_under_test,
                    qconfig_dict,
                    test_only_eval_fn,
                    [self.calib_data],
                    inplace=False,
                )
                self.assertEqual(model_quantized(self.calib_data[0][0]), result_eager)

    @override_qengines
    def test_conv(self):
        r"""Compare the result of quantizing conv layer in
        eager mode and graph mode
        """
        # eager mode
        annotated_conv_model = AnnotatedConvModel(
            torch.backends.quantized.engine
        ).eval()
        conv_model = ConvModel().eval()
        # copy the weight from eager mode so that we can
        # compare the result of the two quantized models later
        conv_model.conv.weight = torch.nn.Parameter(
            annotated_conv_model.conv.weight.detach()
        )
        model_eager = quantize(
            annotated_conv_model, test_only_eval_fn, [self.img_data_2d]
        )
        qconfig_dict = {"": get_default_qconfig(torch.backends.quantized.engine)}
        model_traced = torch.jit.trace(conv_model, self.img_data_2d[0][0])
        model_script = torch.jit.script(conv_model)
        result_eager = model_eager(self.img_data_2d[0][0])
        for model_under_test in [model_traced, model_script]:
            model_quantized = quantize_jit(
                model_under_test,
                qconfig_dict,
                test_only_eval_fn,
                [self.img_data_2d],
                inplace=False,
            )
            self.assertEqual(model_quantized(self.img_data_2d[0][0]), result_eager)

    @override_qengines
    def test_conv_transpose(self):
        r"""Compare the result of quantizing conv_transpose layer in
        eager mode and graph mode
        """
        if not qengine_is_qnnpack():
            return  # Currently only qnnpack is supported
        # eager mode
        annotated_conv_model = AnnotatedConvTransposeModel(
            torch.backends.quantized.engine
        ).eval()
        conv_model = ConvTransposeModel().eval()
        # copy the weight from eager mode so that we can
        # compare the result of the two quantized models later
        conv_model.conv.weight = torch.nn.Parameter(
            annotated_conv_model.conv.weight.detach()
        )
        model_eager = quantize(
            annotated_conv_model, test_only_eval_fn, [self.img_data_2d]
        )
        qconfig_dict = {"": get_default_qconfig(torch.backends.quantized.engine)}
        model_traced = torch.jit.trace(conv_model, self.img_data_2d[0][0])
        model_script = torch.jit.script(conv_model)
        result_eager = model_eager(self.img_data_2d[0][0])
        for model_under_test in [model_traced, model_script]:
            model_quantized = quantize_jit(
                model_under_test,
                qconfig_dict,
                test_only_eval_fn,
                [self.img_data_2d],
                inplace=False,
            )
            self.assertEqual(model_quantized(self.img_data_2d[0][0]), result_eager)

    @override_qengines
    def test_conv_bn(self):
        r"""Compare the result of quantizing conv + bn layer in
        eager mode and graph mode
        """
        # eager mode
        conv_model = AnnotatedConvBnModel().eval()
        conv_model_to_script = ConvBnModel().eval()
        # copy the weight from eager mode so that we can
        # compare the result of the two quantized models later
        conv_model_to_script.conv.weight = torch.nn.Parameter(
            conv_model.conv.weight.detach()
        )
        # Fuse conv+bn in the eager model; graph mode fuses during quantize_jit.
        fuse_modules(conv_model, ["conv", "bn"], inplace=True)
        model_eager = quantize(conv_model, test_only_eval_fn, [self.img_data_2d])
        qconfig_dict = {"": default_qconfig}
        model_script = quantize_jit(
            torch.jit.script(conv_model_to_script),
            qconfig_dict,
            test_only_eval_fn,
            [self.img_data_2d],
            inplace=False,
        )
        result_eager = model_eager(self.img_data_2d[0][0])
        result_script = model_script(self.img_data_2d[0][0])
        self.assertEqual(result_eager, result_script)

    @override_qengines
    def test_nested(self):
        # Nested submodules with per-submodule qconfigs: only sub2.fc1 and fc3
        # are quantized; everything else stays float.
        # Eager mode
        eager_model = AnnotatedNestedModel(torch.backends.quantized.engine).eval()
        # Graph mode
        script_model = NestedModel().eval()
        # Copy weights for eager_model
        script_model.sub1.fc.weight = torch.nn.Parameter(
            eager_model.sub1.fc.weight.detach()
        )
        script_model.sub1.fc.bias = torch.nn.Parameter(
            eager_model.sub1.fc.bias.detach()
        )
        script_model.sub2.fc1.weight = torch.nn.Parameter(
            eager_model.sub2.fc1.module.weight.detach()
        )
        script_model.sub2.fc1.bias = torch.nn.Parameter(
            eager_model.sub2.fc1.module.bias.detach()
        )
        script_model.sub2.fc2.weight = torch.nn.Parameter(
            eager_model.sub2.fc2.weight.detach()
        )
        script_model.sub2.fc2.bias = torch.nn.Parameter(
            eager_model.sub2.fc2.bias.detach()
        )
        script_model.fc3.weight = torch.nn.Parameter(
            eager_model.fc3.module.weight.detach()
        )
        script_model.fc3.bias = torch.nn.Parameter(eager_model.fc3.module.bias.detach())
        model_eager = quantize(eager_model, test_only_eval_fn, [self.calib_data])
        qconfig_dict = {
            "sub2.fc1": default_per_channel_qconfig
            if qengine_is_fbgemm()
            else default_qconfig,
            "fc3": default_qconfig,
        }
        model_traced = torch.jit.trace(script_model, self.calib_data[0][0])
        model_script = torch.jit.script(script_model)
        result_eager = model_eager(self.calib_data[0][0])
        for model_under_test in [model_traced, model_script]:
            model_quantized = quantize_jit(
                model_under_test,
                qconfig_dict,
                test_only_eval_fn,
                [self.calib_data],
                inplace=False,
            )
            self.assertEqual(model_quantized(self.calib_data[0][0]), result_eager)

    @override_qengines
    def test_skip_quant(self):
        """Test None qconfig"""
        # Eager mode
        eager_model = AnnotatedSkipQuantModel(torch.backends.quantized.engine).eval()
        # Graph mode
        script_model = SkipQuantModel().eval()
        # Copy weights for eager_model
        script_model.sub.fc1.weight = torch.nn.Parameter(
            eager_model.sub.module.fc1.weight.detach()
        )
        script_model.sub.fc1.bias = torch.nn.Parameter(
            eager_model.sub.module.fc1.bias.detach()
        )
        script_model.sub.fc2.weight = torch.nn.Parameter(
            eager_model.sub.module.fc2.weight.detach()
        )
        script_model.sub.fc2.bias = torch.nn.Parameter(
            eager_model.sub.module.fc2.bias.detach()
        )
        script_model.fc.weight = torch.nn.Parameter(eager_model.fc.weight.detach())
        script_model.fc.bias = torch.nn.Parameter(eager_model.fc.bias.detach())
        eager_model.fuse_modules()
        model_eager = quantize(eager_model, test_only_eval_fn, [self.calib_data])
        # "fc": None disables quantization for the top-level fc module.
        qconfig_dict = {
            "": get_default_qconfig(torch.backends.quantized.engine),
            "fc": None,
        }
        model_traced = torch.jit.trace(script_model, self.calib_data[0][0])
        model_script = torch.jit.script(script_model)
        result_eager = model_eager(self.calib_data[0][0])
        for model_under_test in [model_traced, model_script]:
            model_quantized = quantize_jit(
                model_under_test,
                qconfig_dict,
                test_only_eval_fn,
                [self.calib_data],
                inplace=False,
            )
            self.assertEqual(model_quantized(self.calib_data[0][0]), result_eager)

    @override_qengines
    def test_single_linear_dynamic(self):
        r"""Compare the result of dynamic quantization of single linear layer in
        eager mode and graph mode.
        """
        if qengine_is_qnnpack():
            # eager mode
            annotated_linear_model = AnnotatedSingleLayerLinearModel("qnnpack").eval()
            linear_model = SingleLayerLinearModel().eval()
            # copy the weight from eager mode so that we can
            # compare the result of the two quantized models later
            linear_model.fc1.weight = torch.nn.Parameter(
                annotated_linear_model.fc1.module.weight.detach()
            )
            linear_model.fc1.bias = torch.nn.Parameter(
                annotated_linear_model.fc1.module.bias.detach()
            )
            qconfig_dict = {"": default_dynamic_qconfig}
            model_eager = quantize_dynamic(annotated_linear_model, qconfig_dict)
            model_traced = torch.jit.trace(linear_model, self.calib_data[0][0])
            model_script = torch.jit.script(linear_model)
            result_eager = model_eager(self.calib_data[0][0])
            for model_under_test in [model_traced, model_script]:
                model_quantized = quantize_dynamic_jit(model_under_test, qconfig_dict)
                self.assertEqual(model_quantized(self.calib_data[0][0]), result_eager)
                # Check to make sure choose_qparams->quant->dequant->linear is numerically
                # equivalent to the final quantized model.
                model_fake_quantized = quantize_dynamic_jit(
                    model_under_test, qconfig_dict, debug=True
                )
                self.assertEqual(
                    model_fake_quantized(self.calib_data[0][0]), result_eager
                )

    @skipIfNoFBGEMM
    def test_linear_dynamic_fp16(self):
        linear_model = SingleLayerLinearModel().eval()
        # Create weight tensor values that are beyond fp16 max
        x = torch.ones(5, 5) * 65532
        linear_model.fc1.weight = torch.nn.Parameter(x)
        import warnings

        model_eager = quantize_dynamic(linear_model, dtype=torch.float16)
        result_eager = model_eager(self.calib_data[0][0])
        # NOTE(review): only the traced path is exercised here.
        for trace in [True]:
            with warnings.catch_warnings(record=True) as w:
                quantized_model = self.checkGraphModeOp(
                    linear_model,
                    self.calib_data[0][0],
                    "quantized::linear_dynamic_fp16",
                    tracing=trace,
                    dynamic=True,
                    qconfig=float16_dynamic_qconfig,
                )
            # compare result with eager mode
            self.assertEqual(quantized_model(self.calib_data[0][0]), result_eager)
# This file is not meant to be executed directly; the suite is driven by
# test/test_quantization.py.
if __name__ == "__main__":
    raise_on_run_directly("test/test_quantization.py")
|
TestQuantizeJit
|
python
|
pytorch__pytorch
|
test/distributions/test_distributions.py
|
{
"start": 265675,
"end": 274340
}
|
class ____(DistributionsTestCase):
    """Compares torch.distributions statistics against scipy.stats.

    ``setUp`` builds a list of (torch distribution, equivalent scipy frozen
    distribution) pairs over random double-precision parameters; each test
    then checks mean, variance/stddev, cdf or icdf agreement pairwise.
    """

    def setUp(self):
        super().setUp()
        # Random parameter tensors reused across the pairs below:
        # positive scales, unconstrained locations, a probability simplex,
        # and a random positive-definite covariance matrix.
        positive_var = torch.randn(20, dtype=torch.double).exp()
        positive_var2 = torch.randn(20, dtype=torch.double).exp()
        random_var = torch.randn(20, dtype=torch.double)
        simplex_tensor = softmax(torch.randn(20, dtype=torch.double), dim=-1)
        cov_tensor = torch.randn(20, 20, dtype=torch.double)
        cov_tensor = cov_tensor @ cov_tensor.mT
        self.distribution_pairs = [
            (Bernoulli(simplex_tensor), scipy.stats.bernoulli(simplex_tensor)),
            (
                Beta(positive_var, positive_var2),
                scipy.stats.beta(positive_var, positive_var2),
            ),
            (
                Binomial(10, simplex_tensor),
                scipy.stats.binom(
                    10 * np.ones(simplex_tensor.shape), simplex_tensor.numpy()
                ),
            ),
            (
                Cauchy(random_var, positive_var),
                scipy.stats.cauchy(loc=random_var, scale=positive_var),
            ),
            (Dirichlet(positive_var), scipy.stats.dirichlet(positive_var)),
            (
                Exponential(positive_var),
                scipy.stats.expon(scale=positive_var.reciprocal()),
            ),
            (
                FisherSnedecor(
                    positive_var, 4 + positive_var2
                ),  # var for df2<=4 is undefined
                scipy.stats.f(positive_var, 4 + positive_var2),
            ),
            (
                Gamma(positive_var, positive_var2),
                scipy.stats.gamma(positive_var, scale=positive_var2.reciprocal()),
            ),
            (Geometric(simplex_tensor), scipy.stats.geom(simplex_tensor, loc=-1)),
            (
                Gumbel(random_var, positive_var2),
                scipy.stats.gumbel_r(random_var, positive_var2),
            ),
            (
                GeneralizedPareto(
                    loc=random_var, scale=positive_var, concentration=random_var / 10
                ),
                scipy.stats.genpareto(
                    c=random_var / 10, loc=random_var, scale=positive_var
                ),
            ),
            (HalfCauchy(positive_var), scipy.stats.halfcauchy(scale=positive_var)),
            (HalfNormal(positive_var2), scipy.stats.halfnorm(scale=positive_var2)),
            (
                InverseGamma(positive_var, positive_var2),
                scipy.stats.invgamma(positive_var, scale=positive_var2),
            ),
            (
                Laplace(random_var, positive_var2),
                scipy.stats.laplace(random_var, positive_var2),
            ),
            (
                # Tests fail 1e-5 threshold if scale > 3
                LogNormal(random_var, positive_var.clamp(max=3)),
                scipy.stats.lognorm(
                    s=positive_var.clamp(max=3), scale=random_var.exp()
                ),
            ),
            (
                LowRankMultivariateNormal(
                    random_var, torch.zeros(20, 1, dtype=torch.double), positive_var2
                ),
                scipy.stats.multivariate_normal(random_var, torch.diag(positive_var2)),
            ),
            (
                Multinomial(10, simplex_tensor),
                scipy.stats.multinomial(10, simplex_tensor),
            ),
            (
                MultivariateNormal(random_var, torch.diag(positive_var2)),
                scipy.stats.multivariate_normal(random_var, torch.diag(positive_var2)),
            ),
            (
                MultivariateNormal(random_var, cov_tensor),
                scipy.stats.multivariate_normal(random_var, cov_tensor),
            ),
            (
                Normal(random_var, positive_var2),
                scipy.stats.norm(random_var, positive_var2),
            ),
            (
                OneHotCategorical(simplex_tensor),
                scipy.stats.multinomial(1, simplex_tensor),
            ),
            (
                Pareto(positive_var, 2 + positive_var2),
                scipy.stats.pareto(2 + positive_var2, scale=positive_var),
            ),
            (Poisson(positive_var), scipy.stats.poisson(positive_var)),
            (
                StudentT(2 + positive_var, random_var, positive_var2),
                scipy.stats.t(2 + positive_var, random_var, positive_var2),
            ),
            (
                Uniform(random_var, random_var + positive_var),
                scipy.stats.uniform(random_var, positive_var),
            ),
            (
                VonMises(random_var, positive_var),
                scipy.stats.vonmises(positive_var, loc=random_var),
            ),
            (
                Weibull(
                    positive_var[0], positive_var2[0]
                ),  # scipy var for Weibull only supports scalars
                scipy.stats.weibull_min(c=positive_var2[0], scale=positive_var[0]),
            ),
            (
                # scipy var for Wishart only supports scalars
                # SciPy allowed ndim - 1 < df < ndim for the Wishart distribution after version 1.7.0
                Wishart(
                    (
                        20
                        if version.parse(scipy.__version__) < version.parse("1.7.0")
                        else 19
                    )
                    + positive_var[0],
                    cov_tensor,
                ),
                scipy.stats.wishart(
                    (
                        20
                        if version.parse(scipy.__version__) < version.parse("1.7.0")
                        else 19
                    )
                    + positive_var[0].item(),
                    cov_tensor,
                ),
            ),
        ]

    def test_mean(self):
        for pytorch_dist, scipy_dist in self.distribution_pairs:
            if isinstance(pytorch_dist, (Cauchy, HalfCauchy)):
                # Cauchy, HalfCauchy distributions' mean is nan, skipping check
                continue
            elif isinstance(
                pytorch_dist, (LowRankMultivariateNormal, MultivariateNormal)
            ):
                # scipy multivariate_normal exposes .mean as an attribute.
                self.assertEqual(pytorch_dist.mean, scipy_dist.mean, msg=pytorch_dist)
            else:
                self.assertEqual(pytorch_dist.mean, scipy_dist.mean(), msg=pytorch_dist)

    def test_variance_stddev(self):
        for pytorch_dist, scipy_dist in self.distribution_pairs:
            if isinstance(pytorch_dist, (Cauchy, HalfCauchy, VonMises)):
                # Cauchy, HalfCauchy distributions' standard deviation is nan, skipping check
                # VonMises variance is circular and scipy doesn't produce a correct result
                continue
            elif isinstance(pytorch_dist, (Multinomial, OneHotCategorical)):
                # Compare against the diagonal of scipy's covariance matrix.
                self.assertEqual(
                    pytorch_dist.variance, np.diag(scipy_dist.cov()), msg=pytorch_dist
                )
                self.assertEqual(
                    pytorch_dist.stddev,
                    np.diag(scipy_dist.cov()) ** 0.5,
                    msg=pytorch_dist,
                )
            elif isinstance(
                pytorch_dist, (LowRankMultivariateNormal, MultivariateNormal)
            ):
                self.assertEqual(
                    pytorch_dist.variance, np.diag(scipy_dist.cov), msg=pytorch_dist
                )
                self.assertEqual(
                    pytorch_dist.stddev,
                    np.diag(scipy_dist.cov) ** 0.5,
                    msg=pytorch_dist,
                )
            else:
                self.assertEqual(
                    pytorch_dist.variance, scipy_dist.var(), msg=pytorch_dist
                )
                self.assertEqual(
                    pytorch_dist.stddev, scipy_dist.var() ** 0.5, msg=pytorch_dist
                )

    @set_default_dtype(torch.double)
    def test_cdf(self):
        for pytorch_dist, scipy_dist in self.distribution_pairs:
            samples = pytorch_dist.sample((5,))
            try:
                cdf = pytorch_dist.cdf(samples)
            except NotImplementedError:
                # Not every distribution implements cdf; skip those.
                continue
            self.assertEqual(cdf, scipy_dist.cdf(samples), msg=pytorch_dist)

    def test_icdf(self):
        for pytorch_dist, scipy_dist in self.distribution_pairs:
            samples = torch.rand((5,) + pytorch_dist.batch_shape, dtype=torch.double)
            try:
                icdf = pytorch_dist.icdf(samples)
            except NotImplementedError:
                # Not every distribution implements icdf; skip those.
                continue
            self.assertEqual(icdf, scipy_dist.ppf(samples), msg=pytorch_dist)
|
TestAgainstScipy
|
python
|
pandas-dev__pandas
|
pandas/tests/apply/conftest.py
|
{
"start": 139,
"end": 1877
}
|
class ____(BaseExecutionEngine):
"""
Execution Engine to test if the execution engine interface receives and
uses all parameters provided by the user.
Making this engine work as the default Python engine by calling it, no extra
functionality is implemented here.
When testing, this will be called when this engine is provided, and then the
same pandas.map and pandas.apply function will be called, but without engine,
executing the default behavior from the python engine.
"""
def map(data, func, args, kwargs, decorator, skip_na):
kwargs_to_pass = kwargs if isinstance(data, DataFrame) else {}
return data.map(func, na_action="ignore" if skip_na else None, **kwargs_to_pass)
def apply(data, func, args, kwargs, decorator, axis):
if isinstance(data, Series):
return data.apply(func, convert_dtype=True, args=args, by_row=False)
elif isinstance(data, DataFrame):
return data.apply(
func,
axis=axis,
raw=False,
result_type=None,
args=args,
by_row="compat",
**kwargs,
)
else:
assert isinstance(data, np.ndarray)
def wrap_function(func):
# https://github.com/numpy/numpy/issues/8352
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, str):
result = np.array(result, dtype=object)
return result
return wrapper
return np.apply_along_axis(wrap_function(func), axis, data, *args, **kwargs)
|
MockExecutionEngine
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/tslibs/timedelta.py
|
{
"start": 211,
"end": 1188
}
|
class ____:
    """Timings for every supported way of constructing a ``Timedelta``."""

    def setup(self):
        # One hour expressed as a numpy timedelta64, a stdlib timedelta and a
        # pandas Timedelta, feeding the conversion benchmarks below.
        self.nptimedelta64 = np.timedelta64(3600)
        self.dttimedelta = datetime.timedelta(seconds=3600)
        self.td = Timedelta(3600, unit="s")

    def time_from_int(self):
        # Plain integer (interpreted as nanoseconds).
        Timedelta(123456789)

    def time_from_unit(self):
        # Integer with an explicit unit.
        Timedelta(1, unit="D")

    def time_from_components(self):
        # Full keyword-component construction.
        Timedelta(
            days=1,
            hours=2,
            minutes=3,
            seconds=4,
            milliseconds=5,
            microseconds=6,
            nanoseconds=7,
        )

    def time_from_datetime_timedelta(self):
        # Conversion from stdlib datetime.timedelta.
        Timedelta(self.dttimedelta)

    def time_from_np_timedelta(self):
        # Conversion from numpy.timedelta64.
        Timedelta(self.nptimedelta64)

    def time_from_string(self):
        # Human-readable string parsing.
        Timedelta("1 days")

    def time_from_iso_format(self):
        # ISO 8601 duration parsing.
        Timedelta("P4DT12H30M5S")

    def time_from_missing(self):
        # Missing-value sentinel parsing.
        Timedelta("nat")

    def time_from_pd_timedelta(self):
        # Round-trip from an existing Timedelta.
        Timedelta(self.td)
|
TimedeltaConstructor
|
python
|
huggingface__transformers
|
src/transformers/models/vitpose/image_processing_vitpose.py
|
{
"start": 13173,
"end": 29987
}
|
class ____(BaseImageProcessor):
r"""
Constructs a VitPose image processor.
Args:
do_affine_transform (`bool`, *optional*, defaults to `True`):
Whether to apply an affine transformation to the input images.
size (`dict[str, int]` *optional*, defaults to `{"height": 256, "width": 192}`):
Resolution of the image after `affine_transform` is applied. Only has an effect if `do_affine_transform` is set to `True`. Can
be overridden by `size` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`list[int]`, defaults to `[0.485, 0.456, 0.406]`, *optional*):
The sequence of means for each channel, to be used when normalizing images.
image_std (`list[int]`, defaults to `[0.229, 0.224, 0.225]`, *optional*):
The sequence of standard deviations for each channel, to be used when normalizing images.
"""
valid_kwargs = VitPoseImageProcessorKwargs
model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_affine_transform: bool = True,
        size: Optional[dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_affine_transform = do_affine_transform
        # Default output resolution: 256x192 (height x width).
        self.size = size if size is not None else {"height": 256, "width": 192}
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Fall back to the standard ImageNet normalization statistics.
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        # Pixel standard deviation used when converting boxes to center/scale
        # (see the "200 pixels" note in affine_transform).
        self.normalize_factor = 200.0
    def affine_transform(
        self,
        image: np.ndarray,
        center: tuple[float],
        scale: tuple[float],
        rotation: float,
        size: dict[str, int],
        data_format: Optional[ChannelDimension] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """
        Apply an affine transformation to an image.
        Args:
            image (`np.ndarray`):
                Image to transform.
            center (`tuple[float]`):
                Center of the bounding box (x, y).
            scale (`tuple[float]`):
                Scale of the bounding box with respect to height/width.
            rotation (`float`):
                Rotation angle in degrees.
            size (`dict[str, int]`):
                Size of the destination image.
            data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format of the output image.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image.
        """
        # When no output format is requested, keep the input's channel layout.
        data_format = input_data_format if data_format is None else data_format
        # The warp matrix expects (width, height) order.
        size = (size["width"], size["height"])
        # one uses a pixel standard deviation of 200 pixels
        transformation = get_warp_matrix(rotation, center * 2.0, np.array(size) - 1.0, scale * 200.0)
        # input image requires channels last format
        image = (
            image
            if input_data_format == ChannelDimension.LAST
            else to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)
        )
        # scipy_warp_affine takes size as (height, width), hence the swap.
        image = scipy_warp_affine(src=image, M=transformation, size=(size[1], size[0]))
        image = to_channel_dimension_format(image, data_format, ChannelDimension.LAST)
        return image
def preprocess(
self,
images: ImageInput,
boxes: Union[list[list[float]], np.ndarray],
do_affine_transform: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
boxes (`list[list[list[float]]]` or `np.ndarray`):
List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the bounding
box coordinates in COCO format (top_left_x, top_left_y, width, height).
do_affine_transform (`bool`, *optional*, defaults to `self.do_affine_transform`):
Whether to apply an affine transformation to the input images.
size (`dict[str, int]` *optional*, defaults to `self.size`):
Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
resizing.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to `'np'`):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
width).
"""
do_affine_transform = do_affine_transform if do_affine_transform is not None else self.do_affine_transform
size = size if size is not None else self.size
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
if isinstance(boxes, list) and len(images) != len(boxes):
raise ValueError(f"Batch of images and boxes mismatch : {len(images)} != {len(boxes)}")
elif isinstance(boxes, np.ndarray) and len(images) != boxes.shape[0]:
raise ValueError(f"Batch of images and boxes mismatch : {len(images)} != {boxes.shape[0]}")
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
# transformations (affine transformation + rescaling + normalization)
if self.do_affine_transform:
new_images = []
for image, image_boxes in zip(images, boxes):
for box in image_boxes:
center, scale = box_to_center_and_scale(
box,
image_width=size["width"],
image_height=size["height"],
normalize_factor=self.normalize_factor,
)
transformed_image = self.affine_transform(
image, center, scale, rotation=0, size=size, input_data_format=input_data_format
)
new_images.append(transformed_image)
images = new_images
# For batch processing, the number of boxes must be consistent across all images in the batch.
# When using a list input, the number of boxes can vary dynamically per image.
# The image processor creates pixel_values of shape (batch_size*num_persons, num_channels, height, width)
all_images = []
for image in images:
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]
data = {"pixel_values": images}
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
return encoded_inputs
def keypoints_from_heatmaps(
    self,
    heatmaps: np.ndarray,
    center: np.ndarray,
    scale: np.ndarray,
    kernel: int = 11,
):
    """
    Decode predicted heatmaps into keypoint coordinates in the original image frame.

    Args:
        heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width)`):
            Model predicted heatmaps.
        center (`np.ndarray` of shape `(batch_size, 2)`):
            Center of the bounding box (x, y).
        scale (`np.ndarray` of shape `(batch_size, 2)`):
            Scale of the bounding box wrt original images of width and height.
        kernel (`int`, *optional*, defaults to 11):
            Gaussian kernel size (K) for modulation, which should match the heatmap
            gaussian sigma used in training. K=17 for sigma=3 and K=11 for sigma=2.

    Returns:
        tuple: `(preds, scores)` where `preds` has shape
        `(batch_size, num_keypoints, 2)` (keypoint locations in image space) and
        `scores` has shape `(batch_size, num_keypoints, 1)` (keypoint confidences).
    """
    num_samples = heatmaps.shape[0]
    heatmap_height, heatmap_width = heatmaps.shape[2], heatmaps.shape[3]

    # Coarse peak locations plus confidences, refined by unbiased post-processing.
    coords, scores = get_keypoint_predictions(heatmaps)
    preds = post_dark_unbiased_data_processing(coords, heatmaps, kernel=kernel)

    # Map each sample's heatmap-space coordinates back into its original image
    # using that sample's bounding-box center and scale.
    for sample_idx in range(num_samples):
        preds[sample_idx] = transform_preds(
            preds[sample_idx],
            center=center[sample_idx],
            scale=scale[sample_idx],
            output_size=[heatmap_height, heatmap_width],
        )
    return preds, scores
def post_process_pose_estimation(
    self,
    outputs: "VitPoseEstimatorOutput",
    boxes: Union[list[list[list[float]]], np.ndarray],
    kernel_size: int = 11,
    threshold: Optional[float] = None,
    target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
):
    """
    Transform the heatmaps into keypoint predictions and transform them back to the image.

    Args:
        outputs (`VitPoseEstimatorOutput`):
            VitPoseForPoseEstimation model outputs.
        boxes (`list[list[list[float]]]` or `np.ndarray`):
            List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the bounding
            box coordinates in COCO format (top_left_x, top_left_y, width, height).
        kernel_size (`int`, *optional*, defaults to 11):
            Gaussian kernel size (K) for modulation.
        threshold (`float`, *optional*, defaults to None):
            Score threshold to keep object detection predictions.
        target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
            Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
            `(height, width)` of each image in the batch. If unset, predictions will be resize with the default value.

    Returns:
        `list[list[Dict]]`: A list of dictionaries, each dictionary containing the keypoints and boxes for an image
        in the batch as predicted by the model.
    """
    # First compute centers and scales for each bounding box
    # NOTE(review): the heatmap batch dimension counts person crops (images x
    # persons), while `target_sizes` is documented per image — confirm callers
    # pass one entry per crop when multiple boxes per image are used.
    batch_size, num_keypoints, _, _ = outputs.heatmaps.shape
    if target_sizes is not None:
        if batch_size != len(target_sizes):
            raise ValueError(
                "Make sure that you pass in as many target sizes as the batch dimension of the logits"
            )

    centers = np.zeros((batch_size, 2), dtype=np.float32)
    scales = np.zeros((batch_size, 2), dtype=np.float32)
    # Flatten the nested per-image box lists so index i lines up with heatmap i.
    flattened_boxes = list(itertools.chain(*boxes))
    for i in range(batch_size):
        if target_sizes is not None:
            # Boxes are assumed normalized when target_sizes is given; rescale
            # them to absolute pixel coordinates before computing center/scale.
            image_width, image_height = target_sizes[i][0], target_sizes[i][1]
            scale_factor = np.array([image_width, image_height, image_width, image_height])
            flattened_boxes[i] = flattened_boxes[i] * scale_factor
        width, height = self.size["width"], self.size["height"]
        center, scale = box_to_center_and_scale(flattened_boxes[i], image_width=width, image_height=height)
        centers[i, :] = center
        scales[i, :] = scale

    preds, scores = self.keypoints_from_heatmaps(
        outputs.heatmaps.cpu().numpy(), centers, scales, kernel=kernel_size
    )

    # Pack (center, scale) per crop, then convert to corner-format boxes below.
    all_boxes = np.zeros((batch_size, 4), dtype=np.float32)
    all_boxes[:, 0:2] = centers[:, 0:2]
    all_boxes[:, 2:4] = scales[:, 0:2]

    poses = torch.tensor(preds)
    scores = torch.tensor(scores)
    labels = torch.arange(0, num_keypoints)
    bboxes_xyxy = torch.tensor(coco_to_pascal_voc(all_boxes))

    results: list[list[dict[str, torch.Tensor]]] = []

    # The flat (pose, score, bbox) stream is consumed strictly in order; the
    # nested loops re-group it per image because `boxes` keeps the per-image
    # structure that was flattened above.
    pose_bbox_pairs = zip(poses, scores, bboxes_xyxy)

    for image_bboxes in boxes:
        image_results: list[dict[str, torch.Tensor]] = []
        for _ in image_bboxes:
            # Unpack the next pose and bbox_xyxy from the iterator
            pose, score, bbox_xyxy = next(pose_bbox_pairs)
            score = score.squeeze()
            keypoints_labels = labels
            if threshold is not None:
                # Drop keypoints whose confidence does not exceed the threshold.
                keep = score > threshold
                pose = pose[keep]
                score = score[keep]
                keypoints_labels = keypoints_labels[keep]
            pose_result = {"keypoints": pose, "scores": score, "labels": keypoints_labels, "bbox": bbox_xyxy}
            image_results.append(pose_result)
        results.append(image_results)

    return results
__all__ = ["VitPoseImageProcessor"]
|
VitPoseImageProcessor
|
python
|
huggingface__transformers
|
src/transformers/models/ernie/configuration_ernie.py
|
{
"start": 865,
"end": 6221
}
|
class ____(PreTrainedConfig):
    r"""
    Configuration class for [`ErnieModel`]. It is used to instantiate an ERNIE model
    according to the specified arguments, defining the model architecture. Instantiating
    a configuration with the defaults yields a configuration similar to that of the ERNIE
    [nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the
    model outputs. Read the documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the ERNIE model; the number of different tokens representable
            by the `inputs_ids` passed when calling [`ErnieModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            Non-linear activation in the encoder and pooler. As strings, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            Dropout probability for all fully connected layers in the embeddings,
            encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            Dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            Maximum sequence length this model might ever be used with (e.g. 512, 1024, 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            Vocabulary size of the `token_type_ids` passed when calling [`ErnieModel`].
        task_type_vocab_size (`int`, *optional*, defaults to 3):
            Vocabulary size of the `task_type_ids` for ERNIE2.0/ERNIE3.0 models.
        use_task_id (`bool`, *optional*, defaults to `False`):
            Whether or not the model supports `task_type_ids`.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated_normal_initializer for weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            Epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (not used by
            all models). Only relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            Dropout ratio for the classification head.

    Examples:

    ```python
    >>> from transformers import ErnieConfig, ErnieModel

    >>> # Initializing a ERNIE nghuyong/ernie-3.0-base-zh style configuration
    >>> configuration = ErnieConfig()

    >>> # Initializing a model (with random weights) from that configuration
    >>> model = ErnieModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "ernie"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        task_type_vocab_size=3,
        use_task_id=False,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # pad_token_id is consumed by the base class; everything else is stored here.
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # Vocabulary / embedding table sizes
        self.vocab_size = vocab_size
        self.type_vocab_size = type_vocab_size
        self.task_type_vocab_size = task_type_vocab_size
        self.use_task_id = use_task_id
        self.max_position_embeddings = max_position_embeddings

        # Transformer geometry
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act

        # Regularisation and initialisation
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout

        # Decoding behaviour
        self.use_cache = use_cache
__all__ = ["ErnieConfig"]
|
ErnieConfig
|
python
|
celery__celery
|
t/unit/app/test_routes.py
|
{
"start": 7502,
"end": 8026
}
|
class ____:
    def test_prepare(self):
        # A sentinel object must pass through `prepare` untouched.
        sentinel = object()
        raw_routes = [
            {'foo': 'bar'},
            qualname(TestRouter),
            sentinel,
        ]
        prepared = routes.prepare(raw_routes)
        assert isinstance(prepared[0], routes.MapRoute)
        assert isinstance(maybe_evaluate(prepared[1]), TestRouter)
        assert prepared[2] is sentinel

        # A bare non-sequence input is wrapped in a single-element list.
        assert routes.prepare(sentinel) == [sentinel]

    def test_prepare_item_is_dict(self):
        # A plain dict becomes a one-element list holding a MapRoute.
        prepared = routes.prepare({'foo': 'bar'})
        assert isinstance(prepared[0], routes.MapRoute)
|
test_prepare
|
python
|
wandb__wandb
|
wandb/sdk/launch/runner/kubernetes_runner.py
|
{
"start": 2686,
"end": 8889
}
|
class ____(AbstractRun):
    """Wrapper for a launched run on Kubernetes."""

    def __init__(
        self,
        batch_api: "BatchV1Api",
        core_api: "CoreV1Api",
        apps_api: "AppsV1Api",
        network_api: "NetworkingV1Api",
        name: str,
        namespace: Optional[str] = "default",
        secret: Optional["V1Secret"] = None,
        auxiliary_resource_label_key: Optional[str] = None,
    ) -> None:
        """Initialize a KubernetesSubmittedRun.

        Other implementations of the AbstractRun interface poll on the run
        when `get_status` is called, but KubernetesSubmittedRun uses
        Kubernetes watch streams to update the run status. One thread handles
        events from the job object and another thread handles events from the
        rank 0 pod. These threads updated the `_status` attributed of the
        KubernetesSubmittedRun object. When `get_status` is called, the
        `_status` attribute is returned.

        Arguments:
            batch_api: Kubernetes BatchV1Api object.
            core_api: Kubernetes CoreV1Api object.
            apps_api: Kubernetes AppsV1Api object.
            network_api: Kubernetes NetworkV1Api object.
            name: Name of the job.
            namespace: Kubernetes namespace.
            secret: Kubernetes secret.
            auxiliary_resource_label_key: Label value used to locate and clean up
                auxiliary resources created for this run.

        Returns:
            None.
        """
        self.batch_api = batch_api
        self.core_api = core_api
        self.apps_api = apps_api
        self.network_api = network_api
        self.name = name
        self.namespace = namespace
        self._fail_count = 0
        self.secret = secret
        self.auxiliary_resource_label_key = auxiliary_resource_label_key

    @property
    def id(self) -> str:
        """Return the run id."""
        return self.name

    async def get_logs(self) -> Optional[str]:
        """Return the log text of the job's first pod, or None on failure.

        Logs are read from the first matching pod only.
        """
        try:
            pods = await self.core_api.list_namespaced_pod(
                label_selector=f"job-name={self.name}", namespace=self.namespace
            )
            pod_names = [pi.metadata.name for pi in pods.items]
            if not pod_names:
                wandb.termwarn(f"Found no pods for kubernetes job: {self.name}")
                return None
            logs = await self.core_api.read_namespaced_pod_log(
                name=pod_names[0], namespace=self.namespace
            )
            if logs:
                return str(logs)
            else:
                wandb.termwarn(f"No logs for kubernetes pod(s): {pod_names}")
                return None
        except Exception as e:
            # Best-effort: log retrieval failures never propagate to the caller.
            wandb.termerror(f"{LOG_PREFIX}Failed to get pod logs: {e}")
            return None

    async def wait(self) -> bool:
        """Wait for the run to finish.

        Polls the monitored status every 5 seconds until a terminal state,
        then cleans up the run's secret and auxiliary resources.

        Returns:
            True if the run finished successfully, False otherwise.
        """
        while True:
            status = await self.get_status()
            wandb.termlog(f"{LOG_PREFIX}Job {self.name} status: {status.state}")
            if status.state in ["finished", "failed", "preempted"]:
                break
            await asyncio.sleep(5)
        await self._delete_secret()
        await self._delete_auxiliary_resources_by_label()
        return (
            status.state == "finished"
        )  # todo: not sure if this (copied from aws runner) is the right approach? should we return false on failure

    async def get_status(self) -> Status:
        """Return the monitored status; clean up resources on terminal states."""
        status = LaunchKubernetesMonitor.get_status(self.name)
        # NOTE(review): this compares a Status object against strings — presumably
        # Status defines equality with str; confirm against the Status class.
        if status in ["stopped", "failed", "finished", "preempted"]:
            await self._delete_secret()
            await self._delete_auxiliary_resources_by_label()
        return status

    async def cancel(self) -> None:
        """Cancel the run."""
        try:
            await self.batch_api.delete_namespaced_job(
                namespace=self.namespace,
                name=self.name,
            )
            await self._delete_secret()
            await self._delete_auxiliary_resources_by_label()
        except ApiException as e:
            raise LaunchError(
                f"Failed to delete Kubernetes Job {self.name} in namespace {self.namespace}: {str(e)}"
            ) from e

    async def _delete_secret(self) -> None:
        # Cleanup secret if not running in a helm-managed context
        # (WANDB_RELEASE_NAME marks a helm deployment that owns the secret).
        if not os.environ.get("WANDB_RELEASE_NAME") and self.secret:
            await self.core_api.delete_namespaced_secret(
                name=self.secret.metadata.name,
                namespace=self.secret.metadata.namespace,
            )
            # Clear the reference so repeated cleanup calls are no-ops.
            self.secret = None

    async def _delete_auxiliary_resources_by_label(self) -> None:
        """Best-effort deletion of all run-scoped resources matching our label."""
        if self.auxiliary_resource_label_key is None:
            return

        label_selector = (
            f"{WANDB_K8S_LABEL_AUXILIARY_RESOURCE}={self.auxiliary_resource_label_key}"
        )

        try:
            # (api client, resource-name suffix) pairs used to build the
            # list_namespaced_* / delete_namespaced_* method names dynamically.
            resource_cleanups = [
                (self.core_api, "service"),
                (self.batch_api, "job"),
                (self.core_api, "pod"),
                (self.core_api, "secret"),
                (self.apps_api, "deployment"),
                (self.network_api, "network_policy"),
            ]

            for api_client, resource_type in resource_cleanups:
                try:
                    list_method = getattr(
                        api_client, f"list_namespaced_{resource_type}"
                    )
                    delete_method = getattr(
                        api_client, f"delete_namespaced_{resource_type}"
                    )

                    # List resources with our label
                    resources = await list_method(
                        namespace=self.namespace, label_selector=label_selector
                    )

                    # Delete each resource
                    for resource in resources.items:
                        await delete_method(
                            name=resource.metadata.name, namespace=self.namespace
                        )
                except (AttributeError, ApiException) as e:
                    # One resource type failing must not stop the others.
                    wandb.termwarn(f"Could not clean up {resource_type}: {e}")
        except Exception as e:
            wandb.termwarn(f"Failed to clean up some auxiliary resources: {e}")
|
KubernetesSubmittedRun
|
python
|
kamyu104__LeetCode-Solutions
|
Python/binary-tree-pruning.py
|
{
"start": 29,
"end": 409
}
|
class ____(object):
def pruneTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if not root:
return None
root.left = self.pruneTree(root.left)
root.right = self.pruneTree(root.right)
if not root.left and not root.right and root.val == 0:
return None
return root
|
Solution
|
python
|
sphinx-doc__sphinx
|
sphinx/util/cfamily.py
|
{
"start": 3979,
"end": 4124
}
|
class ____(ASTBaseBase):
    """AST attribute node; concrete subclasses must render themselves."""

    def describe_signature(self, signode: TextElement) -> None:
        # Abstract-by-convention: every subclass must override this.
        message = repr(self)
        raise NotImplementedError(message)
|
ASTAttribute
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/async_job_manager.py
|
{
"start": 2635,
"end": 7522
}
|
class ____:
    """
    Minimal, state-agnostic manager:
      - asks jobs to start when capacity allows (jobs decide if they can start)
      - polls jobs in batch for status updates
      - yields completed jobs
      - accepts 'new_jobs' emitted by jobs (e.g., after split) and puts them into the running set
    """

    # Seconds to sleep between polling rounds when no job has completed yet.
    JOB_STATUS_UPDATE_SLEEP_SECONDS = 30

    def __init__(
        self, api: "API", jobs: Iterator[AsyncJob], account_id: str, *, throttle_limit: float = 90.0, max_jobs_in_queue: int = 100
    ):
        """Create a manager over a lazy stream of jobs.

        Args:
            api: API wrapper used for batched job-status updates.
            jobs: Lazily evaluated iterator of jobs to schedule.
            account_id: Account the jobs belong to.
            throttle_limit: API throttle level above which no new jobs are started.
            max_jobs_in_queue: Hard cap on concurrently running jobs.
        """
        self._api = api
        self._account_id = account_id
        self._jobs = iter(jobs)
        self._running_jobs: List[AsyncJob] = []
        self._prefetched_job: Optional[AsyncJob] = None  # look-ahead buffer
        self._api_limit = APILimit(self._api, self._account_id, throttle_limit=throttle_limit, max_jobs=max_jobs_in_queue)

    # --- Public consumption API ---
    def completed_jobs(self) -> Iterator[AsyncJob]:
        """Yield jobs as they complete, starting new ones whenever capacity allows."""
        while self._running_jobs or self._has_more_jobs():
            self._start_jobs()
            completed = self._check_jobs_status()
            if completed:
                yield from completed
            else:
                # Nothing finished this round — back off before polling again.
                logger.info(f"No jobs ready to be consumed, wait for {self.JOB_STATUS_UPDATE_SLEEP_SECONDS} seconds")
                time.sleep(self.JOB_STATUS_UPDATE_SLEEP_SECONDS)

    # --- Internals ---
    def _check_jobs_status(self) -> List[AsyncJob]:
        """
        Batch-poll all running jobs. Collect completed ones. If a job produced
        additional work (via job.new_jobs), put those into the running set.
        """
        completed_jobs: List[AsyncJob] = []

        # Ask each job to update itself. For plain jobs, this batches directly;
        # for parent jobs, their update_job implementation will update children.
        update_in_batch(api=self._api.api, jobs=self._running_jobs)

        new_running: List[AsyncJob] = []
        for job in self._running_jobs:
            if job.completed:
                completed_jobs.append(job)
            else:
                # If the job emitted new work (e.g., via split), take its `new_jobs` instead of keeping the parent.
                # This effectively "clears" the old job from the running set: we replace it with its children.
                new_jobs = job.new_jobs
                if new_jobs:
                    new_running.extend(new_jobs)
                else:
                    # Keep the job in running set if it hasn't finished.
                    new_running.append(job)

        self._running_jobs = new_running

        logger.info(
            "Manager status: completed=%d, running=%d, inflight=%d, throttle=%.2f",
            len(completed_jobs),
            len(self._running_jobs),
            self._api_limit.inflight,
            self._api_limit.current_throttle,
        )
        return completed_jobs

    def _start_jobs(self) -> None:
        """
        Phase 1: give already-running jobs a chance to start more internal work
        (useful for ParentAsyncJob that staggers children).
        Phase 2: pull fresh jobs from the upstream iterator while capacity allows.

        NOTE: jobs themselves decide whether they can start by consulting APILimit.
        """
        # Phase 1 — let existing running jobs opportunistically start internal work.
        for job in self._running_jobs:
            if not job.started:
                # Simple job: (re)starts itself if capacity allows — including retries after failure.
                # Parent job: typically starts some children and remains 'not fully started' until all children started.
                job.start(self._api_limit)
            if self._api_limit.limit_reached:
                return

        if self._api_limit.capacity_reached:
            # No point in trying to start new jobs if we are at max concurrency.
            return

        # Phase 2 — schedule new jobs while there is capacity.
        while True:
            next_job = self._pull_next_job()
            if not next_job:
                break
            next_job.start(self._api_limit)
            # Regardless of whether it could start immediately, keep it in the running set.
            # It will attempt to start again in Phase 1 on subsequent cycles.
            self._running_jobs.append(next_job)
            if self._api_limit.limit_reached:
                break

    def _pull_next_job(self) -> Optional[AsyncJob]:
        """Return the next upstream job, preferring one buffered by _has_more_jobs."""
        if self._prefetched_job is not None:
            pulled_job, self._prefetched_job = self._prefetched_job, None
            return pulled_job
        return next(self._jobs, None)

    def _has_more_jobs(self) -> bool:
        """Peek the upstream iterator; buffers the peeked job for _pull_next_job."""
        if self._prefetched_job is not None:
            return True
        self._prefetched_job = next(self._jobs, None)
        return self._prefetched_job is not None
|
InsightAsyncJobManager
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-braintree/source_braintree/schemas/cards.py
|
{
"start": 538,
"end": 1257
}
|
class ____(CatalogModel):
    """
    Schema for a Braintree credit card record; fields mirror the API response.
    https://developer.paypal.com/braintree/docs/reference/response/credit-card
    """

    # Holder and issuer details
    billing_address: Address
    bin: str
    card_type: str
    cardholder_name: str
    # NOTE(review): these classification flags appear to be strings rather than
    # booleans — presumably Braintree's "Yes"/"No"/"Unknown" values; confirm
    # against the API reference linked above.
    commercial: str
    country_of_issuance: str
    created_at: datetime
    customer_id: str
    customer_location: str
    debit: str
    default: bool
    durbin_regulated: str
    # Expiry information (string-typed as returned by the API)
    expiration_date: str
    expiration_month: str
    expiration_year: str
    expired: bool
    healthcare: str
    image_url: str
    issuing_bank: str
    # Masked / tokenized identifiers — no full card number is stored
    last_4: str
    masked_number: str
    payroll: str
    prepaid: str
    product_id: str
    token: str
    unique_number_identifier: str
    updated_at: datetime
|
CreditCard
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py
|
{
"start": 11596,
"end": 12237
}
|
class ____(test.TestCase, PythonOpImpl):
    """Cross-checks space_to_batch against space_to_depth via axis transposes."""

    # Verifies that: space_to_batch(x) = transpose(space_to_depth(transpose(x)))
    @test_util.run_deprecated_v1
    def testSpaceToDepthTranspose(self):
        # Input with distinct values: batch=5, height=10, width=16, depth=7.
        x = np.arange(5 * 10 * 16 * 7, dtype=np.float32).reshape([5, 10, 16, 7])
        block_size = 2
        paddings = np.zeros((2, 2), dtype=np.int32)
        y1 = self.space_to_batch(x, paddings, block_size=block_size)
        # Swap depth and batch axes so space_to_depth sees the same spatial
        # layout, then swap back to make the two results directly comparable.
        y2 = array_ops.transpose(
            array_ops.space_to_depth(
                array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
            [3, 1, 2, 0])
        with self.session():
            self.assertAllEqual(y1, y2)
|
SpaceToBatchSpaceToDepth
|
python
|
jazzband__prettytable
|
src/prettytable/prettytable.py
|
{
"start": 5197,
"end": 106207
}
|
class ____:
_xhtml: bool
_align: dict[str, AlignType]
_valign: dict[str, VAlignType]
_min_width: dict[str, int]
_max_width: dict[str, int]
_min_table_width: int | None
_max_table_width: int | None
_fields: Sequence[str | None] | None
_title: str | None
_start: int
_end: int | None
_sortby: str | None
_reversesort: bool
_sort_key: Callable[[RowType], SupportsRichComparison]
_row_filter: Callable[[RowType], bool]
_header: bool
_use_header_width: bool
_header_style: HeaderStyleType
_border: bool
_preserve_internal_border: bool
_hrules: HRuleStyle
_vrules: VRuleStyle
_int_format: dict[str, str]
_float_format: dict[str, str]
_custom_format: dict[str, Callable[[str, Any], str]]
_padding_width: int
_left_padding_width: int | None
_right_padding_width: int | None
_vertical_char: str
_horizontal_char: str
_horizontal_align_char: str | None
_junction_char: str
_top_junction_char: str | None
_bottom_junction_char: str | None
_right_junction_char: str | None
_left_junction_char: str | None
_top_right_junction_char: str | None
_top_left_junction_char: str | None
_bottom_right_junction_char: str | None
_bottom_left_junction_char: str | None
_format: bool
_print_empty: bool
_oldsortslice: bool
_attributes: dict[str, str]
_escape_header: bool
_escape_data: bool
_style: TableStyle | None
orgmode: bool
_widths: list[int]
_hrule: str
_break_on_hyphens: bool
def __init__(self, field_names: Sequence[str] | None = None, **kwargs) -> None:
"""Return a new PrettyTable instance
Arguments:
encoding - Unicode encoding scheme used to decode any encoded input
title - optional table title
field_names - list or tuple of field names
fields - list or tuple of field names to include in displays
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
header - print a header showing field names (True or False)
use_header_width - reflect width of header (True or False)
header_style - stylisation to apply to field names in header
("cap", "title", "upper", "lower" or None)
border - print a border around the table (True or False)
preserve_internal_border - print a border inside the table even if
border is disabled (True or False)
hrules - controls printing of horizontal rules after rows.
Allowed values: HRuleStyle
vrules - controls printing of vertical rules between columns.
Allowed values: VRuleStyle
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
custom_format - controls formatting of any column using callable
min_table_width - minimum desired table width, in characters
max_table_width - maximum desired table width, in characters
min_width - minimum desired field width, in characters
max_width - maximum desired field width, in characters
padding_width - number of spaces on either side of column data
(only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
horizontal_align_char - single character string used to indicate alignment
junction_char - single character string used to draw line junctions
top_junction_char - single character string used to draw top line junctions
bottom_junction_char -
single character string used to draw bottom line junctions
right_junction_char - single character string used to draw right line junctions
left_junction_char - single character string used to draw left line junctions
top_right_junction_char -
single character string used to draw top-right line junctions
top_left_junction_char -
single character string used to draw top-left line junctions
bottom_right_junction_char -
single character string used to draw bottom-right line junctions
bottom_left_junction_char -
single character string used to draw bottom-left line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
row_filter - filter function applied on rows
align - default align for each column (None, "l", "c" or "r")
valign - default valign for each row (None, "t", "m" or "b")
reversesort - True or False to sort in descending or ascending order
oldsortslice - Slice rows before sorting in the "old style"
break_on_hyphens - Whether long lines are broken on hypens or not, default: True
"""
self.encoding = kwargs.get("encoding", "UTF-8")
# Data
self._field_names: list[str] = []
self._rows: list[RowType] = []
self._dividers: list[bool] = []
self.align = {}
self.valign = {}
self.max_width = {}
self.min_width = {}
self.int_format = {}
self.float_format = {}
self.custom_format = {}
self._style = None
# Options
self._options = [
"title",
"start",
"end",
"fields",
"header",
"use_header_width",
"border",
"preserve_internal_border",
"sortby",
"reversesort",
"sort_key",
"row_filter",
"attributes",
"format",
"hrules",
"vrules",
"int_format",
"float_format",
"custom_format",
"min_table_width",
"max_table_width",
"padding_width",
"left_padding_width",
"right_padding_width",
"vertical_char",
"horizontal_char",
"horizontal_align_char",
"junction_char",
"header_style",
"xhtml",
"print_empty",
"oldsortslice",
"top_junction_char",
"bottom_junction_char",
"right_junction_char",
"left_junction_char",
"top_right_junction_char",
"top_left_junction_char",
"bottom_right_junction_char",
"bottom_left_junction_char",
"align",
"valign",
"max_width",
"min_width",
"none_format",
"escape_header",
"escape_data",
"break_on_hyphens",
]
self._none_format: dict[str, str | None] = {}
self._kwargs = {}
if field_names:
self.field_names = field_names
else:
self._widths: list[int] = []
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
self._kwargs[option] = kwargs[option]
else:
kwargs[option] = None
self._kwargs[option] = None
self._title = kwargs["title"] or None
self._start = kwargs["start"] or 0
self._end = kwargs["end"] or None
self._fields = kwargs["fields"] or None
if kwargs["header"] in (True, False):
self._header = kwargs["header"]
else:
self._header = True
if kwargs["use_header_width"] in (True, False):
self._use_header_width = kwargs["use_header_width"]
else:
self._use_header_width = True
self._header_style = kwargs["header_style"] or None
if kwargs["border"] in (True, False):
self._border = kwargs["border"]
else:
self._border = True
if kwargs["preserve_internal_border"] in (True, False):
self._preserve_internal_border = kwargs["preserve_internal_border"]
else:
self._preserve_internal_border = False
self._hrules = kwargs["hrules"] or HRuleStyle.FRAME
self._vrules = kwargs["vrules"] or VRuleStyle.ALL
self._sortby = kwargs["sortby"] or None
if kwargs["reversesort"] in (True, False):
self._reversesort = kwargs["reversesort"]
else:
self._reversesort = False
self._sort_key = kwargs["sort_key"] or (lambda x: x)
self._row_filter = kwargs["row_filter"] or (lambda x: True)
if kwargs["escape_data"] in (True, False):
self._escape_data = kwargs["escape_data"]
else:
self._escape_data = True
if kwargs["escape_header"] in (True, False):
self._escape_header = kwargs["escape_header"]
else:
self._escape_header = True
self._column_specific_args()
self._min_table_width = kwargs["min_table_width"] or None
self._max_table_width = kwargs["max_table_width"] or None
if kwargs["padding_width"] is None:
self._padding_width = 1
else:
self._padding_width = kwargs["padding_width"]
self._left_padding_width = kwargs["left_padding_width"] or None
self._right_padding_width = kwargs["right_padding_width"] or None
self._vertical_char = kwargs["vertical_char"] or "|"
self._horizontal_char = kwargs["horizontal_char"] or "-"
self._horizontal_align_char = kwargs["horizontal_align_char"]
self._junction_char = kwargs["junction_char"] or "+"
self._top_junction_char = kwargs["top_junction_char"]
self._bottom_junction_char = kwargs["bottom_junction_char"]
self._right_junction_char = kwargs["right_junction_char"]
self._left_junction_char = kwargs["left_junction_char"]
self._top_right_junction_char = kwargs["top_right_junction_char"]
self._top_left_junction_char = kwargs["top_left_junction_char"]
self._bottom_right_junction_char = kwargs["bottom_right_junction_char"]
self._bottom_left_junction_char = kwargs["bottom_left_junction_char"]
if kwargs["print_empty"] in (True, False):
self._print_empty = kwargs["print_empty"]
else:
self._print_empty = True
if kwargs["oldsortslice"] in (True, False):
self._oldsortslice = kwargs["oldsortslice"]
else:
self._oldsortslice = False
self._format = kwargs["format"] or False
self._xhtml = kwargs["xhtml"] or False
self._attributes = kwargs["attributes"] or {}
if kwargs["break_on_hyphens"] in (True, False):
self._break_on_hyphens = kwargs["break_on_hyphens"]
else:
self._break_on_hyphens = True
def _column_specific_args(self) -> None:
    """Apply per-column keyword options via their property setters."""
    column_options = (
        "align",
        "valign",
        "max_width",
        "min_width",
        "int_format",
        "float_format",
        "custom_format",
        "none_format",
    )
    for option in column_options:
        # Missing or falsy values collapse to an empty per-column mapping.
        value = self._kwargs.get(option) or {}
        setattr(self, option, value)
def _justify(self, text: str, width: int, align: AlignType) -> str:
    """Pad *text* with spaces to *width*, honouring 'l', 'r', or centred align."""
    pad = width - _str_block_width(text)
    if align == "l":
        return text + pad * " "
    if align == "r":
        return pad * " " + text
    # Centred: split the padding evenly. When it is uneven, mirror the
    # behaviour of str.center(): odd-width text gets the extra space on the
    # right, even-width text gets it on the left.
    left = right = pad // 2
    if pad % 2:
        if _str_block_width(text) % 2:
            right += 1
        else:
            left += 1
    return left * " " + text + right * " "
def __getattr__(self, name):
    # Fallback lookup (only invoked for attributes not found normally):
    # exposes the derived counts `rowcount` and `colcount`.
    if name == "rowcount":
        return len(self._rows)
    if name == "colcount":
        if self._field_names:
            return len(self._field_names)
        if self._rows:
            # No declared fields — infer the column count from the first row.
            return len(self._rows[0])
        return 0
    raise AttributeError(name)
def __getitem__(self, index: int | slice) -> PrettyTable:
    """Return a new PrettyTable holding the selected row(s) with the same options."""
    subtable = PrettyTable()
    subtable.field_names = self.field_names
    # Copy every persistent option (and the alignment map) onto the new table.
    for attr in self._options:
        setattr(subtable, "_" + attr, getattr(self, "_" + attr))
    setattr(subtable, "_align", getattr(self, "_align"))
    if isinstance(index, slice):
        for row in self._rows[index]:
            subtable.add_row(row)
    elif isinstance(index, int):
        subtable.add_row(self._rows[index])
    else:
        msg = f"Index {index} is invalid, must be an integer or slice"
        raise IndexError(msg)
    return subtable
def __str__(self) -> str:
    # Plain-text rendering; delegates to get_string() so all options apply.
    return self.get_string()

def __repr__(self) -> str:
    # repr is intentionally the same rendered table as str().
    return self.get_string()

def _repr_html_(self) -> str:
    """
    Returns get_html_string value by default
    as the repr call in Jupyter notebook environment
    """
    return self.get_html_string()
##############################
# ATTRIBUTE VALIDATORS #
##############################
# The method _validate_option is all that should be used elsewhere in the code base
# to validate options. It will call the appropriate validation method for that
# option. The individual validation methods should never need to be called directly
# (although nothing bad will happen if they *are*).
# Validation happens in TWO places.
# Firstly, in the property setters defined in the ATTRIBUTE MANAGEMENT section.
# Secondly, in the _get_options method, where keyword arguments are mixed with
# persistent settings
def _validate_option(self, option, val) -> None:
    """Dispatch *val* to the validator appropriate for *option*.

    This is the single entry point used throughout the code base for
    option validation; the individual ``_validate_*`` methods should not
    normally be called directly. Unknown options pass through silently."""
    if option == "field_names":
        self._validate_field_names(val)
    elif option == "none_format":
        self._validate_none_format(val)
    # Options that must be non-negative integers.
    elif option in (
        "start",
        "end",
        "max_width",
        "min_width",
        "min_table_width",
        "max_table_width",
        "padding_width",
        "left_padding_width",
        "right_padding_width",
    ):
        self._validate_nonnegative_int(option, val)
    elif option == "sortby":
        self._validate_field_name(option, val)
    elif option in ("sort_key", "row_filter"):
        self._validate_function(option, val)
    elif option == "hrules":
        self._validate_hrules(option, val)
    elif option == "vrules":
        self._validate_vrules(option, val)
    elif option == "fields":
        self._validate_all_field_names(option, val)
    # Boolean-valued options.
    elif option in (
        "header",
        "use_header_width",
        "border",
        "preserve_internal_border",
        "reversesort",
        "xhtml",
        "format",
        "print_empty",
        "oldsortslice",
        "escape_header",
        "escape_data",
        "break_on_hyphens",
    ):
        self._validate_true_or_false(option, val)
    elif option == "header_style":
        self._validate_header_style(val)
    elif option == "int_format":
        self._validate_int_format(option, val)
    elif option == "float_format":
        self._validate_float_format(option, val)
    elif option == "custom_format":
        for k, formatter in val.items():
            self._validate_function(f"{option}.{k}", formatter)
    # Single-character drawing options.
    elif option in (
        "vertical_char",
        "horizontal_char",
        "horizontal_align_char",
        "junction_char",
        "top_junction_char",
        "bottom_junction_char",
        "right_junction_char",
        "left_junction_char",
        "top_right_junction_char",
        "top_left_junction_char",
        "bottom_right_junction_char",
        "bottom_left_junction_char",
    ):
        self._validate_single_char(option, val)
    elif option == "attributes":
        self._validate_attributes(option, val)
def _validate_field_names(self, val):
    """Check a prospective field-name list for correct length and uniqueness.

    Raises ValueError when the list length disagrees with the existing
    header or row width, or when names are duplicated.

    NOTE(review): validation relies on ``assert``, which is stripped under
    ``python -O`` — consider explicit checks."""
    # Check for appropriate length
    if self._field_names:
        try:
            assert len(val) == len(self._field_names)
        except AssertionError:
            msg = (
                "Field name list has incorrect number of values, "
                f"(actual) {len(val)}!={len(self._field_names)} (expected)"
            )
            raise ValueError(msg)
    if self._rows:
        try:
            assert len(val) == len(self._rows[0])
        except AssertionError:
            msg = (
                "Field name list has incorrect number of values, "
                f"(actual) {len(val)}!={len(self._rows[0])} (expected)"
            )
            raise ValueError(msg)
    # Check for uniqueness
    try:
        assert len(val) == len(set(val))
    except AssertionError:
        msg = "Field names must be unique"
        raise ValueError(msg)
def _validate_none_format(self, val):
    """Ensure a None-replacement value is a string (None itself is allowed)."""
    try:
        if val is not None:
            assert isinstance(val, str)
    except AssertionError:
        msg = "Replacement for None value must be a string if being supplied."
        raise TypeError(msg)
def _validate_header_style(self, val):
    """Ensure *val* is a recognised header style keyword or None."""
    try:
        assert val in ("cap", "title", "upper", "lower", None)
    except AssertionError:
        msg = "Invalid header style, use cap, title, upper, lower or None"
        raise ValueError(msg)
def _validate_align(self, val):
try:
assert val in ["l", "c", "r"]
except AssertionError:
msg = f"Alignment {val} is invalid, use l, c or r"
raise ValueError(msg)
def _validate_valign(self, val):
try:
assert val in ["t", "m", "b"]
except AssertionError:
msg = f"Alignment {val} is invalid, use t, m, b"
raise ValueError(msg)
def _validate_nonnegative_int(self, name, val):
try:
assert int(val) >= 0
except AssertionError:
msg = f"Invalid value for {name}: {val}"
raise ValueError(msg)
def _validate_true_or_false(self, name, val):
try:
assert val in (True, False)
except AssertionError:
msg = f"Invalid value for {name}. Must be True or False."
raise ValueError(msg)
def _validate_int_format(self, name, val):
    """Ensure *val* is a digits-only width string (empty string allowed)."""
    if val == "":
        return
    try:
        assert isinstance(val, str)
        assert val.isdigit()
    except AssertionError:
        msg = f"Invalid value for {name}. Must be an integer format string."
        raise ValueError(msg)
def _validate_float_format(self, name, val):
    """Ensure *val* looks like a float format such as "10.2" or ".3f".

    Accepts at most one ".", digits (or nothing) on the left, and digits
    optionally terminated by a literal "f" on the right."""
    if val == "":
        return
    try:
        assert isinstance(val, str)
        assert "." in val
        bits = val.split(".")
        assert len(bits) <= 2
        assert bits[0] == "" or bits[0].isdigit()
        # Short-circuiting protects the bits[1][-1] access when empty.
        assert (
            bits[1] == ""
            or bits[1].isdigit()
            or (bits[1][-1] == "f" and bits[1].rstrip("f").isdigit())
        )
    except AssertionError:
        msg = f"Invalid value for {name}. Must be a float format string."
        raise ValueError(msg)
def _validate_function(self, name, val):
try:
assert hasattr(val, "__call__")
except AssertionError:
msg = f"Invalid value for {name}. Must be a function."
raise ValueError(msg)
def _validate_hrules(self, name, val):
    """Ensure *val* is a member of the HRuleStyle enum."""
    try:
        assert val in list(HRuleStyle)
    except AssertionError:
        msg = f"Invalid value for {name}. Must be HRuleStyle."
        raise ValueError(msg)
def _validate_vrules(self, name, val):
    """Ensure *val* is a member of the VRuleStyle enum."""
    try:
        assert val in list(VRuleStyle)
    except AssertionError:
        msg = f"Invalid value for {name}. Must be VRuleStyle."
        raise ValueError(msg)
def _validate_field_name(self, name, val):
    """Ensure *val* is one of this table's field names, or None."""
    try:
        assert (val in self._field_names) or (val is None)
    except AssertionError:
        msg = f"Invalid field name: {val}"
        raise ValueError(msg)
def _validate_all_field_names(self, name, val):
try:
for x in val:
self._validate_field_name(name, x)
except AssertionError:
msg = "Fields must be a sequence of field names"
raise ValueError(msg)
def _validate_single_char(self, name, val):
    """Ensure *val* renders exactly one character cell wide.

    Width is measured with _str_block_width, so wide (CJK) characters
    and escape sequences are accounted for."""
    try:
        assert _str_block_width(val) == 1
    except AssertionError:
        msg = f"Invalid value for {name}. Must be a string of length 1."
        raise ValueError(msg)
def _validate_attributes(self, name, val):
    """Ensure *val* is a dict of HTML attribute name/value pairs."""
    try:
        assert isinstance(val, dict)
    except AssertionError:
        msg = "Attributes must be a dictionary of name/value pairs"
        raise TypeError(msg)
##############################
# ATTRIBUTE MANAGEMENT #
##############################
@property
def rows(self) -> list[RowType]:
    """A shallow copy of the table's row data."""
    return self._rows[:]
@property
def dividers(self) -> list[bool]:
    """A copy of the per-row divider flags."""
    return self._dividers[:]
@property
def xhtml(self) -> bool:
    """Print <br/> tags if True, <br> tags if False"""
    return self._xhtml
@xhtml.setter
def xhtml(self, val: bool) -> None:
    self._validate_option("xhtml", val)
    self._xhtml = val
@property
def none_format(self) -> dict[str, str | None]:
    """Per-field replacement strings for None values."""
    return self._none_format
@none_format.setter
def none_format(self, val: str | dict[str, str | None] | None):
    """Representation of None values:
    Arguments:
        val - The alternative representation to be used for None values
    """
    if not self._field_names:
        self._none_format = {}
    elif isinstance(val, str):
        # Reset all fields first, then validate and apply the new value
        # uniformly to every field.
        for field in self._field_names:
            self._none_format[field] = None
        self._validate_none_format(val)
        for field in self._field_names:
            self._none_format[field] = val
    elif isinstance(val, dict) and val:
        # Per-field mapping: only the listed fields are updated.
        for field, fval in val.items():
            self._validate_none_format(fval)
            self._none_format[field] = fval
    else:
        # None (or empty dict): clear the replacement for every field.
        for field in self._field_names:
            self._none_format[field] = None
@property
def field_names(self) -> list[str]:
    """List or tuple of field names

    When setting field_names, if there are already field names the new list
    of field names must be the same length. Columns are renamed and row data
    remains unchanged."""
    return self._field_names
@field_names.setter
def field_names(self, val: Sequence[Any]) -> None:
    # Field names are always stored as strings.
    val = cast("list[str]", [str(x) for x in val])
    self._validate_option("field_names", val)
    old_names = None
    if self._field_names:
        old_names = self._field_names[:]
    self._field_names = val
    self._column_specific_args()
    if self._align and old_names:
        # Carry each column's alignment over to its new name...
        for old_name, new_name in zip(old_names, val):
            self._align[new_name] = self._align[old_name]
        # ...then drop entries for names that no longer exist.
        # Bug fix: the stale-key check must test membership in the NEW
        # field-name list, not in self._align — old names are always
        # still keys of self._align at this point, so the previous
        # check ("old_name not in self._align") never removed anything
        # and renamed-away entries accumulated forever.
        for old_name in old_names:
            if old_name not in self._field_names:
                self._align.pop(old_name)
    elif self._align:
        # No rename to track: spread the base alignment to every field.
        for field_name in self._field_names:
            self._align[field_name] = self._align[BASE_ALIGN_VALUE]
    else:
        self.align = "c"
    if self._valign and old_names:
        for old_name, new_name in zip(old_names, val):
            self._valign[new_name] = self._valign[old_name]
        # Same stale-key fix as for self._align above.
        for old_name in old_names:
            if old_name not in self._field_names:
                self._valign.pop(old_name)
    else:
        self.valign = "t"
@property
def align(self) -> dict[str, AlignType]:
    """Controls alignment of fields
    Arguments:
        align - alignment, one of "l", "c", or "r" """
    return self._align
@align.setter
def align(self, val: AlignType | dict[str, AlignType] | None) -> None:
    if isinstance(val, str):
        # Single code: apply to every field (or store as the base value
        # when no fields exist yet).
        self._validate_align(val)
        if not self._field_names:
            self._align = {BASE_ALIGN_VALUE: val}
        else:
            for field in self._field_names:
                self._align[field] = val
    elif isinstance(val, dict) and val:
        # Per-field mapping: only the listed fields are updated.
        for field, fval in val.items():
            self._validate_align(fval)
            self._align[field] = fval
    else:
        # None (or empty dict): reset everything to centred.
        if not self._field_names:
            self._align = {BASE_ALIGN_VALUE: "c"}
        else:
            for field in self._field_names:
                self._align[field] = "c"
@property
def valign(self) -> dict[str, VAlignType]:
    """Controls vertical alignment of fields
    Arguments:
        valign - vertical alignment, one of "t", "m", or "b" """
    return self._valign
@valign.setter
def valign(self, val: VAlignType | dict[str, VAlignType] | None) -> None:
    if not self._field_names:
        self._valign = {}
    if isinstance(val, str):
        self._validate_valign(val)
        for field in self._field_names:
            self._valign[field] = val
    elif isinstance(val, dict) and val:
        for field, fval in val.items():
            self._validate_valign(fval)
            self._valign[field] = fval
    else:
        # None (or empty dict): reset everything to top-aligned.
        for field in self._field_names:
            self._valign[field] = "t"
@property
def max_width(self) -> dict[str, int]:
    """Controls maximum width of fields
    Arguments:
        max_width - maximum width integer"""
    return self._max_width
@max_width.setter
def max_width(self, val: int | dict[str, int] | None) -> None:
    if isinstance(val, int):
        self._validate_option("max_width", val)
        for field in self._field_names:
            self._max_width[field] = val
    elif isinstance(val, dict) and val:
        for field, fval in val.items():
            self._validate_option("max_width", fval)
            self._max_width[field] = fval
    else:
        # None clears all per-field maximums.
        self._max_width = {}
@property
def min_width(self) -> dict[str, int]:
    """Controls minimum width of fields
    Arguments:
        min_width - minimum width integer"""
    return self._min_width
@min_width.setter
def min_width(self, val: int | dict[str, int] | None) -> None:
    if isinstance(val, int):
        self._validate_option("min_width", val)
        for field in self._field_names:
            self._min_width[field] = val
    elif isinstance(val, dict) and val:
        for field, fval in val.items():
            self._validate_option("min_width", fval)
            self._min_width[field] = fval
    else:
        # None clears all per-field minimums.
        self._min_width = {}
@property
def min_table_width(self) -> int | None:
    """Minimum overall table width, or None for no minimum."""
    return self._min_table_width
@min_table_width.setter
def min_table_width(self, val: int) -> None:
    self._validate_option("min_table_width", val)
    self._min_table_width = val
@property
def max_table_width(self) -> int | None:
    """Maximum overall table width, or None for no maximum."""
    return self._max_table_width
@max_table_width.setter
def max_table_width(self, val: int) -> None:
    self._validate_option("max_table_width", val)
    self._max_table_width = val
@property
def fields(self) -> Sequence[str | None] | None:
    """List or tuple of field names to include in displays"""
    return self._fields
@fields.setter
def fields(self, val: Sequence[str | None]) -> None:
    self._validate_option("fields", val)
    self._fields = val
@property
def title(self) -> str | None:
    """Optional table title
    Arguments:
        title - table title"""
    return self._title
@title.setter
def title(self, val: str) -> None:
    # Coerced to str; no further validation is applied.
    self._title = str(val)
@property
def start(self) -> int:
    """Start index of the range of rows to print
    Arguments:
        start - index of first data row to include in output"""
    return self._start
@start.setter
def start(self, val: int) -> None:
    self._validate_option("start", val)
    self._start = val
@property
def end(self) -> int | None:
    """End index of the range of rows to print
    Arguments:
        end - index of last data row to include in output PLUS ONE (list slice style)"""
    return self._end
@end.setter
def end(self, val: int) -> None:
    self._validate_option("end", val)
    self._end = val
@property
def sortby(self) -> str | None:
    """Name of field by which to sort rows
    Arguments:
        sortby - field name to sort by"""
    return self._sortby
@sortby.setter
def sortby(self, val: str | None) -> None:
    # Validated against the current field names (None disables sorting).
    self._validate_option("sortby", val)
    self._sortby = val
@property
def reversesort(self) -> bool:
    """Controls direction of sorting (ascending vs descending)
    Arguments:
        reversesort - set to True to sort by descending order, or False to sort by
        ascending order"""
    return self._reversesort
@reversesort.setter
def reversesort(self, val: bool) -> None:
    self._validate_option("reversesort", val)
    self._reversesort = val
@property
def sort_key(self) -> Callable[[RowType], SupportsRichComparison]:
    """Sorting key function, applied to data points before sorting
    Arguments:
        sort_key - a function which takes one argument and returns something to be
        sorted"""
    return self._sort_key
@sort_key.setter
def sort_key(self, val: Callable[[RowType], SupportsRichComparison]) -> None:
    self._validate_option("sort_key", val)
    self._sort_key = val
@property
def row_filter(self) -> Callable[[RowType], bool]:
    """Filter function, applied to data points
    Arguments:
        row_filter - a function which takes one argument and returns a Boolean"""
    return self._row_filter
@row_filter.setter
def row_filter(self, val: Callable[[RowType], bool]) -> None:
    self._validate_option("row_filter", val)
    self._row_filter = val
@property
def header(self) -> bool:
    """Controls printing of table header with field names
    Arguments:
        header - print a header showing field names (True or False)"""
    return self._header
@header.setter
def header(self, val: bool) -> None:
    self._validate_option("header", val)
    self._header = val
@property
def use_header_width(self) -> bool:
    """Controls whether header is included in computing width
    Arguments:
        use_header_width - respect width of fieldname in header to calculate column
            width (True or False)
    """
    return self._use_header_width
@use_header_width.setter
def use_header_width(self, val: bool) -> None:
    self._validate_option("use_header_width", val)
    self._use_header_width = val
@property
def header_style(self) -> HeaderStyleType:
    """Controls stylisation applied to field names in header
    Arguments:
        header_style - stylisation to apply to field names in header
            ("cap", "title", "upper", "lower" or None)"""
    return self._header_style
@header_style.setter
def header_style(self, val: HeaderStyleType) -> None:
    self._validate_header_style(val)
    self._header_style = val
@property
def border(self) -> bool:
    """Controls printing of border around table
    Arguments:
        border - print a border around the table (True or False)"""
    return self._border
@border.setter
def border(self, val: bool) -> None:
    self._validate_option("border", val)
    self._border = val
@property
def preserve_internal_border(self) -> bool:
    """Controls printing of border inside table
    Arguments:
        preserve_internal_border - print a border inside the table even if
            border is disabled (True or False)"""
    return self._preserve_internal_border
@preserve_internal_border.setter
def preserve_internal_border(self, val: bool) -> None:
    self._validate_option("preserve_internal_border", val)
    self._preserve_internal_border = val
@property
def hrules(self) -> HRuleStyle:
    """Controls printing of horizontal rules after rows
    Arguments:
        hrules - horizontal rules style. Allowed values: HRuleStyle"""
    return self._hrules
@hrules.setter
def hrules(self, val: HRuleStyle) -> None:
    self._validate_option("hrules", val)
    self._hrules = val
@property
def vrules(self) -> VRuleStyle:
    """Controls printing of vertical rules between columns
    Arguments:
        vrules - vertical rules style. Allowed values: VRuleStyle"""
    return self._vrules
@vrules.setter
def vrules(self, val: VRuleStyle) -> None:
    self._validate_option("vrules", val)
    self._vrules = val
@property
def int_format(self) -> dict[str, str]:
    """Controls formatting of integer data
    Arguments:
        int_format - integer format string"""
    return self._int_format
@int_format.setter
def int_format(self, val: str | dict[str, str] | None) -> None:
    if isinstance(val, str):
        # Single format string: apply to every field.
        self._validate_option("int_format", val)
        for field in self._field_names:
            self._int_format[field] = val
    elif isinstance(val, dict) and val:
        for field, fval in val.items():
            self._validate_option("int_format", fval)
            self._int_format[field] = fval
    else:
        # None clears all per-field formats.
        self._int_format = {}
@property
def float_format(self) -> dict[str, str]:
    """Controls formatting of floating point data
    Arguments:
        float_format - floating point format string"""
    return self._float_format
@float_format.setter
def float_format(self, val: str | dict[str, str] | None) -> None:
    if isinstance(val, str):
        self._validate_option("float_format", val)
        for field in self._field_names:
            self._float_format[field] = val
    elif isinstance(val, dict) and val:
        for field, fval in val.items():
            self._validate_option("float_format", fval)
            self._float_format[field] = fval
    else:
        self._float_format = {}
@property
def custom_format(self) -> dict[str, Callable[[str, Any], str]]:
    """Controls formatting of any column using callable
    Arguments:
        custom_format - Dictionary of field_name and callable"""
    return self._custom_format
@custom_format.setter
def custom_format(
    self,
    val: Callable[[str, Any], str] | dict[str, Callable[[str, Any], str]] | None,
):
    if val is None:
        self._custom_format = {}
    elif isinstance(val, dict):
        # NOTE(review): error messages here use the name "custom_value"
        # rather than "custom_format" — confirm whether intentional.
        for k, v in val.items():
            self._validate_function(f"custom_value.{k}", v)
        self._custom_format = val
    elif hasattr(val, "__call__"):
        # A single callable is applied to every field.
        self._validate_function("custom_value", val)
        for field in self._field_names:
            self._custom_format[field] = val
    else:
        msg = "The custom_format property need to be a dictionary or callable"
        raise TypeError(msg)
@property
def padding_width(self) -> int:
    """The number of empty spaces between a column's edge and its content
    Arguments:
        padding_width - number of spaces, must be a non-negative integer"""
    return self._padding_width
@padding_width.setter
def padding_width(self, val: int) -> None:
    self._validate_option("padding_width", val)
    self._padding_width = val
@property
def left_padding_width(self) -> int | None:
    """The number of empty spaces between a column's left edge and its content
    Arguments:
        left_padding - number of spaces, must be a non-negative integer"""
    return self._left_padding_width
@left_padding_width.setter
def left_padding_width(self, val: int) -> None:
    self._validate_option("left_padding_width", val)
    self._left_padding_width = val
@property
def right_padding_width(self) -> int | None:
    """The number of empty spaces between a column's right edge and its content
    Arguments:
        right_padding - number of spaces, must be a non-negative integer"""
    return self._right_padding_width
@right_padding_width.setter
def right_padding_width(self, val: int) -> None:
    self._validate_option("right_padding_width", val)
    self._right_padding_width = val
@property
def vertical_char(self) -> str:
    """The character used when printing table borders to draw vertical lines
    Arguments:
        vertical_char - single character string used to draw vertical lines"""
    return self._vertical_char
@vertical_char.setter
def vertical_char(self, val: str) -> None:
    # Coerce to str first so validation sees the rendered form.
    val = str(val)
    self._validate_option("vertical_char", val)
    self._vertical_char = val
@property
def horizontal_char(self) -> str:
    """The character used when printing table borders to draw horizontal lines
    Arguments:
        horizontal_char - single character string used to draw horizontal lines"""
    return self._horizontal_char
@horizontal_char.setter
def horizontal_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("horizontal_char", val)
    self._horizontal_char = val
@property
def horizontal_align_char(self) -> str:
    """The character used to indicate column alignment in horizontal lines
    Arguments:
        horizontal_align_char - single character string used to indicate alignment"""
    # Bug fix: this getter previously returned
    # ``self._bottom_left_junction_char or self.junction_char`` — an
    # unrelated junction character. The alignment marker must fall back
    # to the plain horizontal rule character when unset (this matches
    # upstream prettytable behaviour and is what the Markdown style,
    # which sets _horizontal_align_char to ":", relies on).
    return self._horizontal_align_char or self.horizontal_char
@horizontal_align_char.setter
def horizontal_align_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("horizontal_align_char", val)
    self._horizontal_align_char = val
# --- Junction characters ---------------------------------------------
# Each specific junction (top/bottom/left/right and the four corners)
# falls back to the generic junction_char when its private attribute is
# unset (None or empty string).
@property
def junction_char(self) -> str:
    """The character used when printing table borders to draw line junctions
    Arguments:
        junction_char - single character string used to draw line junctions"""
    return self._junction_char
@junction_char.setter
def junction_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("junction_char", val)
    self._junction_char = val
@property
def top_junction_char(self) -> str:
    """The character used when printing table borders to draw top line junctions
    Arguments:
        top_junction_char - single character string used to draw top line junctions"""
    return self._top_junction_char or self.junction_char
@top_junction_char.setter
def top_junction_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("top_junction_char", val)
    self._top_junction_char = val
@property
def bottom_junction_char(self) -> str:
    """The character used when printing table borders to draw bottom line junctions
    Arguments:
        bottom_junction_char -
        single character string used to draw bottom line junctions"""
    return self._bottom_junction_char or self.junction_char
@bottom_junction_char.setter
def bottom_junction_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("bottom_junction_char", val)
    self._bottom_junction_char = val
@property
def right_junction_char(self) -> str:
    """The character used when printing table borders to draw right line junctions
    Arguments:
        right_junction_char -
        single character string used to draw right line junctions"""
    return self._right_junction_char or self.junction_char
@right_junction_char.setter
def right_junction_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("right_junction_char", val)
    self._right_junction_char = val
@property
def left_junction_char(self) -> str:
    """The character used when printing table borders to draw left line junctions
    Arguments:
        left_junction_char - single character string used to draw left line junctions"""
    return self._left_junction_char or self.junction_char
@left_junction_char.setter
def left_junction_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("left_junction_char", val)
    self._left_junction_char = val
@property
def top_right_junction_char(self) -> str:
    """
    The character used when printing table borders to draw top-right line junctions
    Arguments:
        top_right_junction_char -
        single character string used to draw top-right line junctions"""
    return self._top_right_junction_char or self.junction_char
@top_right_junction_char.setter
def top_right_junction_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("top_right_junction_char", val)
    self._top_right_junction_char = val
@property
def top_left_junction_char(self) -> str:
    """
    The character used when printing table borders to draw top-left line junctions
    Arguments:
        top_left_junction_char -
        single character string used to draw top-left line junctions"""
    return self._top_left_junction_char or self.junction_char
@top_left_junction_char.setter
def top_left_junction_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("top_left_junction_char", val)
    self._top_left_junction_char = val
@property
def bottom_right_junction_char(self) -> str:
    """The character used when printing table borders
    to draw bottom-right line junctions
    Arguments:
        bottom_right_junction_char -
        single character string used to draw bottom-right line junctions"""
    return self._bottom_right_junction_char or self.junction_char
@bottom_right_junction_char.setter
def bottom_right_junction_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("bottom_right_junction_char", val)
    self._bottom_right_junction_char = val
@property
def bottom_left_junction_char(self) -> str:
    """The character used when printing table borders
    to draw bottom-left line junctions
    Arguments:
        bottom_left_junction_char -
        single character string used to draw bottom-left line junctions"""
    return self._bottom_left_junction_char or self.junction_char
@bottom_left_junction_char.setter
def bottom_left_junction_char(self, val: str) -> None:
    val = str(val)
    self._validate_option("bottom_left_junction_char", val)
    self._bottom_left_junction_char = val
@property
def format(self) -> bool:
    """Controls whether or not HTML tables are formatted to match styling options
    Arguments:
        format - True or False"""
    return self._format
@format.setter
def format(self, val: bool) -> None:
    self._validate_option("format", val)
    self._format = val
@property
def print_empty(self) -> bool:
    """Controls whether or not empty tables produce a header and frame or just an
    empty string
    Arguments:
        print_empty - True or False"""
    return self._print_empty
@print_empty.setter
def print_empty(self, val: bool) -> None:
    self._validate_option("print_empty", val)
    self._print_empty = val
@property
def attributes(self) -> dict[str, str]:
    """A dictionary of HTML attribute name/value pairs to be included in the
    <table> tag when printing HTML
    Arguments:
        attributes - dictionary of attributes"""
    return self._attributes
@attributes.setter
def attributes(self, val: dict[str, str]) -> None:
    self._validate_option("attributes", val)
    self._attributes = val
@property
def oldsortslice(self) -> bool:
    """oldsortslice - Slice rows before sorting in the "old style" """
    return self._oldsortslice
@oldsortslice.setter
def oldsortslice(self, val: bool) -> None:
    self._validate_option("oldsortslice", val)
    self._oldsortslice = val
@property
def escape_header(self) -> bool:
    """Escapes the text within a header (True or False)"""
    return self._escape_header
@escape_header.setter
def escape_header(self, val: bool) -> None:
    self._validate_option("escape_header", val)
    self._escape_header = val
@property
def escape_data(self) -> bool:
    """Escapes the text within a data field (True or False)"""
    return self._escape_data
@escape_data.setter
def escape_data(self, val: bool) -> None:
    self._validate_option("escape_data", val)
    self._escape_data = val
@property
def break_on_hyphens(self) -> bool:
    """Break long lines on hyphens (True or False)"""
    return self._break_on_hyphens
@break_on_hyphens.setter
def break_on_hyphens(self, val: bool) -> None:
    self._validate_option("break_on_hyphens", val)
    self._break_on_hyphens = val
##############################
# OPTION MIXER #
##############################
def _get_options(self, kwargs: Mapping[str, Any]) -> OptionsType:
    """Build the effective option set for a single rendering call.

    Keyword arguments override the persistent attribute values; the
    overrides are validated here because they bypass the property
    setters."""
    def _resolve(option: str) -> Any:
        if option in kwargs:
            self._validate_option(option, kwargs[option])
            return kwargs[option]
        return getattr(self, option)
    return cast(OptionsType, {option: _resolve(option) for option in self._options})
##############################
# PRESET STYLE LOGIC #
##############################
def set_style(self, style: TableStyle) -> None:
self._set_default_style()
self._style = style
if style == TableStyle.MSWORD_FRIENDLY:
self._set_msword_style()
elif style == TableStyle.PLAIN_COLUMNS:
self._set_columns_style()
elif style == TableStyle.MARKDOWN:
self._set_markdown_style()
elif style == TableStyle.ORGMODE:
self._set_orgmode_style()
elif style == TableStyle.DOUBLE_BORDER:
self._set_double_border_style()
elif style == TableStyle.SINGLE_BORDER:
self._set_single_border_style()
elif style == TableStyle.RANDOM:
self._set_random_style()
elif style != TableStyle.DEFAULT:
msg = "Invalid pre-set style"
raise ValueError(msg)
def _set_orgmode_style(self) -> None:
    """Emacs org-mode table rendering (toggled via a single flag)."""
    self.orgmode = True
def _set_markdown_style(self) -> None:
    """GitHub-flavoured Markdown table style."""
    self.header = True
    self.border = True
    # Private attributes are assigned directly here, deliberately
    # skipping the option-validation in the property setters.
    self._hrules = HRuleStyle.HEADER
    self.padding_width = 1
    self.left_padding_width = 1
    self.right_padding_width = 1
    self.vertical_char = "|"
    self.junction_char = "|"
    self._horizontal_align_char = ":"
def _set_default_style(self) -> None:
    """Reset all drawing options to the library defaults."""
    self.header = True
    self.border = True
    self._hrules = HRuleStyle.FRAME
    self._vrules = VRuleStyle.ALL
    self.padding_width = 1
    self.left_padding_width = 1
    self.right_padding_width = 1
    self.vertical_char = "|"
    self.horizontal_char = "-"
    self._horizontal_align_char = None
    self.junction_char = "+"
    # Clearing the specific junction overrides makes them fall back to
    # junction_char via their property getters.
    self._top_junction_char = None
    self._bottom_junction_char = None
    self._right_junction_char = None
    self._left_junction_char = None
    self._top_right_junction_char = None
    self._top_left_junction_char = None
    self._bottom_right_junction_char = None
    self._bottom_left_junction_char = None
def _set_msword_style(self) -> None:
    """Style that pastes cleanly into Microsoft Word (no horizontal rules)."""
    self.header = True
    self.border = True
    self._hrules = HRuleStyle.NONE
    self.padding_width = 1
    self.left_padding_width = 1
    self.right_padding_width = 1
    self.vertical_char = "|"
def _set_columns_style(self) -> None:
    """Borderless, whitespace-separated plain columns."""
    self.header = True
    self.border = False
    self.padding_width = 1
    self.left_padding_width = 0
    self.right_padding_width = 8
def _set_double_border_style(self) -> None:
    """Box-drawing style using double-line Unicode characters."""
    self.horizontal_char = "═"
    self.vertical_char = "║"
    self.junction_char = "╬"
    self.top_junction_char = "╦"
    self.bottom_junction_char = "╩"
    self.right_junction_char = "╣"
    self.left_junction_char = "╠"
    self.top_right_junction_char = "╗"
    self.top_left_junction_char = "╔"
    self.bottom_right_junction_char = "╝"
    self.bottom_left_junction_char = "╚"
def _set_single_border_style(self) -> None:
    """Box-drawing style using single-line Unicode characters."""
    self.horizontal_char = "─"
    self.vertical_char = "│"
    self.junction_char = "┼"
    self.top_junction_char = "┬"
    self.bottom_junction_char = "┴"
    self.right_junction_char = "┤"
    self.left_junction_char = "├"
    self.top_right_junction_char = "┐"
    self.top_left_junction_char = "┌"
    self.bottom_right_junction_char = "┘"
    self.bottom_left_junction_char = "└"
def _set_random_style(self) -> None:
    # Just for fun!
    import random
    self.header = random.choice((True, False))
    self.border = random.choice((True, False))
    self._hrules = random.choice(list(HRuleStyle))
    self._vrules = random.choice(list(VRuleStyle))
    self.left_padding_width = random.randint(0, 5)
    self.right_padding_width = random.randint(0, 5)
    self.vertical_char = random.choice(r"~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
    self.horizontal_char = random.choice(r"~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
    self.junction_char = random.choice(r"~!@#$%^&*()_+|-=\{}[];':\",./;<>?")
    self.preserve_internal_border = random.choice((True, False))
##############################
# DATA INPUT METHODS #
##############################
def add_rows(self, rows: Sequence[RowType], *, divider: bool = False) -> None:
    """Add rows to the table

    Arguments:
        rows - rows of data, should be an iterable of lists, each list with as many
            elements as the table has fields
        divider - add row divider after the row block
    """
    if not rows:
        return
    # Only the final row of the block carries the divider flag.
    *head, last = rows
    for row in head:
        self.add_row(row)
    self.add_row(last, divider=divider)
def add_row(self, row: RowType, *, divider: bool = False) -> None:
    """Add a row to the table

    Arguments:
        row - row of data, should be a list with as many elements as the table
            has fields
        divider - if True, draw a horizontal rule after this row"""
    expected = len(self._field_names)
    if self._field_names and len(row) != expected:
        msg = (
            "Row has incorrect number of values, "
            f"(actual) {len(row)}!={expected} (expected)"
        )
        raise ValueError(msg)
    if not self._field_names:
        # The first row defines the column count; synthesize header names.
        self.field_names = [f"Field {n + 1}" for n in range(len(row))]
    self._rows.append(list(row))
    self._dividers.append(divider)
def del_row(self, row_index: int) -> None:
    """Delete a row from the table

    Arguments:
        row_index - The index of the row you want to delete. Indexing starts at 0."""
    if row_index >= len(self._rows):
        msg = (
            f"Can't delete row at index {row_index}, "
            f"table only has {len(self._rows)} rows"
        )
        raise IndexError(msg)
    # Keep the divider flags in step with the row data.
    del self._rows[row_index]
    del self._dividers[row_index]
def add_divider(self) -> None:
    """Add a divider to the table"""
    # Mark the most recently added row; a no-op on an empty table.
    if self._dividers:
        self._dividers[-1] = True
def add_column(
    self,
    fieldname: str,
    column: Sequence[Any],
    align: AlignType = "c",
    valign: VAlignType = "t",
) -> None:
    """Add a column to the table.

    Arguments:
        fieldname - name of the field to contain the new column of data
        column - column of data, should be a list with as many elements as the
            table has rows
        align - desired alignment for this column - "l" for left, "c" for centre and
            "r" for right
        valign - desired vertical alignment for new columns - "t" for top,
            "m" for middle and "b" for bottom"""
    # The column length must match the row count, unless the table has
    # no rows yet (in which case this column defines it).
    if len(self._rows) not in (0, len(column)):
        msg = (
            f"Column length {len(column)} does not match number of rows "
            f"{len(self._rows)}"
        )
        raise ValueError(msg)
    self._validate_align(align)
    self._validate_valign(valign)
    self._field_names.append(fieldname)
    self._align[fieldname] = align
    self._valign[fieldname] = valign
    for i, value in enumerate(column):
        # Grow the table when this column is longer than the existing rows.
        if len(self._rows) < i + 1:
            self._rows.append([])
            self._dividers.append(False)
        self._rows[i].append(value)
def add_autoindex(self, fieldname: str = "Index") -> None:
    """Insert an auto-incrementing index column as the first column.

    Arguments:

    fieldname - name of the field to contain the new column of data"""
    self._field_names.insert(0, fieldname)
    # Fall back to centre/top when no table-wide default was configured.
    self._align[fieldname] = self._kwargs["align"] or "c"
    self._valign[fieldname] = self._kwargs["valign"] or "t"
    for index, row in enumerate(self._rows, start=1):
        row.insert(0, index)
def del_column(self, fieldname: str) -> None:
    """Delete a column from the table

    Arguments:

    fieldname - The field name of the column you want to delete."""
    if fieldname not in self._field_names:
        field_list = ", ".join(map(repr, self._field_names))
        msg = (
            f"Can't delete column {fieldname!r} which is not a field name of "
            f"this table. Field names are: {field_list}"
        )
        raise ValueError(msg)
    col_index = self._field_names.index(fieldname)
    del self._field_names[col_index]
    # Drop the matching cell from every existing row.
    for row in self._rows:
        del row[col_index]
def clear_rows(self) -> None:
    """Delete all rows from the table but keep the current field names"""
    self._rows, self._dividers = [], []
def clear(self) -> None:
    """Reset the table to an empty state: drop every row, divider, field
    name and cached width, maintaining nothing but styling options."""
    self.clear_rows()
    self._field_names = []
    self._widths = []
##############################
# MISC PUBLIC METHODS #
##############################
def copy(self) -> Self:
    """Return an independent deep copy of this table."""
    from copy import deepcopy

    return deepcopy(self)
def get_formatted_string(self, out_format: str = "text", **kwargs) -> str:
    """Return string representation of specified format of table in current state.

    Arguments:

    out_format - resulting table format
    kwargs - passed through to function that performs formatting
    """
    # Dispatch table maps each format name to the rendering method's name;
    # looking up attribute names (not bound methods) keeps the invalid-format
    # error path free of any attribute access.
    method_by_format = {
        "text": "get_string",
        "html": "get_html_string",
        "json": "get_json_string",
        "csv": "get_csv_string",
        "latex": "get_latex_string",
        "mediawiki": "get_mediawiki_string",
    }
    method_name = method_by_format.get(out_format)
    if method_name is None:
        msg = (
            f"Invalid format {out_format}. "
            "Must be one of: text, html, json, csv, latex or mediawiki"
        )
        raise ValueError(msg)
    return getattr(self, method_name)(**kwargs)
##############################
# MISC PRIVATE METHODS #
##############################
def _format_value(self, field: str, value: Any) -> str:
if isinstance(value, int) and field in self._int_format:
return (f"%{self._int_format[field]}d") % value
elif isinstance(value, float) and field in self._float_format:
return (f"%{self._float_format[field]}f") % value
formatter = self._custom_format.get(field, (lambda f, v: str(v)))
return formatter(field, value)
def _compute_table_width(self, options) -> int:
    """Return the total rendered width of the table for the given options.

    Starts from the number of outer border characters implied by the
    vertical-rule style, then adds, for each printed column, its width
    plus per-column padding plus one separator character.
    """
    # Bug fix: the original used two independent ``if`` statements, so when
    # vrules == FRAME the second test's ``else`` branch reset table_width
    # to 0, discarding the 2 border characters. ``elif`` restores the
    # intended three-way choice.
    if options["vrules"] == VRuleStyle.FRAME:
        table_width = 2
    elif options["vrules"] == VRuleStyle.ALL:
        table_width = 1
    else:
        table_width = 0
    per_col_padding = sum(self._get_padding_widths(options))
    for index, fieldname in enumerate(self.field_names):
        if not options["fields"] or (
            options["fields"] and fieldname in options["fields"]
        ):
            # Column width + padding + one separator character.
            table_width += self._widths[index] + per_col_padding + 1
    return table_width
def _compute_widths(self, rows: list[list[str]], options: OptionsType) -> None:
    """Compute and store per-column display widths for *rows* in self._widths.

    Honours per-field max_width/min_width, optional header widths, the
    Markdown divider minimum, and the overall max/min table width and
    title-width constraints (shrinking or growing columns in proportion).
    """
    if options["header"] and options["use_header_width"]:
        # Seed each column with the rendered width of its header cell.
        widths = [_get_size(field)[0] for field in self._field_names]
    else:
        widths = len(self.field_names) * [0]

    for row in rows:
        for index, value in enumerate(row):
            fieldname = self.field_names[index]
            if (
                value == "None"
                and (none_val := self.none_format.get(fieldname)) is not None
            ):
                # Measure the configured None placeholder, not "None".
                value = none_val
            if fieldname in self.max_width:
                widths[index] = max(
                    widths[index],
                    min(_get_size(value)[0], self.max_width[fieldname]),
                )
            else:
                widths[index] = max(widths[index], _get_size(value)[0])
            if fieldname in self.min_width:
                widths[index] = max(widths[index], self.min_width[fieldname])

            if self._style == TableStyle.MARKDOWN:
                # Markdown needs at least one hyphen in the divider
                if self._align[fieldname] in ("l", "r"):
                    min_width = 1
                else:  # "c"
                    min_width = 3
                widths[index] = max(min_width, widths[index])

    self._widths = widths
    per_col_padding = sum(self._get_padding_widths(options))

    # Are we exceeding max_table_width?
    if self._max_table_width:
        table_width = self._compute_table_width(options)
        if table_width > self._max_table_width:
            # Shrink widths in proportion
            markup_chars = per_col_padding * len(widths) + len(widths) - 1
            scale = (self._max_table_width - markup_chars) / (
                table_width - markup_chars
            )
            self._widths = [max(1, int(w * scale)) for w in widths]

    # Are we under min_table_width or title width?
    if self._min_table_width or options["title"]:
        if options["title"]:
            title_width = _str_block_width(options["title"]) + per_col_padding
            if options["vrules"] in (VRuleStyle.FRAME, VRuleStyle.ALL):
                # Account for the two outer border characters.
                title_width += 2
        else:
            title_width = 0
        min_table_width = self.min_table_width or 0
        min_width = max(title_width, min_table_width)
        if options["border"]:
            borders = len(widths) + 1
        elif options["preserve_internal_border"]:
            borders = len(widths)
        else:
            borders = 0

        # Subtract padding for each column and borders
        min_width -= sum([per_col_padding for _ in widths]) + borders
        # What is being scaled is content so we sum column widths
        content_width = sum(widths) or 1

        if content_width < min_width:
            # Grow widths in proportion
            scale = 1.0 * min_width / content_width
            widths = [int(w * scale) for w in widths]
            if sum(widths) < min_width:
                # Integer truncation left us short; pad the last column.
                widths[-1] += min_width - sum(widths)
            self._widths = widths
def _get_padding_widths(self, options: OptionsType) -> tuple[int, int]:
if options["left_padding_width"] is not None:
lpad = options["left_padding_width"]
else:
lpad = options["padding_width"]
if options["right_padding_width"] is not None:
rpad = options["right_padding_width"]
else:
rpad = options["padding_width"]
return lpad, rpad
def _get_rows(self, options: OptionsType) -> list[RowType]:
    """Return only those data rows that should be printed, based on
    filtering, slicing and sorting.

    Arguments:

    options - dictionary of option settings."""
    if options["oldsortslice"]:
        # Legacy behaviour: apply the start/end slice BEFORE sorting.
        rows = self._rows[options["start"] : options["end"]]
    else:
        rows = self._rows

    # Keep only rows accepted by the row filter.
    rows = [row for row in rows if options["row_filter"](row)]

    # Sort
    if options["sortby"]:
        sortindex = self._field_names.index(options["sortby"])
        # Decorate: prepend the sort column so sort_key sees it first.
        rows = [[row[sortindex]] + row for row in rows]
        # Sort
        rows.sort(reverse=options["reversesort"], key=options["sort_key"])
        # Undecorate
        rows = [row[1:] for row in rows]

    # Slice if necessary
    if not options["oldsortslice"]:
        rows = rows[options["start"] : options["end"]]

    return rows
def _get_dividers(self, options: OptionsType) -> list[bool]:
"""Return only those dividers that should be printed, based on slicing.
Arguments:
options - dictionary of option settings."""
if options["oldsortslice"]:
dividers = self._dividers[options["start"] : options["end"]]
else:
dividers = self._dividers
if options["sortby"]:
dividers = [False for divider in dividers]
return dividers
def _format_row(self, row: RowType) -> list[str]:
return [
self._format_value(field, value)
for (field, value) in zip(self._field_names, row)
]
def _format_rows(self, rows: list[RowType]) -> list[list[str]]:
return [self._format_row(row) for row in rows]
##############################
# PLAIN TEXT STRING METHODS #
##############################
def get_string(self, **kwargs) -> str:
    """Return string representation of table in current state.

    Arguments:

    title - optional table title
    start - index of first data row to include in output
    end - index of last data row to include in output PLUS ONE (list slice style)
    fields - names of fields (columns) to include
    header - print a header showing field names (True or False)
    use_header_width - reflect width of header (True or False)
    border - print a border around the table (True or False)
    preserve_internal_border - print a border inside the table even if
        border is disabled (True or False)
    hrules - controls printing of horizontal rules after rows.
        Allowed values: HRuleStyle
    vrules - controls printing of vertical rules between columns.
        Allowed values: VRuleStyle
    int_format - controls formatting of integer data
    float_format - controls formatting of floating point data
    custom_format - controls formatting of any column using callable
    padding_width - number of spaces on either side of column data (only used if
        left and right paddings are None)
    left_padding_width - number of spaces on left hand side of column data
    right_padding_width - number of spaces on right hand side of column data
    vertical_char - single character string used to draw vertical lines
    horizontal_char - single character string used to draw horizontal lines
    horizontal_align_char - single character string used to indicate alignment
    junction_char - single character string used to draw line junctions
    top_junction_char - single character string used to draw top line junctions
    bottom_junction_char -
        single character string used to draw bottom line junctions
    right_junction_char - single character string used to draw right line junctions
    left_junction_char - single character string used to draw left line junctions
    top_right_junction_char -
        single character string used to draw top-right line junctions
    top_left_junction_char -
        single character string used to draw top-left line junctions
    bottom_right_junction_char -
        single character string used to draw bottom-right line junctions
    bottom_left_junction_char -
        single character string used to draw bottom-left line junctions
    sortby - name of field to sort rows by
    sort_key - sorting key function, applied to data points before sorting
    reversesort - True or False to sort in descending or ascending order
    row_filter - filter function applied on rows
    print_empty - if True, stringify just the header for an empty table,
        if False return an empty string"""

    options = self._get_options(kwargs)

    lines: list[str] = []

    # Don't think too hard about an empty table
    # Is this the desired behaviour? Maybe we should still print the header?
    if self.rowcount == 0 and (not options["print_empty"] or not options["border"]):
        return ""

    # Get the rows we need to print, taking into account slicing, sorting, etc.
    rows = self._get_rows(options)
    dividers = self._get_dividers(options)

    # Turn all data in all rows into Unicode, formatted as desired
    formatted_rows = self._format_rows(rows)

    # Compute column widths
    self._compute_widths(formatted_rows, options)
    # Cache the interior horizontal rule; reused by the header and dividers.
    self._hrule = self._stringify_hrule(options)

    # Add title
    title = options["title"] or self._title
    if title:
        lines.append(self._stringify_title(title, options))

    # Add header or top of border
    if options["header"]:
        lines.append(self._stringify_header(options))
    elif options["border"] and options["hrules"] in (
        HRuleStyle.ALL,
        HRuleStyle.FRAME,
    ):
        lines.append(self._stringify_hrule(options, where="top_"))
        if title and options["vrules"] in (VRuleStyle.ALL, VRuleStyle.FRAME):
            # Splice the side junction characters into this rule so it
            # connects visually to the title box above it.
            left_j_len = len(self.left_junction_char)
            right_j_len = len(self.right_junction_char)
            lines[-1] = (
                self.left_junction_char
                + lines[-1][left_j_len:-right_j_len]
                + self.right_junction_char
            )

    # Add rows
    for row, divider in zip(formatted_rows[:-1], dividers[:-1]):
        lines.append(self._stringify_row(row, options, self._hrule))
        if divider:
            lines.append(self._stringify_hrule(options))
    if formatted_rows:
        # The final row is given the bottom-style rule so that
        # HRuleStyle.ALL closes the table with the right junctions.
        lines.append(
            self._stringify_row(
                formatted_rows[-1],
                options,
                self._stringify_hrule(options, where="bottom_"),
            )
        )

    # Add bottom of border
    if options["border"] and options["hrules"] == HRuleStyle.FRAME:
        lines.append(self._stringify_hrule(options, where="bottom_"))

    if "orgmode" in self.__dict__ and self.orgmode:
        # Org-mode tables use a plain "|" at both edges of every line.
        left_j_len = len(self.left_junction_char)
        right_j_len = len(self.right_junction_char)
        lines = [
            "|" + new_line[left_j_len:-right_j_len] + "|"
            for old_line in lines
            for new_line in old_line.split("\n")
        ]

    return "\n".join(lines)
def _stringify_hrule(
    self, options: OptionsType, where: Literal["top_", "bottom_", ""] = ""
) -> str:
    """Build one horizontal rule line.

    *where* selects which set of junction-character options to use:
    "top_" and "bottom_" for the table edges, "" for interior rules.
    Returns "" when neither a border nor internal borders are wanted.
    """
    if not options["border"] and not options["preserve_internal_border"]:
        return ""
    lpad, rpad = self._get_padding_widths(options)
    if options["vrules"] in (VRuleStyle.ALL, VRuleStyle.FRAME):
        bits = [options[where + "left_junction_char"]]  # type: ignore[literal-required]
    else:
        bits = [options["horizontal_char"]]
    # For tables with no data or fieldnames
    if not self._field_names:
        bits.append(options[where + "right_junction_char"])  # type: ignore[literal-required]
        return "".join(bits)
    for field, width in zip(self._field_names, self._widths):
        if options["fields"] and field not in options["fields"]:
            continue
        line = (width + lpad + rpad) * options["horizontal_char"]
        # If necessary, add column alignment characters (e.g. ":" for Markdown)
        if self._horizontal_align_char:
            if self._align[field] in ("l", "c"):
                line = " " + self._horizontal_align_char + line[2:]
            if self._align[field] in ("c", "r"):
                line = line[:-2] + self._horizontal_align_char + " "
        bits.append(line)
        if options["vrules"] == VRuleStyle.ALL:
            bits.append(options[where + "junction_char"])  # type: ignore[literal-required]
        else:
            bits.append(options["horizontal_char"])
    if options["vrules"] in (VRuleStyle.ALL, VRuleStyle.FRAME):
        # Swap the trailing interior junction for the right-edge character.
        bits.pop()
        bits.append(options[where + "right_junction_char"])  # type: ignore[literal-required]

    if options["preserve_internal_border"] and not options["border"]:
        # No outer border wanted: strip the edge characters, keeping only
        # interior junctions.
        bits = bits[1:-1]

    return "".join(bits)
def _stringify_title(self, title: str, options: OptionsType) -> str:
    """Build the title box: an optional top rule plus the centred title line."""
    lines: list[str] = []
    lpad, rpad = self._get_padding_widths(options)
    if options["border"]:
        if options["vrules"] == VRuleStyle.ALL:
            # Temporarily downgrade to FRAME so the rule above the title
            # has no interior junctions, then restore the option.
            options["vrules"] = VRuleStyle.FRAME
            lines.append(self._stringify_hrule(options, "top_"))
            options["vrules"] = VRuleStyle.ALL
        elif options["vrules"] == VRuleStyle.FRAME:
            lines.append(self._stringify_hrule(options, "top_"))
    bits: list[str] = []
    endpoint = (
        options["vertical_char"]
        if options["vrules"] in (VRuleStyle.ALL, VRuleStyle.FRAME)
        and options["border"]
        else " "
    )
    bits.append(endpoint)
    title = " " * lpad + title + " " * rpad
    lpad, rpad = self._get_padding_widths(options)
    # Interior width: each column contributes width + padding + one
    # separator character; the final -1 accounts for the closing endpoint.
    sum_widths = sum([n + lpad + rpad + 1 for n in self._widths])
    bits.append(self._justify(title, sum_widths - 1, "c"))
    bits.append(endpoint)
    lines.append("".join(bits))
    return "\n".join(lines)
def _stringify_header(self, options: OptionsType) -> str:
    """Build the header block: an optional top rule, the field-name line,
    and the rule separating the header from the data rows."""
    bits: list[str] = []
    lpad, rpad = self._get_padding_widths(options)
    if options["border"]:
        if options["hrules"] in (HRuleStyle.ALL, HRuleStyle.FRAME):
            bits.append(self._stringify_hrule(options, "top_"))
            if options["title"] and options["vrules"] in (
                VRuleStyle.ALL,
                VRuleStyle.FRAME,
            ):
                # Connect this rule to the title box drawn above it.
                left_j_len = len(self.left_junction_char)
                right_j_len = len(self.right_junction_char)
                bits[-1] = (
                    self.left_junction_char
                    + bits[-1][left_j_len:-right_j_len]
                    + self.right_junction_char
                )
            bits.append("\n")
        if options["vrules"] in (VRuleStyle.ALL, VRuleStyle.FRAME):
            bits.append(options["vertical_char"])
        else:
            bits.append(" ")
    # For tables with no data or field names
    if not self._field_names:
        if options["vrules"] in (VRuleStyle.ALL, VRuleStyle.FRAME):
            bits.append(options["vertical_char"])
        else:
            bits.append(" ")
    for field, width in zip(self._field_names, self._widths):
        if options["fields"] and field not in options["fields"]:
            continue
        # Apply the configured header capitalisation style.
        if self._header_style == "cap":
            fieldname = field.capitalize()
        elif self._header_style == "title":
            fieldname = field.title()
        elif self._header_style == "upper":
            fieldname = field.upper()
        elif self._header_style == "lower":
            fieldname = field.lower()
        else:
            fieldname = field
        if _str_block_width(fieldname) > width:
            # Truncate names wider than their column.
            fieldname = fieldname[:width]
        bits.append(
            " " * lpad
            + self._justify(fieldname, width, self._align[field])
            + " " * rpad
        )
        if options["border"] or options["preserve_internal_border"]:
            if options["vrules"] == VRuleStyle.ALL:
                bits.append(options["vertical_char"])
            else:
                bits.append(" ")

    # If only preserve_internal_border is true, then we just appended
    # a vertical character at the end when we wanted a space
    if not options["border"] and options["preserve_internal_border"]:
        bits.pop()
        bits.append(" ")
    # If vrules is FRAME, then we just appended a space at the end
    # of the last field, when we really want a vertical character
    if options["border"] and options["vrules"] == VRuleStyle.FRAME:
        bits.pop()
        bits.append(options["vertical_char"])

    if (options["border"] or options["preserve_internal_border"]) and options[
        "hrules"
    ] != HRuleStyle.NONE:
        bits.append("\n")
        bits.append(self._hrule)
    return "".join(bits)
def _stringify_row(self, row: list[str], options: OptionsType, hrule: str) -> str:
    """Render one data row, possibly spanning several physical lines.

    Arguments:

    row - list of already-formatted cell strings (mutated in place while
        wrapping over-wide values)
    options - dictionary of option settings
    hrule - rule appended below the row when hrules is HRuleStyle.ALL
    """
    import textwrap

    # First pass: substitute the None placeholder and wrap any line wider
    # than its column.
    for index, field, value, width in zip(
        range(len(row)), self._field_names, row, self._widths
    ):
        # Enforce max widths
        lines = value.split("\n")
        new_lines: list[str] = []
        for line in lines:
            if (
                line == "None"
                and (none_val := self.none_format.get(field)) is not None
            ):
                line = none_val
            if _str_block_width(line) > width:
                line = textwrap.fill(
                    line, width, break_on_hyphens=options["break_on_hyphens"]
                )
            new_lines.append(line)
        lines = new_lines
        value = "\n".join(lines)
        row[index] = value

    # The row's physical height is the height of its tallest cell.
    row_height = 0
    for c in row:
        h = _get_size(c)[1]
        if h > row_height:
            row_height = h

    # bits[y] collects the string fragments that make up physical line y.
    bits: list[list[str]] = []
    lpad, rpad = self._get_padding_widths(options)
    for y in range(row_height):
        bits.append([])
        if options["border"]:
            if options["vrules"] in (VRuleStyle.ALL, VRuleStyle.FRAME):
                bits[y].append(self.vertical_char)
            else:
                bits[y].append(" ")

    for field, value, width in zip(self._field_names, row, self._widths):
        valign = self._valign[field]
        lines = value.split("\n")
        d_height = row_height - len(lines)
        if d_height:
            # Pad short cells with blank lines per their vertical alignment.
            if valign == "m":
                lines = (
                    [""] * int(d_height / 2)
                    + lines
                    + [""] * (d_height - int(d_height / 2))
                )
            elif valign == "b":
                lines = [""] * d_height + lines
            else:
                lines = lines + [""] * d_height

        for y, line in enumerate(lines):
            if options["fields"] and field not in options["fields"]:
                continue

            bits[y].append(
                " " * lpad
                + self._justify(line, width, self._align[field])
                + " " * rpad
            )
            if options["border"] or options["preserve_internal_border"]:
                if options["vrules"] == VRuleStyle.ALL:
                    bits[y].append(self.vertical_char)
                else:
                    bits[y].append(" ")

    # If only preserve_internal_border is true, then we just appended
    # a vertical character at the end when we wanted a space
    if not options["border"] and options["preserve_internal_border"]:
        bits[-1].pop()
        bits[-1].append(" ")

    # If vrules is FRAME, then we just appended a space at the end
    # of the last field, when we really want a vertical character
    for y in range(row_height):
        if options["border"] and options["vrules"] == VRuleStyle.FRAME:
            bits[y].pop()
            bits[y].append(options["vertical_char"])

    if options["border"] and options["hrules"] == HRuleStyle.ALL:
        bits[row_height - 1].append("\n")
        bits[row_height - 1].append(hrule)

    bits_str = ["".join(bits_y) for bits_y in bits]
    return "\n".join(bits_str)
def paginate(self, page_length: int = 58, line_break: str = "\f", **kwargs) -> str:
    """Return the table rendered as pages joined by *line_break*.

    Each page contains at most *page_length* data rows; any remaining
    keyword arguments are forwarded to get_string()."""
    pages: list[str] = []
    start = kwargs.get("start", 0)
    final_end = kwargs.get("end", self.rowcount)
    while True:
        end = min(start + page_length, final_end)
        kwargs["start"], kwargs["end"] = start, end
        pages.append(self.get_string(**kwargs))
        if end == final_end:
            break
        start += page_length
    return line_break.join(pages)
##############################
# CSV STRING METHODS #
##############################
def get_csv_string(self, **kwargs) -> str:
    """Return string representation of CSV formatted table in the current state

    Keyword arguments are first interpreted as table formatting options, and
    then any unused keyword arguments are passed to csv.writer(). For
    example, get_csv_string(header=False, delimiter='\t') would use
    header as a PrettyTable formatting option (skip the header row) and
    delimiter as a csv.writer keyword argument.
    """
    import csv

    options = self._get_options(kwargs)
    # Anything _get_options did not recognise belongs to csv.writer().
    writer_kwargs = {k: v for k, v in kwargs.items() if k not in options}
    buffer = io.StringIO()
    writer = csv.writer(buffer, **writer_kwargs)

    wanted = options["fields"]
    if options.get("header"):
        if wanted:
            writer.writerow([f for f in self._field_names if f in wanted])
        else:
            writer.writerow(self._field_names)

    for row in self._get_rows(options):
        if wanted:
            row = [d for f, d in zip(self._field_names, row) if f in wanted]
        writer.writerow(row)

    return buffer.getvalue()
##############################
# JSON STRING METHODS #
##############################
def get_json_string(self, **kwargs) -> str:
    """Return string representation of JSON formatted table in the current state

    Keyword arguments are first interpreted as table formatting options, and
    then any unused keyword arguments are passed to json.dumps(). For
    example, get_json_string(header=False, indent=2) would use header as
    a PrettyTable formatting option (skip the header row) and indent as a
    json.dumps keyword argument.
    """
    import json

    options = self._get_options(kwargs)
    json_options: dict[str, Any] = {
        "indent": 4,
        "separators": (",", ": "),
        "sort_keys": True,
    }
    # Unrecognised keyword arguments are forwarded to json.dumps(),
    # overriding the defaults above.
    json_options.update(
        {key: value for key, value in kwargs.items() if key not in options}
    )

    wanted = options["fields"]
    objects: list[list[str] | dict[str, Any]] = []
    if options.get("header"):
        if wanted:
            objects.append([f for f in self._field_names if f in wanted])
        else:
            objects.append(self.field_names)

    for row in self._get_rows(options):
        pairs = zip(self._field_names, row)
        if wanted:
            objects.append({f: d for f, d in pairs if f in wanted})
        else:
            objects.append(dict(pairs))

    return json.dumps(objects, **json_options)
##############################
# HTML STRING METHODS #
##############################
def get_html_string(self, **kwargs) -> str:
    """Return string representation of HTML formatted version of table in current
    state.

    Accepts the same formatting options as get_string(), plus:

    escape_header - escapes the text within a header (True or False)
    escape_data - escapes the text within a data field (True or False)
    attributes - dictionary of name/value pairs to include as HTML attributes
        in the <table> tag
    format - Controls whether or not HTML tables are formatted to match
        styling options (True or False)
    xhtml - print <br/> tags if True, <br> tags if False
    break_on_hyphens - Whether long lines are broken on hypens or not,
        default: True
    """
    options = self._get_options(kwargs)
    builder = (
        self._get_formatted_html_string
        if options["format"]
        else self._get_simple_html_string
    )
    return builder(options)
def _get_simple_html_string(self, options: OptionsType) -> str:
    """Render the table as a plain HTML <table> with no inline styling."""
    from html import escape

    lines: list[str] = []
    if options["xhtml"]:
        linebreak = "<br/>"
    else:
        linebreak = "<br>"

    open_tag = ["<table"]
    if options["attributes"]:
        # Emit user-supplied attributes on the <table> tag, escaped.
        for attr_name, attr_value in options["attributes"].items():
            open_tag.append(f' {escape(attr_name)}="{escape(attr_value)}"')
    open_tag.append(">")
    lines.append("".join(open_tag))

    # Title
    title = options["title"] or self._title
    if title:
        lines.append(f"    <caption>{escape(title)}</caption>")

    # Headers
    if options["header"]:
        lines.append("    <thead>")
        lines.append("        <tr>")
        for field in self._field_names:
            if options["fields"] and field not in options["fields"]:
                continue
            if options["escape_header"]:
                field = escape(field)

            lines.append(
                "            <th>{}</th>".format(field.replace("\n", linebreak))
            )
        lines.append("        </tr>")
        lines.append("    </thead>")

    # Data
    lines.append("    <tbody>")
    rows = self._get_rows(options)
    formatted_rows = self._format_rows(rows)
    for row in formatted_rows:
        lines.append("        <tr>")
        for field, datum in zip(self._field_names, row):
            if options["fields"] and field not in options["fields"]:
                continue
            if options["escape_data"]:
                datum = escape(datum)
            lines.append(
                "            <td>{}</td>".format(datum.replace("\n", linebreak))
            )
        lines.append("        </tr>")
    lines.append("    </tbody>")
    lines.append("</table>")

    return "\n".join(lines)
def _get_formatted_html_string(self, options: OptionsType) -> str:
    """Render the table as HTML whose inline styles mirror the text-mode
    formatting options (padding, alignment, border/rule styles)."""
    from html import escape

    lines: list[str] = []
    lpad, rpad = self._get_padding_widths(options)
    if options["xhtml"]:
        linebreak = "<br/>"
    else:
        linebreak = "<br>"

    open_tag = ["<table"]
    if options["border"]:
        # Map the text-mode rule styles onto HTML frame/rules attributes.
        if (
            options["hrules"] == HRuleStyle.ALL
            and options["vrules"] == VRuleStyle.ALL
        ):
            open_tag.append(' frame="box" rules="all"')
        elif (
            options["hrules"] == HRuleStyle.FRAME
            and options["vrules"] == VRuleStyle.FRAME
        ):
            open_tag.append(' frame="box"')
        elif (
            options["hrules"] == HRuleStyle.FRAME
            and options["vrules"] == VRuleStyle.ALL
        ):
            open_tag.append(' frame="box" rules="cols"')
        elif options["hrules"] == HRuleStyle.FRAME:
            open_tag.append(' frame="hsides"')
        elif options["hrules"] == HRuleStyle.ALL:
            open_tag.append(' frame="hsides" rules="rows"')
        elif options["vrules"] == VRuleStyle.FRAME:
            open_tag.append(' frame="vsides"')
        elif options["vrules"] == VRuleStyle.ALL:
            open_tag.append(' frame="vsides" rules="cols"')
    if not options["border"] and options["preserve_internal_border"]:
        open_tag.append(' rules="cols"')
    if options["attributes"]:
        for attr_name, attr_value in options["attributes"].items():
            open_tag.append(f' {escape(attr_name)}="{escape(attr_value)}"')
    open_tag.append(">")
    lines.append("".join(open_tag))

    # Title
    title = options["title"] or self._title
    if title:
        lines.append(f"    <caption>{escape(title)}</caption>")

    # Headers
    if options["header"]:
        lines.append("    <thead>")
        lines.append("        <tr>")
        for field in self._field_names:
            if options["fields"] and field not in options["fields"]:
                continue
            if options["escape_header"]:
                field = escape(field)

            content = field.replace("\n", linebreak)
            lines.append(
                f'            <th style="'
                f"padding-left: {lpad}em; "
                f"padding-right: {rpad}em; "
                f'text-align: center">{content}</th>'
            )
        lines.append("        </tr>")
        lines.append("    </thead>")

    # Data
    lines.append("    <tbody>")
    rows = self._get_rows(options)
    formatted_rows = self._format_rows(rows)
    # Precompute the CSS alignment keywords for every field once.
    aligns: list[str] = []
    valigns: list[str] = []
    for field in self._field_names:
        aligns.append(
            {"l": "left", "r": "right", "c": "center"}[self._align[field]]
        )
        valigns.append(
            {"t": "top", "m": "middle", "b": "bottom"}[self._valign[field]]
        )
    for row in formatted_rows:
        lines.append("        <tr>")
        for field, datum, align, valign in zip(
            self._field_names, row, aligns, valigns
        ):
            if options["fields"] and field not in options["fields"]:
                continue
            if options["escape_data"]:
                datum = escape(datum)
            content = datum.replace("\n", linebreak)
            lines.append(
                f'            <td style="'
                f"padding-left: {lpad}em; "
                f"padding-right: {rpad}em; "
                f"text-align: {align}; "
                f'vertical-align: {valign}">{content}</td>'
            )
        lines.append("        </tr>")
    lines.append("    </tbody>")
    lines.append("</table>")

    return "\n".join(lines)
##############################
# LATEX STRING METHODS #
##############################
def get_latex_string(self, **kwargs) -> str:
    """Return string representation of LaTex formatted version of table in current
    state.

    Accepts the same formatting options as get_string(). When the ``format``
    option is true, the output mirrors the border/hrules/vrules styling;
    otherwise a minimal tabular environment is produced.
    """
    options = self._get_options(kwargs)
    builder = (
        self._get_formatted_latex_string
        if options["format"]
        else self._get_simple_latex_string
    )
    return builder(options)
def _get_simple_latex_string(self, options: OptionsType) -> str:
lines: list[str] = []
wanted_fields = []
if options["fields"]:
wanted_fields = [
field for field in self._field_names if field in options["fields"]
]
else:
wanted_fields = self._field_names
alignments = "".join([self._align[field] for field in wanted_fields])
begin_cmd = f"\\begin{{tabular}}{{{alignments}}}"
lines.append(begin_cmd)
# Headers
if options["header"]:
lines.append(" & ".join(wanted_fields) + " \\\\")
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows)
for row in formatted_rows:
wanted_data = [
d for f, d in zip(self._field_names, row) if f in wanted_fields
]
lines.append(" & ".join(wanted_data) + " \\\\")
lines.append("\\end{tabular}")
return "\r\n".join(lines)
def _get_formatted_latex_string(self, options: OptionsType) -> str:
    """Render the table as a LaTeX tabular whose column separators and
    \\hline rules mirror the border/vrules/hrules options.

    Arguments:

    options - dictionary of option settings.
    """
    lines: list[str] = []
    if options["fields"]:
        wanted_fields = [
            field for field in self._field_names if field in options["fields"]
        ]
    else:
        wanted_fields = self._field_names

    wanted_alignments = [self._align[field] for field in wanted_fields]
    # Interior "|" separators when vertical rules are drawn everywhere.
    if options["border"] and options["vrules"] == VRuleStyle.ALL:
        alignment_str = "|".join(wanted_alignments)
    elif not options["border"] and options["preserve_internal_border"]:
        alignment_str = "|".join(wanted_alignments)
    else:
        alignment_str = "".join(wanted_alignments)

    # Outer "|" edges for FRAME or ALL vertical rules.
    if options["border"] and options["vrules"] in [
        VRuleStyle.ALL,
        VRuleStyle.FRAME,
    ]:
        alignment_str = "|" + alignment_str + "|"

    begin_cmd = f"\\begin{{tabular}}{{{alignment_str}}}"
    lines.append(begin_cmd)
    if options["border"] and options["hrules"] in [
        HRuleStyle.ALL,
        HRuleStyle.FRAME,
    ]:
        lines.append("\\hline")

    # Headers
    if options["header"]:
        lines.append(" & ".join(wanted_fields) + " \\\\")
    if (options["border"] or options["preserve_internal_border"]) and options[
        "hrules"
    ] in [HRuleStyle.ALL, HRuleStyle.HEADER]:
        lines.append("\\hline")

    # Data
    # Fix: the original called self._get_rows(options) a second time after
    # formatting the rows; the duplicate result was never used, so the dead
    # call has been removed.
    rows = self._get_rows(options)
    formatted_rows = self._format_rows(rows)
    for row in formatted_rows:
        wanted_data = [
            d for f, d in zip(self._field_names, row) if f in wanted_fields
        ]
        lines.append(" & ".join(wanted_data) + " \\\\")
        if options["border"] and options["hrules"] == HRuleStyle.ALL:
            lines.append("\\hline")

    if options["border"] and options["hrules"] == HRuleStyle.FRAME:
        lines.append("\\hline")

    lines.append("\\end{tabular}")

    return "\r\n".join(lines)
##############################
# MEDIAWIKI STRING METHODS #
##############################
def get_mediawiki_string(self, **kwargs) -> str:
    """
    Return string representation of the table in MediaWiki table markup.

    The generated markup follows simple MediaWiki syntax. For example:

        {| class="wikitable"
        |+ Optional caption
        |-
        ! Header1 !! Header2 !! Header3
        |-
        | Data1 || Data2 || Data3
        |-
        | Data4 || Data5 || Data6
        |}
    """
    options = self._get_options(kwargs)
    lines: list[str] = []
    # Table opener: custom attributes if supplied, else the default class.
    if (
        options.get("attributes")
        and isinstance(options["attributes"], dict)
        and options["attributes"]
    ):
        attr_str = " ".join(f'{k}="{v}"' for k, v in options["attributes"].items())
        lines.append("{| " + attr_str)
    else:
        lines.append('{| class="wikitable"')
    caption = options.get("title", self._title)
    if caption:
        lines.append("|+ " + caption)
    # PERF: the requested column subset is loop-invariant; the original
    # re-read options.get("fields") inside every loop iteration.
    fields_option = options.get("fields")
    if options.get("header"):
        lines.append("|-")
        headers = [
            field
            for field in self._field_names
            if fields_option is None or field in fields_option
        ]
        if headers:
            lines.append("! " + " !! ".join(headers))
    rows = self._get_rows(options)
    formatted_rows = self._format_rows(rows)
    for row in formatted_rows:
        lines.append("|-")
        cells = [
            cell
            for field, cell in zip(self._field_names, row)
            if fields_option is None or field in fields_option
        ]
        if cells:
            lines.append("| " + " || ".join(cells))
    lines.append("|}")
    return "\n".join(lines)
##############################
# UNICODE WIDTH FUNCTION #
##############################
@lru_cache
def _str_block_width(val: str) -> int:
    """Return the number of terminal columns *val* occupies when printed.

    Terminal escape sequences are stripped before measuring so they do not
    count toward the visible width.
    """
    import wcwidth

    # Reduce OSC-8 hyperlink sequences to their visible link text first
    # (presumably what the module-level ``_osc8_re`` matches — its group 1
    # is kept), then delete whatever the compiled ``_re`` pattern matches.
    visible = _osc8_re.sub(r"\1", val)
    return wcwidth.wcswidth(_re.sub("", visible))
##############################
# TABLE FACTORIES #
##############################
def from_csv(fp, field_names: Sequence[str] | None = None, **kwargs) -> PrettyTable:
    """Build a :class:`PrettyTable` from a CSV file-like object.

    Keyword arguments recognised by :func:`csv.reader` are passed through to
    it; everything else is forwarded to the ``PrettyTable`` constructor.
    When no reader parameters are given, the CSV dialect is sniffed from the
    first 1024 bytes of *fp*.
    """
    import csv

    # Split the csv.reader format parameters off from the table kwargs.
    reader_params = (
        "delimiter",
        "doublequote",
        "escapechar",
        "lineterminator",
        "quotechar",
        "quoting",
        "skipinitialspace",
        "strict",
    )
    fmtparams = {name: kwargs.pop(name) for name in reader_params if name in kwargs}
    if fmtparams:
        reader = csv.reader(fp, **fmtparams)
    else:
        # No explicit format: sniff the dialect from a sample, then rewind.
        dialect = csv.Sniffer().sniff(fp.read(1024))
        fp.seek(0)
        reader = csv.reader(fp, dialect)

    table = PrettyTable(**kwargs)
    if field_names:
        table.field_names = field_names
    else:
        # The first CSV row supplies the column names when none were given.
        table.field_names = [name.strip() for name in next(reader)]
    for record in reader:
        table.add_row([cell.strip() for cell in record])
    return table
def from_db_cursor(cursor: Cursor, **kwargs) -> PrettyTable | None:
    """Build a :class:`PrettyTable` from an executed DB-API cursor.

    Returns ``None`` when the cursor has no ``description`` (i.e. the last
    statement produced no result set). Extra kwargs go to ``PrettyTable``.
    """
    # Guard clause: nothing to tabulate without column metadata.
    if not cursor.description:
        return None
    table = PrettyTable(**kwargs)
    table.field_names = [column[0] for column in cursor.description]
    for record in cursor.fetchall():
        table.add_row(record)
    return table
def from_json(json_string: str | bytes, **kwargs) -> PrettyTable:
    """Build a :class:`PrettyTable` from a JSON document.

    The document must be a list whose first element is the list of field
    names and whose remaining elements are objects keyed by those names.
    Extra kwargs are forwarded to the ``PrettyTable`` constructor.
    """
    import json

    decoded = json.loads(json_string)
    table = PrettyTable(**kwargs)
    table.field_names = decoded[0]
    for record in decoded[1:]:
        table.add_row([record[field] for field in table.field_names])
    return table
|
PrettyTable
|
python
|
ray-project__ray
|
doc/source/serve/doc_code/application_level_autoscaling.py
|
{
"start": 275,
"end": 501
}
|
class ____:
def __call__(self, preprocessed_data: str) -> str:
# Simulate model inference (takes longer than preprocessing)
time.sleep(0.1)
return f"result_{preprocessed_data}"
@serve.deployment
|
Model
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/io_test.py
|
{
"start": 5840,
"end": 7610
}
|
class ____(IOTest, checkpoint_test_base.CheckpointTestBase):
    """Tests for the checkpointing arguments accepted by ``io.save``."""

    @combinations.generate(test_base.eager_only_combinations())
    def testSaveCheckpointingAPI(self):
        dataset = dataset_ops.Dataset.range(40)
        checkpoint_args = {"directory": self._checkpoint_prefix, "max_to_keep": 50}
        io.save(dataset, self._save_dir, checkpoint_args=checkpoint_args)
        # os.listdir already returns a list; the original wrapped it in a
        # redundant list() copy.
        num_checkpoint_files = len(os.listdir(self._checkpoint_prefix))
        # By default, we checkpoint every increment. Each checkpoint writes a
        # file containing the data and a file containing the index. There is
        # also an overall checkpoint file. Thus, we expect (2 * 40) + 1 files.
        self.assertEqual(81, num_checkpoint_files)

    @combinations.generate(test_base.eager_only_combinations())
    def testSaveCheckpointingAPICustomCheckpointInterval(self):
        dataset = dataset_ops.Dataset.range(40)
        step_counter = variables.Variable(0, trainable=False)
        checkpoint_args = {
            "checkpoint_interval": 5,
            "step_counter": step_counter,
            "directory": self._checkpoint_prefix,
            "max_to_keep": 10,
        }
        io.save(dataset, self._save_dir, checkpoint_args=checkpoint_args)
        num_checkpoint_files = len(os.listdir(self._checkpoint_prefix))
        # Checkpointing every 5 steps over 40 elements yields 8 checkpoints:
        # we expect (2 * 8) + 1 files.
        self.assertEqual(17, num_checkpoint_files)

    @combinations.generate(test_base.eager_only_combinations())
    def testSaveCheckpointingAPIIncorrectArgs(self):
        dataset = dataset_ops.Dataset.range(42)
        checkpoint_args = {
            "directory": self._checkpoint_prefix,
            # Unknown keyword argument: must be rejected with TypeError.
            "incorrect_arg": "incorrect_arg",
        }
        with self.assertRaises(TypeError):
            io.save(dataset, self._save_dir, checkpoint_args=checkpoint_args)
# Allow running this test module directly; defers to the test runner's main().
if __name__ == "__main__":
    test.main()
|
SaveCheckpointTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.