language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 18481,
"end": 18895
} | class ____(BaseModel):
"""
Pool serializer for post bodies.
"""
model_config = ConfigDict(
extra="forbid",
)
name: Annotated[str, Field(max_length=256, title="Name")]
slots: Annotated[int, Field(title="Slots")]
description: Annotated[str | None, Field(title="Description")] = None
include_deferred: Annotated[bool | None, Field(title="Include Deferred")] = False
| PoolBody |
python | wandb__wandb | wandb/automations/_validators.py | {
"start": 1194,
"end": 4824
} | class ____(str, Enum):
"""A string enum allowing for case-insensitive lookups by value.
May include other internal customizations if needed.
Note: This is a bespoke, internal implementation and NOT intended as a
backport of `enum.StrEnum` from Python 3.11+.
"""
def __repr__(self) -> str:
return self.name
@classmethod
def _missing_(cls, value: object) -> Any:
# Accept case-insensitive enum values
if isinstance(value, str):
v = value.lower()
return next((e for e in cls if e.value.lower() == v), None)
return None
def default_if_none(v: Any) -> Any:
"""A "before"-mode field validator that coerces `None` to the field default.
See: https://docs.pydantic.dev/2.11/api/pydantic_core/#pydantic_core.PydanticUseDefault
"""
if v is None:
raise PydanticUseDefault
return v
def upper_if_str(v: Any) -> Any:
return v.strip().upper() if isinstance(v, str) else v
# ----------------------------------------------------------------------------
def parse_scope(v: Any) -> Any:
"""Convert eligible objects (including wandb types) to an automation scope."""
from wandb.apis.public import ArtifactCollection, Project
from .scopes import ProjectScope, _ArtifactPortfolioScope, _ArtifactSequenceScope
if isinstance(v, Project):
return ProjectScope.model_validate(v)
if isinstance(v, ArtifactCollection):
typ = _ArtifactSequenceScope if v.is_sequence() else _ArtifactPortfolioScope
return typ.model_validate(v)
return v
def parse_saved_action(v: Any) -> Any:
"""If necessary (and possible), convert the object to a saved action."""
from .actions import (
DoNothing,
SavedNoOpAction,
SavedNotificationAction,
SavedWebhookAction,
SendNotification,
SendWebhook,
)
if isinstance(v, SendNotification):
return SavedNotificationAction(
integration={"id": v.integration_id}, **v.model_dump()
)
if isinstance(v, SendWebhook):
return SavedWebhookAction(
integration={"id": v.integration_id}, **v.model_dump()
)
if isinstance(v, DoNothing):
return SavedNoOpAction(**v.model_dump())
return v
def parse_input_action(v: Any) -> Any:
"""If necessary (and possible), convert the object to an input action."""
from .actions import (
DoNothing,
SavedNoOpAction,
SavedNotificationAction,
SavedWebhookAction,
SendNotification,
SendWebhook,
)
if isinstance(v, SavedNotificationAction):
return SendNotification(integration_id=v.integration.id, **v.model_dump())
if isinstance(v, SavedWebhookAction):
return SendWebhook(integration_id=v.integration.id, **v.model_dump())
if isinstance(v, SavedNoOpAction):
return DoNothing(**v.model_dump())
return v
# ----------------------------------------------------------------------------
def wrap_run_event_run_filter(f: MongoLikeFilter) -> MongoLikeFilter:
"""Wrap a run filter in an `And` operator if it's not already.
This is a necessary constraint imposed elsewhere by backend/frontend code.
"""
return And.wrap(simplify_expr(f)) # simplify/flatten first if needed
def wrap_mutation_event_filter(f: MongoLikeFilter) -> MongoLikeFilter:
"""Wrap filters as `{"$or": [{"$and": [<original_filter>]}]}`.
This awkward format is necessary because the frontend expects it.
"""
return Or.wrap(And.wrap(simplify_expr(f))) # simplify/flatten first if needed
| LenientStrEnum |
python | run-llama__llama_index | llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py | {
"start": 16572,
"end": 18134
} | class ____:
"""Integration tests for AgentCoreMemory."""
@pytest.mark.asyncio
async def test_full_workflow(self, memory_context, mock_client):
"""Test a complete workflow with AgentCoreMemory."""
# Setup mock responses
mock_client.list_events.return_value = {
"events": [
{
"payload": [
{"blob": json.dumps({})},
{
"conversational": {
"role": "USER",
"content": {"text": "Hello"},
}
},
]
}
],
"nextToken": None,
}
mock_client.retrieve_memory_records.return_value = {
"memoryRecordSummaries": [{"content": {"text": "User likes greetings"}}]
}
# Create memory instance
memory = AgentCoreMemory(context=memory_context, client=mock_client)
# Add a message
message = ChatMessage(role=MessageRole.USER, content="New message")
await memory.aput(message)
# Verify create_event was called
assert mock_client.create_event.called
# Get messages (this will call list_events and retrieve_memories)
messages = memory.get()
# Should have system message + user message
assert len(messages) >= 1
assert mock_client.list_events.called
assert mock_client.retrieve_memory_records.called
| TestIntegration |
python | django__django | tests/gis_tests/layermap/models.py | {
"start": 210,
"end": 246
} | class ____(NamedModel):
pass
| State |
python | python__mypy | mypy/test/typefixture.py | {
"start": 15395,
"end": 16014
} | class ____(TypeFixture):
"""Extension of TypeFixture that contains additional generic
interface types."""
def __init__(self) -> None:
super().__init__()
# GF[T]
self.gfi = self.make_type_info("GF", typevars=["T"], is_abstract=True)
# M1 <: GF[A]
self.m1i = self.make_type_info(
"M1", is_abstract=True, mro=[self.gfi, self.oi], bases=[Instance(self.gfi, [self.a])]
)
self.gfa = Instance(self.gfi, [self.a]) # GF[A]
self.gfb = Instance(self.gfi, [self.b]) # GF[B]
self.m1 = Instance(self.m1i, []) # M1
| InterfaceTypeFixture |
python | django__django | tests/generic_relations/models.py | {
"start": 3027,
"end": 3105
} | class ____(Mineral):
tags = GenericRelation(ValuableTaggedItem)
| ValuableRock |
python | scikit-learn__scikit-learn | sklearn/utils/_param_validation.py | {
"start": 20615,
"end": 21881
} | class ____(_Constraint):
"""Helper constraint for the `missing_values` parameters.
Convenience for
[
Integral,
Interval(Real, None, None, closed="both"),
str, # when numeric_only is False
None, # when numeric_only is False
_NanConstraint(),
_PandasNAConstraint(),
]
Parameters
----------
numeric_only : bool, default=False
Whether to consider only numeric missing value markers.
"""
def __init__(self, numeric_only=False):
super().__init__()
self.numeric_only = numeric_only
self._constraints = [
_InstancesOf(Integral),
# we use an interval of Real to ignore np.nan that has its own constraint
Interval(Real, None, None, closed="both"),
_NanConstraint(),
_PandasNAConstraint(),
]
if not self.numeric_only:
self._constraints.extend([_InstancesOf(str), _NoneConstraint()])
def is_satisfied_by(self, val):
return any(c.is_satisfied_by(val) for c in self._constraints)
def __str__(self):
return (
f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
f" {self._constraints[-1]}"
)
| MissingValues |
python | scikit-learn__scikit-learn | sklearn/gaussian_process/kernels.py | {
"start": 73959,
"end": 79325
} | class ____(Kernel):
r"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting :math:`N(0, 1)` priors on the coefficients
of :math:`x_d (d = 1, . . . , D)` and a prior of :math:`N(0, \sigma_0^2)`
on the bias. The DotProduct kernel is invariant to a rotation of
the coordinates about the origin, but not translations.
It is parameterized by a parameter sigma_0 :math:`\sigma`
which controls the inhomogenity of the kernel. For :math:`\sigma_0^2 =0`,
the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
.. math::
k(x_i, x_j) = \sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
See [1]_, Chapter 4, Section 4.2, for further details regarding the
DotProduct kernel.
Read more in the :ref:`User Guide <gp_kernels>`.
.. versionadded:: 0.18
Parameters
----------
sigma_0 : float >= 0, default=1.0
Parameter controlling the inhomogenity of the kernel. If sigma_0=0,
the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5)
The lower and upper bound on 'sigma_0'.
If set to "fixed", 'sigma_0' cannot be changed during
hyperparameter tuning.
References
----------
.. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006).
"Gaussian Processes for Machine Learning". The MIT Press.
<http://www.gaussianprocess.org/gpml/>`_
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel()
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0, 592.1]), array([316.6, 316.6]))
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
@property
def hyperparameter_sigma_0(self):
return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0**2
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0**2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0**2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y).
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X).
"""
return np.einsum("ij,ij->i", X, X) + self.sigma_0**2
def is_stationary(self):
"""Returns whether the kernel is stationary."""
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(self.__class__.__name__, self.sigma_0)
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
| DotProduct |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 3869,
"end": 3947
} | class ____(Station):
inbound = models.BooleanField(default=False)
| BusStation |
python | ray-project__ray | rllib/examples/envs/classes/simple_corridor.py | {
"start": 145,
"end": 1414
} | class ____(gym.Env):
"""Example of a custom env in which you have to walk down a corridor.
You can configure the length of the corridor via the env config."""
def __init__(self, config=None):
config = config or {}
self.action_space = Discrete(2)
self.observation_space = Box(0.0, 999.0, shape=(1,), dtype=np.float32)
self.set_corridor_length(config.get("corridor_length", 10))
self._cur_pos = 0
def set_corridor_length(self, length):
self.end_pos = length
logger.info(f"Set corridor length to {self.end_pos}")
assert self.end_pos <= 999, "The maximum `corridor_length` allowed is 999!"
def reset(self, *, seed=None, options=None):
self._cur_pos = 0.0
return self._get_obs(), {}
def step(self, action):
assert action in [0, 1], action
if action == 0 and self._cur_pos > 0:
self._cur_pos -= 1.0
elif action == 1:
self._cur_pos += 1.0
terminated = self._cur_pos >= self.end_pos
truncated = False
reward = 1.0 if terminated else -0.01
return self._get_obs(), reward, terminated, truncated, {}
def _get_obs(self):
return np.array([self._cur_pos], np.float32)
| SimpleCorridor |
python | pyparsing__pyparsing | examples/bf.py | {
"start": 3294,
"end": 4371
} | class ____(Instruction):
def __init__(self, tokens):
super().__init__(tokens)
self.instructions = self.tokens[0][1:-1]
def execute(self, bf_engine: BFEngine):
while bf_engine.at_ptr:
for i in self.instructions:
i.execute(bf_engine)
# add parse actions to all BF instruction expressions
PLUS.add_parse_action(IncrPtrValue)
MINUS.add_parse_action(DecrPtrValue)
GT.add_parse_action(IncrPtr)
LT.add_parse_action(DecrPtr)
OUT.add_parse_action(OutputPtrValue)
INP.add_parse_action(InputPtrValue)
LOOP.add_parse_action(RunInstructionLoop)
@program_expr.add_parse_action
def run_program(tokens):
bf = BFEngine()
for t in tokens:
t.execute(bf)
print()
if __name__ == '__main__':
# generate railroad diagram
import contextlib
with contextlib.suppress(Exception):
program_expr.create_diagram("bf_diagram.html")
# execute an example BF program
hw = "+[-->-[>>+>-----<<]<--<---]>-.>>>+.>>..+++[.>]<<<<.+++.------.<<-.>>>>+."
program_expr.parse_string(hw)
| RunInstructionLoop |
python | pytorch__pytorch | torch/_dynamo/variables/higher_order_ops.py | {
"start": 69660,
"end": 70340
} | class ____(TorchHigherOrderOperatorVariable):
"""
Wraps torch._functorch.autograd_function.custom_function_call
"""
def _call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
return torch._dynamo.variables.UserMethodVariable(
self.value.__call__.__func__,
torch._dynamo.variables.UserDefinedObjectVariable(
self.value, source=self.source
),
source=AttrSource(self.source, "__call__"),
).call_function(tx, args, kwargs)
| CustomFunctionHigherOrderOperatorVariable |
python | apache__airflow | providers/teradata/src/airflow/providers/teradata/transfers/teradata_to_teradata.py | {
"start": 1141,
"end": 3891
} | class ____(BaseOperator):
"""
Moves data from Teradata source database to Teradata destination database.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:TeradataToTeradataOperator`
:param dest_teradata_conn_id: destination Teradata connection.
:param destination_table: destination table to insert rows.
:param source_teradata_conn_id: :ref:`Source Teradata connection <howto/connection:Teradata>`.
:param sql: SQL query to execute against the source Teradata database
:param sql_params: Parameters to use in sql query.
:param rows_chunk: number of rows per chunk to commit.
"""
template_fields: Sequence[str] = (
"sql",
"sql_params",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql", "sql_params": "py"}
ui_color = "#e07c24"
def __init__(
self,
*,
dest_teradata_conn_id: str,
destination_table: str,
source_teradata_conn_id: str,
sql: str,
sql_params: dict | None = None,
rows_chunk: int = 5000,
**kwargs,
) -> None:
super().__init__(**kwargs)
if sql_params is None:
sql_params = {}
self.dest_teradata_conn_id = dest_teradata_conn_id
self.destination_table = destination_table
self.source_teradata_conn_id = source_teradata_conn_id
self.sql = sql
self.sql_params = sql_params
self.rows_chunk = rows_chunk
@cached_property
def src_hook(self) -> TeradataHook:
return TeradataHook(teradata_conn_id=self.source_teradata_conn_id)
@cached_property
def dest_hook(self) -> TeradataHook:
return TeradataHook(teradata_conn_id=self.dest_teradata_conn_id)
def execute(self, context: Context) -> None:
src_hook = self.src_hook
dest_hook = self.dest_hook
with src_hook.get_conn() as src_conn:
cursor = src_conn.cursor()
cursor.execute(self.sql, self.sql_params)
target_fields = [field[0] for field in cursor.description]
rows_total = 0
if len(target_fields) != 0:
for rows in iter(lambda: cursor.fetchmany(self.rows_chunk), []):
dest_hook.insert_rows(
self.destination_table,
rows,
target_fields=target_fields,
commit_every=self.rows_chunk,
)
rows_total += len(rows)
self.log.info("Finished data transfer. Total number of rows transferred - %s", rows_total)
cursor.close()
| TeradataToTeradataOperator |
python | keon__algorithms | tests/test_graph.py | {
"start": 3161,
"end": 4514
} | class ____(unittest.TestCase):
"""
Test for the file maximum_flow.py
Arguments:
unittest {[type]} -- [description]
"""
def test_ford_fulkerson(self):
capacity = [
[0, 10, 10, 0, 0, 0, 0],
[0, 0, 2, 0, 4, 8, 0],
[0, 0, 0, 0, 0, 9, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 10],
[0, 0, 0, 0, 6, 0, 10],
[0, 0, 0, 0, 0, 0, 0]
]
self.assertEqual(19, ford_fulkerson(capacity, 0, 6))
def test_edmonds_karp(self):
capacity = [
[0, 10, 10, 0, 0, 0, 0],
[0, 0, 2, 0, 4, 8, 0],
[0, 0, 0, 0, 0, 9, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 10],
[0, 0, 0, 0, 6, 0, 10],
[0, 0, 0, 0, 0, 0, 0]
]
self.assertEqual(19, edmonds_karp(capacity, 0, 6))
def dinic(self):
capacity = [
[0, 10, 10, 0, 0, 0, 0],
[0, 0, 2, 0, 4, 8, 0],
[0, 0, 0, 0, 0, 9, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 10],
[0, 0, 0, 0, 6, 0, 10],
[0, 0, 0, 0, 0, 0, 0]
]
self.assertEqual(19, dinic(capacity, 0, 6))
| TestMaximumFlow |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/routers/test_router.py | {
"start": 1692,
"end": 6336
} | class ____:
@pytest.mark.asyncio
@pytest.mark.parametrize("stream_batching_interval_ms", [None, 0, 10000])
@pytest.mark.parametrize("stream", [True, False])
async def test_chat(self, stream_batching_interval_ms, client, stream):
"""Tests chat streaming with different stream_batching_interval_ms values.
0ms super fast batching (no batching)
10000ms basically should be equivalent to non-streaming
None is default, which is some fixed non-zero value.
"""
# Generate 1000 chunks
n_tokens = 1000
response = client.chat.completions.create(
model="llm_model_id",
messages=[dict(role="user", content="Hello")],
stream=stream,
max_tokens=n_tokens,
)
if stream:
text = ""
role = None
for chunk in response:
if chunk.choices[0].delta.role is not None and role is None:
role = chunk.choices[0].delta.role
if chunk.choices[0].delta.content:
text += chunk.choices[0].delta.content
else:
text = response.choices[0].message.content
role = response.choices[0].message.role
assert role == "assistant"
assert text.strip() == " ".join([f"test_{i}" for i in range(n_tokens)])
@pytest.mark.asyncio
@pytest.mark.parametrize("stream_batching_interval_ms", [None, 0, 10000])
@pytest.mark.parametrize("stream", [True, False])
async def test_completion(self, stream_batching_interval_ms, client, stream):
"""Tests text completions streaming with different stream_batching_interval_ms values."""
# Generate tokens
n_tokens = 1000
response = client.completions.create(
model="llm_model_id",
prompt="Hello",
stream=stream,
max_tokens=n_tokens,
)
if stream:
text = ""
for chunk in response:
text += chunk.choices[0].text
else:
text = response.choices[0].text
# The mock engine produces "test_0 test_1 test_2 ..." pattern
expected_text = " ".join([f"test_{i}" for i in range(n_tokens)])
assert text.strip() == expected_text
@pytest.mark.asyncio
@pytest.mark.parametrize("stream", [True, False])
async def test_tool_call(self, client, stream):
response = client.chat.completions.create(
model="llm_model_id",
messages=[
{
"role": "user",
"content": "Can you tell me what the temperate will be in Dallas, in fahrenheit?",
},
{
"content": None,
"role": "assistant",
"tool_calls": [
{
"id": "RBS92VTjJ",
"function": {
"arguments": '{"city": "Dallas", "state": "TX", "unit": "fahrenheit"}',
"name": "get_current_weather",
},
"type": "function",
}
],
},
{
"role": "tool",
"content": "The weather in Dallas, TX is 85 degrees fahrenheit. It is partly cloudly, with highs in the 90's.",
"tool_call_id": "n3OMUpydP",
},
],
stream=stream,
max_tokens=200,
)
if stream:
text = ""
role = None
for chunk in response:
if chunk.choices[0].delta.role is not None and role is None:
role = chunk.choices[0].delta.role
if chunk.choices[0].delta.content:
text += chunk.choices[0].delta.content
else:
text = response.choices[0].message.content
role = response.choices[0].message.role
assert text
@pytest.mark.asyncio
async def test_check_health(self, llm_config: LLMConfig):
"""Test health check functionality."""
server = MagicMock()
server.llm_config = MagicMock()
server.llm_config.remote = AsyncMock(return_value=llm_config)
server.check_health = MagicMock()
server.check_health.remote = AsyncMock()
router = OpenAiIngress(llm_deployments=[server])
await router.check_health()
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestOpenAiIngress |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_B.py | {
"start": 20645,
"end": 21631
} | class ____(Benchmark):
r"""
Bukin06 objective function.
The Bukin06 [1]_ global optimization problem is a multimodal minimization
problem defined as follows:
.. math::
f_{\text{Bukin06}}(x) = 100 \sqrt{ \lvert{x_2 - 0.01 x_1^{2}}
\rvert} + 0.01 \lvert{x_1 + 10} \rvert
with :math:`x_1 \in [-15, -5], x_2 \in [-3, 3]`
*Global optimum*: :math:`f(x) = 0` for :math:`x = [-10, 1]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [[-10.0, 1.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return 100 * sqrt(abs(x[1] - 0.01 * x[0] ** 2)) + 0.01 * abs(x[0] + 10)
| Bukin06 |
python | astropy__astropy | astropy/io/fits/fitsrec.py | {
"start": 55215,
"end": 56999
} | class ____(UnicodeEncodeError):
def __init__(self, encoding, object_, start, end, reason, index):
super().__init__(encoding, object_, start, end, reason)
self.index = index
def _ascii_encode(inarray, out=None):
"""
Takes a unicode array and fills the output string array with the ASCII
encodings (if possible) of the elements of the input array. The two arrays
must be the same size (though not necessarily the same shape).
This is like an inplace version of `np.char.encode` though simpler since
it's only limited to ASCII, and hence the size of each character is
guaranteed to be 1 byte.
If any strings are non-ASCII an UnicodeArrayEncodeError is raised--this is
just a `UnicodeEncodeError` with an additional attribute for the index of
the item that couldn't be encoded.
"""
out_dtype = np.dtype((f"S{inarray.dtype.itemsize // 4}", inarray.dtype.shape))
if out is not None:
out = out.view(out_dtype)
if inarray.size == 0:
return out
op_dtypes = [inarray.dtype, out_dtype]
op_flags = [["readonly"], ["writeonly", "allocate"]]
it = np.nditer(
[inarray, out], op_dtypes=op_dtypes, op_flags=op_flags, flags=["zerosize_ok"]
)
try:
for initem, outitem in it:
outitem[...] = initem.item().encode("ascii")
except UnicodeEncodeError as exc:
index = np.unravel_index(it.iterindex, inarray.shape)
raise _UnicodeArrayEncodeError(*(exc.args + (index,)))
return it.operands[1]
def _has_unicode_fields(array):
"""
Returns True if any fields in a structured array have Unicode dtype.
"""
dtypes = (d[0] for d in array.dtype.fields.values())
return any(d.kind == "U" for d in dtypes)
| _UnicodeArrayEncodeError |
python | kamyu104__LeetCode-Solutions | Python/number-of-great-partitions.py | {
"start": 47,
"end": 519
} | class ____(object):
def countPartitions(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
MOD = 10**9+7
if sum(nums) < 2*k:
return 0
dp = [0]*k
dp[0] = 1
for x in nums:
for i in reversed(xrange(k-x)):
dp[i+x] = (dp[i+x]+dp[i])%MOD
return (pow(2, len(nums), MOD)-2*reduce(lambda total, x: (total+x)%MOD, dp, 0))%MOD
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/query.py | {
"start": 179,
"end": 457
} | class ____(BaseEvent):
"""
QueryStartEvent.
Args:
query (QueryType): Query as a string or query bundle.
"""
query: QueryType
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "QueryStartEvent"
| QueryStartEvent |
python | redis__redis-py | redis/commands/search/query.py | {
"start": 11316,
"end": 11466
} | class ____:
def __init__(self, keyword: str, field: str, *args: Union[str, float]) -> None:
self.args = [keyword, field] + list(args)
| Filter |
python | getsentry__sentry | src/sentry/deletions/defaults/group.py | {
"start": 7141,
"end": 7353
} | class ____(EventsBaseDeletionTask):
"""
This class helps delete Issue Platform events which use the new Clickhouse light deletes.
"""
dataset = Dataset.IssuePlatform
| IssuePlatformEventsDeletionTask |
python | PrefectHQ__prefect | src/prefect/server/events/schemas/automations.py | {
"start": 3116,
"end": 5300
} | class ____(Trigger, abc.ABC):
"""
Requires some number of triggers to have fired within the given time period.
"""
type: Literal["compound", "sequence"]
triggers: List["ServerTriggerTypes"]
within: Optional[timedelta]
def create_automation_state_change_event(
self, firing: Firing, trigger_state: TriggerState
) -> ReceivedEvent:
"""Returns a ReceivedEvent for an automation state change
into a triggered or resolved state."""
automation = firing.trigger.automation
triggering_event = firing.triggering_event
return ReceivedEvent(
occurred=firing.triggered,
event=f"prefect.automation.{trigger_state.value.lower()}",
resource={
"prefect.resource.id": f"prefect.automation.{automation.id}",
"prefect.resource.name": automation.name,
},
related=(
[
{
"prefect.resource.id": f"prefect.event.{triggering_event.id}",
"prefect.resource.role": "triggering-event",
}
]
if triggering_event
else []
),
payload={
"triggering_labels": firing.triggering_labels,
"triggering_event": (
triggering_event.model_dump(mode="json")
if triggering_event
else None
),
},
id=uuid7(),
)
def _set_parent(self, value: "Union[Trigger , Automation]"):
super()._set_parent(value)
for trigger in self.triggers:
trigger._set_parent(self)
def all_triggers(self) -> Sequence[Trigger]:
return [self] + [t for child in self.triggers for t in child.all_triggers()]
@property
def child_trigger_ids(self) -> List[UUID]:
return [trigger.id for trigger in self.triggers]
@property
def num_expected_firings(self) -> int:
return len(self.triggers)
@abc.abstractmethod
def ready_to_fire(self, firings: Sequence["Firing"]) -> bool: ...
| CompositeTrigger |
python | tornadoweb__tornado | tornado/util.py | {
"start": 1761,
"end": 6307
} | class ____:
"""Streaming gzip decompressor.
The interface is like that of `zlib.decompressobj` (without some of the
optional arguments, but it understands gzip headers and checksums.
"""
def __init__(self) -> None:
# Magic parameter makes zlib module understand gzip header
# http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
# This works on cpython and pypy, but not jython.
self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def decompress(self, value: bytes, max_length: int = 0) -> bytes:
"""Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
If ``max_length`` is given, some input data may be left over
in ``unconsumed_tail``; you must retrieve this value and pass
it back to a future call to `decompress` if it is not empty.
"""
return self.decompressobj.decompress(value, max_length)
@property
def unconsumed_tail(self) -> bytes:
"""Returns the unconsumed portion left over"""
return self.decompressobj.unconsumed_tail
def flush(self) -> bytes:
"""Return any remaining buffered data not yet returned by decompress.
Also checks for errors such as truncated input.
No other methods may be called on this object after `flush`.
"""
return self.decompressobj.flush()
def import_object(name: str) -> Any:
"""Imports an object by name.
``import_object('x')`` is equivalent to ``import x``.
``import_object('x.y.z')`` is equivalent to ``from x.y import z``.
>>> import tornado.escape
>>> import_object('tornado.escape') is tornado.escape
True
>>> import_object('tornado.escape.utf8') is tornado.escape.utf8
True
>>> import_object('tornado') is tornado
True
>>> import_object('tornado.missing_module')
Traceback (most recent call last):
...
ImportError: No module named missing_module
"""
if name.count(".") == 0:
return __import__(name)
parts = name.split(".")
obj = __import__(".".join(parts[:-1]), fromlist=[parts[-1]])
try:
return getattr(obj, parts[-1])
except AttributeError:
raise ImportError("No module named %s" % parts[-1])
def exec_in(
code: Any, glob: Dict[str, Any], loc: Optional[Optional[Mapping[str, Any]]] = None
) -> None:
if isinstance(code, str):
# exec(string) inherits the caller's future imports; compile
# the string first to prevent that.
code = compile(code, "<string>", "exec", dont_inherit=True)
exec(code, glob, loc)
def raise_exc_info(
exc_info: Tuple[Optional[type], Optional[BaseException], Optional["TracebackType"]],
) -> typing.NoReturn:
try:
if exc_info[1] is not None:
raise exc_info[1].with_traceback(exc_info[2])
else:
raise TypeError("raise_exc_info called with no exception")
finally:
# Clear the traceback reference from our stack frame to
# minimize circular references that slow down GC.
exc_info = (None, None, None)
def errno_from_exception(e: BaseException) -> Optional[int]:
"""Provides the errno from an Exception object.
There are cases that the errno attribute was not set so we pull
the errno out of the args but if someone instantiates an Exception
without any args you will get a tuple error. So this function
abstracts all that behavior to give you a safe way to get the
errno.
"""
if hasattr(e, "errno"):
return e.errno # type: ignore
elif e.args:
return e.args[0]
else:
return None
_alphanum = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
def _re_unescape_replacement(match: Match[str]) -> str:
group = match.group(1)
if group[0] in _alphanum:
raise ValueError("cannot unescape '\\\\%s'" % group[0])
return group
_re_unescape_pattern = re.compile(r"\\(.)", re.DOTALL)
def re_unescape(s: str) -> str:
r"""Unescape a string escaped by `re.escape`.
May raise ``ValueError`` for regular expressions which could not
have been produced by `re.escape` (for example, strings containing
``\d`` cannot be unescaped).
.. versionadded:: 4.4
"""
return _re_unescape_pattern.sub(_re_unescape_replacement, s)
| GzipDecompressor |
python | ray-project__ray | rllib/utils/exploration/random_encoder.py | {
"start": 3810,
"end": 10695
} | class ____(Exploration):
"""Random Encoder for Efficient Exploration.
Implementation of:
[1] State entropy maximization with random encoders for efficient
exploration. Seo, Chen, Shin, Lee, Abbeel, & Lee, (2021).
arXiv preprint arXiv:2102.09430.
Estimates state entropy using a particle-based k-nearest neighbors (k-NN)
estimator in the latent space. The state's latent representation is
calculated using an encoder with randomly initialized parameters.
The entropy of a state is considered as intrinsic reward and added to the
environment's extrinsic reward for policy optimization.
Entropy is calculated per batch, it does not take the distribution of
the entire replay buffer into consideration.
"""
def __init__(
self,
action_space: Space,
*,
framework: str,
model: ModelV2,
embeds_dim: int = 128,
encoder_net_config: Optional[ModelConfigDict] = None,
beta: float = 0.2,
beta_schedule: str = "constant",
rho: float = 0.1,
k_nn: int = 50,
random_timesteps: int = 10000,
sub_exploration: Optional[FromConfigSpec] = None,
**kwargs
):
"""Initialize RE3.
Args:
action_space: The action space in which to explore.
framework: Supports "tf", this implementation does not
support torch.
model: The policy's model.
embeds_dim: The dimensionality of the observation embedding
vectors in latent space.
encoder_net_config: Optional model
configuration for the encoder network, producing embedding
vectors from observations. This can be used to configure
fcnet- or conv_net setups to properly process any
observation space.
beta: Hyperparameter to choose between exploration and
exploitation.
beta_schedule: Schedule to use for beta decay, one of
"constant" or "linear_decay".
rho: Beta decay factor, used for on-policy algorithm.
k_nn: Number of neighbours to set for K-NN entropy
estimation.
random_timesteps: The number of timesteps to act completely
randomly (see [1]).
sub_exploration: The config dict for the underlying Exploration
to use (e.g. epsilon-greedy for DQN). If None, uses the
FromSpecDict provided in the Policy's default config.
Raises:
ValueError: If the input framework is Torch.
"""
# TODO(gjoliver): Add supports for Pytorch.
if framework == "torch":
raise ValueError("This RE3 implementation does not support Torch.")
super().__init__(action_space, model=model, framework=framework, **kwargs)
self.beta = beta
self.rho = rho
self.k_nn = k_nn
self.embeds_dim = embeds_dim
if encoder_net_config is None:
encoder_net_config = self.policy_config["model"].copy()
self.encoder_net_config = encoder_net_config
# Auto-detection of underlying exploration functionality.
if sub_exploration is None:
# For discrete action spaces, use an underlying EpsilonGreedy with
# a special schedule.
if isinstance(self.action_space, Discrete):
sub_exploration = {
"type": "EpsilonGreedy",
"epsilon_schedule": {
"type": "PiecewiseSchedule",
# Step function (see [2]).
"endpoints": [
(0, 1.0),
(random_timesteps + 1, 1.0),
(random_timesteps + 2, 0.01),
],
"outside_value": 0.01,
},
}
elif isinstance(self.action_space, Box):
sub_exploration = {
"type": "OrnsteinUhlenbeckNoise",
"random_timesteps": random_timesteps,
}
else:
raise NotImplementedError
self.sub_exploration = sub_exploration
# Creates ModelV2 embedding module / layers.
self._encoder_net = ModelCatalog.get_model_v2(
self.model.obs_space,
self.action_space,
self.embeds_dim,
model_config=self.encoder_net_config,
framework=self.framework,
name="encoder_net",
)
if self.framework == "tf":
self._obs_ph = get_placeholder(
space=self.model.obs_space, name="_encoder_obs"
)
self._obs_embeds = tf.stop_gradient(
self._encoder_net({SampleBatch.OBS: self._obs_ph})[0]
)
# This is only used to select the correct action
self.exploration_submodule = from_config(
cls=Exploration,
config=self.sub_exploration,
action_space=self.action_space,
framework=self.framework,
policy_config=self.policy_config,
model=self.model,
num_workers=self.num_workers,
worker_index=self.worker_index,
)
@override(Exploration)
def get_exploration_action(
self,
*,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True
):
# Simply delegate to sub-Exploration module.
return self.exploration_submodule.get_exploration_action(
action_distribution=action_distribution, timestep=timestep, explore=explore
)
@override(Exploration)
def postprocess_trajectory(self, policy, sample_batch, tf_sess=None):
"""Calculate states' latent representations/embeddings.
Embeddings are added to the SampleBatch object such that it doesn't
need to be calculated during each training step.
"""
if self.framework != "torch":
sample_batch = self._postprocess_tf(policy, sample_batch, tf_sess)
else:
raise ValueError("Not implemented for Torch.")
return sample_batch
def _postprocess_tf(self, policy, sample_batch, tf_sess):
"""Calculate states' embeddings and add it to SampleBatch."""
if self.framework == "tf":
obs_embeds = tf_sess.run(
self._obs_embeds,
feed_dict={self._obs_ph: sample_batch[SampleBatch.OBS]},
)
else:
obs_embeds = tf.stop_gradient(
self._encoder_net({SampleBatch.OBS: sample_batch[SampleBatch.OBS]})[0]
).numpy()
sample_batch[SampleBatch.OBS_EMBEDS] = obs_embeds
return sample_batch
| RE3 |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_partition_sets.py | {
"start": 8123,
"end": 19611
} | class ____(ExecutingGraphQLContextTestMatrix):
def test_get_partition_status(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "integers_partition_set",
},
"partitionNames": ["2", "3"],
"forceSynchronousSubmission": True,
}
},
)
assert not result.errors
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
assert len(result.data["launchPartitionBackfill"]["launchedRunIds"]) == 2
result = execute_dagster_graphql(
graphql_context,
query=GET_PARTITION_SET_STATUS_QUERY,
variables={
"partitionSetName": "integers_partition_set",
"repositorySelector": repository_selector,
},
)
assert not result.errors
assert result.data
partitionStatuses = result.data["partitionSetOrError"]["partitionStatusesOrError"][
"results"
]
assert len(partitionStatuses) == 10
for partitionStatus in partitionStatuses:
if partitionStatus["partitionName"] in ("2", "3"):
assert partitionStatus["runStatus"] == "SUCCESS"
else:
assert partitionStatus["runStatus"] is None
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "integers_partition_set",
},
"partitionNames": [str(num) for num in range(10)],
"forceSynchronousSubmission": True,
}
},
)
assert not result.errors
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
assert len(result.data["launchPartitionBackfill"]["launchedRunIds"]) == 10
result = execute_dagster_graphql(
graphql_context,
query=GET_PARTITION_SET_STATUS_QUERY,
variables={
"partitionSetName": "integers_partition_set",
"repositorySelector": repository_selector,
},
)
assert not result.errors
assert result.data
partitionStatuses = result.data["partitionSetOrError"]["partitionStatusesOrError"][
"results"
]
assert len(partitionStatuses) == 10
for partitionStatus in partitionStatuses:
assert partitionStatus["runStatus"] == "SUCCESS"
def test_get_status_failure_cancelation_states(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "integers_partition_set",
},
"partitionNames": ["2", "3", "4"],
"forceSynchronousSubmission": True,
}
},
)
assert not result.errors
runs = graphql_context.instance.get_runs()
graphql_context.instance.report_run_failed(runs[1])
graphql_context.instance.report_run_canceled(runs[2])
result = execute_dagster_graphql(
graphql_context,
query=GET_PARTITION_SET_STATUS_QUERY,
variables={
"partitionSetName": "integers_partition_set",
"repositorySelector": repository_selector,
},
)
assert not result.errors
partitionStatuses = result.data["partitionSetOrError"]["partitionStatusesOrError"][
"results"
]
failure = 0
canceled = 0
success = 0
for partitionStatus in partitionStatuses:
if partitionStatus["runStatus"] == "FAILURE":
failure += 1
if partitionStatus["runStatus"] == "CANCELED":
canceled += 1
if partitionStatus["runStatus"] == "SUCCESS":
success += 1
# Note: Canceled run is not reflected in partition status
assert failure == 1
assert success == 1
assert canceled == 0
def test_get_status_time_window_partitioned_job(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "daily_partitioned_job_partition_set",
},
"partitionNames": ["2022-06-01", "2022-06-02"],
"forceSynchronousSubmission": True,
}
},
)
assert not result.errors
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
assert len(result.data["launchPartitionBackfill"]["launchedRunIds"]) == 2
result = execute_dagster_graphql(
graphql_context,
query=GET_PARTITION_SET_STATUS_QUERY,
variables={
"partitionSetName": "daily_partitioned_job_partition_set",
"repositorySelector": repository_selector,
},
)
assert not result.errors
assert result.data
partitionStatuses = result.data["partitionSetOrError"]["partitionStatusesOrError"][
"results"
]
assert len(partitionStatuses) > 2
for partitionStatus in partitionStatuses:
if partitionStatus["partitionName"] in ["2022-06-01", "2022-06-02"]:
assert partitionStatus["runStatus"] == "SUCCESS"
else:
assert partitionStatus["runStatus"] is None
def test_get_status_static_partitioned_job(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "static_partitioned_job_partition_set",
},
"partitionNames": ["2", "3"],
"forceSynchronousSubmission": True,
}
},
)
assert not result.errors
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
assert len(result.data["launchPartitionBackfill"]["launchedRunIds"]) == 2
result = execute_dagster_graphql(
graphql_context,
query=GET_PARTITION_SET_STATUS_QUERY,
variables={
"partitionSetName": "static_partitioned_job_partition_set",
"repositorySelector": repository_selector,
},
)
assert not result.errors
assert result.data
partitionStatuses = result.data["partitionSetOrError"]["partitionStatusesOrError"][
"results"
]
assert len(partitionStatuses) == 5
for partitionStatus in partitionStatuses:
if partitionStatus["partitionName"] in ["2", "3"]:
assert partitionStatus["runStatus"] == "SUCCESS"
else:
assert partitionStatus["runStatus"] is None
def test_add_dynamic_partitions(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
ADD_DYNAMIC_PARTITION_MUTATION,
variables={
"partitionsDefName": "foo",
"partitionKey": "bar",
"repositorySelector": repository_selector,
},
)
assert not result.errors
assert result.data["addDynamicPartition"]["__typename"] == "AddDynamicPartitionSuccess"
assert result.data["addDynamicPartition"]["partitionsDefName"] == "foo"
assert result.data["addDynamicPartition"]["partitionKey"] == "bar"
assert set(graphql_context.instance.get_dynamic_partitions("foo")) == {"bar"}
result = execute_dagster_graphql(
graphql_context,
ADD_DYNAMIC_PARTITION_MUTATION,
variables={
"partitionsDefName": "foo",
"partitionKey": "bar",
"repositorySelector": repository_selector,
},
)
assert not result.errors
assert result.data["addDynamicPartition"]["__typename"] == "DuplicateDynamicPartitionError"
def test_delete_dynamic_partitions(self, graphql_context):
graphql_context.instance.add_dynamic_partitions("foo", ["bar", "biz", "baz"])
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
DELETE_DYNAMIC_PARTITIONS_MUTATION,
variables={
"partitionsDefName": "foo",
"partitionKeys": ["bar", "biz"],
"repositorySelector": repository_selector,
},
)
assert not result.errors
assert (
result.data["deleteDynamicPartitions"]["__typename"] == "DeleteDynamicPartitionsSuccess"
), str(result.data)
assert result.data["deleteDynamicPartitions"]["partitionsDefName"] == "foo"
assert set(graphql_context.instance.get_dynamic_partitions("foo")) == {"baz"}
def test_nonexistent_dynamic_partitions_def_throws_error(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
ADD_DYNAMIC_PARTITION_MUTATION,
variables={
"partitionsDefName": "nonexistent",
"partitionKey": "bar",
"repositorySelector": repository_selector,
},
)
assert not result.errors
assert result.data
assert result.data["addDynamicPartition"]["__typename"] == "UnauthorizedError"
# If the selected repository does not contain a matching dynamic partitions definition
# we should throw an unauthorized error
assert (
"does not contain a dynamic partitions definition"
in result.data["addDynamicPartition"]["message"]
)
| TestPartitionSetRuns |
python | numba__numba | numba/tests/test_pycc.py | {
"start": 1846,
"end": 2942
} | class ____(TestCase):
def setUp(self):
unset_macosx_deployment_target()
self.tmpdir = temp_directory('test_pycc')
# Make sure temporary files and directories created by
# distutils don't clutter the top-level /tmp
tempfile.tempdir = self.tmpdir
def tearDown(self):
tempfile.tempdir = None
# Since we're executing the module-under-test several times
# from the same process, we must clear the exports registry
# between invocations.
# This is a local import to avoid deprecation warnings being generated
# through the use of the numba.pycc module.
from numba.pycc.decorators import clear_export_registry
clear_export_registry()
@contextlib.contextmanager
def check_c_ext(self, extdir, name):
sys.path.append(extdir)
try:
lib = import_dynamic(name)
yield lib
finally:
sys.path.remove(extdir)
sys.modules.pop(name, None)
@needs_setuptools
@skip_if_py313plus_on_windows
@skip_if_linux_aarch64
| BasePYCCTest |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 28475,
"end": 28902
} | class ____:
def setup(self):
N = 120000
transition_points = np.sort(np.random.choice(np.arange(N), 1400))
transitions = np.zeros(N, dtype=np.bool_)
transitions[transition_points] = True
self.g = transitions.cumsum()
self.df = DataFrame({"signal": np.random.rand(N)})
def time_transform_mean(self):
self.df["signal"].groupby(self.g).transform("mean")
| TransformBools |
python | huggingface__transformers | src/transformers/models/informer/modular_informer.py | {
"start": 2137,
"end": 2196
} | class ____(TimeSeriesMeanScaler):
pass
| InformerMeanScaler |
python | pytorch__pytorch | test/test_cuda_nvml_based_avail.py | {
"start": 3715,
"end": 7258
} | class ____(TestCase):
def test_env_var_parsing(self):
def _parse_visible_devices(val):
from torch.cuda import _parse_visible_devices as _pvd
with patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": val}, clear=True):
return _pvd()
# rest of the string is ignored
self.assertEqual(_parse_visible_devices("1gpu2,2ampere"), [1, 2])
# Negatives abort parsing
self.assertEqual(_parse_visible_devices("0, 1, 2, -1, 3"), [0, 1, 2])
# Double mention of ordinal returns empty set
self.assertEqual(_parse_visible_devices("0, 1, 2, 1"), [])
# Unary pluses and minuses
self.assertEqual(_parse_visible_devices("2, +3, -0, 5"), [2, 3, 0, 5])
# Random string is used as empty set
self.assertEqual(_parse_visible_devices("one,two,3,4"), [])
# Random string is used as separator
self.assertEqual(_parse_visible_devices("4,3,two,one"), [4, 3])
# GPU ids are parsed
self.assertEqual(_parse_visible_devices("GPU-9e8d35e3"), ["GPU-9e8d35e3"])
# Ordinals are not included in GPUid set
self.assertEqual(_parse_visible_devices("GPU-123, 2"), ["GPU-123"])
# MIG ids are parsed
self.assertEqual(_parse_visible_devices("MIG-89c850dc"), ["MIG-89c850dc"])
def test_partial_uuid_resolver(self):
from torch.cuda import _transform_uuid_to_ordinals
uuids = [
"GPU-9942190a-aa31-4ff1-4aa9-c388d80f85f1",
"GPU-9e8d35e3-a134-0fdd-0e01-23811fdbd293",
"GPU-e429a63e-c61c-4795-b757-5132caeb8e70",
"GPU-eee1dfbc-0a0f-6ad8-5ff6-dc942a8b9d98",
"GPU-bbcd6503-5150-4e92-c266-97cc4390d04e",
"GPU-472ea263-58d7-410d-cc82-f7fdece5bd28",
"GPU-e56257c4-947f-6a5b-7ec9-0f45567ccf4e",
"GPU-1c20e77d-1c1a-d9ed-fe37-18b8466a78ad",
]
self.assertEqual(_transform_uuid_to_ordinals(["GPU-9e8d35e3"], uuids), [1])
self.assertEqual(
_transform_uuid_to_ordinals(["GPU-e4", "GPU-9e8d35e3"], uuids), [2, 1]
)
self.assertEqual(
_transform_uuid_to_ordinals(["GPU-9e8d35e3", "GPU-1", "GPU-47"], uuids),
[1, 7, 5],
)
# First invalid UUID aborts parsing
self.assertEqual(
_transform_uuid_to_ordinals(["GPU-123", "GPU-9e8d35e3"], uuids), []
)
self.assertEqual(
_transform_uuid_to_ordinals(["GPU-9e8d35e3", "GPU-123", "GPU-47"], uuids),
[1],
)
# First ambiguous UUID aborts parsing
self.assertEqual(
_transform_uuid_to_ordinals(["GPU-9e8d35e3", "GPU-e", "GPU-47"], uuids), [1]
)
# Duplicate UUIDs result in empty set
self.assertEqual(
_transform_uuid_to_ordinals(["GPU-9e8d35e3", "GPU-47", "GPU-9e8"], uuids),
[],
)
def test_ordinal_parse_visible_devices(self):
def _device_count_nvml(val):
from torch.cuda import _device_count_nvml as _dc
with patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": val}, clear=True):
return _dc()
with patch.object(torch.cuda, "_raw_device_count_nvml", return_value=2):
self.assertEqual(_device_count_nvml("1, 0"), 2)
# Ordinal out of bounds aborts parsing
self.assertEqual(_device_count_nvml("1, 5, 0"), 1)
instantiate_parametrized_tests(TestExtendedCUDAIsAvail)
if __name__ == "__main__":
run_tests()
| TestVisibleDeviceParses |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/jupyter_widget.py | {
"start": 1537,
"end": 1639
} | class ____(FrontendWidget):
"""Dummy class for config inheritance. Destroyed below."""
| IPythonWidget |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_type_lookup.py | {
"start": 12189,
"end": 12314
} | class ____(AbstractFoo):
# Can't resolve this one due to unannotated `x` param
def qux(self):
pass
| ConcreteFoo1 |
python | walkccc__LeetCode | solutions/3356. Zero Array Transformation II/3356.py | {
"start": 0,
"end": 460
} | class ____:
def minZeroArray(self, nums: list[int], queries: list[list[int]]) -> int:
line = [0] * (len(nums) + 1)
decrement = 0
k = 0
for i, num in enumerate(nums):
while decrement + line[i] < num:
if k == len(queries):
return -1
l, r, val = queries[k]
k += 1
if r < i:
continue
line[max(l, i)] += val
line[r + 1] -= val
decrement += line[i]
return k
| Solution |
python | google__jax | jax/_src/pallas/core.py | {
"start": 20976,
"end": 21234
} | class ____(Protocol):
"""Transforms a memory reference on load or store."""
def undo(self, ref: TransformedRef) -> TransformedRef:
raise NotImplementedError("Abstract evaluation not implemented.")
@dataclasses.dataclass(frozen=True)
| MemoryRefTransform |
python | falconry__falcon | tests/test_after_hooks.py | {
"start": 4015,
"end": 4178
} | class ____(WrappedClassResource):
def on_head(self, req, resp):
# Test passing no extra args
super().on_head(req, resp)
| WrappedClassResourceChild |
python | coleifer__peewee | playhouse/psycopg3_ext.py | {
"start": 1623,
"end": 1952
} | class ____(_Psycopg3JsonLookupBase):
def __sql__(self, ctx):
return (ctx
.sql(self.node)
.literal('#>' if self._as_json else '#>>')
.sql(Value('{%s}' % ','.join(map(str, self.parts)))))
def cast_jsonb(node):
return NodeList((node, SQL('::jsonb')), glue='')
| JsonPath |
python | huggingface__transformers | src/transformers/models/apertus/modeling_apertus.py | {
"start": 13016,
"end": 14794
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: ApertusConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = ApertusAttention(config=config, layer_idx=layer_idx)
self.mlp = ApertusMLP(config)
self.attention_layernorm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.feedforward_layernorm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
residual = hidden_states
hidden_states = self.attention_layernorm(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.feedforward_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| ApertusDecoderLayer |
python | aimacode__aima-python | logic4e.py | {
"start": 43293,
"end": 52096
} | class ____(KB):
"""A knowledge base consisting of first-order definite clauses.
>>> kb0 = FolKB([expr('Farmer(Mac)'), expr('Rabbit(Pete)'),
... expr('(Rabbit(r) & Farmer(f)) ==> Hates(f, r)')])
>>> kb0.tell(expr('Rabbit(Flopsie)'))
>>> kb0.retract(expr('Rabbit(Pete)'))
>>> kb0.ask(expr('Hates(Mac, x)'))[x]
Flopsie
>>> kb0.ask(expr('Wife(Pete, x)'))
False
"""
def __init__(self, initial_clauses=None):
self.clauses = [] # inefficient: no indexing
if initial_clauses:
for clause in initial_clauses:
self.tell(clause)
def tell(self, sentence):
if is_definite_clause(sentence):
self.clauses.append(sentence)
else:
raise Exception("Not a definite clause: {}".format(sentence))
def ask_generator(self, query):
return fol_bc_ask(self, query)
def retract(self, sentence):
self.clauses.remove(sentence)
def fetch_rules_for_goal(self, goal):
return self.clauses
# ______________________________________________________________________________
# 9.3 Forward Chaining
# 9.3.2 A simple forward-chaining algorithm
def fol_fc_ask(KB, alpha):
"""A simple forward-chaining algorithm. [Figure 9.3]"""
kb_consts = list({c for clause in KB.clauses for c in constant_symbols(clause)})
def enum_subst(p):
query_vars = list({v for clause in p for v in variables(clause)})
for assignment_list in itertools.product(kb_consts, repeat=len(query_vars)):
theta = {x: y for x, y in zip(query_vars, assignment_list)}
yield theta
# check if we can answer without new inferences
for q in KB.clauses:
phi = unify(q, alpha, {})
if phi is not None:
yield phi
while True:
new = []
for rule in KB.clauses:
p, q = parse_definite_clause(rule)
for theta in enum_subst(p):
if set(subst(theta, p)).issubset(set(KB.clauses)):
q_ = subst(theta, q)
if all([unify(x, q_, {}) is None for x in KB.clauses + new]):
new.append(q_)
phi = unify(q_, alpha, {})
if phi is not None:
yield phi
if not new:
break
for clause in new:
KB.tell(clause)
return None
def subst(s, x):
"""Substitute the substitution s into the expression x.
>>> subst({x: 42, y:0}, F(x) + y)
(F(42) + 0)
"""
if isinstance(x, list):
return [subst(s, xi) for xi in x]
elif isinstance(x, tuple):
return tuple([subst(s, xi) for xi in x])
elif not isinstance(x, Expr):
return x
elif is_var_symbol(x.op):
return s.get(x, x)
else:
return Expr(x.op, *[subst(s, arg) for arg in x.args])
def standardize_variables(sentence, dic=None):
"""Replace all the variables in sentence with new variables."""
if dic is None:
dic = {}
if not isinstance(sentence, Expr):
return sentence
elif is_var_symbol(sentence.op):
if sentence in dic:
return dic[sentence]
else:
v = Expr('v_{}'.format(next(standardize_variables.counter)))
dic[sentence] = v
return v
else:
return Expr(sentence.op,
*[standardize_variables(a, dic) for a in sentence.args])
standardize_variables.counter = itertools.count()
# __________________________________________________________________
# 9.4 Backward Chaining
def fol_bc_ask(KB, query):
"""A simple backward-chaining algorithm for first-order logic. [Figure 9.6]
KB should be an instance of FolKB, and query an atomic sentence."""
return fol_bc_or(KB, query, {})
def fol_bc_or(KB, goal, theta):
for rule in KB.fetch_rules_for_goal(goal):
lhs, rhs = parse_definite_clause(standardize_variables(rule))
for theta1 in fol_bc_and(KB, lhs, unify(rhs, goal, theta)):
yield theta1
def fol_bc_and(KB, goals, theta):
if theta is None:
pass
elif not goals:
yield theta
else:
first, rest = goals[0], goals[1:]
for theta1 in fol_bc_or(KB, subst(theta, first), theta):
for theta2 in fol_bc_and(KB, rest, theta1):
yield theta2
# ______________________________________________________________________________
# A simple KB that defines the relevant conditions of the Wumpus World as in Fig 7.4.
# See Sec. 7.4.3
wumpus_kb = PropKB()
P11, P12, P21, P22, P31, B11, B21 = expr('P11, P12, P21, P22, P31, B11, B21')
wumpus_kb.tell(~P11)
wumpus_kb.tell(B11 | '<=>' | (P12 | P21))
wumpus_kb.tell(B21 | '<=>' | (P11 | P22 | P31))
wumpus_kb.tell(~B11)
wumpus_kb.tell(B21)
test_kb = FolKB(
map(expr, ['Farmer(Mac)',
'Rabbit(Pete)',
'Mother(MrsMac, Mac)',
'Mother(MrsRabbit, Pete)',
'(Rabbit(r) & Farmer(f)) ==> Hates(f, r)',
'(Mother(m, c)) ==> Loves(m, c)',
'(Mother(m, r) & Rabbit(r)) ==> Rabbit(m)',
'(Farmer(f)) ==> Human(f)',
# Note that this order of conjuncts
# would result in infinite recursion:
# '(Human(h) & Mother(m, h)) ==> Human(m)'
'(Mother(m, h) & Human(h)) ==> Human(m)']))
crime_kb = FolKB(
map(expr, ['(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)',
'Owns(Nono, M1)',
'Missile(M1)',
'(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)',
'Missile(x) ==> Weapon(x)',
'Enemy(x, America) ==> Hostile(x)',
'American(West)',
'Enemy(Nono, America)']))
# ______________________________________________________________________________
# Example application (not in the book).
# You can use the Expr class to do symbolic differentiation. This used to be
# a part of AI; now it is considered a separate field, Symbolic Algebra.
def diff(y, x):
"""Return the symbolic derivative, dy/dx, as an Expr.
However, you probably want to simplify the results with simp.
>>> diff(x * x, x)
((x * 1) + (x * 1))
"""
if y == x:
return 1
elif not y.args:
return 0
else:
u, op, v = y.args[0], y.op, y.args[-1]
if op == '+':
return diff(u, x) + diff(v, x)
elif op == '-' and len(y.args) == 1:
return -diff(u, x)
elif op == '-':
return diff(u, x) - diff(v, x)
elif op == '*':
return u * diff(v, x) + v * diff(u, x)
elif op == '/':
return (v * diff(u, x) - u * diff(v, x)) / (v * v)
elif op == '**' and isnumber(x.op):
return (v * u ** (v - 1) * diff(u, x))
elif op == '**':
return (v * u ** (v - 1) * diff(u, x) +
u ** v * Expr('log')(u) * diff(v, x))
elif op == 'log':
return diff(u, x) / u
else:
raise ValueError("Unknown op: {} in diff({}, {})".format(op, y, x))
def simp(x):
"""Simplify the expression x."""
if isnumber(x) or not x.args:
return x
args = list(map(simp, x.args))
u, op, v = args[0], x.op, args[-1]
if op == '+':
if v == 0:
return u
if u == 0:
return v
if u == v:
return 2 * u
if u == -v or v == -u:
return 0
elif op == '-' and len(args) == 1:
if u.op == '-' and len(u.args) == 1:
return u.args[0] # --y ==> y
elif op == '-':
if v == 0:
return u
if u == 0:
return -v
if u == v:
return 0
if u == -v or v == -u:
return 0
elif op == '*':
if u == 0 or v == 0:
return 0
if u == 1:
return v
if v == 1:
return u
if u == v:
return u ** 2
elif op == '/':
if u == 0:
return 0
if v == 0:
return Expr('Undefined')
if u == v:
return 1
if u == -v or v == -u:
return 0
elif op == '**':
if u == 0:
return 0
if v == 0:
return 1
if u == 1:
return 1
if v == 1:
return u
elif op == 'log':
if u == 1:
return 0
else:
raise ValueError("Unknown op: " + op)
# If we fall through to here, we can not simplify further
return Expr(op, *args)
def d(y, x):
"""Differentiate and then simplify.
>>> d(x * x - x, x)
((2 * x) - 1)
"""
return simp(diff(y, x))
| FolKB |
python | fluentpython__example-code-2e | 08-def-type-hints/birds/protocol/parrot.py | {
"start": 24,
"end": 199
} | class ____:
def honk(self, times: int) -> None: # <1>
print('Honk! ' * times * 2)
ze_carioca = Parrot()
alert(ze_carioca) # <2>
| Parrot |
python | falconry__falcon | examples/ws_tutorial/ws_tutorial/app.py | {
"start": 2537,
"end": 3665
} | class ____:
async def on_websocket(self, req: Request, ws: WebSocket):
while True:
try:
query = await ws.receive_text()
report = REPORTS.get(query, None)
logger.info('selected report: %s', report)
if report is None:
await ws.send_media({'error': 'report not found'})
continue
await ws.send_media({'report': report['title']})
except WebSocketDisconnected:
return
app.add_route('/hello', HelloWorldResource())
app.add_route('/echo', EchoWebSocketResource())
app.add_route('/reports', ReportsResource())
app.add_middleware(LoggerMiddleware())
app.add_middleware(AuthMiddleware(['/reports']))
# usually a web server, like Nginx or Caddy, should serve static assets, but
# for the purpose of this example we use falcon.
static_path = pathlib.Path(__file__).parent / 'static'
app.add_static_route('/', static_path, fallback_filename='index.html')
if __name__ == '__main__':
uvicorn.run(app, host='localhost', port=8000) # pragma: no cover
| ReportsResource |
python | FactoryBoy__factory_boy | factory/fuzzy.py | {
"start": 768,
"end": 1136
} | class ____(BaseFuzzyAttribute):
"""Similar to LazyAttribute, but yields random values.
Attributes:
function (callable): function taking no parameters and returning a
random value.
"""
def __init__(self, fuzzer):
super().__init__()
self.fuzzer = fuzzer
def fuzz(self):
return self.fuzzer()
| FuzzyAttribute |
python | scikit-learn__scikit-learn | sklearn/model_selection/tests/test_successive_halving.py | {
"start": 1653,
"end": 29010
} | class ____(DummyClassifier):
def __init__(
self,
strategy="stratified",
random_state=None,
constant=None,
n_estimators=10,
fail_fit=False,
fail_predict=False,
a=0,
):
self.fail_fit = fail_fit
self.fail_predict = fail_predict
self.n_estimators = n_estimators
self.a = a
super().__init__(
strategy=strategy, random_state=random_state, constant=constant
)
def fit(self, X, y):
if self.fail_fit:
raise Exception("fitting failed")
return super().fit(X, y)
def predict(self, X):
if self.fail_predict:
raise Exception("predict failed")
return super().predict(X)
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.FitFailedWarning")
@pytest.mark.filterwarnings("ignore:Scoring failed:UserWarning")
@pytest.mark.filterwarnings("ignore:One or more of the:UserWarning")
@pytest.mark.parametrize("HalvingSearch", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize("fail_at", ("fit", "predict"))
def test_nan_handling(HalvingSearch, fail_at):
"""Check the selection of the best scores in presence of failure represented by
NaN values."""
n_samples = 1_000
X, y = make_classification(n_samples=n_samples, random_state=0)
search = HalvingSearch(
SometimesFailClassifier(),
{f"fail_{fail_at}": [False, True], "a": range(3)},
resource="n_estimators",
max_resources=6,
min_resources=1,
factor=2,
)
search.fit(X, y)
# estimators that failed during fit/predict should always rank lower
# than ones where the fit/predict succeeded
assert not search.best_params_[f"fail_{fail_at}"]
scores = search.cv_results_["mean_test_score"]
ranks = search.cv_results_["rank_test_score"]
# some scores should be NaN
assert np.isnan(scores).any()
unique_nan_ranks = np.unique(ranks[np.isnan(scores)])
# all NaN scores should have the same rank
assert unique_nan_ranks.shape[0] == 1
# NaNs should have the lowest rank
assert (unique_nan_ranks[0] >= ranks).all()
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
(
"aggressive_elimination,"
"max_resources,"
"expected_n_iterations,"
"expected_n_required_iterations,"
"expected_n_possible_iterations,"
"expected_n_remaining_candidates,"
"expected_n_candidates,"
"expected_n_resources,"
),
[
# notice how it loops at the beginning
# also, the number of candidates evaluated at the last iteration is
# <= factor
(True, "limited", 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]),
# no aggressive elimination: we end up with less iterations, and
# the number of candidates at the last iter is > factor, which isn't
# ideal
(False, "limited", 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]),
# # When the amount of resource isn't limited, aggressive_elimination
# # has no effect. Here the default min_resources='exhaust' will take
# # over.
(True, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
(False, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
],
)
def test_aggressive_elimination(
Est,
aggressive_elimination,
max_resources,
expected_n_iterations,
expected_n_required_iterations,
expected_n_possible_iterations,
expected_n_remaining_candidates,
expected_n_candidates,
expected_n_resources,
):
# Test the aggressive_elimination parameter.
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
base_estimator = FastClassifier()
if max_resources == "limited":
max_resources = 180
else:
max_resources = n_samples
sh = Est(
base_estimator,
param_grid,
aggressive_elimination=aggressive_elimination,
max_resources=max_resources,
factor=3,
)
sh.set_params(verbose=True) # just for test coverage
if Est is HalvingRandomSearchCV:
# same number of candidates as with the grid
sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
sh.fit(X, y)
assert sh.n_iterations_ == expected_n_iterations
assert sh.n_required_iterations_ == expected_n_required_iterations
assert sh.n_possible_iterations_ == expected_n_possible_iterations
assert sh.n_resources_ == expected_n_resources
assert sh.n_candidates_ == expected_n_candidates
assert sh.n_remaining_candidates_ == expected_n_remaining_candidates
assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
(
"min_resources,"
"max_resources,"
"expected_n_iterations,"
"expected_n_possible_iterations,"
"expected_n_resources,"
),
[
# with enough resources
("smallest", "auto", 2, 4, [20, 60]),
# with enough resources but min_resources set manually
(50, "auto", 2, 3, [50, 150]),
# without enough resources, only one iteration can be done
("smallest", 30, 1, 1, [20]),
# with exhaust: use as much resources as possible at the last iter
("exhaust", "auto", 2, 2, [333, 999]),
("exhaust", 1000, 2, 2, [333, 999]),
("exhaust", 999, 2, 2, [333, 999]),
("exhaust", 600, 2, 2, [200, 600]),
("exhaust", 599, 2, 2, [199, 597]),
("exhaust", 300, 2, 2, [100, 300]),
("exhaust", 60, 2, 2, [20, 60]),
("exhaust", 50, 1, 1, [20]),
("exhaust", 20, 1, 1, [20]),
],
)
def test_min_max_resources(
Est,
min_resources,
max_resources,
expected_n_iterations,
expected_n_possible_iterations,
expected_n_resources,
):
# Test the min_resources and max_resources parameters, and how they affect
# the number of resources used at each iteration
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": [1, 2], "b": [1, 2, 3]}
base_estimator = FastClassifier()
sh = Est(
base_estimator,
param_grid,
factor=3,
min_resources=min_resources,
max_resources=max_resources,
)
if Est is HalvingRandomSearchCV:
sh.set_params(n_candidates=6) # same number as with the grid
sh.fit(X, y)
expected_n_required_iterations = 2 # given 6 combinations and factor = 3
assert sh.n_iterations_ == expected_n_iterations
assert sh.n_required_iterations_ == expected_n_required_iterations
assert sh.n_possible_iterations_ == expected_n_possible_iterations
assert sh.n_resources_ == expected_n_resources
if min_resources == "exhaust":
assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh.n_resources_)
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
@pytest.mark.parametrize(
"max_resources, n_iterations, n_possible_iterations",
[
("auto", 5, 9), # all resources are used
(1024, 5, 9),
(700, 5, 8),
(512, 5, 8),
(511, 5, 7),
(32, 4, 4),
(31, 3, 3),
(16, 3, 3),
(4, 1, 1), # max_resources == min_resources, only one iteration is
# possible
],
)
def test_n_iterations(Est, max_resources, n_iterations, n_possible_iterations):
# test the number of actual iterations that were run depending on
# max_resources
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=1)
param_grid = {"a": [1, 2], "b": list(range(10))}
base_estimator = FastClassifier()
factor = 2
sh = Est(
base_estimator,
param_grid,
cv=2,
factor=factor,
max_resources=max_resources,
min_resources=4,
)
if Est is HalvingRandomSearchCV:
sh.set_params(n_candidates=20) # same as for HalvingGridSearchCV
sh.fit(X, y)
assert sh.n_required_iterations_ == 5
assert sh.n_iterations_ == n_iterations
assert sh.n_possible_iterations_ == n_possible_iterations
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
def test_resource_parameter(Est):
# Test the resource parameter
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": [1, 2], "b": list(range(10))}
base_estimator = FastClassifier()
sh = Est(base_estimator, param_grid, cv=2, resource="c", max_resources=10, factor=3)
sh.fit(X, y)
assert set(sh.n_resources_) == set([1, 3, 9])
for r_i, params, param_c in zip(
sh.cv_results_["n_resources"],
sh.cv_results_["params"],
sh.cv_results_["param_c"],
):
assert r_i == params["c"] == param_c
with pytest.raises(
ValueError, match="Cannot use resource=1234 which is not supported "
):
sh = HalvingGridSearchCV(
base_estimator, param_grid, cv=2, resource="1234", max_resources=10
)
sh.fit(X, y)
with pytest.raises(
ValueError,
match=(
"Cannot use parameter c as the resource since it is part "
"of the searched parameters."
),
):
param_grid = {"a": [1, 2], "b": [1, 2], "c": [1, 3]}
sh = HalvingGridSearchCV(
base_estimator, param_grid, cv=2, resource="c", max_resources=10
)
sh.fit(X, y)
@pytest.mark.parametrize(
"max_resources, n_candidates, expected_n_candidates",
[
(512, "exhaust", 128), # generate exactly as much as needed
(32, "exhaust", 8),
(32, 8, 8),
(32, 7, 7), # ask for less than what we could
(32, 9, 9), # ask for more than 'reasonable'
],
)
def test_random_search(max_resources, n_candidates, expected_n_candidates):
# Test random search and make sure the number of generated candidates is
# as expected
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": norm, "b": norm}
base_estimator = FastClassifier()
sh = HalvingRandomSearchCV(
base_estimator,
param_grid,
n_candidates=n_candidates,
cv=2,
max_resources=max_resources,
factor=2,
min_resources=4,
)
sh.fit(X, y)
assert sh.n_candidates_[0] == expected_n_candidates
if n_candidates == "exhaust":
# Make sure 'exhaust' makes the last iteration use as much resources as
# we can
assert sh.n_resources_[-1] == max_resources
@pytest.mark.parametrize(
"param_distributions, expected_n_candidates",
[
({"a": [1, 2]}, 2), # all lists, sample less than n_candidates
({"a": randint(1, 3)}, 10), # not all list, respect n_candidates
],
)
def test_random_search_discrete_distributions(
param_distributions, expected_n_candidates
):
# Make sure random search samples the appropriate number of candidates when
# we ask for more than what's possible. How many parameters are sampled
# depends whether the distributions are 'all lists' or not (see
# ParameterSampler for details). This is somewhat redundant with the checks
# in ParameterSampler but interaction bugs were discovered during
# development of SH
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=0)
base_estimator = FastClassifier()
sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10)
sh.fit(X, y)
assert sh.n_candidates_[0] == expected_n_candidates
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
"params, expected_error_message",
[
(
{"resource": "not_a_parameter"},
"Cannot use resource=not_a_parameter which is not supported",
),
(
{"resource": "a", "max_resources": 100},
"Cannot use parameter a as the resource since it is part of",
),
(
{"max_resources": "auto", "resource": "b"},
"resource can only be 'n_samples' when max_resources='auto'",
),
(
{"min_resources": 15, "max_resources": 14},
"min_resources_=15 is greater than max_resources_=14",
),
({"cv": KFold(shuffle=True)}, "must yield consistent folds"),
({"cv": ShuffleSplit()}, "must yield consistent folds"),
],
)
def test_input_errors(Est, params, expected_error_message):
base_estimator = FastClassifier()
param_grid = {"a": [1]}
X, y = make_classification(100)
sh = Est(base_estimator, param_grid, **params)
with pytest.raises(ValueError, match=expected_error_message):
sh.fit(X, y)
@pytest.mark.parametrize(
"params, expected_error_message",
[
(
{"n_candidates": "exhaust", "min_resources": "exhaust"},
"cannot be both set to 'exhaust'",
),
],
)
def test_input_errors_randomized(params, expected_error_message):
# tests specific to HalvingRandomSearchCV
base_estimator = FastClassifier()
param_grid = {"a": [1]}
X, y = make_classification(100)
sh = HalvingRandomSearchCV(base_estimator, param_grid, **params)
with pytest.raises(ValueError, match=expected_error_message):
sh.fit(X, y)
@pytest.mark.parametrize(
"fraction, subsample_test, expected_train_size, expected_test_size",
[
(0.5, True, 40, 10),
(0.5, False, 40, 20),
(0.2, True, 16, 4),
(0.2, False, 16, 20),
],
)
def test_subsample_splitter_shapes(
fraction, subsample_test, expected_train_size, expected_test_size
):
# Make sure splits returned by SubsampleMetaSplitter are of appropriate
# size
n_samples = 100
X, y = make_classification(n_samples)
cv = _SubsampleMetaSplitter(
base_cv=KFold(5),
fraction=fraction,
subsample_test=subsample_test,
random_state=None,
)
for train, test in cv.split(X, y):
assert train.shape[0] == expected_train_size
assert test.shape[0] == expected_test_size
if subsample_test:
assert train.shape[0] + test.shape[0] == int(n_samples * fraction)
else:
assert test.shape[0] == n_samples // cv.base_cv.get_n_splits()
@pytest.mark.parametrize("subsample_test", (True, False))
def test_subsample_splitter_determinism(subsample_test):
# Make sure _SubsampleMetaSplitter is consistent across calls to split():
# - we're OK having training sets differ (they're always sampled with a
# different fraction anyway)
# - when we don't subsample the test set, we want it to be always the same.
# This check is the most important. This is ensured by the determinism
# of the base_cv.
# Note: we could force both train and test splits to be always the same if
# we drew an int seed in _SubsampleMetaSplitter.__init__
n_samples = 100
X, y = make_classification(n_samples)
cv = _SubsampleMetaSplitter(
base_cv=KFold(5), fraction=0.5, subsample_test=subsample_test, random_state=None
)
folds_a = list(cv.split(X, y, groups=None))
folds_b = list(cv.split(X, y, groups=None))
for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b):
assert not np.all(train_a == train_b)
if subsample_test:
assert not np.all(test_a == test_b)
else:
assert np.all(test_a == test_b)
assert np.all(X[test_a] == X[test_b])
@pytest.mark.parametrize(
"k, itr, expected",
[
(1, 0, ["c"]),
(2, 0, ["a", "c"]),
(4, 0, ["d", "b", "a", "c"]),
(10, 0, ["d", "b", "a", "c"]),
(1, 1, ["e"]),
(2, 1, ["f", "e"]),
(10, 1, ["f", "e"]),
(1, 2, ["i"]),
(10, 2, ["g", "h", "i"]),
],
)
def test_top_k(k, itr, expected):
results = { # this isn't a 'real world' result dict
"iter": [0, 0, 0, 0, 1, 1, 2, 2, 2],
"mean_test_score": [4, 3, 5, 1, 11, 10, 5, 6, 9],
"params": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
got = _top_k(results, k=k, itr=itr)
assert np.all(got == expected)
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
def test_cv_results(Est):
# test that the cv_results_ matches correctly the logic of the
# tournament: in particular that the candidates continued in each
# successive iteration are those that were best in the previous iteration
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(0)
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
base_estimator = FastClassifier()
# generate random scores: we want to avoid ties, which would otherwise
# mess with the ordering and make testing harder
def scorer(est, X, y):
return rng.rand()
sh = Est(base_estimator, param_grid, factor=2, scoring=scorer)
if Est is HalvingRandomSearchCV:
# same number of candidates as with the grid
sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
sh.fit(X, y)
# non-regression check for
# https://github.com/scikit-learn/scikit-learn/issues/19203
assert isinstance(sh.cv_results_["iter"], np.ndarray)
assert isinstance(sh.cv_results_["n_resources"], np.ndarray)
cv_results_df = pd.DataFrame(sh.cv_results_)
# just make sure we don't have ties
assert len(cv_results_df["mean_test_score"].unique()) == len(cv_results_df)
cv_results_df["params_str"] = cv_results_df["params"].apply(str)
table = cv_results_df.pivot(
index="params_str", columns="iter", values="mean_test_score"
)
# table looks like something like this:
# iter 0 1 2 3 4 5
# params_str
# {'a': 'l2', 'b': 23} 0.75 NaN NaN NaN NaN NaN
# {'a': 'l1', 'b': 30} 0.90 0.875 NaN NaN NaN NaN
# {'a': 'l1', 'b': 0} 0.75 NaN NaN NaN NaN NaN
# {'a': 'l2', 'b': 3} 0.85 0.925 0.9125 0.90625 NaN NaN
# {'a': 'l1', 'b': 5} 0.80 NaN NaN NaN NaN NaN
# ...
# where a NaN indicates that the candidate wasn't evaluated at a given
# iteration, because it wasn't part of the top-K at some previous
# iteration. We here make sure that candidates that aren't in the top-k at
# any given iteration are indeed not evaluated at the subsequent
# iterations.
nan_mask = pd.isna(table)
n_iter = sh.n_iterations_
for it in range(n_iter - 1):
already_discarded_mask = nan_mask[it]
# make sure that if a candidate is already discarded, we don't evaluate
# it later
assert (
already_discarded_mask & nan_mask[it + 1] == already_discarded_mask
).all()
# make sure that the number of discarded candidate is correct
discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1]
kept_mask = ~already_discarded_mask & ~discarded_now_mask
assert kept_mask.sum() == sh.n_candidates_[it + 1]
# make sure that all discarded candidates have a lower score than the
# kept candidates
discarded_max_score = table[it].where(discarded_now_mask).max()
kept_min_score = table[it].where(kept_mask).min()
assert discarded_max_score < kept_min_score
# We now make sure that the best candidate is chosen only from the last
# iteration.
# We also make sure this is true even if there were higher scores in
# earlier rounds (this isn't generally the case, but worth ensuring it's
# possible).
last_iter = cv_results_df["iter"].max()
idx_best_last_iter = cv_results_df[cv_results_df["iter"] == last_iter][
"mean_test_score"
].idxmax()
idx_best_all_iters = cv_results_df["mean_test_score"].idxmax()
assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter]["params"]
assert (
cv_results_df.iloc[idx_best_last_iter]["mean_test_score"]
< cv_results_df.iloc[idx_best_all_iters]["mean_test_score"]
)
assert (
cv_results_df.iloc[idx_best_last_iter]["params"]
!= cv_results_df.iloc[idx_best_all_iters]["params"]
)
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
def test_base_estimator_inputs(Est):
# make sure that the base estimators are passed the correct parameters and
# number of samples at each iteration.
pd = pytest.importorskip("pandas")
passed_n_samples_fit = []
passed_n_samples_predict = []
passed_params = []
class FastClassifierBookKeeping(FastClassifier):
def fit(self, X, y):
passed_n_samples_fit.append(X.shape[0])
return super().fit(X, y)
def predict(self, X):
passed_n_samples_predict.append(X.shape[0])
return super().predict(X)
def set_params(self, **params):
passed_params.append(params)
return super().set_params(**params)
n_samples = 1024
n_splits = 2
X, y = make_classification(n_samples=n_samples, random_state=0)
param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
base_estimator = FastClassifierBookKeeping()
sh = Est(
base_estimator,
param_grid,
factor=2,
cv=n_splits,
return_train_score=False,
refit=False,
)
if Est is HalvingRandomSearchCV:
# same number of candidates as with the grid
sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
sh.fit(X, y)
assert len(passed_n_samples_fit) == len(passed_n_samples_predict)
passed_n_samples = [
x + y for (x, y) in zip(passed_n_samples_fit, passed_n_samples_predict)
]
# Lists are of length n_splits * n_iter * n_candidates_at_i.
# Each chunk of size n_splits corresponds to the n_splits folds for the
# same candidate at the same iteration, so they contain equal values. We
# subsample such that the lists are of length n_iter * n_candidates_at_it
passed_n_samples = passed_n_samples[::n_splits]
passed_params = passed_params[::n_splits]
cv_results_df = pd.DataFrame(sh.cv_results_)
assert len(passed_params) == len(passed_n_samples) == len(cv_results_df)
uniques, counts = np.unique(passed_n_samples, return_counts=True)
assert (sh.n_resources_ == uniques).all()
assert (sh.n_candidates_ == counts).all()
assert (cv_results_df["params"] == passed_params).all()
assert (cv_results_df["n_resources"] == passed_n_samples).all()
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
def test_groups_support(Est):
# Check if ValueError (when groups is None) propagates to
# HalvingGridSearchCV and HalvingRandomSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=50, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 50)
clf = LinearSVC(random_state=0)
grid = {"C": [1]}
group_cvs = [
LeaveOneGroupOut(),
LeavePGroupsOut(2),
GroupKFold(n_splits=3),
GroupShuffleSplit(random_state=0),
]
error_msg = "The 'groups' parameter should not be None."
for cv in group_cvs:
gs = Est(clf, grid, cv=cv, random_state=0)
with pytest.raises(ValueError, match=error_msg):
gs.fit(X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit(random_state=0)]
for cv in non_group_cvs:
gs = Est(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
@pytest.mark.parametrize("SearchCV", [HalvingRandomSearchCV, HalvingGridSearchCV])
def test_min_resources_null(SearchCV):
"""Check that we raise an error if the minimum resources is set to 0."""
base_estimator = FastClassifier()
param_grid = {"a": [1]}
X = np.empty(0).reshape(0, 3)
search = SearchCV(base_estimator, param_grid, min_resources="smallest")
err_msg = "min_resources_=0: you might have passed an empty dataset X."
with pytest.raises(ValueError, match=err_msg):
search.fit(X, [])
@pytest.mark.parametrize("SearchCV", [HalvingGridSearchCV, HalvingRandomSearchCV])
def test_select_best_index(SearchCV):
"""Check the selection strategy of the halving search."""
results = { # this isn't a 'real world' result dict
"iter": np.array([0, 0, 0, 0, 1, 1, 2, 2, 2]),
"mean_test_score": np.array([4, 3, 5, 1, 11, 10, 5, 6, 9]),
"params": np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"]),
}
# we expect the index of 'i'
best_index = SearchCV._select_best_index(None, None, results)
assert best_index == 8
def test_halving_random_search_list_of_dicts():
"""Check the behaviour of the `HalvingRandomSearchCV` with `param_distribution`
being a list of dictionary.
"""
X, y = make_classification(n_samples=150, n_features=4, random_state=42)
params = [
{"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)},
{"kernel": ["poly"], "degree": [2, 3]},
]
param_keys = (
"param_C",
"param_degree",
"param_gamma",
"param_kernel",
)
score_keys = (
"mean_test_score",
"mean_train_score",
"rank_test_score",
"split0_test_score",
"split1_test_score",
"split2_test_score",
"split0_train_score",
"split1_train_score",
"split2_train_score",
"std_test_score",
"std_train_score",
"mean_fit_time",
"std_fit_time",
"mean_score_time",
"std_score_time",
)
extra_keys = ("n_resources", "iter")
search = HalvingRandomSearchCV(
SVC(), cv=3, param_distributions=params, return_train_score=True, random_state=0
)
search.fit(X, y)
n_candidates = sum(search.n_candidates_)
cv_results = search.cv_results_
# Check results structure
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates, extra_keys)
expected_cv_results_kinds = {
"param_C": "f",
"param_degree": "i",
"param_gamma": "f",
"param_kernel": "O",
}
check_cv_results_array_types(
search, param_keys, score_keys, expected_cv_results_kinds
)
assert all(
(
cv_results["param_C"].mask[i]
and cv_results["param_gamma"].mask[i]
and not cv_results["param_degree"].mask[i]
)
for i in range(n_candidates)
if cv_results["param_kernel"][i] == "poly"
)
assert all(
(
not cv_results["param_C"].mask[i]
and not cv_results["param_gamma"].mask[i]
and cv_results["param_degree"].mask[i]
)
for i in range(n_candidates)
if cv_results["param_kernel"][i] == "rbf"
)
| SometimesFailClassifier |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dlp.py | {
"start": 76186,
"end": 80186
} | class ____(GoogleCloudBaseOperator):
"""
Lists DlpJobs that match the specified filter in the request.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPListDLPJobsOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param results_filter: (Optional) Filter used to specify a subset of results.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param job_type: (Optional) The type of job.
:param order_by: (Optional) Optional comma separated list of fields to order by,
followed by asc or desc postfix.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobsListLink(),)
def __init__(
self,
*,
project_id: str = PROVIDE_PROJECT_ID,
results_filter: str | None = None,
page_size: int | None = None,
job_type: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.results_filter = results_filter
self.page_size = page_size
self.job_type = job_type
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
jobs = hook.list_dlp_jobs(
project_id=self.project_id,
results_filter=self.results_filter,
page_size=self.page_size,
job_type=self.job_type,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobsListLink.persist(
context=context,
project_id=project_id,
)
# the DlpJob.to_dict does not have the right type defined as possible to pass in constructor
return [DlpJob.to_dict(job) for job in jobs]
| CloudDLPListDLPJobsOperator |
python | pyodide__pyodide | src/py/_pyodide/_core_docs.py | {
"start": 31604,
"end": 31843
} | class ____(JsProxy, Generic[P, T]):
"""A JavaScript callable
A JavaScript object is treated as a callable if `typeof x` returns
`"function"`.
"""
_js_type_flags = ["IS_CALLABLE"]
__call__: Callable[P, T]
| JsCallable |
python | apache__airflow | dev/breeze/src/airflow_breeze/prepare_providers/provider_documentation.py | {
"start": 3660,
"end": 4333
} | class ____(Enum):
DOCUMENTATION = "d"
BUGFIX = "b"
FEATURE = "f"
BREAKING_CHANGE = "x"
SKIP = "s"
MISC = "m"
MIN_AIRFLOW_VERSION_BUMP = "v"
# defines the precedence order for provider version bumps
# BREAKING_CHANGE > FEATURE > MIN_AIRFLOW_VERSION_BUMP > BUGFIX > MISC > DOCUMENTATION > SKIP
# When MIN_AIRFLOW_VERSION_BUMP is provided, it means that the bump is at least feature
precedence_order = {
TypeOfChange.SKIP: 0,
TypeOfChange.DOCUMENTATION: 1,
TypeOfChange.MISC: 2,
TypeOfChange.BUGFIX: 3,
TypeOfChange.MIN_AIRFLOW_VERSION_BUMP: 3.5,
TypeOfChange.FEATURE: 4,
TypeOfChange.BREAKING_CHANGE: 5,
}
| TypeOfChange |
python | ray-project__ray | python/ray/train/tests/test_iter_torch_batches_gpu.py | {
"start": 1559,
"end": 1997
} | class ____(ArrowBatchCollateFn):
"""Collate function that returns id and value as a tuple of tensors."""
def __call__(self, batch: pa.Table) -> Tuple[torch.Tensor, torch.Tensor]:
"""Return id and value as a tuple of tensors."""
assert isinstance(batch, pa.Table)
tensor_dict = arrow_batch_to_tensors(batch, combine_chunks=True)
return tensor_dict["id"], tensor_dict["value"]
| TupleArrowBatchCollateFn |
python | crytic__slither | slither/slithir/operations/phi_callback.py | {
"start": 424,
"end": 1575
} | class ____(Phi):
def __init__(
self,
left_variable: StateIRVariable,
nodes: Set["Node"],
call_ir: Union[InternalCall, HighLevelCall],
rvalue: StateIRVariable,
) -> None:
assert is_valid_lvalue(left_variable)
assert isinstance(nodes, set)
super().__init__(left_variable, nodes)
self._call_ir = call_ir
self._rvalues = [rvalue]
self._rvalue_no_callback = rvalue
@property
def callee_ir(self) -> Union[InternalCall, HighLevelCall]:
return self._call_ir
@property
def read(self) -> List[StateIRVariable]:
return self.rvalues
@property
def rvalues(self):
return self._rvalues
@rvalues.setter
def rvalues(self, vals):
self._rvalues = vals
@property
def rvalue_no_callback(self):
"""
rvalue if callback are not considered
"""
return self._rvalue_no_callback
@property
def nodes(self):
return self._nodes
def __str__(self):
return f"{self.lvalue}({self.lvalue.type}) := \u03D5({[v.ssa_name for v in self._rvalues]})"
| PhiCallback |
python | getsentry__sentry | src/sentry/api/endpoints/organization_api_key_details.py | {
"start": 752,
"end": 4060
} | class ____(ControlSiloOrganizationEndpoint):
owner = ApiOwner.ECOSYSTEM
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
}
permission_classes = (OrganizationAdminPermission,)
def get(self, request: Request, organization_context, organization, api_key_id) -> Response:
"""
Retrieves API Key details
`````````````````````````
:pparam string organization_id_or_slug: the id or slug of the organization the
team belongs to.
:pparam string api_key_id: the ID of the api key to delete
:auth: required
"""
try:
api_key = ApiKey.objects.get(id=api_key_id, organization_id=organization.id)
except ApiKey.DoesNotExist:
raise ResourceDoesNotExist
return Response(serialize(api_key, request.user))
def put(self, request: Request, organization_context, organization, api_key_id) -> Response:
"""
Update an API Key
`````````````````
:pparam string organization_id_or_slug: the id or slug of the organization the
team belongs to.
:pparam string api_key_id: the ID of the api key to delete
:param string label: the new label for the api key
:param array scope_list: an array of scopes available for api key
:param string allowed_origins: list of allowed origins
:auth: required
"""
try:
api_key = ApiKey.objects.get(id=api_key_id, organization_id=organization.id)
except ApiKey.DoesNotExist:
raise ResourceDoesNotExist
serializer = ApiKeySerializer(api_key, data=request.data, partial=True)
if serializer.is_valid():
api_key = serializer.save()
self.create_audit_entry(
request=request,
organization=organization,
target_object=api_key_id,
event=audit_log.get_event_id("APIKEY_EDIT"),
data=api_key.get_audit_log_data(),
)
return Response(serialize(api_key, request.user))
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request: Request, organization_context, organization, api_key_id) -> Response:
"""
Deletes an API Key
``````````````````
:pparam string organization_id_or_slug: the id or slug of the organization the
team belongs to.
:pparam string api_key_id: the ID of the api key to delete
:auth: required
"""
try:
api_key = ApiKey.objects.get(id=api_key_id, organization_id=organization.id)
except ApiKey.DoesNotExist:
raise ResourceDoesNotExist
audit_data = api_key.get_audit_log_data()
api_key.delete()
self.create_audit_entry(
request,
organization=organization,
target_object=api_key.id,
event=audit_log.get_event_id("APIKEY_REMOVE"),
data=audit_data,
)
return Response(status=status.HTTP_204_NO_CONTENT)
| OrganizationApiKeyDetailsEndpoint |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 3736,
"end": 7691
} | class ____(fixtures.TestBase):
@testing.combinations(True, False, argnames="pickleit")
def test_pickle_parent_multi_attrs(self, registry, connection, pickleit):
"""test #8133"""
local_foo = Table(
"lf",
registry.metadata,
Column("id", Integer, primary_key=True),
Column("j1", MutableDict.as_mutable(PickleType)),
Column("j2", MutableDict.as_mutable(PickleType)),
Column("j3", MutableDict.as_mutable(PickleType)),
Column("j4", MutableDict.as_mutable(PickleType)),
)
registry.map_imperatively(Foo2, local_foo)
registry.metadata.create_all(connection)
with Session(connection) as sess:
data = dict(
j1={"a": 1},
j2={"b": 2},
j3={"c": 3},
j4={"d": 4},
)
lf = Foo2(**data)
sess.add(lf)
sess.commit()
all_attrs = {"j1", "j2", "j3", "j4"}
for attr in all_attrs:
for loads, dumps in picklers():
with Session(connection) as sess:
f1 = sess.scalars(select(Foo2)).first()
if pickleit:
f2 = loads(dumps(f1))
else:
f2 = f1
existing_dict = getattr(f2, attr)
existing_dict["q"] = "c"
eq_(
inspect(f2).attrs[attr].history,
([existing_dict], (), ()),
)
for other_attr in all_attrs.difference([attr]):
a = inspect(f2).attrs[other_attr].history
b = ((), [data[other_attr]], ())
eq_(a, b)
@testing.combinations("key_present", "key_non_present", argnames="present")
@testing.combinations(
("transient", True),
("detached", True),
("detached", False),
argnames="merge_subject, load",
)
@testing.requires.json_type
def test_session_merge(
self, decl_base, connection, present, load, merge_subject
):
"""test #8446"""
class Thing(decl_base):
__tablename__ = "thing"
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(JSON))
decl_base.metadata.create_all(connection)
with Session(connection) as sess:
sess.add(Thing(id=1, data={"foo": "bar"}))
sess.commit()
if merge_subject == "transient":
t1_to_merge = Thing(id=1, data={"foo": "bar"})
elif merge_subject == "detached":
with Session(connection) as sess:
t1_to_merge = sess.get(Thing, 1)
with Session(connection) as sess:
already_present = None
if present == "key_present":
already_present = sess.get(Thing, 1)
t1_merged = sess.merge(t1_to_merge, load=load)
t1_merged.data["foo"] = "bat"
if present == "key_present":
is_(t1_merged, already_present)
is_true(inspect(t1_merged).attrs.data.history.added)
def test_no_duplicate_reg_w_inheritance(self, decl_base):
"""test #9676"""
class A(decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
json: Mapped[Dict[str, Any]] = mapped_column(
MutableDict.as_mutable(JSON())
)
class B(A):
pass
class C(B):
pass
decl_base.registry.configure()
# the event hook itself doesnt do anything for repeated calls
# already, so there's really nothing else to assert other than there's
# only one "set" event listener
eq_(len(A.json.dispatch.set), 1)
eq_(len(B.json.dispatch.set), 1)
eq_(len(C.json.dispatch.set), 1)
| MiscTest |
python | sphinx-doc__sphinx | sphinx/builders/epub3.py | {
"start": 698,
"end": 1711
} | class ____(NamedTuple):
text: str
refuri: str
children: list[NavPoint]
# writing modes
PAGE_PROGRESSION_DIRECTIONS = {
'horizontal': 'ltr',
'vertical': 'rtl',
}
IBOOK_SCROLL_AXIS = {
'horizontal': 'vertical',
'vertical': 'horizontal',
}
THEME_WRITING_MODES = {
'vertical': 'vertical-rl',
'horizontal': 'horizontal-tb',
}
DOCTYPE = """<!DOCTYPE html>"""
HTML_TAG = (
'<html xmlns="http://www.w3.org/1999/xhtml" '
'xmlns:epub="http://www.idpf.org/2007/ops">'
)
# https://www.w3.org/TR/REC-xml/#NT-Name
_xml_name_start_char = (
':|[A-Z]|_|[a-z]|[\u00c0-\u00d6]'
'|[\u00d8-\u00f6]|[\u00f8-\u02ff]|[\u0370-\u037d]'
'|[\u037f-\u1fff]|[\u200c-\u200d]|[\u2070-\u218f]'
'|[\u2c00-\u2fef]|[\u3001-\ud7ff]|[\uf900-\ufdcf]'
'|[\ufdf0-\ufffd]|[\U00010000-\U000effff]'
)
_xml_name_char = (
_xml_name_start_char + r'\-|\.|[0-9]|\u00b7|[\u0300-\u036f]|[\u203f-\u2040]'
)
_XML_NAME_PATTERN = re.compile(f'({_xml_name_start_char})({_xml_name_char})*')
| NavPoint |
python | django__django | tests/indexes/tests.py | {
"start": 5350,
"end": 12691
} | class ____(TransactionTestCase):
available_apps = ["indexes"]
get_opclass_query = """
SELECT opcname, c.relname FROM pg_opclass AS oc
JOIN pg_index as i on oc.oid = ANY(i.indclass)
JOIN pg_class as c on c.oid = i.indexrelid
WHERE c.relname = '%s'
"""
def test_text_indexes(self):
"""Test creation of PostgreSQL-specific text indexes (#12234)"""
from .models import IndexedArticle
index_sql = [
str(statement)
for statement in connection.schema_editor()._model_indexes_sql(
IndexedArticle
)
]
self.assertEqual(len(index_sql), 5)
self.assertIn('("headline" varchar_pattern_ops)', index_sql[1])
self.assertIn('("body" text_pattern_ops)', index_sql[3])
# unique=True and db_index=True should only create the varchar-specific
# index (#19441).
self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
def test_virtual_relation_indexes(self):
"""Test indexes are not created for related objects"""
index_sql = connection.schema_editor()._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
def test_ops_class(self):
index = Index(
name="test_ops_class",
fields=["headline"],
opclasses=["varchar_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % "test_ops_class")
self.assertEqual(
cursor.fetchall(), [("varchar_pattern_ops", "test_ops_class")]
)
def test_ops_class_multiple_columns(self):
index = Index(
name="test_ops_class_multiple",
fields=["headline", "body"],
opclasses=["varchar_pattern_ops", "text_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % "test_ops_class_multiple")
expected_ops_classes = (
("varchar_pattern_ops", "test_ops_class_multiple"),
("text_pattern_ops", "test_ops_class_multiple"),
)
self.assertCountEqual(cursor.fetchall(), expected_ops_classes)
def test_ops_class_partial(self):
index = Index(
name="test_ops_class_partial",
fields=["body"],
opclasses=["text_pattern_ops"],
condition=Q(headline__contains="China"),
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % "test_ops_class_partial")
self.assertCountEqual(
cursor.fetchall(), [("text_pattern_ops", "test_ops_class_partial")]
)
def test_ops_class_partial_tablespace(self):
indexname = "test_ops_class_tblspace"
index = Index(
name=indexname,
fields=["body"],
opclasses=["text_pattern_ops"],
condition=Q(headline__contains="China"),
db_tablespace="pg_default",
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
self.assertIn(
'TABLESPACE "pg_default" ',
str(index.create_sql(IndexedArticle2, editor)),
)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % indexname)
self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", indexname)])
def test_ops_class_descending(self):
indexname = "test_ops_class_ordered"
index = Index(
name=indexname,
fields=["-body"],
opclasses=["text_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % indexname)
self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", indexname)])
def test_ops_class_descending_partial(self):
indexname = "test_ops_class_ordered_partial"
index = Index(
name=indexname,
fields=["-body"],
opclasses=["text_pattern_ops"],
condition=Q(headline__contains="China"),
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % indexname)
self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", indexname)])
@skipUnlessDBFeature("supports_covering_indexes")
def test_ops_class_include(self):
index_name = "test_ops_class_include"
index = Index(
name=index_name,
fields=["body"],
opclasses=["text_pattern_ops"],
include=["headline"],
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % index_name)
self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", index_name)])
@skipUnlessDBFeature("supports_covering_indexes")
def test_ops_class_include_tablespace(self):
index_name = "test_ops_class_include_tblspace"
index = Index(
name=index_name,
fields=["body"],
opclasses=["text_pattern_ops"],
include=["headline"],
db_tablespace="pg_default",
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
self.assertIn(
'TABLESPACE "pg_default"',
str(index.create_sql(IndexedArticle2, editor)),
)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % index_name)
self.assertCountEqual(cursor.fetchall(), [("text_pattern_ops", index_name)])
def test_ops_class_columns_lists_sql(self):
index = Index(
fields=["headline"],
name="whitespace_idx",
opclasses=["text_pattern_ops"],
)
with connection.schema_editor() as editor:
self.assertIn(
"(%s text_pattern_ops)" % editor.quote_name("headline"),
str(index.create_sql(Article, editor)),
)
def test_ops_class_descending_columns_list_sql(self):
index = Index(
fields=["-headline"],
name="whitespace_idx",
opclasses=["text_pattern_ops"],
)
with connection.schema_editor() as editor:
self.assertIn(
"(%s text_pattern_ops DESC)" % editor.quote_name("headline"),
str(index.create_sql(Article, editor)),
)
@skipUnless(connection.vendor == "mysql", "MySQL tests")
| SchemaIndexesPostgreSQLTests |
python | mlflow__mlflow | tests/telemetry/test_track.py | {
"start": 595,
"end": 6805
} | class ____(Event):
name = "test_event"
def test_record_usage_event(mock_requests, mock_telemetry_client: TelemetryClient):
@record_usage_event(TestEvent)
def succeed_func():
# sleep to make sure duration_ms > 0
time.sleep(0.01)
return True
@record_usage_event(TestEvent)
def fail_func():
time.sleep(0.01)
raise ValueError("test")
with mock.patch(
"mlflow.telemetry.track.get_telemetry_client", return_value=mock_telemetry_client
):
succeed_func()
with pytest.raises(ValueError, match="test"):
fail_func()
mock_telemetry_client.flush()
records = [
record["data"] for record in mock_requests if record["data"]["event_name"] == TestEvent.name
]
assert len(records) == 2
succeed_record = records[0]
assert succeed_record["schema_version"] == 2
assert succeed_record["event_name"] == TestEvent.name
assert succeed_record["status"] == Status.SUCCESS.value
assert succeed_record["params"] is None
assert succeed_record["duration_ms"] > 0
fail_record = records[1]
assert fail_record["schema_version"] == 2
assert fail_record["event_name"] == TestEvent.name
assert fail_record["status"] == Status.FAILURE.value
assert fail_record["params"] is None
assert fail_record["duration_ms"] > 0
telemetry_info = mock_telemetry_client.info
assert telemetry_info.items() <= succeed_record.items()
assert telemetry_info.items() <= fail_record.items()
def test_backend_store_info(tmp_path, mock_telemetry_client: TelemetryClient):
sqlite_uri = f"sqlite:///{tmp_path.joinpath('test.db')}"
with _use_tracking_uri(sqlite_uri):
mock_telemetry_client._update_backend_store()
assert mock_telemetry_client.info["tracking_uri_scheme"] == "sqlite"
with _use_tracking_uri(tmp_path):
mock_telemetry_client._update_backend_store()
assert mock_telemetry_client.info["tracking_uri_scheme"] == "file"
@pytest.mark.parametrize(
("env_var", "value", "expected_result"),
[
(MLFLOW_DISABLE_TELEMETRY.name, "true", None),
(MLFLOW_DISABLE_TELEMETRY.name, "false", TelemetryClient),
("DO_NOT_TRACK", "true", None),
("DO_NOT_TRACK", "false", TelemetryClient),
],
)
def test_record_usage_event_respect_env_var(
monkeypatch, env_var, value, expected_result, bypass_env_check
):
monkeypatch.setenv(env_var, value)
# mimic the behavior of `import mlflow`
set_telemetry_client()
telemetry_client = get_telemetry_client()
if expected_result is None:
assert is_telemetry_disabled() is True
assert telemetry_client is None
else:
assert isinstance(telemetry_client, expected_result)
telemetry_client._clean_up()
def test_record_usage_event_update_env_var_after_import(
monkeypatch, mock_requests, mock_telemetry_client
):
assert isinstance(mock_telemetry_client, TelemetryClient)
@record_usage_event(TestEvent)
def test_func():
pass
with mock.patch(
"mlflow.telemetry.track.get_telemetry_client", return_value=mock_telemetry_client
):
test_func()
mock_telemetry_client.flush()
events = {record["data"]["event_name"] for record in mock_requests}
assert TestEvent.name in events
mock_requests.clear()
monkeypatch.setenv("MLFLOW_DISABLE_TELEMETRY", "true")
test_func()
# no new record should be added
assert len(mock_requests) == 0
@pytest.mark.no_mock_requests_get
def test_is_telemetry_disabled_for_event():
def mock_requests_get(*args, **kwargs):
time.sleep(1)
return mock.Mock(
status_code=200,
json=mock.Mock(
return_value={
"mlflow_version": VERSION,
"disable_telemetry": False,
"ingestion_url": "http://localhost:9999",
"rollout_percentage": 100,
"disable_events": ["test_event"],
}
),
)
with mock.patch("mlflow.telemetry.client.requests.get", side_effect=mock_requests_get):
client = TelemetryClient()
assert client is not None
client.activate()
assert client.config is None
with mock.patch("mlflow.telemetry.track.get_telemetry_client", return_value=client):
# do not skip when config is not fetched yet
assert _is_telemetry_disabled_for_event(TestEvent) is False
assert _is_telemetry_disabled_for_event(TestEvent) is False
time.sleep(2)
assert client._is_config_fetched is True
assert client.config is not None
# event not in disable_events, do not skip
assert _is_telemetry_disabled_for_event(CreateLoggedModelEvent) is False
# event in disable_events, skip
assert _is_telemetry_disabled_for_event(TestEvent) is True
# clean up
client._clean_up()
# test telemetry disabled after config is fetched
def mock_requests_get(*args, **kwargs):
time.sleep(1)
return mock.Mock(status_code=403)
with mock.patch("mlflow.telemetry.client.requests.get", side_effect=mock_requests_get):
client = TelemetryClient()
assert client is not None
client.activate()
assert client.config is None
with (
mock.patch("mlflow.telemetry.track.get_telemetry_client", return_value=client),
mock.patch(
"mlflow.telemetry.client._set_telemetry_client"
) as mock_set_telemetry_client,
):
# do not skip when config is not fetched yet
assert _is_telemetry_disabled_for_event(CreateLoggedModelEvent) is False
assert _is_telemetry_disabled_for_event(TestEvent) is False
time.sleep(2)
assert client._is_config_fetched is True
assert client.config is None
# global telemetry client is set to None when telemetry is disabled
mock_set_telemetry_client.assert_called_once_with(None)
# clean up
client._clean_up()
| TestEvent |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_build_signature.py | {
"start": 2787,
"end": 3203
} | class ____:
__annotations__ = get_type_hints(use_annotations)
__signature__ = signature(use_bad_signature)
def __init__(self, **kwargs):
assert set(kwargs) == {"testX"}
assert isinstance(kwargs["testX"], float)
@given(st.builds(ModelWithBadAliasSignature))
def test_build_with_non_types_in_signature(val):
assert isinstance(val, ModelWithBadAliasSignature)
| ModelWithBadAliasSignature |
python | ray-project__ray | python/ray/llm/_internal/serve/core/ingress/builder.py | {
"start": 777,
"end": 1751
} | class ____(BaseModelExtended):
ingress_cls: Union[str, Type[OpenAiIngress]] = Field(
default=OpenAiIngress,
description="The class name of the ingress to use. It can be in form of `module_name.class_name` or `module_name:class_name` or the class itself. The class constructor should take the following arguments: `(llm_deployments: List[DeploymentHandle], **extra_kwargs)` where `llm_deployments` is a list of DeploymentHandle objects from `LLMServer` deployments.",
)
ingress_extra_kwargs: Optional[dict] = Field(
default_factory=dict,
description="""The kwargs to bind to the ingress deployment. This will be passed to the ingress class constructor.""",
)
@field_validator("ingress_cls")
@classmethod
def validate_class(
cls, value: Union[str, Type[OpenAiIngress]]
) -> Type[OpenAiIngress]:
if isinstance(value, str):
return load_class(value)
return value
| IngressClsConfig |
python | sqlalchemy__sqlalchemy | test/orm/test_cycles.py | {
"start": 43900,
"end": 46472
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50), nullable=False),
Column(
"child_id",
Integer,
ForeignKey("child.id", name="c1"),
nullable=True,
),
)
Table(
"child",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50), nullable=False),
Column("child_id", Integer, ForeignKey("child.id")),
Column(
"parent_id", Integer, ForeignKey("parent.id"), nullable=True
),
)
@classmethod
def setup_classes(cls):
class Parent(cls.Basic):
def __init__(self, name=""):
self.name = name
class Child(cls.Basic):
def __init__(self, name=""):
self.name = name
def test_one(self):
Child, Parent, parent, child = (
self.classes.Child,
self.classes.Parent,
self.tables.parent,
self.tables.child,
)
self.mapper_registry.map_imperatively(
Parent,
parent,
properties={
"children": relationship(
Child, primaryjoin=parent.c.id == child.c.parent_id
),
"child": relationship(
Child,
primaryjoin=parent.c.child_id == child.c.id,
post_update=True,
),
},
)
self.mapper_registry.map_imperatively(
Child,
child,
properties={"parent": relationship(Child, remote_side=child.c.id)},
)
session = fixture_session()
p1 = Parent("p1")
c1 = Child("c1")
c2 = Child("c2")
p1.children = [c1, c2]
c2.parent = c1
p1.child = c2
session.add_all([p1, c1, c2])
session.flush()
p2 = Parent("p2")
c3 = Child("c3")
p2.children = [c3]
p2.child = c3
session.add(p2)
session.delete(c2)
p1.children.remove(c2)
p1.child = None
session.flush()
p2.child = None
session.flush()
| SelfReferentialPostUpdateTest3 |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/engine.py | {
"start": 7553,
"end": 8569
} | class ____(TypedDict):
status: str
runtime: float
drawtime: float
gctime: float
events: list[str]
PhaseStatistics = TypedDict(
"PhaseStatistics",
{
"duration-seconds": float,
"test-cases": list[CallStats],
"distinct-failures": int,
"shrinks-successful": int,
},
)
StatisticsDict = TypedDict(
"StatisticsDict",
{
"generate-phase": NotRequired[PhaseStatistics],
"reuse-phase": NotRequired[PhaseStatistics],
"shrink-phase": NotRequired[PhaseStatistics],
"stopped-because": NotRequired[str],
"targets": NotRequired[dict[str, float]],
"nodeid": NotRequired[str],
},
)
def choice_count(choices: Sequence[ChoiceT | ChoiceTemplate]) -> int | None:
count = 0
for choice in choices:
if isinstance(choice, ChoiceTemplate):
if choice.count is None:
return None
count += choice.count
else:
count += 1
return count
| CallStats |
python | django__django | tests/admin_views/admin.py | {
"start": 13470,
"end": 13590
} | class ____(admin.ModelAdmin):
list_display = ("id", "collector", "order")
list_editable = ("order",)
| CategoryAdmin |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 364687,
"end": 365357
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdatePullRequestBranch"""
__schema__ = github_schema
__field_names__ = ("pull_request_id", "expected_head_oid", "client_mutation_id")
pull_request_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="pullRequestId")
"""The Node ID of the pull request."""
expected_head_oid = sgqlc.types.Field(GitObjectID, graphql_name="expectedHeadOid")
"""The head ref oid for the upstream branch."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdatePullRequestBranchInput |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_ecs.py | {
"start": 3735,
"end": 4046
} | class ____:
@pytest.fixture(autouse=True)
def _setup_test_cases(self, monkeypatch):
self.client = boto3.client("ecs", region_name="eu-west-3")
monkeypatch.setattr(EcsHook, "conn", self.client)
monkeypatch.setenv("AIRFLOW_CONN_AWS_TEST_CONN", '{"conn_type": "aws"}')
| EcsBaseTestCase |
python | numpy__numpy | numpy/ma/extras.py | {
"start": 54231,
"end": 55119
} | class ____(AxisConcatenator):
"""
Translate slice objects to concatenation along an axis.
For documentation on usage, see `mr_class`.
See Also
--------
mr_class
"""
__slots__ = ()
concatenate = staticmethod(concatenate)
@classmethod
def makemat(cls, arr):
# There used to be a view as np.matrix here, but we may eventually
# deprecate that class. In preparation, we use the unmasked version
# to construct the matrix (with copy=False for backwards compatibility
# with the .view)
data = super().makemat(arr.data, copy=False)
return array(data, mask=arr.mask)
def __getitem__(self, key):
# matrix builder syntax, like 'a, b; c, d'
if isinstance(key, str):
raise MAError("Unavailable for masked array.")
return super().__getitem__(key)
| MAxisConcatenator |
python | mlflow__mlflow | mlflow/utils/time.py | {
"start": 538,
"end": 1260
} | class ____:
"""
Measures elapsed time.
.. code-block:: python
from mlflow.utils.time import Timer
with Timer() as t:
...
print(f"Elapsed time: {t:.2f} seconds")
"""
def __init__(self):
self.elapsed = 0.0
def __enter__(self):
self.elapsed = time.perf_counter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.elapsed = time.perf_counter() - self.elapsed
def __format__(self, format_spec: str) -> str:
return self.elapsed.__format__(format_spec)
def __repr__(self) -> str:
return self.elapsed.__repr__()
def __str__(self) -> str:
return self.elapsed.__str__()
| Timer |
python | pytorch__pytorch | torch/_dynamo/variables/iter.py | {
"start": 13914,
"end": 18109
} | class ____(IteratorVariable):
"""
Represents zip(*iterables)
"""
_nonvar_fields = {
"index",
"strict",
*IteratorVariable._nonvar_fields,
}
def __init__(
self,
iterables: list[VariableTracker],
strict: bool = False,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
assert isinstance(iterables, list)
# can be list[Variable] or VariableTracker (with next_variable implemented)
self.iterables = iterables
self.index = 0
self.strict = strict
def python_type(self) -> type[zip]: # type: ignore[type-arg]
return zip
def has_unpack_var_sequence(self, tx: "InstructionTranslator") -> bool:
return all(
isinstance(it, list) or it.has_unpack_var_sequence(tx)
for it in self.iterables
)
def unpack_var_sequence(
self, tx: "InstructionTranslator"
) -> list["VariableTracker"]:
assert self.has_unpack_var_sequence(tx)
iterables = []
for it in self.iterables:
if isinstance(it, list):
iterables.append(it[self.index :])
else:
iterables.append(it.unpack_var_sequence(tx))
kwargs = {"strict": self.strict} if self.strict else {}
zipped = zip(*iterables, **kwargs)
return [variables.TupleVariable(list(var)) for var in zipped]
def next_variable(self, tx: "InstructionTranslator") -> VariableTracker:
assert self.is_mutable()
if len(self.iterables) == 0:
raise_observed_exception(StopIteration, tx)
old_index = self.index
args = []
def get_item(
it: Union[list[VariableTracker], VariableTracker],
) -> VariableTracker:
if isinstance(it, list):
if old_index >= len(it):
raise_observed_exception(StopIteration, tx)
return it[old_index]
else:
return it.next_variable(tx)
idx: int | None = None
try:
for idx, it in enumerate(self.iterables): # noqa:B007
args.append(get_item(it))
except ObservedUserStopIteration:
if self.strict:
if idx == 0:
# all other iterables should be exhausted
for it in self.iterables:
try:
get_item(it)
except ObservedUserStopIteration:
handle_observed_exception(tx)
continue
# no ObservedUserStopIteration - fall through to UserError
break
else:
# all iterables exhausted, raise original error
raise
handle_observed_exception(tx)
raise UserError(
ValueError, # type: ignore[arg-type]
"zip() has one argument of len differing from others",
) from None
raise
tx.output.side_effects.mutation(self)
self.index += 1
return variables.TupleVariable(args)
def reconstruct_items(self, codegen: "PyCodegen") -> None:
for it in self.iterables:
if isinstance(it, list):
remaining_items = it[self.index :]
codegen.foreach(remaining_items)
codegen.append_output(create_build_tuple(len(remaining_items)))
else:
codegen(it)
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(
lambda: codegen.load_import_from("builtins", "zip"), call_function_ex=True
)
self.reconstruct_items(codegen)
codegen.append_output(create_build_tuple(len(self.iterables)))
codegen.extend_output(
[
codegen.create_load_const("strict"),
codegen.create_load_const(self.strict),
create_instruction("BUILD_MAP", arg=1),
*create_call_function_ex(True, False),
]
)
| ZipVariable |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/auto_ml.py | {
"start": 1817,
"end": 4169
} | class ____(GoogleCloudBaseOperator):
"""The base class for operators that launch AutoML jobs on VertexAI."""
def __init__(
self,
*,
project_id: str,
region: str,
display_name: str,
labels: dict[str, str] | None = None,
parent_model: str | None = None,
is_default_version: bool | None = None,
model_version_aliases: list[str] | None = None,
model_version_description: str | None = None,
training_encryption_spec_key_name: str | None = None,
model_encryption_spec_key_name: str | None = None,
# RUN
training_fraction_split: float | None = None,
test_fraction_split: float | None = None,
model_display_name: str | None = None,
model_labels: dict[str, str] | None = None,
sync: bool = True,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.display_name = display_name
self.labels = labels
self.parent_model = parent_model
self.is_default_version = is_default_version
self.model_version_aliases = model_version_aliases
self.model_version_description = model_version_description
self.training_encryption_spec_key_name = training_encryption_spec_key_name
self.model_encryption_spec_key_name = model_encryption_spec_key_name
# START Run param
self.training_fraction_split = training_fraction_split
self.test_fraction_split = test_fraction_split
self.model_display_name = model_display_name
self.model_labels = model_labels
self.sync = sync
# END Run param
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.hook: AutoMLHook | None = None
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.region,
"project_id": self.project_id,
}
def on_kill(self) -> None:
"""Act as a callback called when the operator is killed; cancel any running job."""
if self.hook:
self.hook.cancel_auto_ml_job()
| AutoMLTrainingJobBaseOperator |
python | Textualize__textual | tests/css/test_screen_css.py | {
"start": 1108,
"end": 8637
} | class ____(BaseApp):
"""Base app for testing screen CSS when switching a screen."""
def on_mount(self):
self.push_screen(BaseScreen())
def check_colors_before_screen_css(app: BaseApp):
assert app.screen.query_one("#app-css").styles.background == GREEN
assert app.screen.query_one("#screen-css-path").styles.background == GREEN
assert app.screen.query_one("#screen-css").styles.background == GREEN
def check_colors_after_screen_css(app: BaseApp):
assert app.screen.query_one("#app-css").styles.background == GREEN
assert app.screen.query_one("#screen-css-path").styles.background == BLUE
assert app.screen.query_one("#screen-css").styles.background == RED
async def test_screen_pushing_and_popping_does_not_reparse_css():
"""Check that pushing and popping the same screen doesn't trigger CSS reparses."""
class MyApp(BaseApp):
def key_p(self):
self.push_screen(ScreenWithCSS())
def key_o(self):
self.pop_screen()
counter = 0
def reparse_wrapper(reparse):
def _reparse(*args, **kwargs):
nonlocal counter
counter += 1
return reparse(*args, **kwargs)
return _reparse
app = MyApp()
app.stylesheet.reparse = reparse_wrapper(app.stylesheet.reparse)
async with app.run_test() as pilot:
await pilot.press("p")
await pilot.press("o")
await pilot.press("p")
await pilot.press("o")
await pilot.press("p")
await pilot.press("o")
await pilot.press("p")
await pilot.press("o")
assert counter == 1
async def test_screen_css_push_screen_instance():
"""Check that screen CSS is loaded and applied when pushing a screen instance."""
class MyApp(BaseApp):
def key_p(self):
self.push_screen(ScreenWithCSS())
def key_o(self):
self.pop_screen()
app = MyApp()
async with app.run_test() as pilot:
check_colors_before_screen_css(app)
await pilot.press("p")
check_colors_after_screen_css(app)
await pilot.press("o")
check_colors_after_screen_css(app)
async def test_screen_css_push_screen_instance_by_name():
"""Check that screen CSS is loaded and applied when pushing a screen name that points to a screen instance."""
class MyApp(BaseApp):
SCREENS = {"screenwithcss": ScreenWithCSS}
def key_p(self):
self.push_screen("screenwithcss")
def key_o(self):
self.pop_screen()
app = MyApp()
async with app.run_test() as pilot:
check_colors_before_screen_css(app)
await pilot.press("p")
check_colors_after_screen_css(app)
await pilot.press("o")
check_colors_after_screen_css(app)
async def test_screen_css_push_screen_type_by_name():
"""Check that screen CSS is loaded and applied when pushing a screen name that points to a screen class."""
class MyApp(BaseApp):
SCREENS = {"screenwithcss": ScreenWithCSS}
def key_p(self):
self.push_screen("screenwithcss")
def key_o(self):
self.pop_screen()
app = MyApp()
async with app.run_test() as pilot:
check_colors_before_screen_css(app)
await pilot.press("p")
check_colors_after_screen_css(app)
await pilot.press("o")
check_colors_after_screen_css(app)
async def test_screen_css_switch_screen_instance():
"""Check that screen CSS is loaded and applied when switching to a screen instance."""
class MyApp(SwitchBaseApp):
def key_p(self):
self.switch_screen(ScreenWithCSS())
def key_o(self):
self.pop_screen()
app = MyApp()
async with app.run_test() as pilot:
check_colors_before_screen_css(app)
await pilot.press("p")
check_colors_after_screen_css(app)
await pilot.press("o")
check_colors_after_screen_css(app)
async def test_screen_css_switch_screen_instance_by_name():
"""Check that screen CSS is loaded and applied when switching a screen name that points to a screen instance."""
class MyApp(SwitchBaseApp):
SCREENS = {"screenwithcss": ScreenWithCSS}
def key_p(self):
self.switch_screen("screenwithcss")
def key_o(self):
self.pop_screen()
app = MyApp()
async with app.run_test() as pilot:
check_colors_before_screen_css(app)
await pilot.press("p")
check_colors_after_screen_css(app)
await pilot.press("o")
check_colors_after_screen_css(app)
async def test_screen_css_switch_screen_type_by_name():
"""Check that screen CSS is loaded and applied when switching a screen name that points to a screen class."""
class MyApp(SwitchBaseApp):
SCREENS = {"screenwithcss": ScreenWithCSS}
async def key_p(self):
self.switch_screen("screenwithcss")
def key_o(self):
self.pop_screen()
app = MyApp()
async with app.run_test() as pilot:
check_colors_before_screen_css(app)
await pilot.press("p")
check_colors_after_screen_css(app)
await pilot.press("o")
check_colors_after_screen_css(app)
async def test_screen_css_switch_mode_screen_instance():
"""Check that screen CSS is loaded and applied when switching to a mode with a screen instance."""
class MyApp(BaseApp):
MODES = {
"base": BaseScreen,
"mode": ScreenWithCSS,
}
def key_p(self):
self.switch_mode("mode")
def key_o(self):
self.switch_mode("base")
app = MyApp()
async with app.run_test() as pilot:
await pilot.press("o")
check_colors_before_screen_css(app)
await pilot.press("p")
check_colors_after_screen_css(app)
await pilot.press("o")
check_colors_after_screen_css(app)
async def test_screen_css_switch_mode_screen_instance_by_name():
"""Check that screen CSS is loaded and applied when switching to a mode with a screen instance name."""
class MyApp(BaseApp):
SCREENS = {
"screenwithcss": ScreenWithCSS,
}
MODES = {
"base": BaseScreen,
"mode": "screenwithcss",
}
def key_p(self):
self.switch_mode("mode")
def key_o(self):
self.switch_mode("base")
app = MyApp()
async with app.run_test() as pilot:
await pilot.press("o")
check_colors_before_screen_css(app)
await pilot.press("p")
check_colors_after_screen_css(app)
await pilot.press("o")
check_colors_after_screen_css(app)
async def test_screen_css_switch_mode_screen_type_by_name():
"""Check that screen CSS is loaded and applied when switching to a mode with a screen type name."""
class MyApp(BaseApp):
SCREENS = {
"screenwithcss": ScreenWithCSS,
}
MODES = {
"base": BaseScreen,
"mode": "screenwithcss",
}
def key_p(self):
self.switch_mode("mode")
def key_o(self):
self.switch_mode("base")
app = MyApp()
async with app.run_test() as pilot:
await pilot.press("o")
check_colors_before_screen_css(app)
await pilot.press("p")
check_colors_after_screen_css(app)
await pilot.press("o")
check_colors_after_screen_css(app)
| SwitchBaseApp |
python | getsentry__sentry | tests/sentry/grouping/test_enhancer.py | {
"start": 29073,
"end": 33442
} | class ____(TestCase):
@dataclass
class DummyRustFrame:
contributes: bool | None
hint: str | None
@dataclass
class DummyRustStacktraceResult:
contributes: bool | None
hint: str | None
DummyRustExceptionData = dict[str, bytes | None]
DummyMatchFrame = dict[str, Any]
class MockRustEnhancements:
def __init__(
self,
frame_results: Sequence[tuple[bool, str | None]],
stacktrace_results: tuple[bool, str | None] = (True, None),
):
self.frame_results = frame_results
self.stacktrace_results = stacktrace_results
def assemble_stacktrace_component(
self,
_match_frames: list[AssembleStacktraceComponentTest.DummyMatchFrame],
_exception_data: AssembleStacktraceComponentTest.DummyRustExceptionData,
rust_frames: list[AssembleStacktraceComponentTest.DummyRustFrame],
) -> AssembleStacktraceComponentTest.DummyRustStacktraceResult:
# The real (rust) version of this function modifies the RustFrames in `rust_frames` in
# place, but that's not possible from python, so instead we replace the contents of the
# list with our own RustFrames
dummy_rust_frames = [
AssembleStacktraceComponentTest.DummyRustFrame(contributes, hint)
for contributes, hint in self.frame_results
]
rust_frames[:] = dummy_rust_frames
return AssembleStacktraceComponentTest.DummyRustStacktraceResult(
*self.stacktrace_results
)
def in_app_frame(self, contributes: bool, hint: str | None) -> FrameGroupingComponent:
return FrameGroupingComponent(values=[], in_app=True, contributes=contributes, hint=hint)
def system_frame(self, contributes: bool, hint: str | None) -> FrameGroupingComponent:
return FrameGroupingComponent(values=[], in_app=False, contributes=contributes, hint=hint)
def assert_frame_values_match_expected(
self,
stacktrace_component: StacktraceGroupingComponent,
expected_frame_results: Sequence[tuple[bool, str | None]],
) -> None:
num_frames = len(stacktrace_component.values)
assert len(expected_frame_results) == num_frames
for i, frame_component, (expected_contributes, expected_hint) in zip(
range(num_frames),
stacktrace_component.values,
expected_frame_results,
):
assert (
frame_component.contributes is expected_contributes
), f"frame {i} has incorrect `contributes` value. Expected {expected_contributes} but got {frame_component.contributes}."
assert (
frame_component.hint == expected_hint
), f"frame {i} has incorrect `hint` value. Expected '{expected_hint}' but got '{frame_component.hint}'."
def test_marks_system_frames_non_contributing_in_app_variant(self) -> None:
# For the app variant, out-of-app frames are automatically marked non-contributing when
# they're created. Thus the only way they could even _try_ to contribute is if they match
# an un-ignore rule.
incoming_frames = [{"in_app": False}]
frame_components = [self.system_frame(contributes=False, hint="non app frame")]
rust_frame_results = [(True, "un-ignored by stacktrace rule (...)")]
app_expected_frame_results = [(False, "non app frame")]
enhancements = EnhancementsConfig.from_rules_text("")
mock_rust_enhancements = self.MockRustEnhancements(
frame_results=rust_frame_results, stacktrace_results=(False, "some stacktrace hint")
)
with mock.patch.object(
enhancements, "contributes_rust_enhancements", mock_rust_enhancements
):
app_stacktrace_component = enhancements.assemble_stacktrace_component(
variant_name="app",
frame_components=frame_components,
frames=incoming_frames,
platform="javascript",
exception_data={},
)
self.assert_frame_values_match_expected(
app_stacktrace_component, expected_frame_results=app_expected_frame_results
)
| AssembleStacktraceComponentTest |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/action_log_probs.py | {
"start": 952,
"end": 4703
} | class ____(NamedTuple):
"""
A NamedTuple containing the tensor for continuous log probs and list of tensors for
discrete log probs of individual actions as well as all the log probs for an entire branch.
Utility functions provide numpy <=> tensor conversions to be used by the optimizers.
:param continuous_tensor: Torch tensor corresponding to log probs of continuous actions
:param discrete_list: List of Torch tensors each corresponding to log probs of the discrete actions that were
sampled.
:param all_discrete_list: List of Torch tensors each corresponding to all log probs of
a discrete action branch, even the discrete actions that were not sampled. all_discrete_list is a list of Tensors,
each Tensor corresponds to one discrete branch log probabilities.
"""
continuous_tensor: torch.Tensor
discrete_list: Optional[List[torch.Tensor]]
all_discrete_list: Optional[List[torch.Tensor]]
@property
def discrete_tensor(self):
"""
Returns the discrete log probs list as a stacked tensor
"""
return torch.stack(self.discrete_list, dim=-1)
@property
def all_discrete_tensor(self):
"""
Returns the discrete log probs of each branch as a tensor
"""
return torch.cat(self.all_discrete_list, dim=1)
def to_log_probs_tuple(self) -> LogProbsTuple:
"""
Returns a LogProbsTuple. Only adds if tensor is not None. Otherwise,
LogProbsTuple uses a default.
"""
log_probs_tuple = LogProbsTuple()
if self.continuous_tensor is not None:
continuous = ModelUtils.to_numpy(self.continuous_tensor)
log_probs_tuple.add_continuous(continuous)
if self.discrete_list is not None:
discrete = ModelUtils.to_numpy(self.discrete_tensor)
log_probs_tuple.add_discrete(discrete)
return log_probs_tuple
def _to_tensor_list(self) -> List[torch.Tensor]:
"""
Returns the tensors in the ActionLogProbs as a flat List of torch Tensors. This
is private and serves as a utility for self.flatten()
"""
tensor_list: List[torch.Tensor] = []
if self.continuous_tensor is not None:
tensor_list.append(self.continuous_tensor)
if self.discrete_list is not None:
tensor_list.append(self.discrete_tensor)
return tensor_list
def flatten(self) -> torch.Tensor:
"""
A utility method that returns all log probs in ActionLogProbs as a flattened tensor.
This is useful for algorithms like PPO which can treat all log probs in the same way.
"""
return torch.cat(self._to_tensor_list(), dim=1)
@staticmethod
def from_buffer(buff: AgentBuffer) -> "ActionLogProbs":
"""
A static method that accesses continuous and discrete log probs fields in an AgentBuffer
and constructs the corresponding ActionLogProbs from the retrieved np arrays.
"""
continuous: torch.Tensor = None
discrete: List[torch.Tensor] = None # type: ignore
if BufferKey.CONTINUOUS_LOG_PROBS in buff:
continuous = ModelUtils.list_to_tensor(buff[BufferKey.CONTINUOUS_LOG_PROBS])
if BufferKey.DISCRETE_LOG_PROBS in buff:
discrete_tensor = ModelUtils.list_to_tensor(
buff[BufferKey.DISCRETE_LOG_PROBS]
)
# This will keep discrete_list = None which enables flatten()
if discrete_tensor.shape[1] > 0:
discrete = [
discrete_tensor[..., i] for i in range(discrete_tensor.shape[-1])
]
return ActionLogProbs(continuous, discrete, None)
| ActionLogProbs |
python | openai__openai-python | src/openai/resources/beta/realtime/realtime.py | {
"start": 33914,
"end": 35836
} | class ____(BaseAsyncRealtimeConnectionResource):
async def create(
self,
*,
event_id: str | NotGiven = NOT_GIVEN,
response: response_create_event_param.Response | NotGiven = NOT_GIVEN,
) -> None:
"""
This event instructs the server to create a Response, which means triggering
model inference. When in Server VAD mode, the server will create Responses
automatically.
A Response will include at least one Item, and may have two, in which case
the second will be a function call. These Items will be appended to the
conversation history.
The server will respond with a `response.created` event, events for Items
and content created, and finally a `response.done` event to indicate the
Response is complete.
The `response.create` event includes inference configuration like
`instructions`, and `temperature`. These fields will override the Session's
configuration for this Response only.
"""
await self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "response.create", "event_id": event_id, "response": response}),
)
)
async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | NotGiven = NOT_GIVEN) -> None:
"""Send this event to cancel an in-progress response.
The server will respond
with a `response.done` event with a status of `response.status=cancelled`. If
there is no response to cancel, the server will respond with an error.
"""
await self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}),
)
)
| AsyncRealtimeResponseResource |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-aimon-rerank/llama_index/postprocessor/aimon_rerank/base.py | {
"start": 535,
"end": 4669
} | class ____(BaseNodePostprocessor):
model: str = Field(description="AIMon's reranking model name.")
top_n: int = Field(description="Top N nodes to return.")
task_definition: str = Field(
default="Determine the relevance of context documents with respect to the user query.",
description="The task definition for the AIMon reranker.",
)
_api_key: str = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "retrieval_relevance",
api_key: Optional[str] = None,
task_definition: Optional[str] = None,
):
super().__init__(top_n=top_n, model=model)
self.task_definition = task_definition or (
"Determine the relevance of context documents with respect to the user query."
)
try:
api_key = api_key or os.environ["AIMON_API_KEY"]
except IndexError:
raise ValueError(
"Must pass in AIMon API key or specify via AIMON_API_KEY environment variable"
)
try:
from aimon import Client
except ImportError:
raise ImportError(
"Cannot import AIMon package, please `pip install aimon`."
)
self._client = Client(auth_header=f"Bearer {api_key}")
@classmethod
def class_name(cls) -> str:
return "AIMonRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.model,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
# Extract text content from each node.
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED) for node in nodes
]
# Build batches where the total number of words is <= MAX_WORDS_PER_BATCH.
batches = [] # List of (batch_texts, corresponding indices)
current_batch = [] # List of texts for the current batch.
current_indices = [] # Corresponding indices of texts in the original list.
current_word_count = 0
for i, text in enumerate(texts):
word_count = len(text.split())
if current_word_count + word_count <= MAX_WORDS_PER_BATCH:
current_batch.append(text)
current_indices.append(i)
current_word_count += word_count
else:
batches.append((current_batch, current_indices))
# Start a new batch with the current text.
current_batch = [text]
current_indices = [i]
current_word_count = word_count
if current_batch:
batches.append((current_batch, current_indices))
# Prepare a list to hold scores for each text in the original order.
all_scores = [None] * len(texts)
for batch_num, (batch_texts, indices) in enumerate(batches, start=1):
scores_batch = self._client.retrieval.rerank(
context_docs=batch_texts,
queries=[query_bundle.query_str],
task_definition=self.task_definition,
)
batch_scores = scores_batch[0]
for idx, score in zip(indices, batch_scores):
all_scores[idx] = score
normalized_scores = [score / 100 for score in all_scores]
# Attach scores to nodes.
scored_nodes = [
NodeWithScore(node=nodes[i].node, score=normalized_scores[i])
for i in range(len(nodes))
]
# Sort nodes by score in descending order and keep the top N.
scored_nodes.sort(key=lambda x: x.score, reverse=True)
new_nodes = scored_nodes[: self.top_n]
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
| AIMonRerank |
python | numpy__numpy | numpy/ma/core.py | {
"start": 28106,
"end": 30544
} | class ____(_MaskedUFunc):
"""
Defines masked version of unary operations, where invalid values are
pre-masked.
Parameters
----------
mufunc : callable
The function for which to define a masked version. Made available
as ``_MaskedUnaryOperation.f``.
fill : scalar, optional
Filling value, default is 0.
domain : class instance
Domain for the function. Should be one of the ``_Domain*``
classes. Default is None.
"""
def __init__(self, mufunc, fill=0, domain=None):
super().__init__(mufunc)
self.fill = fill
self.domain = domain
ufunc_domain[mufunc] = domain
ufunc_fills[mufunc] = fill
def __call__(self, a, *args, **kwargs):
"""
Execute the call behavior.
"""
d = getdata(a)
# Deal with domain
if self.domain is not None:
# Case 1.1. : Domained function
# nans at masked positions cause RuntimeWarnings, even though
# they are masked. To avoid this we suppress warnings.
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(d, *args, **kwargs)
# Make a mask
m = ~umath.isfinite(result)
m |= self.domain(d)
m |= getmask(a)
else:
# Case 1.2. : Function without a domain
# Get the result and the mask
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(d, *args, **kwargs)
m = getmask(a)
if not result.ndim:
# Case 2.1. : The result is scalarscalar
if m:
return masked
return result
if m is not nomask:
# Case 2.2. The result is an array
# We need to fill the invalid data back w/ the input Now,
# that's plain silly: in C, we would just skip the element and
# keep the original, but we do have to do it that way in Python
# In case result has a lower dtype than the inputs (as in
# equal)
try:
np.copyto(result, d, where=m)
except TypeError:
pass
# Transform to
masked_result = result.view(get_masked_subclass(a))
masked_result._mask = m
masked_result._update_from(a)
return masked_result
| _MaskedUnaryOperation |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/dataset.py | {
"start": 18827,
"end": 22385
} | class ____(GoogleCloudBaseOperator):
"""
Lists Datasets in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param order_by: A comma-separated list of fields to order by, sorted in ascending order. Use "desc"
after a field name for descending.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("region", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIDatasetListLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.filter = filter
self.page_size = page_size
self.page_token = page_token
self.read_mask = read_mask
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"project_id": self.project_id,
}
def execute(self, context: Context):
hook = DatasetHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
results = hook.list_datasets(
project_id=self.project_id,
region=self.region,
filter=self.filter,
page_size=self.page_size,
page_token=self.page_token,
read_mask=self.read_mask,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIDatasetListLink.persist(context=context)
return [Dataset.to_dict(result) for result in results]
| ListDatasetsOperator |
python | pytorch__pytorch | torch/_dynamo/replay_record.py | {
"start": 1086,
"end": 1267
} | class ____:
name: str
is_torch: bool = False
value: object = None
@property
def __name__(self) -> str:
return self.name
@dataclasses.dataclass
| DummyModule |
python | huggingface__transformers | src/transformers/models/olmoe/modular_olmoe.py | {
"start": 7105,
"end": 10069
} | class ____(MixtralModel):
def __init__(self, config: OlmoeConfig):
super().__init__(config)
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[OlmoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = OlmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = OlmoeRotaryEmbedding(config=config)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask( # diff with mixtral: no sliding
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
| OlmoeModel |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 958485,
"end": 958869
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("SavedReply", graphql_name="node")
"""The item at the end of the edge."""
| SavedReplyEdge |
python | pallets__werkzeug | tests/test_wrappers.py | {
"start": 43916,
"end": 45795
} | class ____:
def test_request(self):
value = {"ä": "b"}
request = wrappers.Request.from_values(json=value)
assert request.json == value
assert request.get_data()
def test_response(self):
value = {"ä": "b"}
response = wrappers.Response(
response=json.dumps(value), content_type="application/json"
)
assert response.json == value
def test_bad_content_type(self):
value = [1, 2, 3]
request = wrappers.Request.from_values(json=value, content_type="text/plain")
with pytest.raises(UnsupportedMediaType):
request.get_json()
assert request.get_json(silent=True) is None
assert request.get_json(force=True) == value
def test_bad_data(self):
request = wrappers.Request.from_values(
data=b'{"a":}', content_type="application/json"
)
assert request.get_json(silent=True) is None
with pytest.raises(BadRequest):
request.get_json()
def test_cache_disabled(self):
value = [1, 2, 3]
request = wrappers.Request.from_values(json=value)
assert request.get_json(cache=False) == [1, 2, 3]
assert not request.get_data()
with pytest.raises(BadRequest):
request.get_json()
def test_response_coop():
response = wrappers.Response("Hello World")
assert response.cross_origin_opener_policy is COOP.UNSAFE_NONE
response.cross_origin_opener_policy = COOP.SAME_ORIGIN
assert response.headers["Cross-Origin-Opener-Policy"] == "same-origin"
def test_response_coep():
response = wrappers.Response("Hello World")
assert response.cross_origin_embedder_policy is COEP.UNSAFE_NONE
response.cross_origin_embedder_policy = COEP.REQUIRE_CORP
assert response.headers["Cross-Origin-Embedder-Policy"] == "require-corp"
| TestJSON |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 15767,
"end": 16520
} | class ____(IntegrationBase, unittest.TestCase):
# test that forbidden exception has ACLDenied result attached
package = 'tests.pkgs.forbiddenapp'
def test_it(self):
res = self.testapp.get('/x', status=403)
message, result = (x.strip() for x in res.body.split(b'\n'))
self.assertTrue(message.endswith(b'failed permission check'))
self.assertTrue(
result.startswith(
b"ACLDenied permission 'private' via ACE "
b"'<default deny>' in ACL "
b"'<No ACL found on any object in resource "
b"lineage>' on context"
)
)
self.assertTrue(result.endswith(b"for principals ['system.Everyone']"))
| TestForbiddenAppHasResult |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_clsregistry.py | {
"start": 625,
"end": 902
} | class ____:
def __init__(self, base, name):
self._sa_class_manager = mock.Mock(registry=base)
tokens = name.split(".")
self.__module__ = ".".join(tokens[0:-1])
self.name = self.__name__ = tokens[-1]
self.metadata = MetaData()
| MockClass |
python | openai__openai-python | src/openai/types/realtime/realtime_conversation_item_user_message_param.py | {
"start": 279,
"end": 1291
} | class ____(TypedDict, total=False):
audio: str
"""
Base64-encoded audio bytes (for `input_audio`), these will be parsed as the
format specified in the session input audio type configuration. This defaults to
PCM 16-bit 24kHz mono if not specified.
"""
detail: Literal["auto", "low", "high"]
"""The detail level of the image (for `input_image`).
`auto` will default to `high`.
"""
image_url: str
"""Base64-encoded image bytes (for `input_image`) as a data URI.
For example `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...`. Supported
formats are PNG and JPEG.
"""
text: str
"""The text content (for `input_text`)."""
transcript: str
"""Transcript of the audio (for `input_audio`).
This is not sent to the model, but will be attached to the message item for
reference.
"""
type: Literal["input_text", "input_audio", "input_image"]
"""The content type (`input_text`, `input_audio`, or `input_image`)."""
| Content |
python | Textualize__textual | examples/merlin.py | {
"start": 1505,
"end": 2283
} | class ____(Digits):
"""Displays a timer that stops when you win."""
DEFAULT_CSS = """
Timer {
text-align: center;
width: auto;
margin: 2 8;
color: $warning;
&:light {
color: $secondary;
}
}
"""
start_time = var(0.0)
running = var(True)
def on_mount(self) -> None:
"""Start the timer on mount."""
self.start_time = monotonic()
self.set_interval(1, self.tick)
self.tick()
def tick(self) -> None:
"""Called from `set_interval` to update the clock."""
if self.start_time == 0 or not self.running:
return
time_elapsed = timedelta(seconds=int(monotonic() - self.start_time))
self.update(str(time_elapsed))
| Timer |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/modules/main.py | {
"start": 869,
"end": 1136
} | class ____(webapp2.RequestHandler):
def get(self):
module = modules.get_current_module_name()
instance_id = modules.get_current_instance_id()
self.response.write("module_id={}&instance_id={}".format(module, instance_id))
| GetModuleInfoHandler |
python | kamyu104__LeetCode-Solutions | Python/maximum-frequency-stack.py | {
"start": 50,
"end": 823
} | class ____(object):
def __init__(self):
self.__freq = collections.Counter()
self.__group = collections.defaultdict(list)
self.__maxfreq = 0
def push(self, x):
"""
:type x: int
:rtype: void
"""
self.__freq[x] += 1
if self.__freq[x] > self.__maxfreq:
self.__maxfreq = self.__freq[x]
self.__group[self.__freq[x]].append(x)
def pop(self):
"""
:rtype: int
"""
x = self.__group[self.__maxfreq].pop()
if not self.__group[self.__maxfreq]:
self.__group.pop(self.__maxfreq)
self.__maxfreq -= 1
self.__freq[x] -= 1
if not self.__freq[x]:
self.__freq.pop(x)
return x
| FreqStack |
python | boto__boto3 | tests/functional/test_s3.py | {
"start": 1904,
"end": 4546
} | class ____(unittest.TestCase):
def setUp(self):
self.session = boto3.session.Session(
aws_access_key_id='foo',
aws_secret_access_key='bar',
region_name='us-west-2',
)
self.s3 = self.session.resource('s3')
self.stubber = Stubber(self.s3.meta.client)
self.bucket = 'mybucket'
self.key = 'mykey'
self.upload_id = 'uploadid'
self.etag = '"example0etag"'
self.progress = 0
self.progress_times_called = 0
def stub_head(self, content_length=4, expected_params=None):
head_response = {
'AcceptRanges': 'bytes',
'ContentLength': content_length,
'ContentType': 'binary/octet-stream',
'ETag': self.etag,
'Metadata': {},
'ResponseMetadata': {
'HTTPStatusCode': 200,
},
}
if expected_params is None:
expected_params = {'Bucket': self.bucket, 'Key': self.key}
self.stubber.add_response(
method='head_object',
service_response=head_response,
expected_params=expected_params,
)
def stub_create_multipart_upload(
self,
extra_expected_params=None,
):
# Add the response and assert params for CreateMultipartUpload
create_upload_response = {
"Bucket": self.bucket,
"Key": self.key,
"UploadId": self.upload_id,
}
expected_params = {
"Bucket": self.bucket,
"Key": self.key,
}
if extra_expected_params:
expected_params.update(extra_expected_params)
self.stubber.add_response(
method='create_multipart_upload',
service_response=create_upload_response,
expected_params=expected_params,
)
def stub_complete_multipart_upload(
self, parts, extra_expected_params=None
):
complete_upload_response = {
"Location": "us-west-2",
"Bucket": self.bucket,
"Key": self.key,
"ETag": self.etag,
}
expected_params = {
"Bucket": self.bucket,
"Key": self.key,
"MultipartUpload": {"Parts": parts},
"UploadId": self.upload_id,
}
if extra_expected_params:
expected_params.update(extra_expected_params)
self.stubber.add_response(
method='complete_multipart_upload',
service_response=complete_upload_response,
expected_params=expected_params,
)
| BaseTransferTest |
python | kamyu104__LeetCode-Solutions | Python/count-covered-buildings.py | {
"start": 37,
"end": 624
} | class ____(object):
def countCoveredBuildings(self, n, buildings):
"""
:type n: int
:type buildings: List[List[int]]
:rtype: int
"""
left = [n]*n
right = [-1]*n
up = [-1]*n
down = [n]*n
for x, y in buildings:
x -= 1
y -= 1
left[y] = min(left[y], x)
right[y] = max(right[y], x)
up[x] = max(up[x], y)
down[x] = min(down[x], y)
return sum(left[y-1] < x-1 < right[y-1] and down[x-1] < y-1 < up[x-1] for x, y in buildings)
| Solution |
python | sympy__sympy | sympy/matrices/expressions/special.py | {
"start": 4128,
"end": 5175
} | class ____(Identity):
"""
An identity matrix without a specified shape
This exists primarily so MatMul() with no arguments can return something
meaningful.
"""
def __new__(cls):
# super(Identity, cls) instead of super(GenericIdentity, cls) because
# Identity.__new__ doesn't have the same signature
return super(Identity, cls).__new__(cls)
@property
def rows(self):
raise TypeError("GenericIdentity does not have a specified shape")
@property
def cols(self):
raise TypeError("GenericIdentity does not have a specified shape")
@property
def shape(self):
raise TypeError("GenericIdentity does not have a specified shape")
@property
def is_square(self):
return True
# Avoid Matrix.__eq__ which might call .shape
def __eq__(self, other):
return isinstance(other, GenericIdentity)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return super().__hash__()
| GenericIdentity |
python | huggingface__transformers | src/transformers/models/tapas/modeling_tapas.py | {
"start": 22656,
"end": 29590
} | class ____(TapasPreTrainedModel):
"""
This class is a small change compared to [`BertModel`], taking into account the additional token type ids.
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
"""
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = TapasEmbeddings(config)
self.encoder = TapasEncoder(config)
self.pooler = TapasPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, 7)`, *optional*):
Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
class for more info.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. If
`reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
used. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
Examples:
```python
>>> from transformers import AutoTokenizer, TapasModel
>>> import pandas as pd
>>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
>>> model = TapasModel.from_pretrained("google/tapas-base")
>>> data = {
... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
... "Age": ["56", "45", "59"],
... "Number of movies": ["87", "53", "69"],
... }
>>> table = pd.DataFrame.from_dict(data)
>>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]
>>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(
(*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring
| TapasModel |
python | pytorch__pytorch | test/inductor/test_inductor_freezing.py | {
"start": 2106,
"end": 2470
} | class ____(torch.nn.Module):
def __init__(self, in_channels, out_channels, bias=False, **kwargs):
super().__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
self.bn = torch.nn.BatchNorm2d(out_channels, eps=0.001, dtype=torch.float)
def forward(self, x):
return self.bn(self.conv(x))
| ConvBN |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 92620,
"end": 92835
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuCount', c_uint),
('vgpuTypeIds', POINTER(c_uint)),
]
nvmlVgpuTypeIdInfo_v1 = 0x1000010
| c_nvmlVgpuTypeIdInfo_v1_t |
python | doocs__leetcode | lcci/16.04.Tic-Tac-Toe/Solution.py | {
"start": 0,
"end": 859
} | class ____:
def tictactoe(self, board: List[str]) -> str:
n = len(board)
rows = [0] * n
cols = [0] * n
dg = udg = 0
has_empty_grid = False
for i, row in enumerate(board):
for j, c in enumerate(row):
v = 1 if c == 'X' else -1
if c == ' ':
has_empty_grid = True
v = 0
rows[i] += v
cols[j] += v
if i == j:
dg += v
if i + j + 1 == n:
udg += v
if (
abs(rows[i]) == n
or abs(cols[j]) == n
or abs(dg) == n
or abs(udg) == n
):
return c
return 'Pending' if has_empty_grid else 'Draw'
| Solution |
python | tartley__colorama | colorama/ansi.py | {
"start": 2321,
"end": 2506
} | class ____(AnsiCodes):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor()
| AnsiStyle |
python | getsentry__sentry | src/sentry/identity/providers/dummy.py | {
"start": 759,
"end": 1232
} | class ____(Provider):
name = "Dummy"
key = "dummy"
TEMPLATE = '<form method="POST"><input type="email" name="email" /></form>'
def get_pipeline_views(self) -> list[PipelineView[IdentityPipeline]]:
return [AskEmail()]
def build_identity(self, state):
return {"id": state["email"], "email": state["email"], "name": "Dummy"}
def refresh_identity(self, identity: Identity | RpcIdentity, **kwargs: Any) -> None:
pass
| DummyProvider |
python | tornadoweb__tornado | maint/test/cython/cythonapp_test.py | {
"start": 450,
"end": 1196
} | class ____(unittest.TestCase):
def test_arg_replacer_function(self):
replacer = ArgReplacer(cythonapp.function_with_args, 'two')
args = (1, 'old', 3)
kwargs = {}
self.assertEqual(replacer.get_old_value(args, kwargs), 'old')
self.assertEqual(replacer.replace('new', args, kwargs),
('old', [1, 'new', 3], {}))
def test_arg_replacer_method(self):
replacer = ArgReplacer(cythonapp.AClass().method_with_args, 'two')
args = (1, 'old', 3)
kwargs = {}
self.assertEqual(replacer.get_old_value(args, kwargs), 'old')
self.assertEqual(replacer.replace('new', args, kwargs),
('old', [1, 'new', 3], {}))
| CythonArgReplacerTest |
python | pytorch__pytorch | functorch/examples/maml_regression/evjang_transforms_module.py | {
"start": 391,
"end": 3411
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
# TODO: Use F.mse_loss
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = make_functional(ThreeLayerNet())
opt = torch.optim.Adam(params, lr=1e-3)
alpha = 0.1
K = 20
losses = []
num_tasks = 4
def sample_tasks(outer_batch_size, inner_batch_size):
# Select amplitude and phase for the task
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=0.5))
phases.append(np.random.uniform(low=0.0, high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(low=-5.0, high=5.0, size=(inner_batch_size, 1))
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float), torch.tensor(ys, dtype=torch.float)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
for it in range(20000):
loss2 = 0.0
opt.zero_grad()
def get_loss_for_task(x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
grads = grad(inner_loss)(params, x1, y1)
new_params = [(params[i] - alpha * grads[i]) for i in range(len(params))]
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
inner_losses = vmap(get_loss_for_task)(task[0], task[1], task[2], task[3])
loss2 = sum(inner_losses) / len(inner_losses)
loss2.backward()
opt.step()
if it % 100 == 0:
print(f"Iteration {it:d} -- Outer Loss: {loss2:.4f}")
losses.append(loss2.detach())
t_A = torch.tensor(0.0).uniform_(0.1, 0.5)
t_b = torch.tensor(0.0).uniform_(0.0, math.pi)
t_x = torch.empty(4, 1).uniform_(-5, 5)
t_y = t_A * torch.sin(t_x + t_b)
opt.zero_grad()
t_params = params
for k in range(5):
t_f = net(t_params, t_x)
t_loss = F.l1_loss(t_f, t_y)
grads = torch.autograd.grad(t_loss, t_params, create_graph=True)
t_params = [(t_params[i] - alpha * grads[i]) for i in range(len(params))]
test_x = torch.arange(-2 * math.pi, 2 * math.pi, step=0.01).unsqueeze(1)
test_y = t_A * torch.sin(test_x + t_b)
test_f = net(t_params, test_x)
plt.plot(test_x.data.numpy(), test_y.data.numpy(), label="sin(x)")
plt.plot(test_x.data.numpy(), test_f.data.numpy(), label="net(x)")
plt.plot(t_x.data.numpy(), t_y.data.numpy(), "o", label="Examples")
plt.legend()
plt.savefig("maml-sine.png")
plt.figure()
plt.plot(np.convolve(losses, [0.05] * 20))
plt.savefig("losses.png")
| ThreeLayerNet |
python | sqlalchemy__sqlalchemy | test/orm/test_core_compilation.py | {
"start": 112192,
"end": 112854
} | class ____(test_compiler.CrudParamOverlapTest):
@testing.fixture(
params=Variation.generate_cases("type_", ["orm"]),
ids=["orm"],
)
def crud_table_fixture(self, request):
type_ = request.param
if type_.orm:
from sqlalchemy.orm import declarative_base
Base = declarative_base()
class Foo(Base):
__tablename__ = "mytable"
myid = Column(Integer, primary_key=True)
name = Column(String)
description = Column(String)
table1 = Foo
else:
type_.fail()
yield table1
| CrudParamOverlapTest |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py | {
"start": 14162,
"end": 15583
} | class ____(Benchmark):
r"""
Dixon and Price objective function.
This class defines the Dixon and Price global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{DixonPrice}}(x) = (x_i - 1)^2
+ \sum_{i=2}^n i(2x_i^2 - x_{i-1})^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x_i) = 0` for
:math:`x_i = 2^{- \frac{(2^i - 2)}{2^i}}` for :math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: Gavana code not correct. i array should start from 2.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(-2, 3), (-2, 3)]
self.global_optimum = [[2.0 ** (-(2.0 ** i - 2.0) / 2.0 ** i)
for i in range(1, self.N + 1)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
i = arange(2, self.N + 1)
s = i * (2.0 * x[1:] ** 2.0 - x[:-1]) ** 2.0
return sum(s) + (x[0] - 1.0) ** 2.0
| DixonPrice |
python | urllib3__urllib3 | src/urllib3/contrib/emscripten/fetch.py | {
"start": 10527,
"end": 23520
} | class ____(io.RawIOBase):
"""
A read stream that uses pyodide.ffi.run_sync to read from a JavaScript fetch
response. This requires support for WebAssembly JavaScript Promise Integration
in the containing browser, and for pyodide to be launched via runPythonAsync.
:param js_read_stream:
The JavaScript stream reader
:param timeout:
Timeout in seconds
:param request:
The request we're handling
:param response:
The response this stream relates to
:param js_abort_controller:
A JavaScript AbortController object, used for timeouts
"""
def __init__(
self,
js_read_stream: Any,
timeout: float,
request: EmscriptenRequest,
response: EmscriptenResponse,
js_abort_controller: Any, # JavaScript AbortController for timeouts
):
self.js_read_stream = js_read_stream
self.timeout = timeout
self._is_closed = False
self._is_done = False
self.request: EmscriptenRequest | None = request
self.response: EmscriptenResponse | None = response
self.current_buffer = None
self.current_buffer_pos = 0
self.js_abort_controller = js_abort_controller
def __del__(self) -> None:
self.close()
# this is compatible with _base_connection
def is_closed(self) -> bool:
return self._is_closed
# for compatibility with RawIOBase
@property
def closed(self) -> bool:
return self.is_closed()
def close(self) -> None:
if self.is_closed():
return
self.read_len = 0
self.read_pos = 0
self.js_read_stream.cancel()
self.js_read_stream = None
self._is_closed = True
self._is_done = True
self.request = None
self.response = None
super().close()
def readable(self) -> bool:
return True
def writable(self) -> bool:
return False
def seekable(self) -> bool:
return False
def _get_next_buffer(self) -> bool:
result_js = _run_sync_with_timeout(
self.js_read_stream.read(),
self.timeout,
self.js_abort_controller,
request=self.request,
response=self.response,
)
if result_js.done:
self._is_done = True
return False
else:
self.current_buffer = result_js.value.to_py()
self.current_buffer_pos = 0
return True
def readinto(self, byte_obj: Buffer) -> int:
if self.current_buffer is None:
if not self._get_next_buffer() or self.current_buffer is None:
self.close()
return 0
ret_length = min(
len(byte_obj), len(self.current_buffer) - self.current_buffer_pos
)
byte_obj[0:ret_length] = self.current_buffer[
self.current_buffer_pos : self.current_buffer_pos + ret_length
]
self.current_buffer_pos += ret_length
if self.current_buffer_pos == len(self.current_buffer):
self.current_buffer = None
return ret_length
# check if we are in a worker or not
def is_in_browser_main_thread() -> bool:
return hasattr(js, "window") and hasattr(js, "self") and js.self == js.window
def is_cross_origin_isolated() -> bool:
return hasattr(js, "crossOriginIsolated") and js.crossOriginIsolated
def is_in_node() -> bool:
return (
hasattr(js, "process")
and hasattr(js.process, "release")
and hasattr(js.process.release, "name")
and js.process.release.name == "node"
)
def is_worker_available() -> bool:
return hasattr(js, "Worker") and hasattr(js, "Blob")
_fetcher: _StreamingFetcher | None = None
if is_worker_available() and (
(is_cross_origin_isolated() and not is_in_browser_main_thread())
and (not is_in_node())
):
_fetcher = _StreamingFetcher()
else:
_fetcher = None
NODE_JSPI_ERROR = (
"urllib3 only works in Node.js with pyodide.runPythonAsync"
" and requires the flag --experimental-wasm-stack-switching in "
" versions of node <24."
)
def send_streaming_request(request: EmscriptenRequest) -> EmscriptenResponse | None:
if has_jspi():
return send_jspi_request(request, True)
elif is_in_node():
raise _RequestError(
message=NODE_JSPI_ERROR,
request=request,
response=None,
)
if _fetcher and streaming_ready():
return _fetcher.send(request)
else:
_show_streaming_warning()
return None
_SHOWN_TIMEOUT_WARNING = False
def _show_timeout_warning() -> None:
global _SHOWN_TIMEOUT_WARNING
if not _SHOWN_TIMEOUT_WARNING:
_SHOWN_TIMEOUT_WARNING = True
message = "Warning: Timeout is not available on main browser thread"
js.console.warn(message)
_SHOWN_STREAMING_WARNING = False
def _show_streaming_warning() -> None:
global _SHOWN_STREAMING_WARNING
if not _SHOWN_STREAMING_WARNING:
_SHOWN_STREAMING_WARNING = True
message = "Can't stream HTTP requests because: \n"
if not is_cross_origin_isolated():
message += " Page is not cross-origin isolated\n"
if is_in_browser_main_thread():
message += " Python is running in main browser thread\n"
if not is_worker_available():
message += " Worker or Blob classes are not available in this environment." # Defensive: this is always False in browsers that we test in
if streaming_ready() is False:
message += """ Streaming fetch worker isn't ready. If you want to be sure that streaming fetch
is working, you need to call: 'await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready()`"""
from js import console
console.warn(message)
def send_request(request: EmscriptenRequest) -> EmscriptenResponse:
if has_jspi():
return send_jspi_request(request, False)
elif is_in_node():
raise _RequestError(
message=NODE_JSPI_ERROR,
request=request,
response=None,
)
try:
js_xhr = js.XMLHttpRequest.new()
if not is_in_browser_main_thread():
js_xhr.responseType = "arraybuffer"
if request.timeout:
js_xhr.timeout = int(request.timeout * 1000)
else:
js_xhr.overrideMimeType("text/plain; charset=ISO-8859-15")
if request.timeout:
# timeout isn't available on the main thread - show a warning in console
# if it is set
_show_timeout_warning()
js_xhr.open(request.method, request.url, False)
for name, value in request.headers.items():
if name.lower() not in HEADERS_TO_IGNORE:
js_xhr.setRequestHeader(name, value)
js_xhr.send(to_js(request.body))
headers = dict(Parser().parsestr(js_xhr.getAllResponseHeaders()))
if not is_in_browser_main_thread():
body = js_xhr.response.to_py().tobytes()
else:
body = js_xhr.response.encode("ISO-8859-15")
return EmscriptenResponse(
status_code=js_xhr.status, headers=headers, body=body, request=request
)
except JsException as err:
if err.name == "TimeoutError":
raise _TimeoutError(err.message, request=request)
elif err.name == "NetworkError":
raise _RequestError(err.message, request=request)
else:
# general http error
raise _RequestError(err.message, request=request)
def send_jspi_request(
request: EmscriptenRequest, streaming: bool
) -> EmscriptenResponse:
"""
Send a request using WebAssembly JavaScript Promise Integration
to wrap the asynchronous JavaScript fetch api (experimental).
:param request:
Request to send
:param streaming:
Whether to stream the response
:return: The response object
:rtype: EmscriptenResponse
"""
timeout = request.timeout
js_abort_controller = js.AbortController.new()
headers = {k: v for k, v in request.headers.items() if k not in HEADERS_TO_IGNORE}
req_body = request.body
fetch_data = {
"headers": headers,
"body": to_js(req_body),
"method": request.method,
"signal": js_abort_controller.signal,
}
# Node.js returns the whole response (unlike opaqueredirect in browsers),
# so urllib3 can set `redirect: manual` to control redirects itself.
# https://stackoverflow.com/a/78524615
if _is_node_js():
fetch_data["redirect"] = "manual"
# Call JavaScript fetch (async api, returns a promise)
fetcher_promise_js = js.fetch(request.url, _obj_from_dict(fetch_data))
# Now suspend WebAssembly until we resolve that promise
# or time out.
response_js = _run_sync_with_timeout(
fetcher_promise_js,
timeout,
js_abort_controller,
request=request,
response=None,
)
headers = {}
header_iter = response_js.headers.entries()
while True:
iter_value_js = header_iter.next()
if getattr(iter_value_js, "done", False):
break
else:
headers[str(iter_value_js.value[0])] = str(iter_value_js.value[1])
status_code = response_js.status
body: bytes | io.RawIOBase = b""
response = EmscriptenResponse(
status_code=status_code, headers=headers, body=b"", request=request
)
if streaming:
# get via inputstream
if response_js.body is not None:
# get a reader from the fetch response
body_stream_js = response_js.body.getReader()
body = _JSPIReadStream(
body_stream_js, timeout, request, response, js_abort_controller
)
else:
# get directly via arraybuffer
# n.b. this is another async JavaScript call.
body = _run_sync_with_timeout(
response_js.arrayBuffer(),
timeout,
js_abort_controller,
request=request,
response=response,
).to_py()
response.body = body
return response
def _run_sync_with_timeout(
promise: Any,
timeout: float,
js_abort_controller: Any,
request: EmscriptenRequest | None,
response: EmscriptenResponse | None,
) -> Any:
"""
Await a JavaScript promise synchronously with a timeout which is implemented
via the AbortController
:param promise:
Javascript promise to await
:param timeout:
Timeout in seconds
:param js_abort_controller:
A JavaScript AbortController object, used on timeout
:param request:
The request being handled
:param response:
The response being handled (if it exists yet)
:raises _TimeoutError: If the request times out
:raises _RequestError: If the request raises a JavaScript exception
:return: The result of awaiting the promise.
"""
timer_id = None
if timeout > 0:
timer_id = js.setTimeout(
js_abort_controller.abort.bind(js_abort_controller), int(timeout * 1000)
)
try:
from pyodide.ffi import run_sync
# run_sync here uses WebAssembly JavaScript Promise Integration to
# suspend python until the JavaScript promise resolves.
return run_sync(promise)
except JsException as err:
if err.name == "AbortError":
raise _TimeoutError(
message="Request timed out", request=request, response=response
)
else:
raise _RequestError(message=err.message, request=request, response=response)
finally:
if timer_id is not None:
js.clearTimeout(timer_id)
def has_jspi() -> bool:
"""
Return true if jspi can be used.
This requires both browser support and also WebAssembly
to be in the correct state - i.e. that the javascript
call into python was async not sync.
:return: True if jspi can be used.
:rtype: bool
"""
try:
from pyodide.ffi import can_run_sync, run_sync # noqa: F401
return bool(can_run_sync())
except ImportError:
return False
def _is_node_js() -> bool:
"""
Check if we are in Node.js.
:return: True if we are in Node.js.
:rtype: bool
"""
return (
hasattr(js, "process")
and hasattr(js.process, "release")
# According to the Node.js documentation, the release name is always "node".
and js.process.release.name == "node"
)
def streaming_ready() -> bool | None:
if _fetcher:
return _fetcher.streaming_ready
else:
return None # no fetcher, return None to signify that
async def wait_for_streaming_ready() -> bool:
if _fetcher:
await _fetcher.js_worker_ready_promise
return True
else:
return False
| _JSPIReadStream |
python | pandas-dev__pandas | pandas/io/stata.py | {
"start": 33684,
"end": 85536
} | class ____(StataParser, abc.Iterator):
__doc__ = _stata_reader_doc
_path_or_buf: IO[bytes] # Will be assigned by `_open_file`.
def __init__(
self,
path_or_buf: FilePath | ReadBuffer[bytes],
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: str | None = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Sequence[str] | None = None,
order_categoricals: bool = True,
chunksize: int | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions | None = None,
) -> None:
super().__init__()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._original_path_or_buf = path_or_buf
self._compression = compression
self._storage_options = storage_options
self._encoding = ""
self._chunksize = chunksize
self._using_iterator = False
self._entered = False
if self._chunksize is None:
self._chunksize = 1
elif not isinstance(chunksize, int) or chunksize <= 0:
raise ValueError("chunksize must be a positive integer when set.")
# State variables for the file
self._close_file: Callable[[], None] | None = None
self._column_selector_set = False
self._value_label_dict: dict[str, dict[int, str]] = {}
self._value_labels_read = False
self._dtype: np.dtype | None = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
def _ensure_open(self) -> None:
"""
Ensure the file has been opened and its header data read.
"""
if not hasattr(self, "_path_or_buf"):
self._open_file()
def _open_file(self) -> None:
"""
Open the file (with compression options, etc.), and read header information.
"""
if not self._entered:
warnings.warn(
"StataReader is being used without using a context manager. "
"Using StataReader as a context manager is the only supported method.",
ResourceWarning,
stacklevel=find_stack_level(),
)
handles = get_handle(
self._original_path_or_buf,
"rb",
storage_options=self._storage_options,
is_text=False,
compression=self._compression,
)
if hasattr(handles.handle, "seekable") and handles.handle.seekable():
# If the handle is directly seekable, use it without an extra copy.
self._path_or_buf = handles.handle
self._close_file = handles.close
else:
# Copy to memory, and ensure no encoding.
with handles:
self._path_or_buf = BytesIO(handles.handle.read())
self._close_file = self._path_or_buf.close
self._read_header()
self._setup_dtype()
def __enter__(self) -> Self:
"""enter context manager"""
self._entered = True
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
if self._close_file:
self._close_file()
def _set_encoding(self) -> None:
"""
Set string encoding which depends on file version
"""
if self._format_version < 118:
self._encoding = "latin-1"
else:
self._encoding = "utf-8"
def _read_int8(self) -> int:
return struct.unpack("b", self._path_or_buf.read(1))[0]
def _read_uint8(self) -> int:
return struct.unpack("B", self._path_or_buf.read(1))[0]
def _read_uint16(self) -> int:
return struct.unpack(f"{self._byteorder}H", self._path_or_buf.read(2))[0]
def _read_uint32(self) -> int:
return struct.unpack(f"{self._byteorder}I", self._path_or_buf.read(4))[0]
def _read_uint64(self) -> int:
return struct.unpack(f"{self._byteorder}Q", self._path_or_buf.read(8))[0]
def _read_int16(self) -> int:
return struct.unpack(f"{self._byteorder}h", self._path_or_buf.read(2))[0]
def _read_int32(self) -> int:
return struct.unpack(f"{self._byteorder}i", self._path_or_buf.read(4))[0]
def _read_int64(self) -> int:
return struct.unpack(f"{self._byteorder}q", self._path_or_buf.read(8))[0]
def _read_char8(self) -> bytes:
return struct.unpack("c", self._path_or_buf.read(1))[0]
def _read_int16_count(self, count: int) -> tuple[int, ...]:
return struct.unpack(
f"{self._byteorder}{'h' * count}",
self._path_or_buf.read(2 * count),
)
def _read_header(self) -> None:
first_char = self._read_char8()
if first_char == b"<":
self._read_new_header()
else:
self._read_old_header(first_char)
def _read_new_header(self) -> None:
# The first part of the header is common to 117 - 119.
self._path_or_buf.read(27) # stata_dta><header><release>
self._format_version = int(self._path_or_buf.read(3))
if self._format_version not in [117, 118, 119]:
raise ValueError(_version_error.format(version=self._format_version))
self._set_encoding()
self._path_or_buf.read(21) # </release><byteorder>
self._byteorder = ">" if self._path_or_buf.read(3) == b"MSF" else "<"
self._path_or_buf.read(15) # </byteorder><K>
self._nvar = (
self._read_uint16() if self._format_version <= 118 else self._read_uint32()
)
self._path_or_buf.read(7) # </K><N>
self._nobs = self._get_nobs()
self._path_or_buf.read(11) # </N><label>
self._data_label = self._get_data_label()
self._path_or_buf.read(19) # </label><timestamp>
self._time_stamp = self._get_time_stamp()
self._path_or_buf.read(26) # </timestamp></header><map>
self._path_or_buf.read(8) # 0x0000000000000000
self._path_or_buf.read(8) # position of <map>
self._seek_vartypes = self._read_int64() + 16
self._seek_varnames = self._read_int64() + 10
self._seek_sortlist = self._read_int64() + 10
self._seek_formats = self._read_int64() + 9
self._seek_value_label_names = self._read_int64() + 19
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self._path_or_buf.read(8) # <characteristics>
self._data_location = self._read_int64() + 6
self._seek_strls = self._read_int64() + 7
self._seek_value_labels = self._read_int64() + 14
self._typlist, self._dtyplist = self._get_dtypes(self._seek_vartypes)
self._path_or_buf.seek(self._seek_varnames)
self._varlist = self._get_varlist()
self._path_or_buf.seek(self._seek_sortlist)
self._srtlist = self._read_int16_count(self._nvar + 1)[:-1]
self._path_or_buf.seek(self._seek_formats)
self._fmtlist = self._get_fmtlist()
self._path_or_buf.seek(self._seek_value_label_names)
self._lbllist = self._get_lbllist()
self._path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-119.
def _get_dtypes(
self, seek_vartypes: int
) -> tuple[list[int | str], list[str | np.dtype]]:
self._path_or_buf.seek(seek_vartypes)
typlist = []
dtyplist = []
for _ in range(self._nvar):
typ = self._read_uint16()
if typ <= 2045:
typlist.append(typ)
dtyplist.append(str(typ))
else:
try:
typlist.append(self.TYPE_MAP_XML[typ]) # type: ignore[arg-type]
dtyplist.append(self.DTYPE_MAP_XML[typ]) # type: ignore[arg-type]
except KeyError as err:
raise ValueError(f"cannot convert stata types [{typ}]") from err
return typlist, dtyplist # type: ignore[return-value]
def _get_varlist(self) -> list[str]:
# 33 in order formats, 129 in formats 118 and 119
b = 33 if self._format_version < 118 else 129
return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)]
# Returns the format list
def _get_fmtlist(self) -> list[str]:
if self._format_version >= 118:
b = 57
elif self._format_version > 113:
b = 49
elif self._format_version > 104:
b = 12
else:
b = 7
return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)]
# Returns the label list
def _get_lbllist(self) -> list[str]:
if self._format_version >= 118:
b = 129
elif self._format_version > 108:
b = 33
else:
b = 9
return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)]
def _get_variable_labels(self) -> list[str]:
if self._format_version >= 118:
vlblist = [
self._decode(self._path_or_buf.read(321)) for _ in range(self._nvar)
]
elif self._format_version > 105:
vlblist = [
self._decode(self._path_or_buf.read(81)) for _ in range(self._nvar)
]
else:
vlblist = [
self._decode(self._path_or_buf.read(32)) for _ in range(self._nvar)
]
return vlblist
def _get_nobs(self) -> int:
if self._format_version >= 118:
return self._read_uint64()
elif self._format_version >= 103:
return self._read_uint32()
else:
return self._read_uint16()
def _get_data_label(self) -> str:
if self._format_version >= 118:
strlen = self._read_uint16()
return self._decode(self._path_or_buf.read(strlen))
elif self._format_version == 117:
strlen = self._read_int8()
return self._decode(self._path_or_buf.read(strlen))
elif self._format_version > 105:
return self._decode(self._path_or_buf.read(81))
else:
return self._decode(self._path_or_buf.read(32))
def _get_time_stamp(self) -> str:
if self._format_version >= 118:
strlen = self._read_int8()
return self._path_or_buf.read(strlen).decode("utf-8")
elif self._format_version == 117:
strlen = self._read_int8()
return self._decode(self._path_or_buf.read(strlen))
elif self._format_version > 104:
return self._decode(self._path_or_buf.read(18))
else:
raise ValueError
def _get_seek_variable_labels(self) -> int:
if self._format_version == 117:
self._path_or_buf.read(8) # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self._nvar) + 20 + 17
elif self._format_version >= 118:
return self._read_int64() + 17
else:
raise ValueError
def _read_old_header(self, first_char: bytes) -> None:
    """Parse the header of a pre-117 ("old" style, non-XML) dta file.

    Populates format version, byte order, variable count, observation
    count, data label, time stamp, per-variable type/name/format/label
    metadata, skips the expansion fields, and records the file offset
    where the data section starts.

    Parameters
    ----------
    first_char : bytes
        The first byte of the file (already consumed by the caller); it
        encodes the dta format version.

    Raises
    ------
    ValueError
        If the version byte is not a supported old-format release, or if
        the declared variable types cannot be mapped.
    """
    self._format_version = int(first_char[0])
    if self._format_version not in [
        102,
        103,
        104,
        105,
        108,
        110,
        111,
        113,
        114,
        115,
    ]:
        raise ValueError(_version_error.format(version=self._format_version))
    self._set_encoding()
    # Note 102 format will have a zero in this header position, so support
    # relies on little-endian being set whenever this value isn't one,
    # even though for later releases strictly speaking the value should
    # be either one or two to be valid
    self._byteorder = ">" if self._read_int8() == 0x1 else "<"
    self._filetype = self._read_int8()
    self._path_or_buf.read(1)  # unused
    self._nvar = self._read_uint16()
    self._nobs = self._get_nobs()
    self._data_label = self._get_data_label()
    # Time stamps only exist from format 105 onward
    if self._format_version >= 105:
        self._time_stamp = self._get_time_stamp()
    # descriptors: one storage-type entry per variable
    if self._format_version >= 111:
        typlist = [int(c) for c in self._path_or_buf.read(self._nvar)]
    else:
        # Older files use character codes; translate via OLD_TYPE_MAPPING,
        # anything else is a fixed-width string of length (code - 127)
        buf = self._path_or_buf.read(self._nvar)
        typlistb = np.frombuffer(buf, dtype=np.uint8)
        typlist = []
        for tp in typlistb:
            if tp in self.OLD_TYPE_MAPPING:
                typlist.append(self.OLD_TYPE_MAPPING[tp])
            else:
                typlist.append(tp - 127)  # bytes
    # NOTE(review): assumes a failed lookup surfaces as ValueError;
    # TYPE_MAP/DTYPE_MAP are defined elsewhere in the class -- confirm
    try:
        self._typlist = [self.TYPE_MAP[typ] for typ in typlist]
    except ValueError as err:
        invalid_types = ",".join([str(x) for x in typlist])
        raise ValueError(f"cannot convert stata types [{invalid_types}]") from err
    try:
        self._dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
    except ValueError as err:
        invalid_dtypes = ",".join([str(x) for x in typlist])
        raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") from err
    # Variable names: 33-byte fields when version > 108, 9 bytes before
    if self._format_version > 108:
        self._varlist = [
            self._decode(self._path_or_buf.read(33)) for _ in range(self._nvar)
        ]
    else:
        self._varlist = [
            self._decode(self._path_or_buf.read(9)) for _ in range(self._nvar)
        ]
    # Sort order list; the trailing sentinel entry is dropped
    self._srtlist = self._read_int16_count(self._nvar + 1)[:-1]
    self._fmtlist = self._get_fmtlist()
    self._lbllist = self._get_lbllist()
    self._variable_labels = self._get_variable_labels()
    # ignore expansion fields (Format 105 and later)
    # When reading, read five bytes; the last four bytes now tell you
    # the size of the next read, which you discard. You then continue
    # like this until you read 5 bytes of zeros.
    if self._format_version > 104:
        while True:
            data_type = self._read_int8()
            if self._format_version > 108:
                data_len = self._read_int32()
            else:
                data_len = self._read_int16()
            if data_type == 0:
                break
            self._path_or_buf.read(data_len)
    # necessary data to continue parsing
    self._data_location = self._path_or_buf.tell()
def _setup_dtype(self) -> np.dtype:
    """Build (and cache) the structured numpy dtype describing one data row."""
    if self._dtype is not None:
        # Already computed on a previous call
        return self._dtype

    fields = []
    for i, typ in enumerate(self._typlist):
        name = f"s{i}"
        if typ in self.NUMPY_TYPE_MAP:
            # Numeric column: honor the file's byte order
            typ = cast(str, typ)  # only strs in NUMPY_TYPE_MAP
            fields.append((name, f"{self._byteorder}{self.NUMPY_TYPE_MAP[typ]}"))
        else:
            # Fixed-width string column of length `typ`
            fields.append((name, f"S{typ}"))
    self._dtype = np.dtype(fields)
    return self._dtype
def _decode(self, s: bytes) -> str:
    """Decode a null-terminated byte field using the file's encoding.

    Anything after the first NUL byte is discarded. If decoding fails,
    fall back to latin-1 with a warning (GH 25960: some 117->118 file
    conversions are mis-encoded by Stata).
    """
    # have bytes not strings, so must decode
    s = s.partition(b"\0")[0]
    try:
        return s.decode(self._encoding)
    except UnicodeDecodeError:
        # GH 25960, fallback to handle incorrect format produced when 117
        # files are converted to 118 files in Stata
        encoding = self._encoding
        msg = f"""
One or more strings in the dta file could not be decoded using {encoding}, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
        warnings.warn(
            msg,
            UnicodeWarning,
            stacklevel=find_stack_level(),
        )
        # latin-1 maps every byte to a code point, so this cannot fail
        return s.decode("latin-1")
def _read_new_value_labels(self) -> None:
    """Reads value labels with variable length strings (108 and later format)

    Each label set is stored as a name, a count ``n``, ``n`` offsets into
    a text blob, ``n`` integer values, and the blob itself. Results are
    accumulated into ``self._value_label_dict``.
    """
    if self._format_version >= 117:
        # XML-style files record the label table's offset in the header
        self._path_or_buf.seek(self._seek_value_labels)
    else:
        # Older files place value labels directly after the data block
        assert self._dtype is not None
        offset = self._nobs * self._dtype.itemsize
        self._path_or_buf.seek(self._data_location + offset)
    while True:
        if self._format_version >= 117:
            if self._path_or_buf.read(5) == b"</val":  # <lbl>
                break  # end of value label table
        slength = self._path_or_buf.read(4)
        if not slength:
            break  # end of value label table (format < 117), or end-of-file
        # Label-set name width grew with the format version
        if self._format_version == 108:
            labname = self._decode(self._path_or_buf.read(9))
        elif self._format_version <= 117:
            labname = self._decode(self._path_or_buf.read(33))
        else:
            labname = self._decode(self._path_or_buf.read(129))
        self._path_or_buf.read(3)  # padding
        n = self._read_uint32()
        txtlen = self._read_uint32()
        # Offsets into the text blob, then the labelled integer values
        off = np.frombuffer(
            self._path_or_buf.read(4 * n), dtype=f"{self._byteorder}i4", count=n
        )
        val = np.frombuffer(
            self._path_or_buf.read(4 * n), dtype=f"{self._byteorder}i4", count=n
        )
        # Sort both arrays by offset so each label's text span is
        # bounded by the next offset
        ii = np.argsort(off)
        off = off[ii]
        val = val[ii]
        txt = self._path_or_buf.read(txtlen)
        self._value_label_dict[labname] = {}
        for i in range(n):
            end = off[i + 1] if i < n - 1 else txtlen
            self._value_label_dict[labname][val[i]] = self._decode(
                txt[off[i] : end]
            )
        if self._format_version >= 117:
            self._path_or_buf.read(6)  # </lbl>
def _read_old_value_labels(self) -> None:
    """Reads value labels with fixed-length strings (105 and earlier format)

    Each label set is a count ``n``, a 9-byte set name, ``n`` int16
    codes, and ``n`` 8-byte label strings. Results are accumulated into
    ``self._value_label_dict``.
    """
    assert self._dtype is not None
    # Value labels sit directly after the data block in old formats
    offset = self._nobs * self._dtype.itemsize
    self._path_or_buf.seek(self._data_location + offset)
    while True:
        if not self._path_or_buf.read(2):
            # end-of-file may have been reached, if so stop here
            break
        # otherwise back up and read again, taking byteorder into account
        self._path_or_buf.seek(-2, os.SEEK_CUR)
        n = self._read_uint16()
        labname = self._decode(self._path_or_buf.read(9))
        self._path_or_buf.read(1)  # padding
        codes = np.frombuffer(
            self._path_or_buf.read(2 * n), dtype=f"{self._byteorder}i2", count=n
        )
        self._value_label_dict[labname] = {}
        for i in range(n):
            self._value_label_dict[labname][codes[i]] = self._decode(
                self._path_or_buf.read(8)
            )
def _read_value_labels(self) -> None:
    """Read all value labels from the file, dispatching on format age.

    Idempotent: after the first successful read the parsed labels are
    cached and subsequent calls return immediately.
    """
    self._ensure_open()
    if self._value_labels_read:
        # Don't read twice
        return
    reader = (
        self._read_new_value_labels
        if self._format_version >= 108
        else self._read_old_value_labels
    )
    reader()
    self._value_labels_read = True
def _read_strls(self) -> None:
    """Read the strL (long string / GSO) table into ``self.GSO``.

    Each entry is keyed by its v,o identifier (stringified so uint64
    values work as dict keys on 32-bit builds) and maps to the decoded
    string, or to ``str(bytes)`` for binary payloads.
    """
    self._path_or_buf.seek(self._seek_strls)
    # Wrap v_o in a string to allow uint64 values as keys on 32bit OS
    self.GSO = {"0": ""}
    while True:
        if self._path_or_buf.read(3) != b"GSO":
            # no more GSO records
            break

        if self._format_version == 117:
            v_o = self._read_uint64()
        else:
            # 118+ splits the identifier into v and o sub-fields; splice
            # the relevant bytes back into a single 8-byte integer
            buf = self._path_or_buf.read(12)
            # Only tested on little endian machine.
            v_size = 2 if self._format_version == 118 else 3
            if self._byteorder == "<":
                buf = buf[0:v_size] + buf[4 : (12 - v_size)]
            else:
                # This path may not be correct, impossible to test
                buf = buf[0:v_size] + buf[(4 + v_size) :]
            v_o = struct.unpack(f"{self._byteorder}Q", buf)[0]
        typ = self._read_uint8()
        length = self._read_uint32()
        va = self._path_or_buf.read(length)
        if typ == 130:
            # Type 130 is a NUL-terminated string; drop the terminator
            decoded_va = va[0:-1].decode(self._encoding)
        else:
            # Stata says typ 129 can be binary, so use str
            decoded_va = str(va)
        # Wrap v_o in a string to allow uint64 values as keys on 32bit OS
        self.GSO[str(v_o)] = decoded_va
def __next__(self) -> DataFrame:
    """Return the next chunk of ``self._chunksize`` rows when iterating."""
    self._using_iterator = True
    chunk = self.read(nrows=self._chunksize)
    return chunk
def get_chunk(self, size: int | None = None) -> DataFrame:
    """
    Reads lines from Stata file and returns as dataframe

    Parameters
    ----------
    size : int, defaults to None
        Number of lines to read. If None, reads whole file.

    Returns
    -------
    DataFrame
    """
    # Fall back to the configured chunk size when no size is given
    return self.read(nrows=self._chunksize if size is None else size)
def read(
    self,
    nrows: int | None = None,
    convert_dates: bool | None = None,
    convert_categoricals: bool | None = None,
    index_col: str | None = None,
    convert_missing: bool | None = None,
    preserve_dtypes: bool | None = None,
    columns: Sequence[str] | None = None,
    order_categoricals: bool | None = None,
) -> DataFrame:
    """
    Reads observations from Stata file, converting them into a dataframe

    Parameters
    ----------
    nrows : int
        Number of lines to read from data file, if None read whole file.
    convert_dates : bool, default True
        Convert date variables to DataFrame time values.
    convert_categoricals : bool, default True
        Read value labels and convert columns to Categorical/Factor variables.
    index_col : str, optional
        Column to set as index.
    convert_missing : bool, default False
        Flag indicating whether to convert missing values to their Stata
        representations. If False, missing values are replaced with nan.
        If True, columns containing missing values are returned with
        object data types and missing values are represented by
        StataMissingValue objects.
    preserve_dtypes : bool, default True
        Preserve Stata datatypes. If False, numeric data are upcast to pandas
        default types for foreign data (float64 or int64).
    columns : list or None
        Columns to retain. Columns will be returned in the given order. None
        returns all columns.
    order_categoricals : bool, default True
        Flag indicating whether converted categorical data are ordered.

    Returns
    -------
    DataFrame
    """
    self._ensure_open()

    # Handle options: any argument left as None falls back to the value
    # configured when the reader was constructed
    if convert_dates is None:
        convert_dates = self._convert_dates
    if convert_categoricals is None:
        convert_categoricals = self._convert_categoricals
    if convert_missing is None:
        convert_missing = self._convert_missing
    if preserve_dtypes is None:
        preserve_dtypes = self._preserve_dtypes
    if columns is None:
        columns = self._columns
    if order_categoricals is None:
        order_categoricals = self._order_categoricals
    if index_col is None:
        index_col = self._index_col
    if nrows is None:
        nrows = self._nobs

    # Handle empty file or chunk. If reading incrementally raise
    # StopIteration. If reading the whole thing return an empty
    # data frame.
    if (self._nobs == 0) and nrows == 0:
        data = DataFrame(columns=self._varlist)
        # Apply dtypes correctly
        for i, col in enumerate(data.columns):
            dt = self._dtyplist[i]
            if isinstance(dt, np.dtype):
                if dt.char != "S":
                    data[col] = data[col].astype(dt)
        if columns is not None:
            data = self._do_select_columns(data, columns)
        return data

    # strLs must be loaded before decoding data for XML-style formats
    if (self._format_version >= 117) and (not self._value_labels_read):
        self._read_strls()

    # Read data
    assert self._dtype is not None
    dtype = self._dtype
    max_read_len = (self._nobs - self._lines_read) * dtype.itemsize
    read_len = nrows * dtype.itemsize
    read_len = min(read_len, max_read_len)
    if read_len <= 0:
        # Iterator has finished, should never be here unless
        # we are reading the file incrementally
        if convert_categoricals:
            self._read_value_labels()
        raise StopIteration
    offset = self._lines_read * dtype.itemsize
    self._path_or_buf.seek(self._data_location + offset)
    read_lines = min(nrows, self._nobs - self._lines_read)
    raw_data = np.frombuffer(
        self._path_or_buf.read(read_len), dtype=dtype, count=read_lines
    )

    self._lines_read += read_lines

    # if necessary, swap the byte order to native here
    if self._byteorder != self._native_byteorder:
        raw_data = raw_data.byteswap().view(raw_data.dtype.newbyteorder())

    if convert_categoricals:
        self._read_value_labels()

    if len(raw_data) == 0:
        data = DataFrame(columns=self._varlist)
    else:
        data = DataFrame.from_records(raw_data)
        data.columns = Index(self._varlist)

    # If index is not specified, use actual row number rather than
    # restarting at 0 for each chunk.
    if index_col is None:
        data.index = RangeIndex(
            self._lines_read - read_lines, self._lines_read
        )  # set attr instead of set_index to avoid copy

    if columns is not None:
        data = self._do_select_columns(data, columns)

    # Decode strings (int-typed entries are fixed-width string columns)
    for col, typ in zip(data, self._typlist, strict=True):
        if isinstance(typ, int):
            data[col] = data[col].apply(self._decode)

    data = self._insert_strls(data)

    # Convert columns (if needed) to match input type
    valid_dtypes = [i for i, dtyp in enumerate(self._dtyplist) if dtyp is not None]
    object_type = np.dtype(object)
    for idx in valid_dtypes:
        dtype = data.iloc[:, idx].dtype
        if dtype not in (object_type, self._dtyplist[idx]):
            data.isetitem(idx, data.iloc[:, idx].astype(dtype))

    data = self._do_convert_missing(data, convert_missing)

    if convert_dates:
        for i, fmt in enumerate(self._fmtlist):
            if any(fmt.startswith(date_fmt) for date_fmt in _date_formats):
                data.isetitem(
                    i, _stata_elapsed_date_to_datetime_vec(data.iloc[:, i], fmt)
                )

    if convert_categoricals:
        data = self._do_convert_categoricals(
            data, self._value_label_dict, self._lbllist, order_categoricals
        )

    if not preserve_dtypes:
        # Upcast foreign numeric dtypes to pandas defaults
        retyped_data = []
        convert = False
        for col in data:
            dtype = data[col].dtype
            if dtype in (np.dtype(np.float16), np.dtype(np.float32)):
                dtype = np.dtype(np.float64)
                convert = True
            elif dtype in (
                np.dtype(np.int8),
                np.dtype(np.int16),
                np.dtype(np.int32),
            ):
                dtype = np.dtype(np.int64)
                convert = True
            retyped_data.append((col, data[col].astype(dtype)))
        if convert:
            data = DataFrame.from_dict(dict(retyped_data))

    if index_col is not None:
        data = data.set_index(data.pop(index_col))

    return data
def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame:
    """Replace Stata missing codes found in ``data``.

    If ``convert_missing`` is False, out-of-range values become NaN (the
    column is upcast to float as needed); if True they are replaced by
    ``StataMissingValue`` objects in an object-dtype column.
    """
    # missing code for double was different in version 105 and prior
    old_missingdouble = float.fromhex("0x1.0p333")

    # Check for missing values, and replace if found
    replacements = {}
    for i in range(len(data.columns)):
        fmt = self._typlist[i]
        # recode instances of the old missing code to the currently used value
        if self._format_version <= 105 and fmt == "d":
            data.iloc[:, i] = data.iloc[:, i].replace(
                old_missingdouble, self.MISSING_VALUES["d"]
            )

        # Pick the valid-value range table matching the format version;
        # string columns (not in the table) are skipped entirely
        if self._format_version <= 111:
            if fmt not in self.OLD_VALID_RANGE:
                continue

            fmt = cast(str, fmt)  # only strs in OLD_VALID_RANGE
            nmin, nmax = self.OLD_VALID_RANGE[fmt]
        else:
            if fmt not in self.VALID_RANGE:
                continue

            fmt = cast(str, fmt)  # only strs in VALID_RANGE
            nmin, nmax = self.VALID_RANGE[fmt]

        series = data.iloc[:, i]

        # appreciably faster to do this with ndarray instead of Series
        svals = series._values
        missing = (svals < nmin) | (svals > nmax)

        if not missing.any():
            continue

        if convert_missing:  # Replacement follows Stata notation
            missing_loc = np.nonzero(np.asarray(missing))[0]
            umissing, umissing_loc = np.unique(series[missing], return_inverse=True)
            replacement = Series(series, dtype=object)
            for j, um in enumerate(umissing):
                if self._format_version <= 111:
                    missing_value = StataMissingValue(
                        float(self.MISSING_VALUES[fmt])
                    )
                else:
                    missing_value = StataMissingValue(um)

                loc = missing_loc[umissing_loc == j]
                replacement.iloc[loc] = missing_value
        else:  # All replacements are identical
            dtype = series.dtype
            if dtype not in (np.float32, np.float64):
                # NaN requires a float column
                dtype = np.float64
            replacement = Series(series, dtype=dtype)
            # Note: operating on ._values is much faster than directly
            # TODO: can we fix that?
            replacement._values[missing] = np.nan
        replacements[i] = replacement

    if replacements:
        for idx, value in replacements.items():
            data.isetitem(idx, value)
    return data
def _insert_strls(self, data: DataFrame) -> DataFrame:
    """Replace strL ("Q"-typed) columns with their decoded GSO strings."""
    if not hasattr(self, "GSO") or len(self.GSO) == 0:
        # No strL table loaded, nothing to substitute
        return data
    for col_idx, typ in enumerate(self._typlist):
        if typ != "Q":
            continue
        # Wrap v_o in a string to allow uint64 values as keys on 32bit OS
        decoded = [self.GSO[str(k)] for k in data.iloc[:, col_idx]]
        data.isetitem(col_idx, decoded)
    return data
def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame:
    """Restrict ``data`` to ``columns`` and sync per-column metadata.

    On the first call, the internal dtype/type/format/label lists are
    filtered to match the retained columns so later chunks line up.

    Raises
    ------
    ValueError
        If ``columns`` has duplicates or names not present in the data.
    """
    if not self._column_selector_set:
        column_set = set(columns)
        if len(column_set) != len(columns):
            raise ValueError("columns contains duplicate entries")
        unmatched = column_set.difference(data.columns)
        if unmatched:
            joined = ", ".join(list(unmatched))
            raise ValueError(
                "The following columns were not "
                f"found in the Stata data set: {joined}"
            )
        # Filter the per-column metadata to the retained columns, in order
        locs = [
            data.columns.get_loc(col)  # type: ignore[no-untyped-call]
            for col in columns
        ]
        self._dtyplist = [self._dtyplist[i] for i in locs]
        self._typlist = [self._typlist[i] for i in locs]
        self._fmtlist = [self._fmtlist[i] for i in locs]
        self._lbllist = [self._lbllist[i] for i in locs]
        self._column_selector_set = True

    return data[columns]
def _do_convert_categoricals(
    self,
    data: DataFrame,
    value_label_dict: dict[str, dict[int, str]],
    lbllist: Sequence[str],
    order_categoricals: bool,
) -> DataFrame:
    """
    Converts categorical columns to Categorical type.

    Columns whose value-label-set name (from ``lbllist``) appears in
    ``value_label_dict`` are converted; codes are renamed to their label
    text. Other columns pass through unchanged.
    """
    if not value_label_dict:
        return data
    cat_converted_data = []
    for col, label in zip(data, lbllist, strict=True):
        if label in value_label_dict:
            # Explicit call with ordered=True
            vl = value_label_dict[label]
            keys = np.array(list(vl.keys()))
            column = data[col]
            key_matches = column.isin(keys)
            if self._using_iterator and key_matches.all():
                initial_categories: np.ndarray | None = keys
                # If all categories are in the keys and we are iterating,
                # use the same keys for all chunks. If some are missing
                # value labels, then we will fall back to the categories
                # varying across chunks.
            else:
                if self._using_iterator:
                    # warn is using an iterator
                    warnings.warn(
                        categorical_conversion_warning,
                        CategoricalConversionWarning,
                        stacklevel=find_stack_level(),
                    )
                initial_categories = None
            cat_data = Categorical(
                column, categories=initial_categories, ordered=order_categoricals
            )
            if initial_categories is None:
                # If None here, then we need to match the cats in the Categorical
                categories = []
                for category in cat_data.categories:
                    if category in vl:
                        categories.append(vl[category])
                    else:
                        # Unlabelled code: keep the raw value as category
                        categories.append(category)
            else:
                # If all cats are matched, we can use the values
                categories = list(vl.values())
            try:
                # Try to catch duplicate categories
                # TODO: if we get a non-copying rename_categories, use that
                cat_data = cat_data.rename_categories(categories)
            except ValueError as err:
                vc = Series(categories, copy=False).value_counts()
                repeated_cats = list(vc.index[vc > 1])
                repeats = "-" * 80 + "\n" + "\n".join(repeated_cats)
                # GH 25772
                msg = f"""
Value labels for column {col} are not unique. These cannot be converted to
pandas categoricals.

Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.

The repeated labels are:
{repeats}
"""
                raise ValueError(msg) from err
            # TODO: is the next line needed above in the data(...) method?
            cat_series = Series(cat_data, index=data.index, copy=False)
            cat_converted_data.append((col, cat_series))
        else:
            cat_converted_data.append((col, data[col]))
    data = DataFrame(dict(cat_converted_data), copy=False)
    return data
@property
def data_label(self) -> str:
    """
    Return data label of Stata file.

    The data label is a descriptive string associated with the dataset
    stored in the Stata file. This property provides access to that
    label, if one is present.

    See Also
    --------
    io.stata.StataReader.variable_labels : Return a dict associating each variable
        name with corresponding label.
    DataFrame.to_stata : Export DataFrame object to Stata dta format.

    Examples
    --------
    >>> df = pd.DataFrame([(1,)], columns=["variable"])
    >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21)
    >>> data_label = "This is a data file."
    >>> path = "/My_path/filename.dta"
    >>> df.to_stata(
    ...     path,
    ...     time_stamp=time_stamp,  # doctest: +SKIP
    ...     data_label=data_label,  # doctest: +SKIP
    ...     version=None,
    ... )  # doctest: +SKIP
    >>> with pd.io.stata.StataReader(path) as reader:  # doctest: +SKIP
    ...     print(reader.data_label)  # doctest: +SKIP
    This is a data file.
    """
    # Opening the file parses the header, which holds the label
    self._ensure_open()
    return self._data_label
@property
def time_stamp(self) -> str:
    """
    Return time stamp of Stata file.

    The time stamp is the creation time recorded in the file header;
    opening the file (which parses the header) makes it available.
    """
    self._ensure_open()
    return self._time_stamp
def variable_labels(self) -> dict[str, str]:
    """
    Return a dict associating each variable name with corresponding label.

    This method retrieves variable labels from a Stata file. Variable labels are
    mappings between variable names and their corresponding descriptive labels
    in a Stata dataset.

    Returns
    -------
    dict
        A python dictionary.

    See Also
    --------
    read_stata : Read Stata file into DataFrame.
    DataFrame.to_stata : Export DataFrame object to Stata dta format.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["col_1", "col_2"])
    >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21)
    >>> path = "/My_path/filename.dta"
    >>> variable_labels = {"col_1": "This is an example"}
    >>> df.to_stata(
    ...     path,
    ...     time_stamp=time_stamp,  # doctest: +SKIP
    ...     variable_labels=variable_labels,
    ...     version=None,
    ... )  # doctest: +SKIP
    >>> with pd.io.stata.StataReader(path) as reader:  # doctest: +SKIP
    ...     print(reader.variable_labels())  # doctest: +SKIP
    {'index': '', 'col_1': 'This is an example', 'col_2': ''}
    >>> pd.read_stata(path)  # doctest: +SKIP
        index col_1 col_2
    0       0    1    2
    1       1    3    4
    """
    self._ensure_open()
    # Names and labels were read in header order, so they pair up 1:1
    return dict(zip(self._varlist, self._variable_labels, strict=True))
def value_labels(self) -> dict[str, dict[int, str]]:
    """
    Return a nested dict associating each variable name to its value and label.

    This method retrieves the value labels from a Stata file. Value labels are
    mappings between the coded values and their corresponding descriptive labels
    in a Stata dataset.

    Returns
    -------
    dict
        A python dictionary.

    See Also
    --------
    read_stata : Read Stata file into DataFrame.
    DataFrame.to_stata : Export DataFrame object to Stata dta format.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["col_1", "col_2"])
    >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21)
    >>> path = "/My_path/filename.dta"
    >>> value_labels = {"col_1": {3: "x"}}
    >>> df.to_stata(
    ...     path,
    ...     time_stamp=time_stamp,  # doctest: +SKIP
    ...     value_labels=value_labels,
    ...     version=None,
    ... )  # doctest: +SKIP
    >>> with pd.io.stata.StataReader(path) as reader:  # doctest: +SKIP
    ...     print(reader.value_labels())  # doctest: +SKIP
    {'col_1': {3: 'x'}}
    >>> pd.read_stata(path)  # doctest: +SKIP
        index col_1 col_2
    0       0    1    2
    1       1    x    4
    """
    # Lazily parse the value-label table on first access
    if not self._value_labels_read:
        self._read_value_labels()
    return self._value_label_dict
@set_module("pandas")
def read_stata(
    filepath_or_buffer: FilePath | ReadBuffer[bytes],
    *,
    convert_dates: bool = True,
    convert_categoricals: bool = True,
    index_col: str | None = None,
    convert_missing: bool = False,
    preserve_dtypes: bool = True,
    columns: Sequence[str] | None = None,
    order_categoricals: bool = True,
    chunksize: int | None = None,
    iterator: bool = False,
    compression: CompressionOptions = "infer",
    storage_options: StorageOptions | None = None,
) -> DataFrame | StataReader:
    """
    Read Stata file into DataFrame.

    Parameters
    ----------
    filepath_or_buffer : str, path object or file-like object
        Any valid string path is acceptable. The string could be a URL. Valid
        URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be: ``file://localhost/path/to/table.dta``.

        If you want to pass in a path object, pandas accepts any ``os.PathLike``.

        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handle (e.g. via builtin ``open`` function)
        or ``StringIO``.
    convert_dates : bool, default True
        Convert date variables to DataFrame time values.
    convert_categoricals : bool, default True
        Read value labels and convert columns to Categorical/Factor variables.
    index_col : str, optional
        Column to set as index.
    convert_missing : bool, default False
        Flag indicating whether to convert missing values to their Stata
        representations. If False, missing values are replaced with nan.
        If True, columns containing missing values are returned with
        object data types and missing values are represented by
        StataMissingValue objects.
    preserve_dtypes : bool, default True
        Preserve Stata datatypes. If False, numeric data are upcast to pandas
        default types for foreign data (float64 or int64).
    columns : list or None
        Columns to retain. Columns will be returned in the given order. None
        returns all columns.
    order_categoricals : bool, default True
        Flag indicating whether converted categorical data are ordered.
    chunksize : int, default None
        Return StataReader object for iterations, returns chunks with
        given number of lines.
    iterator : bool, default False
        Return StataReader object.
    compression : str or dict, default 'infer'
        For on-the-fly decompression of on-disk data. If 'infer' and
        'filepath_or_buffer' is path-like, then detect compression from the
        following extensions: '.gz', '.bz2', '.zip', '.xz', '.zst', '.tar',
        '.tar.gz', '.tar.xz' or '.tar.bz2' (otherwise no compression).
        If using 'zip' or 'tar', the ZIP file must contain only one
        data file to be read in. Set to ``None`` for no decompression.
        Can also be a dict with key ``'method'`` set to one of
        {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and
        other key-value pairs are forwarded to
        ``zipfile.ZipFile``, ``gzip.GzipFile``,
        ``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or
        ``tarfile.TarFile``, respectively.
        As an example, the following could be passed for Zstandard decompression using a
        custom compression dictionary:
        ``compression={'method': 'zstd', 'dict_data': my_compression_dict}``.
    storage_options : dict, optional
        Extra options that make sense for a particular storage connection, e.g.
        host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
        are forwarded to ``urllib.request.Request`` as header options. For other
        URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
        forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
        details, and for more examples on storage options refer `here
        <https://pandas.pydata.org/docs/user_guide/io.html?
        highlight=storage_options#reading-writing-remote-files>`_.

    Returns
    -------
    DataFrame, pandas.api.typing.StataReader
        If iterator or chunksize, returns StataReader, else DataFrame.

    See Also
    --------
    io.stata.StataReader : Low-level reader for Stata data files.
    DataFrame.to_stata: Export Stata data files.

    Notes
    -----
    Categorical variables read through an iterator may not have the same
    categories and dtype. This occurs when  a variable stored in a DTA
    file is associated to an incomplete set of value labels that only
    label a strict subset of the values.

    Examples
    --------
    Creating a dummy stata for this example

    >>> df = pd.DataFrame(
    ...     {
    ...         "animal": ["falcon", "parrot", "falcon", "parrot"],
    ...         "speed": [350, 18, 361, 15],
    ...     }
    ... )  # doctest: +SKIP
    >>> df.to_stata("animals.dta")  # doctest: +SKIP

    Read a Stata dta file:

    >>> df = pd.read_stata("animals.dta")  # doctest: +SKIP

    Read a Stata dta file in 10,000 line chunks:

    >>> values = np.random.randint(
    ...     0, 10, size=(20_000, 1), dtype="uint8"
    ... )  # doctest: +SKIP
    >>> df = pd.DataFrame(values, columns=["i"])  # doctest: +SKIP
    >>> df.to_stata("filename.dta")  # doctest: +SKIP

    >>> with pd.read_stata('filename.dta', chunksize=10000) as itr:  # doctest: +SKIP
    >>>     for chunk in itr:
    ...         # Operate on a single chunk, e.g., chunk.mean()
    ...         pass  # doctest: +SKIP
    """
    reader = StataReader(
        filepath_or_buffer,
        convert_dates=convert_dates,
        convert_categoricals=convert_categoricals,
        index_col=index_col,
        convert_missing=convert_missing,
        preserve_dtypes=preserve_dtypes,
        columns=columns,
        order_categoricals=order_categoricals,
        chunksize=chunksize,
        storage_options=storage_options,
        compression=compression,
    )

    # Hand the open reader to the caller when they asked to iterate;
    # otherwise read everything and close the file via the context manager
    if iterator or chunksize:
        return reader

    with reader:
        return reader.read()
def _set_endianness(endianness: str) -> str:
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError(f"Endianness {endianness} not understood")
def _pad_bytes(name: AnyStr, length: int) -> AnyStr:
"""
Take a char string and pads it with null bytes until it's length chars.
"""
if isinstance(name, bytes):
return name + b"\x00" * (length - len(name))
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt: str) -> np.dtype:
"""
Convert from one of the stata date formats to a type in TYPE_MAP.
"""
if fmt in [
"tc",
"%tc",
"td",
"%td",
"tw",
"%tw",
"tm",
"%tm",
"tq",
"%tq",
"th",
"%th",
"ty",
"%ty",
]:
return np.dtype(np.float64) # Stata expects doubles for SIFs
else:
raise NotImplementedError(f"Format {fmt} not implemented")
def _maybe_convert_to_int_keys(convert_dates: dict, varlist: list[Hashable]) -> dict:
new_dict = {}
for key, value in convert_dates.items():
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + value
if key in varlist:
new_dict[varlist.index(key)] = convert_dates[key]
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a column or an integer")
new_dict[key] = convert_dates[key]
return new_dict
def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int:
"""
Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - for int8 byte
252 - for int16 int
253 - for int32 long
254 - for float32 float
255 - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type is np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column._values))
return max(itemsize, 1)
elif dtype.type is np.float64:
return 255
elif dtype.type is np.float32:
return 254
elif dtype.type is np.int32:
return 253
elif dtype.type is np.int16:
return 252
elif dtype.type is np.int8:
return 251
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
def _dtype_to_default_stata_fmt(
dtype: np.dtype, column: Series, dta_version: int = 114, force_strl: bool = False
) -> str:
"""
Map numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
strl -> "%9s"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dta_version < 117:
max_str_len = 244
else:
max_str_len = 2045
if force_strl:
return "%9s"
if dtype.type is np.object_:
itemsize = max_len_string_array(ensure_object(column._values))
if itemsize > max_str_len:
if dta_version >= 117:
return "%9s"
else:
raise ValueError(excessive_string_length_error.format(column.name))
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype in (np.int8, np.int16):
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
| StataReader |
python | apache__airflow | providers/qdrant/tests/unit/qdrant/hooks/test_qdrant.py | {
"start": 992,
"end": 5346
} | class ____:
def setup_method(self):
"""Set up the test connection for the QdrantHook."""
with patch("airflow.models.Connection.get_connection_from_secrets") as mock_get_connection:
mock_conn = Mock()
mock_conn.host = "localhost"
mock_conn.port = 6333
mock_conn.extra_dejson = {}
mock_conn.password = "some_test_api_key"
mock_get_connection.return_value = mock_conn
self.qdrant_hook = QdrantHook()
self.collection_name = "test_collection"
@patch("airflow.providers.qdrant.hooks.qdrant.QdrantHook.conn")
def test_verify_connection(self, mock_conn):
"""Test the verify_connection of the QdrantHook."""
self.qdrant_hook.verify_connection()
mock_conn.get_collections.assert_called_once()
@patch("airflow.providers.qdrant.hooks.qdrant.QdrantHook.conn")
def test_upsert(self, conn):
"""Test the upsert method of the QdrantHook with appropriate arguments."""
vectors = [[0.732, 0.611, 0.289], [0.217, 0.526, 0.416], [0.326, 0.483, 0.376]]
ids = [32, 21, "b626f6a9-b14d-4af9-b7c3-43d8deb719a6"]
payloads = [{"meta": "data"}, {"meta": "data_2"}, {"meta": "data_3", "extra": "data"}]
parallel = 2
self.qdrant_hook.conn.upsert(
collection_name=self.collection_name,
vectors=vectors,
ids=ids,
payloads=payloads,
parallel=parallel,
)
conn.upsert.assert_called_once_with(
collection_name=self.collection_name,
vectors=vectors,
ids=ids,
payloads=payloads,
parallel=parallel,
)
@patch("airflow.providers.qdrant.hooks.qdrant.QdrantHook.conn")
def test_list_collections(self, conn):
"""Test that the list_collections is called correctly."""
self.qdrant_hook.conn.list_collections()
conn.list_collections.assert_called_once()
@patch("airflow.providers.qdrant.hooks.qdrant.QdrantHook.conn")
def test_create_collection(self, conn):
"""Test that the create_collection is called with correct arguments."""
from qdrant_client.models import Distance, VectorParams
self.qdrant_hook.conn.create_collection(
collection_name=self.collection_name,
vectors_config=VectorParams(size=384, distance=Distance.COSINE),
)
conn.create_collection.assert_called_once_with(
collection_name=self.collection_name,
vectors_config=VectorParams(size=384, distance=Distance.COSINE),
)
@patch("airflow.providers.qdrant.hooks.qdrant.QdrantHook.conn")
def test_delete(self, conn):
"""Test that the delete is called with correct arguments."""
self.qdrant_hook.conn.delete(
collection_name=self.collection_name, points_selector=[32, 21], wait=False
)
conn.delete.assert_called_once_with(
collection_name=self.collection_name, points_selector=[32, 21], wait=False
)
@patch("airflow.providers.qdrant.hooks.qdrant.QdrantHook.conn")
def test_search(self, conn):
"""Test that the search is called with correct arguments."""
self.qdrant_hook.conn.search(
collection_name=self.collection_name,
query_vector=[1.0, 2.0, 3.0],
limit=10,
with_vectors=True,
)
conn.search.assert_called_once_with(
collection_name=self.collection_name, query_vector=[1.0, 2.0, 3.0], limit=10, with_vectors=True
)
@patch("airflow.providers.qdrant.hooks.qdrant.QdrantHook.conn")
def test_get_collection(self, conn):
"""Test that the get_collection is called with correct arguments."""
self.qdrant_hook.conn.get_collection(collection_name=self.collection_name)
conn.get_collection.assert_called_once_with(collection_name=self.collection_name)
@patch("airflow.providers.qdrant.hooks.qdrant.QdrantHook.conn")
def test_delete_collection(self, conn):
"""Test that the delete_collection is called with correct arguments."""
self.qdrant_hook.conn.delete_collection(collection_name=self.collection_name)
conn.delete_collection.assert_called_once_with(collection_name=self.collection_name)
| TestQdrantHook |
python | lepture__authlib | authlib/integrations/starlette_client/apps.py | {
"start": 1628,
"end": 2380
} | class ____(StarletteAppMixin, AsyncOAuth1Mixin, BaseApp):
client_cls = AsyncOAuth1Client
async def authorize_access_token(self, request, **kwargs):
params = dict(request.query_params)
state = params.get("oauth_token")
if not state:
raise OAuthError(description='Missing "oauth_token" parameter')
data = await self.framework.get_state_data(request.session, state)
if not data:
raise OAuthError(description='Missing "request_token" in temporary data')
params["request_token"] = data["request_token"]
params.update(kwargs)
await self.framework.clear_state_data(request.session, state)
return await self.fetch_access_token(**params)
| StarletteOAuth1App |
python | apache__airflow | providers/yandex/src/airflow/providers/yandex/operators/dataproc.py | {
"start": 15463,
"end": 17421
} | class ____(DataprocBaseOperator):
"""
Runs Hive job in Data Proc cluster.
:param query: Hive query.
:param query_file_uri: URI of the script that contains Hive queries. Can be placed in HDFS or S3.
:param properties: A mapping of property names to values, used to configure Hive.
:param script_variables: Mapping of query variable names to values.
:param continue_on_failure: Whether to continue executing queries if a query fails.
:param name: Name of the job. Used for labeling.
:param cluster_id: ID of the cluster to run job in.
Will try to take the ID from Dataproc Hook object if it's specified. (templated)
:param connection_id: ID of the Yandex.Cloud Airflow connection.
"""
def __init__(
self,
*,
query: str | None = None,
query_file_uri: str | None = None,
script_variables: dict[str, str] | None = None,
continue_on_failure: bool = False,
properties: dict[str, str] | None = None,
name: str = "Hive job",
cluster_id: str | None = None,
connection_id: str | None = None,
**kwargs,
) -> None:
super().__init__(yandex_conn_id=connection_id, cluster_id=cluster_id, **kwargs)
self.query = query
self.query_file_uri = query_file_uri
self.script_variables = script_variables
self.continue_on_failure = continue_on_failure
self.properties = properties
self.name = name
def execute(self, context: Context) -> None:
hook = self._setup(context)
hook.dataproc_client.create_hive_job(
query=self.query,
query_file_uri=self.query_file_uri,
script_variables=self.script_variables,
continue_on_failure=self.continue_on_failure,
properties=self.properties,
name=self.name,
cluster_id=self.cluster_id,
)
| DataprocCreateHiveJobOperator |
python | fastapi__sqlmodel | docs_src/advanced/uuid/tutorial002.py | {
"start": 101,
"end": 1592
} | class ____(SQLModel, table=True):
id: uuid.UUID = Field(default_factory=uuid.uuid4, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Union[int, None] = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_hero():
with Session(engine) as session:
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
print("The hero before saving in the DB")
print(hero_1)
print("The hero ID was already set")
print(hero_1.id)
session.add(hero_1)
session.commit()
session.refresh(hero_1)
print("After saving in the DB")
print(hero_1)
def select_hero():
with Session(engine) as session:
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_2)
session.commit()
session.refresh(hero_2)
hero_id = hero_2.id
print("Created hero:")
print(hero_2)
print("Created hero ID:")
print(hero_id)
selected_hero = session.get(Hero, hero_id)
print("Selected hero:")
print(selected_hero)
print("Selected hero ID:")
print(selected_hero.id)
def main() -> None:
create_db_and_tables()
create_hero()
select_hero()
if __name__ == "__main__":
main()
| Hero |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_data_condition_group.py | {
"start": 597,
"end": 1071
} | class ____(TestCase):
def test_get_data_conditions_for_group(self) -> None:
assert get_data_conditions_for_group(0) == []
def test_get_data_conditions_for_group__exists(self) -> None:
data_condition_group = self.create_data_condition_group()
data_condition = self.create_data_condition(condition_group=data_condition_group)
assert get_data_conditions_for_group(data_condition_group.id) == [data_condition]
| TestGetDataConditionsForGroup |
python | keras-team__keras | keras/src/utils/file_utils_test.py | {
"start": 26783,
"end": 28691
} | class ____(test_case.TestCase):
def test_gcs_remote_path(self):
self.assertTrue(file_utils.is_remote_path("/gcs/some/path/to/file.txt"))
self.assertTrue(file_utils.is_remote_path("/gcs/another/directory/"))
self.assertTrue(file_utils.is_remote_path("gcs://bucket/some/file.txt"))
def test_hdfs_remote_path(self):
self.assertTrue(file_utils.is_remote_path("hdfs://some/path/on/hdfs"))
self.assertTrue(file_utils.is_remote_path("/hdfs/some/local/path"))
def test_cns_remote_path(self):
self.assertTrue(file_utils.is_remote_path("/cns/some/path"))
def test_placer_remote_path(self):
self.assertTrue(
file_utils.is_remote_path("/placer/prod/home/some/path")
)
self.assertTrue(
file_utils.is_remote_path("/placer/test/home/some/path")
)
self.assertTrue(
file_utils.is_remote_path("/placer/prod/scratch/home/some/path")
)
def test_tfhub_remote_path(self):
self.assertTrue(file_utils.is_remote_path("/tfhub/some/path"))
def test_cfs_remote_path(self):
self.assertTrue(file_utils.is_remote_path("/cfs/some/path"))
def test_readahead_remote_path(self):
self.assertTrue(file_utils.is_remote_path("/readahead/some/path"))
def test_non_remote_paths(self):
self.assertFalse(file_utils.is_remote_path("/local/path/to/file.txt"))
self.assertFalse(
file_utils.is_remote_path("C:\\local\\path\\on\\windows\\file.txt")
)
self.assertFalse(file_utils.is_remote_path("~/relative/path/"))
self.assertFalse(file_utils.is_remote_path("./another/relative/path"))
self.assertFalse(file_utils.is_remote_path("/local/path"))
self.assertFalse(file_utils.is_remote_path("./relative/path"))
self.assertFalse(file_utils.is_remote_path("~/relative/path"))
| IsRemotePathTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.