language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | django__django | tests/admin_inlines/models.py | {
"start": 8316,
"end": 8608
} | class ____(models.Model):
name = models.CharField(max_length=1)
position = models.PositiveIntegerField(help_text="Position help_text.")
parent = models.ForeignKey(SomeParentModel, models.CASCADE)
readonly_field = models.CharField(max_length=1)
# Models for #30231
| SomeChildModel |
python | numpy__numpy | numpy/_core/tests/test_numerictypes.py | {
"start": 18565,
"end": 19049
} | class ____:
def test_longdouble(self):
assert_(np._core.sctypeDict['float64'] is not np.longdouble)
assert_(np._core.sctypeDict['complex128'] is not np.clongdouble)
def test_ulong(self):
assert np._core.sctypeDict['ulong'] is np.ulong
assert np.dtype(np.ulong) is np.dtype("ulong")
assert np.dtype(np.ulong).itemsize == np.dtype(np.long).itemsize
@pytest.mark.filterwarnings("ignore:.*maximum_sctype.*:DeprecationWarning")
| TestSctypeDict |
python | PyCQA__pyflakes | pyflakes/checker.py | {
"start": 17164,
"end": 20101
} | class ____:
names = dir()
# Globally defined names which are not attributes of the builtins module, or
# are only present on some platforms.
_MAGIC_GLOBALS = ['__file__', '__builtins__', '__annotations__', 'WindowsError']
def getNodeName(node):
# Returns node.id, or node.name, or None
if hasattr(node, 'id'): # One of the many nodes with an id
return node.id
if hasattr(node, 'name'): # an ExceptHandler node
return node.name
if hasattr(node, 'rest'): # a MatchMapping node
return node.rest
TYPING_MODULES = frozenset(('typing', 'typing_extensions'))
def _is_typing_helper(node, is_name_match_fn, scope_stack):
"""
Internal helper to determine whether or not something is a member of a
typing module. This is used as part of working out whether we are within a
type annotation context.
Note: you probably don't want to use this function directly. Instead see the
utils below which wrap it (`_is_typing` and `_is_any_typing_member`).
"""
def _bare_name_is_attr(name):
for scope in reversed(scope_stack):
if name in scope:
return (
isinstance(scope[name], ImportationFrom) and
scope[name].module in TYPING_MODULES and
is_name_match_fn(scope[name].real_name)
)
return False
def _module_scope_is_typing(name):
for scope in reversed(scope_stack):
if name in scope:
return (
isinstance(scope[name], Importation) and
scope[name].fullName in TYPING_MODULES
)
return False
return (
(
isinstance(node, ast.Name) and
_bare_name_is_attr(node.id)
) or (
isinstance(node, ast.Attribute) and
isinstance(node.value, ast.Name) and
_module_scope_is_typing(node.value.id) and
is_name_match_fn(node.attr)
)
)
def _is_typing(node, typing_attr, scope_stack):
"""
Determine whether `node` represents the member of a typing module specified
by `typing_attr`.
This is used as part of working out whether we are within a type annotation
context.
"""
return _is_typing_helper(node, lambda x: x == typing_attr, scope_stack)
def _is_any_typing_member(node, scope_stack):
"""
Determine whether `node` represents any member of a typing module.
This is used as part of working out whether we are within a type annotation
context.
"""
return _is_typing_helper(node, lambda x: True, scope_stack)
def is_typing_overload(value, scope_stack):
return (
isinstance(value.source, (ast.FunctionDef, ast.AsyncFunctionDef)) and
any(
_is_typing(dec, 'overload', scope_stack)
for dec in value.source.decorator_list
)
)
| DetectClassScopedMagic |
python | pytorch__pytorch | test/export/test_sparse.py | {
"start": 1599,
"end": 1715
} | class ____(torch.nn.Module):
def forward(self, x):
return [xi.to_sparse() for xi in x]
| SparseActivationCOO |
python | sympy__sympy | sympy/physics/quantum/spin.py | {
"start": 6905,
"end": 8129
} | class ____(SpinOpBase, Operator):
"""The J- operator."""
_coord = '-'
basis = 'Jz'
def _apply_operator_JzKet(self, ket, **options):
j = ket.j
m = ket.m
if m.is_Number and j.is_Number:
if m <= -j:
return S.Zero
return hbar*sqrt(j*(j + S.One) - m*(m - S.One))*JzKet(j, m - S.One)
def _apply_operator_JzKetCoupled(self, ket, **options):
j = ket.j
m = ket.m
jn = ket.jn
coupling = ket.coupling
if m.is_Number and j.is_Number:
if m <= -j:
return S.Zero
return hbar*sqrt(j*(j + S.One) - m*(m - S.One))*JzKetCoupled(j, m - S.One, jn, coupling)
def matrix_element(self, j, m, jp, mp):
result = hbar*sqrt(j*(j + S.One) - mp*(mp - S.One))
result *= KroneckerDelta(m, mp - 1)
result *= KroneckerDelta(j, jp)
return result
def _represent_default_basis(self, **options):
return self._represent_JzOp(None, **options)
def _represent_JzOp(self, basis, **options):
return self._represent_base(basis, **options)
def _eval_rewrite_as_xyz(self, *args, **kwargs):
return JxOp(args[0]) - I*JyOp(args[0])
| JminusOp |
python | pypa__pip | src/pip/_vendor/truststore/_windows.py | {
"start": 1260,
"end": 1845
} | class ____(Structure):
_fields_ = (
("cbSize", DWORD),
("RequestedUsage", CERT_USAGE_MATCH),
("RequestedIssuancePolicy", CERT_USAGE_MATCH),
("dwUrlRetrievalTimeout", DWORD),
("fCheckRevocationFreshnessTime", BOOL),
("dwRevocationFreshnessTime", DWORD),
("pftCacheResync", LPFILETIME),
("pStrongSignPara", c_void_p),
("dwStrongSignFlags", DWORD),
)
if TYPE_CHECKING:
PCERT_CHAIN_PARA = pointer[CERT_CHAIN_PARA] # type: ignore[misc]
else:
PCERT_CHAIN_PARA = POINTER(CERT_CHAIN_PARA)
| CERT_CHAIN_PARA |
python | anthropics__anthropic-sdk-python | src/anthropic/types/message_tokens_count.py | {
"start": 155,
"end": 329
} | class ____(BaseModel):
input_tokens: int
"""
The total number of tokens across the provided list of messages, system prompt,
and tools.
"""
| MessageTokensCount |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/__init__.py | {
"start": 2617,
"end": 3510
} | class ____:
def __new__(cls, *new_args, **new_kwargs):
"""__new__(cls, d, e=1) -> DocstringSig
First line of docstring
rest of docstring
"""
def __init__(self, *init_args, **init_kwargs):
"""__init__(self, a, b=1) -> None
First line of docstring
rest of docstring
"""
def meth(self):
"""meth(FOO, BAR=1) -> BAZ
First line of docstring
rest of docstring
"""
def meth2(self):
"""First line, no signature
Second line followed by indentation::
indented line
"""
@property
def prop1(self):
"""DocstringSig.prop1(self)
First line of docstring
"""
return 123
@property
def prop2(self):
"""First line of docstring
Second line of docstring
"""
return 456
| DocstringSig |
python | mlflow__mlflow | mlflow/types/responses_helpers.py | {
"start": 3626,
"end": 3699
} | class ____(BaseModel):
text: str
type: str = "summary_text"
| Summary |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_bar12.py | {
"start": 315,
"end": 1380
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_bar12.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [40293888, 40295424]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chartsheet.set_chart(chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | openai__openai-python | src/openai/_utils/_logs.py | {
"start": 895,
"end": 1351
} | class ____(logging.Filter):
@override
def filter(self, record: logging.LogRecord) -> bool:
if is_dict(record.args) and "headers" in record.args and is_dict(record.args["headers"]):
headers = record.args["headers"] = {**record.args["headers"]}
for header in headers:
if str(header).lower() in SENSITIVE_HEADERS:
headers[header] = "<redacted>"
return True
| SensitiveHeadersFilter |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/scoped_css.py | {
"start": 113,
"end": 454
} | class ____(Widget):
DEFAULT_CSS = """
MyWidget {
height: auto;
border: magenta;
}
Label {
border: solid green;
}
"""
def compose(self) -> ComposeResult:
yield Label("foo")
yield Label("bar")
def on_mount(self) -> None:
self.log(self.app.stylesheet.css)
| MyWidget |
python | cython__cython | Cython/Compiler/MatchCaseNodes.py | {
"start": 9666,
"end": 12178
} | class ____(PatternNode):
"""
alternatives list of PatternNodes
"""
child_attrs = PatternNode.child_attrs + ["alternatives"]
def get_first_irrefutable(self):
for alternative in self.alternatives:
if alternative.is_irrefutable():
return alternative
return None
def is_irrefutable(self):
return self.get_first_irrefutable() is not None
def irrefutable_message(self):
return self.get_first_irrefutable().irrefutable_message()
def get_main_pattern_targets(self):
child_targets = None
for alternative in self.alternatives:
alternative_targets = alternative.get_targets()
if child_targets is not None and child_targets != alternative_targets:
error(self.pos, "alternative patterns bind different names")
child_targets = alternative_targets
return child_targets
def validate_irrefutable(self):
super(OrPatternNode, self).validate_irrefutable()
found_irrefutable_case = None
for alternative in self.alternatives:
if found_irrefutable_case:
error(
found_irrefutable_case.pos,
f"{found_irrefutable_case.irrefutable_message()} makes remaining patterns unreachable"
)
break
if alternative.is_irrefutable():
found_irrefutable_case = alternative
alternative.validate_irrefutable()
def is_simple_value_comparison(self):
return all(
# it turns out to be hard to generate correct assignment code
# for or patterns with targets
a.is_simple_value_comparison() and not a.get_targets()
for a in self.alternatives
)
def get_simple_comparison_node(self, subject_node):
assert self.is_simple_value_comparison()
assert len(self.alternatives) >= 2
binop = ExprNodes.BoolBinopNode(
self.pos,
operator="or",
operand1=self.alternatives[0].get_simple_comparison_node(subject_node),
operand2=self.alternatives[1].get_simple_comparison_node(subject_node),
)
for a in self.alternatives[2:]:
binop = ExprNodes.BoolBinopNode(
self.pos,
operator="or",
operand1=binop,
operand2=a.get_simple_comparison_node(subject_node),
)
return binop
| OrPatternNode |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/path_registry.py | {
"start": 11473,
"end": 12502
} | class ____(_CreatesToken):
"""Root registry, defers to mappers so that
paths are maintained per-root-mapper.
"""
__slots__ = ()
inherit_cache = True
path = natural_path = ()
has_entity = False
is_aliased_class = False
is_root = True
is_unnatural = False
def _getitem(
self, entity: Any
) -> Union[_TokenRegistry, _AbstractEntityRegistry]:
if entity in PathToken._intern:
if TYPE_CHECKING:
assert isinstance(entity, _StrPathToken)
return _TokenRegistry(self, PathToken._intern[entity])
else:
try:
return entity._path_registry # type: ignore
except AttributeError:
raise IndexError(
f"invalid argument for RootRegistry.__getitem__: {entity}"
)
def _truncate_recursive(self) -> RootRegistry:
return self
if not TYPE_CHECKING:
__getitem__ = _getitem
PathRegistry.root = RootRegistry()
| RootRegistry |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/tasks.py | {
"start": 16865,
"end": 19949
} | class ____(GoogleCloudBaseOperator):
"""
Deletes a queue from Cloud Tasks, even if it has tasks in it.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueueDeleteOperator`
:param location: The location name in which the queue will be deleted.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
def __init__(
self,
*,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_queue(
location=self.location,
queue_name=self.queue_name,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
| CloudTasksQueueDeleteOperator |
python | encode__django-rest-framework | tests/test_relations_hyperlink.py | {
"start": 2128,
"end": 2441
} | class ____(serializers.HyperlinkedModelSerializer):
class Meta:
model = OneToOneTarget
fields = ('url', 'name', 'nullable_source')
# TODO: Add test that .data cannot be accessed prior to .is_valid
@override_settings(ROOT_URLCONF='tests.test_relations_hyperlink')
| NullableOneToOneTargetSerializer |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_methods.py | {
"start": 214,
"end": 1334
} | class ____:
params = [
[
# from_dtype == to_dtype
("Float64", "Float64"),
("float64[pyarrow]", "float64[pyarrow]"),
# from non-EA to EA
("float64", "Float64"),
("float64", "float64[pyarrow]"),
# from EA to non-EA
("Float64", "float64"),
("float64[pyarrow]", "float64"),
# from EA to EA
("Int64", "Float64"),
("int64[pyarrow]", "float64[pyarrow]"),
],
[False, True],
]
param_names = ["from_to_dtypes", "copy"]
def setup(self, from_to_dtypes, copy):
from_dtype = from_to_dtypes[0]
if from_dtype in ("float64", "Float64", "float64[pyarrow]"):
data = np.random.randn(100, 100)
elif from_dtype in ("int64", "Int64", "int64[pyarrow]"):
data = np.random.randint(0, 1000, (100, 100))
else:
raise NotImplementedError
self.df = DataFrame(data, dtype=from_dtype)
def time_astype(self, from_to_dtypes, copy):
self.df.astype(from_to_dtypes[1], copy=copy)
| AsType |
python | celery__celery | t/unit/worker/test_heartbeat.py | {
"start": 488,
"end": 798
} | class ____:
def call_repeatedly(self, secs, fun, args=(), kwargs={}):
class entry(tuple):
canceled = False
def cancel(self):
self.canceled = True
return entry((secs, fun, args, kwargs))
def cancel(self, entry):
entry.cancel()
| MockTimer |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran/ops.py | {
"start": 421,
"end": 4124
} | class ____(Config):
connector_id: str = Field(
description=(
"The Fivetran Connector ID that this op will sync. You can retrieve this "
'value from the "Setup" tab of a given connector in the Fivetran UI.'
),
)
poll_interval: float = Field(
default=DEFAULT_POLL_INTERVAL,
description="The time (in seconds) that will be waited between successive polls.",
)
poll_timeout: Optional[float] = Field(
default=None,
description=(
"The maximum time that will waited before this operation is timed out. By "
"default, this will never time out."
),
)
yield_materializations: bool = Field(
default=True,
description=(
"If True, materializations corresponding to the results of the Fivetran sync will "
"be yielded when the op executes."
),
)
asset_key_prefix: list[str] = Field(
default=["fivetran"],
description=(
"If provided and yield_materializations is True, these components will be used to "
"prefix the generated asset keys."
),
)
@op(
ins={"start_after": In(Nothing)},
out=Out(
FivetranOutput,
description=(
"Parsed json dictionary representing the details of the Fivetran connector after the"
" sync successfully completes. See the [Fivetran API"
" Docs](https://fivetran.com/docs/rest-api/connectors#retrieveconnectordetails) to see"
" detailed information on this response."
),
),
tags={COMPUTE_KIND_TAG: "fivetran"},
)
@deprecated(
breaking_version="0.30",
additional_warn_text=(
"Fivetran ops are no longer best practice and will soon be removed. "
"Use `FivetranWorkspace` resource and `@fivetran_asset` decorator instead."
),
)
def fivetran_sync_op(config: SyncConfig, fivetran: FivetranResource) -> Any:
"""Executes a Fivetran sync for a given ``connector_id``, and polls until that sync
completes, raising an error if it is unsuccessful. It outputs a FivetranOutput which contains
the details of the Fivetran connector after the sync successfully completes, as well as details
about which tables the sync updates.
It requires the use of the :py:class:`~dagster_fivetran.fivetran_resource`, which allows it to
communicate with the Fivetran API.
Examples:
.. code-block:: python
from dagster import job
from dagster_fivetran import fivetran_resource, fivetran_sync_op
my_fivetran_resource = fivetran_resource.configured(
{
"api_key": {"env": "FIVETRAN_API_KEY"},
"api_secret": {"env": "FIVETRAN_API_SECRET"},
}
)
sync_foobar = fivetran_sync_op.configured({"connector_id": "foobar"}, name="sync_foobar")
@job(resource_defs={"fivetran": my_fivetran_resource})
def my_simple_fivetran_job():
sync_foobar()
@job(resource_defs={"fivetran": my_fivetran_resource})
def my_composed_fivetran_job():
final_foobar_state = sync_foobar(start_after=some_op())
other_op(final_foobar_state)
"""
fivetran_output = fivetran.sync_and_poll(
connector_id=config.connector_id,
poll_interval=config.poll_interval,
poll_timeout=config.poll_timeout,
)
if config.yield_materializations:
yield from generate_materializations(
fivetran_output, asset_key_prefix=config.asset_key_prefix
)
yield Output(fivetran_output)
| SyncConfig |
python | pyparsing__pyparsing | tests/test_unit.py | {
"start": 394240,
"end": 394851
} | class ____(Test02_WithoutPackrat):
"""
rerun Test2 tests, now with unbounded left recursion cache
"""
def setUp(self):
ParserElement.enable_left_recursion(force=True)
def tearDown(self):
default_suite_context.restore()
def test000_assert_packrat_status(self):
print("Left-Recursion enabled:", ParserElement._left_recursion_enabled)
self.assertTrue(
ParserElement._left_recursion_enabled, "left recursion not enabled"
)
self.assertIsInstance(ParserElement.recursion_memos, pp.util.UnboundedMemo)
| Test09_WithLeftRecursionParsing |
python | kamyu104__LeetCode-Solutions | Python/check-if-two-expression-trees-are-equivalent.py | {
"start": 1560,
"end": 2571
} | class ____(object):
def checkEquivalence(self, root1, root2):
"""
:type root1: Node
:type root2: Node
:rtype: bool
"""
def add_counter(counter, prev, d, val):
if val.isalpha():
counter[ord(val)-ord('a')] += d if prev[0] == '+' else -d
prev[0] = val
def inorder_traversal(root, cb):
def traverseLeft(node, stk):
while node:
stk.append(node)
node = node.left
stk = []
traverseLeft(root, stk)
while stk:
curr = stk.pop()
cb(curr.val)
traverseLeft(curr.right, stk)
counter = collections.defaultdict(int)
inorder_traversal(root1, functools.partial(add_counter, counter, ['+'], 1))
inorder_traversal(root2, functools.partial(add_counter, counter, ['+'], -1))
return all(v == 0 for v in counter.itervalues())
| Solution2 |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_object_position06.py | {
"start": 315,
"end": 935
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png", {"object_position": 4})
worksheet.set_row(8, None, None, {"hidden": True})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ionelmc__pytest-benchmark | src/pytest_benchmark/csv.py | {
"start": 88,
"end": 1564
} | class ____:
def __init__(self, columns, sort, logger):
self.columns = columns
self.sort = sort
self.logger = logger
def render(self, output_file, groups):
output_file = Path(output_file)
output_file.parent.mkdir(exist_ok=True, parents=True)
if not output_file.suffix:
output_file = output_file.with_suffix('.csv')
with output_file.open('w') as stream:
writer = csv.writer(stream)
params = sorted(
{param for group, benchmarks in groups for benchmark in benchmarks for param in benchmark.get('params', {}) or ()}
)
writer.writerow(
[
'name',
]
+ [f'param:{p}' for p in params]
+ self.columns
)
for _, benchmarks in groups:
benchmarks = sorted(benchmarks, key=operator.itemgetter(self.sort))
for bench in benchmarks:
row = [bench.get('fullfunc', bench['fullname'])]
bench_params = bench.get('params', {})
bench_params = bench_params if bench_params is not None else {}
row.extend(bench_params.get(param, '') for param in params)
row.extend(bench[prop] for prop in self.columns)
writer.writerow(row)
self.logger.info(f'Generated csv: {output_file}', bold=True)
| CSVResults |
python | pytorch__pytorch | test/inductor/test_codecache.py | {
"start": 67448,
"end": 87604
} | class ____(TestCase):
def setUp(self):
super().setUp()
counters.clear()
PatchCaches.setUp()
CacheArtifactManager.clear()
def tearDown(self):
super().tearDown()
PatchCaches.tearDown()
def reset(self):
AOTAutogradCache.clear()
PyCodeCache.cache_clear(purge=True)
torch._dynamo.reset()
clear_caches()
def capture(self, fn, dynamic=None):
def inner(*args):
gm = None
actual_args = None
kwargs = None
def backend(gm_, args_, **kwargs_):
nonlocal gm
nonlocal actual_args
nonlocal kwargs
gm = gm_
actual_args = args_
kwargs = kwargs_
return gm
_ = torch.compile(fn, fullgraph=True, backend=backend, dynamic=dynamic)(
*args
)
return gm, actual_args, kwargs
return inner
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
@parametrize("device", (GPU_TYPE, "cpu"))
@parametrize("format", ("binary", "unpacked"))
@parametrize("dynamic", (False, True))
@parametrize("graph_partition", (False, True))
@parametrize("is_aot", (False, True))
def test_basic(
self,
device: str,
format: str,
dynamic: bool,
graph_partition: bool,
is_aot: bool,
) -> None:
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
# AOT mode does not support unpacked format
if is_aot and format == "unpacked":
raise unittest.SkipTest("AOT mode does not support unpacked format")
mod = torch.nn.Linear(1, 3, device=device)
x = torch.randn(4, 1, device=device)
if dynamic:
torch._dynamo.mark_dynamic(x, 0)
def f(x):
with torch.no_grad():
return mod(x), x.sin()
eager_out = f(x)
with (
tempfile.TemporaryDirectory() as temp_dir,
config.patch(graph_partition=graph_partition),
):
path = (
temp_dir
if format == "unpacked"
else os.path.join(temp_dir, "compiled_artifact.bin")
)
with fresh_cache():
gm, args, kwargs = self.capture(f)(x)
assert not kwargs
compiled_artifact = torch._inductor.standalone_compile(
gm, args, aot=is_aot
)
compiled_artifact.save(path=path, format=format)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
with fresh_cache():
loaded = torch._inductor.CompiledArtifact.load(path=path, format=format)
if dynamic:
concrete_args = [
4 if isinstance(a, torch.SymInt) else a for a in args
]
else:
concrete_args = args
compiled_out = loaded(*concrete_args)
self.assertEqual(eager_out, compiled_out)
if not is_aot:
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
@parametrize("dynamic", (False, True))
@parametrize("is_aot", (False, True))
def test_call_in_backend(self, dynamic: bool, is_aot: bool) -> None:
mod = torch.nn.Linear(1, 3)
x = torch.randn(4, 1)
if dynamic:
torch._dynamo.mark_dynamic(x, 0)
def f(x):
with torch.no_grad():
return mod(x)
eager_out = f(x)
def backend(gm, args, **kwargs):
return torch._inductor.standalone_compile(gm, args, aot=is_aot)
with fresh_cache():
compiled_out = torch.compile(f, fullgraph=True, backend=backend)(x)
self.assertEqual(eager_out, compiled_out)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
def test_save_in_new_path(self) -> None:
mod = torch.nn.Linear(1, 3)
x = torch.randn(4, 1)
def f(x):
with torch.no_grad():
return mod(x)
eager_out = f(x)
with tempfile.TemporaryDirectory() as temp_dir:
path = os.path.join(temp_dir, "new_dir")
with fresh_cache():
gm, args, kwargs = self.capture(f)(x)
assert not kwargs
compiled_artifact = torch._inductor.standalone_compile(gm, args)
compiled_artifact.save(path=path, format="unpacked")
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
with fresh_cache():
loaded = torch._inductor.CompiledArtifact.load(
path=path, format="unpacked"
)
compiled_out = loaded(*args)[0]
self.assertEqual(eager_out, compiled_out)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
@parametrize("device", (GPU_TYPE, "cpu"))
def test_modify_unpacked_file(self, device: str) -> None:
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
x = torch.ones(4, device=device)
def f(x):
with torch.no_grad():
return 2 * x, x.sin()
eager_out = f(x)
with tempfile.TemporaryDirectory() as temp_dir:
with fresh_cache():
gm, args, kwargs = self.capture(f)(x)
assert not kwargs
compiled_artifact = torch._inductor.standalone_compile(gm, args)
compiled_out = compiled_artifact(*args)
self.assertEqual(eager_out, compiled_out)
compiled_artifact.save(path=temp_dir, format="unpacked")
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
with fresh_cache():
# Now modify the output file and expect to see the changes
for subdir in os.listdir(temp_dir):
if subdir in ["aotautograd", "fxgraph"]:
continue
subdir_path = os.path.join(temp_dir, subdir)
for file in os.listdir(subdir_path):
file_path = os.path.join(subdir_path, file)
assert os.path.isfile(file_path)
with open(file_path) as f:
file_contents = f.read()
if device == GPU_TYPE:
file_contents = file_contents.replace(
"tmp1 = 2.0", "tmp1 = 8.0"
)
else:
assert device == "cpu"
file_contents = file_contents.replace(
"auto tmp1 = static_cast<float>(2.0);",
"auto tmp1 = static_cast<float>(8.0);",
)
with open(file_path, "w") as f:
f.write(file_contents)
loaded = torch._inductor.CompiledArtifact.load(
path=temp_dir, format="unpacked"
)
compiled_out = loaded(*args)
self.assertEqual(4 * eager_out[0], compiled_out[0])
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
@unittest.skipIf(IS_FBCODE, "torch import error")
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
def test_different_process(self):
x = torch.ones(4, 1)
def f(x):
return x.sin() * 2
gm, args, kwargs = self.capture(f)(x)
assert not kwargs
with tempfile.TemporaryDirectory() as temp_dir:
path = normalize_path_separator(
os.path.join(temp_dir, "compiled_artifact.bin")
)
with fresh_cache():
compiled_artifact = torch._inductor.standalone_compile(gm, args)
compiled_artifact.save(path=path)
script = f"""
import torch
from torch._inductor.utils import fresh_cache
arg = torch.ones(4, 1)
with fresh_cache():
loaded = torch._inductor.CompiledArtifact.load(path="{path}")
compiled_result = loaded(arg)[0]
eager_result = arg.sin() * 2
if not torch.allclose(eager_result, compiled_result, atol=0.1, rtol=0.01):
raise RuntimeError("tensors do not match")
"""
try:
subprocess.check_output(
[sys.executable, "-c", script],
stderr=subprocess.STDOUT,
cwd=os.path.dirname(os.path.realpath(__file__)),
)
except subprocess.CalledProcessError as e:
self.fail(
msg=(
"Subprocess exception while attempting to run test: "
+ e.output.decode("utf-8")
)
)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
@parametrize("is_aot", (False, True))
def test_dynamic_shapes_from_graph(self, is_aot: bool):
def f(x):
return x.shape[0] * x
x = torch.ones(3)
torch._dynamo.mark_dynamic(x, 0)
with fresh_cache():
# captured graph is lambda s0, x: x * s0
gm, args, kwargs = self.capture(f)(x)
assert not kwargs
compiled_artifact = torch._inductor.standalone_compile(
gm, args, dynamic_shapes="from_graph", aot=is_aot
)
x = torch.ones(4)
(result,) = compiled_artifact(4, x)
self.assertEqual(result, x * 4)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
@functorch_config.patch({"autograd_cache_normalize_inputs": True})
@parametrize("is_aot", (False, True))
def test_split_module(self, is_aot):
class Mod(torch.nn.Module):
def forward(self, x, a0, a1, b0, b1, c0, c1):
x = x + (a0**2) + (a1 / 2)
x = x + (b0**2) + (b1 / 2)
x = x + (c0**2) + (c1 / 2)
return x
seen = 0
splits = [4, 8]
def split(n):
nonlocal seen
if seen < splits[0]:
seen += 1
return 0
elif seen < splits[1]:
seen += 1
return 1
else:
seen += 1
return 2
def t():
return torch.randn([])
x = t()
a0 = t()
a1 = t()
b0 = t()
b1 = t()
c0 = t()
c1 = t()
example_inputs = (x, a0, a1, b0, b1, c0, c1)
gm, inps, _ = self.capture(Mod())(*example_inputs)
split = torch.fx.passes.split_module.split_module(gm, gm, split)
# Each of the split graphs only has one output.
ca0 = torch._inductor.standalone_compile(
split.submod_0, (a0, x, a1), aot=is_aot
)
ca1 = torch._inductor.standalone_compile(
split.submod_1, (b0, x, b1), aot=is_aot
)
ca2 = torch._inductor.standalone_compile(
split.submod_2, (c0, x, c1), aot=is_aot
)
y = ca0(a0, x, a1)
y = ca1(b0, y, b1)
y = ca2(c0, y, c1)
if not is_aot:
# fx graph cache doesn't run in AOT mode
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 2)
# TODO: split_module causes ca1 and ca2 to have different type annotations
# for the parameter x, so we can only AOTAutogradCache cache hit once instead of twice
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 2)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_saved"], 2)
expected = Mod()(*example_inputs)
self.assertEqual(y, expected)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
@parametrize("is_aot", (False, True))
@parametrize("config_patches", [True, False])
def test_dynamic_shapes_from_example_inputs(self, config_patches, is_aot):
def f(x):
return x.shape[0] * x
x = torch.ones(3)
torch._dynamo.mark_dynamic(x, 0)
with fresh_cache():
# captured graph is lambda s0, x: x * s0
gm, args, kwargs = self.capture(f)(x)
assert not kwargs
if config_patches:
config_patches = {"fx_graph_cache": True}
else:
config_patches = None
# specialized on example inputs
compiled_artifact = torch._inductor.standalone_compile(
gm,
(5, torch.ones(4)),
dynamic_shapes="from_example_inputs",
options={"config_patches": config_patches},
aot=is_aot,
)
x = torch.ones(4)
(result,) = compiled_artifact(3, x)
# int 5 was baked in!
self.assertEqual(result, x * 5)
# size 4 was baked in
with self.assertRaisesRegex(AssertionError, "expected size 5==4"):
x = torch.randn(5)
(result,) = compiled_artifact(4, x)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
@parametrize("is_aot", (True, False))
@parametrize("dynamic_shapes", ["from_graph", "from_example_inputs"])
def test_static_shapes(self, dynamic_shapes, is_aot):
def f(x):
return x.shape[0] * x
static_x = torch.randn(3)
with fresh_cache():
# static_gm is lambda x: x * 3
static_gm, args, kwargs = self.capture(f, dynamic=False)(static_x)
assert not kwargs
compiled_artifact = torch._inductor.standalone_compile(
static_gm, [static_x], dynamic_shapes=dynamic_shapes, aot=is_aot
)
x = torch.randn(3)
(result,) = compiled_artifact(x)
self.assertEqual(result, x * 3)
with self.assertRaisesRegex(AssertionError, "expected size 4==3"):
x = torch.randn(4)
(result,) = compiled_artifact(x)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
@parametrize("is_aot", (True, False))
@parametrize("dynamic_shapes", ["from_tracing_context", "from_graph"])
def test_backend(self, dynamic_shapes, is_aot):
def f(x):
return x.shape[0] * x
x = torch.randn(3)
torch._dynamo.mark_dynamic(x, 0)
def backend(gm, args, **kwargs):
compiled_artifact = torch._inductor.standalone_compile(
gm, args, dynamic_shapes=dynamic_shapes, aot=is_aot
)
y = torch.randn(4)
(result,) = compiled_artifact(4, y)
self.assertEqual(result, y * 4)
return compiled_artifact
torch._dynamo.reset()
_ = torch.compile(f, backend=backend)(x)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
@parametrize("is_aot", (True, False))
def test_backend_dynamic_shapes_from_example_inputs(self, is_aot):
def f(x):
return x.shape[0] * x
x = torch.ones(4)
torch._dynamo.mark_dynamic(x, 0)
def backend(gm, args, **kwargs):
compiled_artifact = torch._inductor.standalone_compile(
gm, [5, torch.ones(4)], dynamic_shapes="from_example_inputs", aot=is_aot
)
y = torch.ones(4)
(result,) = compiled_artifact(4, y)
# 5 was baked in
self.assertEqual(result, y * 5)
# shape of y was baked in
with self.assertRaisesRegex(AssertionError, "expected size 5==4"):
y = torch.ones(5)
(result,) = compiled_artifact(4, y)
return compiled_artifact
torch._dynamo.reset()
_ = torch.compile(f, backend=backend)(x)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@functorch_config.patch({"enable_autograd_cache": True})
@parametrize(
"dynamic_shapes", ["from_tracing_context", "from_graph", "from_example_inputs"]
)
def test_backend_static_shapes(self, dynamic_shapes):
# on static_x, all of these options should produce a static graph,
# but it's a bit hard to tell, so these are just smoke tests.
static_x = torch.randn(3)
def f(x):
return x.shape[0] * x
def backend(gm, args, **kwargs):
return torch._inductor.standalone_compile(
gm, args, dynamic_shapes=dynamic_shapes
)
result = torch.compile(f, backend=backend)(static_x)
self.assertEqual(result, static_x * 3)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
def test_custom_pass_handling(self):
"""
Test that properly-registered custom hooks allow caching.
"""
class TestCustomGraphPass(CustomGraphPass):
def __call__(self, graph: torch.fx.graph.Graph) -> None:
return None
def uuid(self) -> Optional[Union[bytes, str]]:
return "uuid"
def fn(a, b):
return torch.mm(a, b)
a = torch.rand(8, 32, device="cpu")
b = torch.rand(32, 8, device="cpu")
compiled_fn = torch.compile(fn)
# The cache should be bypassed if a custom hook doesn't use CustomGraphPass.
with config.patch({"post_grad_custom_pre_pass": lambda x: x}):
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
# With proper usage, we expect normal caching.
custom_pass = TestCustomGraphPass()
with config.patch(
{
"post_grad_custom_pre_pass": custom_pass,
"post_grad_custom_post_pass": custom_pass,
"joint_custom_pre_pass": custom_pass,
"joint_custom_post_pass": custom_pass,
}
):
self.reset()
counters.clear()
# Cache miss
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.reset()
counters.clear()
# Cache hit
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
| TestStandaloneCompile |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 2760,
"end": 2864
} | class ____(AirflowException):
"""Raise when name of the stats is invalid."""
| InvalidStatsNameException |
python | gevent__gevent | src/gevent/pywsgi.py | {
"start": 53298,
"end": 56446
} | class ____(object):
"""
An adapter for :class:`logging.Logger` instances
to let them be used with :class:`WSGIServer`.
.. warning:: Unless the entire process is monkey-patched at a very
early part of the lifecycle (before logging is configured),
loggers are likely to not be gevent-cooperative. For example,
the socket and syslog handlers use the socket module in a way
that can block, and most handlers acquire threading locks.
.. warning:: It *may* be possible for the logging functions to be
called in the :class:`gevent.Hub` greenlet. Code running in the
hub greenlet cannot use any gevent blocking functions without triggering
a ``LoopExit``.
.. versionadded:: 1.1a3
.. versionchanged:: 1.1b6
Attributes not present on this object are proxied to the underlying
logger instance. This permits using custom :class:`~logging.Logger`
subclasses (or indeed, even duck-typed objects).
.. versionchanged:: 1.1
Strip trailing newline characters on the message passed to :meth:`write`
because log handlers will usually add one themselves.
"""
# gevent avoids importing and using logging because importing it and
# creating loggers creates native locks unless monkey-patched.
__slots__ = ('_logger', '_level')
def __init__(self, logger, level=20):
"""
Write information to the *logger* at the given *level* (default to INFO).
"""
self._logger = logger
self._level = level
def write(self, msg):
if msg and msg.endswith('\n'):
msg = msg[:-1]
self._logger.log(self._level, msg)
def flush(self):
"No-op; required to be a file-like object"
def writelines(self, lines):
for line in lines:
self.write(line)
def __getattr__(self, name):
return getattr(self._logger, name)
def __setattr__(self, name, value):
if name not in LoggingLogAdapter.__slots__:
setattr(self._logger, name, value)
else:
object.__setattr__(self, name, value)
def __delattr__(self, name):
delattr(self._logger, name)
####
## Environ classes.
# These subclass dict. They could subclass collections.UserDict on
# 3.3+ and proxy to the underlying real dict to avoid a copy if we
# have to print them (on 2.7 it's slightly more complicated to be an
# instance of collections.MutableMapping; UserDict.UserDict isn't.)
# Then we could have either the WSGIHandler.get_environ or the
# WSGIServer.get_environ return one of these proxies, and
# WSGIHandler.run_application would know to access the `environ.data`
# attribute to be able to pass the *real* dict to the application
# (because PEP3333 requires no subclasses, only actual dict objects;
# wsgiref.validator and webob.Request both enforce this). This has the
# advantage of not being fragile if anybody else tries to print/log
# self.environ (and not requiring a copy). However, if there are any
# subclasses of Handler or Server, this could break if they don't know
# to return this type.
####
| LoggingLogAdapter |
python | run-llama__llama_index | llama-index-core/llama_index/core/schema.py | {
"start": 22105,
"end": 25818
} | class ____(BaseNode):
"""
Provided for backward compatibility.
Note: we keep the field with the typo "seperator" to maintain backward compatibility for
serialized objects.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Make TextNode forward-compatible with Node by supporting 'text_resource' in the constructor."""
if "text_resource" in kwargs:
tr = kwargs.pop("text_resource")
if isinstance(tr, MediaResource):
kwargs["text"] = tr.text
else:
kwargs["text"] = tr["text"]
super().__init__(*args, **kwargs)
text: str = Field(default="", description="Text content of the node.")
mimetype: str = Field(
default="text/plain", description="MIME type of the node content."
)
start_char_idx: Optional[int] = Field(
default=None, description="Start char index of the node."
)
end_char_idx: Optional[int] = Field(
default=None, description="End char index of the node."
)
metadata_seperator: str = Field(
default="\n",
description="Separator between metadata fields when converting to string.",
)
text_template: str = Field(
default=DEFAULT_TEXT_NODE_TMPL,
description=(
"Template for how text is formatted, with {content} and "
"{metadata_str} placeholders."
),
)
@classmethod
def class_name(cls) -> str:
return "TextNode"
@property
def hash(self) -> str:
doc_identity = str(self.text) + str(self.metadata)
return str(sha256(doc_identity.encode("utf-8", "surrogatepass")).hexdigest())
@classmethod
def get_type(cls) -> str:
"""Get Object type."""
return ObjectType.TEXT
def get_content(self, metadata_mode: MetadataMode = MetadataMode.NONE) -> str:
"""Get object content."""
metadata_str = self.get_metadata_str(mode=metadata_mode).strip()
if metadata_mode == MetadataMode.NONE or not metadata_str:
return self.text
return self.text_template.format(
content=self.text, metadata_str=metadata_str
).strip()
def get_metadata_str(self, mode: MetadataMode = MetadataMode.ALL) -> str:
"""Metadata info string."""
if mode == MetadataMode.NONE:
return ""
usable_metadata_keys = set(self.metadata.keys())
if mode == MetadataMode.LLM:
for key in self.excluded_llm_metadata_keys:
if key in usable_metadata_keys:
usable_metadata_keys.remove(key)
elif mode == MetadataMode.EMBED:
for key in self.excluded_embed_metadata_keys:
if key in usable_metadata_keys:
usable_metadata_keys.remove(key)
return self.metadata_seperator.join(
[
self.metadata_template.format(key=key, value=str(value))
for key, value in self.metadata.items()
if key in usable_metadata_keys
]
)
def set_content(self, value: str) -> None:
"""Set the content of the node."""
self.text = value
def get_node_info(self) -> Dict[str, Any]:
"""Get node info."""
return {"start": self.start_char_idx, "end": self.end_char_idx}
def get_text(self) -> str:
return self.get_content(metadata_mode=MetadataMode.NONE)
@property
@deprecated(
version="0.12.2",
reason="'node_info' is deprecated, use 'get_node_info' instead.",
)
def node_info(self) -> Dict[str, Any]:
"""Deprecated: Get node info."""
return self.get_node_info()
| TextNode |
python | huggingface__transformers | utils/modular_model_converter.py | {
"start": 24709,
"end": 35283
} | class ____(CSTVisitor, ABC):
"""An abstract visitor class which analyses a module, creating a mapping of dependencies for classes, functions and assignments.
Class dependencies are computed with `compute_class_dependencies()`, while function and assignment dependencies are stored in
`self.object_recursive_dependency_mapping` (can be computed by `_compute_recursive_object_dependencies()`).
It defines common visiting patterns (i.e. common visit_xxx/leave_xxx functions) between the modular file and the
modeling files that will be visited.
"""
METADATA_DEPENDENCIES = (ParentNodeProvider, PositionProvider)
def __init__(self, python_module: cst.Module):
# fmt: off
self.python_module: cst.Module = python_module # original cst.Module being visited
self.classes: dict[str, cst.ClassDef] = {} # mapping from class names to Nodes (it will be ordered by default!!)
self.imports = [] # stores all import statements
self.functions: dict[str, cst.FunctionDef] = {} # mapping of global scope function names to Nodes
self.object_dependency_mapping = defaultdict(set) # immediate function/assignment dependency mapping (i.e. dependencies immediately in the function/assignment definition)
self.assignments: dict[str, cst.SimpleStatementLine] = {} # mapping of global assignments names to Nodes
self.current_function = None # this keeps track of the current module-scope function
self.current_class = None # this keeps track of the current module-scope class
self.current_assignment = None # this keeps track of the current module-scope assignment
# this keeps track of objects imported from modeling files (`from .configuration import Config`) -> `Config` should not be a dependency
self.objects_imported_from_modeling = set()
# regex pattern joining every possible file type
self.match_patterns = "|".join(ALL_FILE_TYPES)
# fmt: on
def visit_ImportFrom(self, node):
"""This keeps track of objects imported from neighbor modeling files (e.g. in `modeling_xxx.py, we have
`from .configuration_xxx import Config`, then `Config` should be recorded as it is not a dependency that needs
to be added (because it will be part of the imports)"""
# `node.module` is None for fully relative imports, e.g. `from ... import initialization as init`
import_module = self.python_module.code_for_node(node.module) if node.module is not None else ""
import_statement = "." * len(node.relative) + import_module
if re.search(rf"^\.({self.match_patterns}).*", import_statement):
for imported_object in node.names:
# If an alias is present, we record it and not the original name
if imported_object.evaluated_alias is not None:
self.objects_imported_from_modeling.add(imported_object.evaluated_alias)
else:
self.objects_imported_from_modeling.add(imported_object.evaluated_name)
def visit_SimpleStatementLine(self, node):
"""
Global Assigns like `GEMMA_INPUT_DOCSTRING = 'THIS IS THE INPUT'` and all import statements
are extracted and saved in their corresponding dict. They are then used when updating dependency mappings.
"""
parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node)
simple_top_level_assign_structure = m.SimpleStatementLine(
body=[m.Assign(targets=[m.AssignTarget(target=m.Name())])]
)
simple_top_level_variable_indexing = m.SimpleStatementLine(
body=[m.Assign(targets=[m.AssignTarget(target=m.Subscript(value=m.Name()) | m.Attribute(value=m.Name()))])]
)
if m.matches(parent_node, m.Module()):
if m.matches(node, simple_top_level_assign_structure):
left_hand_side = node.body[0].targets[0].target.value
self.current_assignment = left_hand_side
self.assignments[left_hand_side] = node
# This corresponds to a global variable being indexed or having an attribute look-up
elif m.matches(node, simple_top_level_variable_indexing):
indexed_variable = node.body[0].targets[0].target.value.value
# We should follow any dependencies relative to the variable being indexed
self.current_assignment = indexed_variable
# The indexing node should be directly added as a dependency of the indexed variable (register the node with a "fake" name)
node_name = self.python_module.code_for_node(node)
self.assignments[node_name] = node
self.object_dependency_mapping[indexed_variable].add(node_name)
elif m.matches(node, m.SimpleStatementLine(body=[m.Import() | m.ImportFrom()])):
self.imports.append(node)
def leave_SimpleStatementLine(self, node):
# No need to check for the parent here -> everytime we exit one, it should be None anyway independently of where the
# SimpleStatement is located
self.current_assignment = None
def visit_FunctionDef(self, node):
parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node)
if m.matches(parent_node, m.Module()):
self.current_function = node.name.value
self.functions[node.name.value] = node
def leave_FunctionDef(self, node):
parent_node = self.get_metadata(cst.metadata.ParentNodeProvider, node)
if m.matches(parent_node, m.Module()):
self.current_function = None
def visit_If(self, node):
# If we are inside a function, do not add the import to the list of imports
if self.current_function is None and self.current_class is None:
for stmt in node.body.body:
if m.matches(stmt, m.SimpleStatementLine(body=[m.ImportFrom() | m.Import()])):
self.imports.append(node)
def visit_ClassDef(self, node: ClassDef) -> None:
"""Record class nodes to create their dependencies at the end."""
self.classes[node.name.value] = node
self.current_class = node.name.value
def leave_ClassDef(self, node):
self.current_class = None
def visit_Name(self, node: cst.Call):
"""This is used to create a mapping from module-scope functions and assignments to objects used inside them."""
if self.current_function is not None:
self.object_dependency_mapping[self.current_function].add(node.value)
if self.current_assignment is not None:
self.object_dependency_mapping[self.current_assignment].add(node.value)
def leave_Module(self, node):
"""When leaving the module, we store the position of each global scoped node to allow sorting the dependencies
based on their position in the code later. We use the PositionProvider metadata wrapper for this.
We also make sure to update `self.object_dependency_mapping` so that it contains only names recorded in
`self.global_nodes`.
"""
# assign all nodes
self.global_nodes = {**self.assignments, **self.classes, **self.functions}
# now sort the class dependency_mapping based on the position of the nodes
self.start_lines = {}
for id, node in self.global_nodes.items():
self.start_lines[id] = self.get_metadata(cst.metadata.PositionProvider, node).start.line
def _restrict_dependencies_to_known_entities(self):
"""Since we added every Name as part of `self.object_dependency_mapping`, we need to remove those that
are not part of the recorded objects in `self.global_nodes` (i.e. built-in variables, imports, etc).
This should be called only after all merging operations have been finalized!!"""
global_objects = set(self.global_nodes.keys())
for object_name, dependencies in self.object_dependency_mapping.items():
self.object_dependency_mapping[object_name] = {dep for dep in dependencies if dep in global_objects}
def _compute_recursive_object_dependencies(self) -> dict[str, set]:
"""Based on immediate dependency mapping, create the recursive dependency mapping. For example, given the
following file:
```
def foo():
pass
def bar():
foo()
def test():
bar()
```
this visitor can only record immediate dependencies, i.e. it will record the following
`self.object_dependency_mapping = {"test": {"bar"}, "bar": {"foo}}`. This function is used to create
the recursive mapping, i.e. `recursive_dependencies = {"test": {"bar", "foo"}, "bar": {"foo}}`.
"""
recursive_dependencies = {}
for object_name in self.object_dependency_mapping:
all_dependencies = find_all_dependencies(self.object_dependency_mapping, start_entity=object_name)
recursive_dependencies[object_name] = all_dependencies
return recursive_dependencies
def augment_dependencies(self, dependencies: set[str]) -> set[str]:
"""For a set of `dependencies`, augment them by adding all potential dependencies of the **functions** and
**assignments** present in the `dependencies`.
"""
new_dependencies = dependencies.copy()
# Go through the set of dependencies
for dep in tuple(dependencies):
if dep in self.object_recursive_dependency_mapping:
new_dependencies.update(self.object_recursive_dependency_mapping[dep])
return new_dependencies
def compute_class_dependencies(self):
"""For each visited class, find its dependencies based on visiting the current file + potential merged dependencies."""
self.class_dependency_mapping = {}
for class_name, class_node in self.classes.items():
dependencies = dependencies_for_class_node(class_node, set(self.global_nodes.keys()))
# Correctly augment class dependencies with all needed objects
self.class_dependency_mapping[class_name] = self.augment_dependencies(dependencies)
@abstractmethod
def compute_relative_order(self, missing_dependencies: set) -> dict[str, int]:
raise NotImplementedError
| ModuleMapper |
python | keras-team__keras | keras/src/backend/torch/trainer.py | {
"start": 570,
"end": 17859
} | class ____(base_trainer.Trainer):
def __init__(self):
super().__init__()
self.train_function = None
self.test_function = None
self.predict_function = None
def _should_torch_compile(self):
# require torch>=2.1.0 to enable dynamo since it
# includes many improvements/fixes to torch.compile()
# TODO eventually we want to get rid of this when
# torch is upgraded to >=2.1 (from 2.0.1) in g3
if self.jit_compile and parse(torch.__version__) < parse("2.1.0"):
warnings.warn(
"Please upgrade to torch>=2.1.0 for `jit_compile=True` "
"to take effect. Using `jit_compile=False`"
)
self.jit_compile = False
return self.jit_compile
def train_step(self, data):
x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
# Compute predictions
if self._call_has_training_arg:
y_pred = self(x, training=True)
else:
y_pred = self(x)
# Call torch.nn.Module.zero_grad() to clear the leftover gradients
# for the weights from the previous train step.
self.zero_grad()
loss = self._compute_loss(
x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=True
)
self._loss_tracker.update_state(
loss,
sample_weight=next(
i for i in tree.flatten(x) if i is not None
).shape[0],
)
if self.optimizer is not None:
loss = self.optimizer.scale_loss(loss)
# Compute gradients
if self.trainable_weights:
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
trainable_weights = self.trainable_weights[:]
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
self.optimizer.apply(gradients, trainable_weights)
else:
warnings.warn("The model does not have any trainable weights.")
return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
def test_step(self, data):
(
x,
y,
sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(data)
if self._call_has_training_arg:
y_pred = self(x, training=False)
else:
y_pred = self(x)
loss = self._compute_loss(
x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=False
)
self._loss_tracker.update_state(
loss,
sample_weight=next(
i for i in tree.flatten(x) if i is not None
).shape[0],
)
return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
def predict_step(self, data):
x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
if self._call_has_training_arg:
y_pred = self(x, training=False)
else:
y_pred = self(x)
return y_pred
def make_train_function(self, force=False):
if self.train_function is not None and not force:
return self.train_function
if self.steps_per_execution > 1:
raise ValueError(
"`steps_per_execution` must be 1 with the PyTorch backend. "
f"Received: steps_per_execution={self.steps_per_execution}"
)
def one_step_on_data(data):
"""Runs a single training step on a batch of data."""
data = data[0]
return self.train_step(data)
if self._should_torch_compile():
self.train_function = torch.compile(one_step_on_data)
else:
self.train_function = one_step_on_data
def make_test_function(self, force=False):
if self.test_function is not None and not force:
return self.test_function
if self.steps_per_execution > 1:
raise ValueError(
"`steps_per_execution` must be 1 with the PyTorch backend. "
f"Received: steps_per_execution={self.steps_per_execution}"
)
def one_step_on_data(data):
"""Runs a single test step on a batch of data."""
data = data[0]
with torch.no_grad():
return self.test_step(data)
if self._should_torch_compile():
self.test_function = torch.compile(one_step_on_data)
else:
self.test_function = one_step_on_data
def make_predict_function(self, force=False):
if self.predict_function is not None and not force:
return self.predict_function
if self.steps_per_execution > 1:
raise ValueError(
"`steps_per_execution` must be 1 with the PyTorch backend. "
f"Received: steps_per_execution={self.steps_per_execution}"
)
def one_step_on_data(data):
"""Runs a predict test step on a batch of data."""
data = data[0]
with torch.no_grad():
return self.predict_step(data)
if self._should_torch_compile():
self.predict_function = torch.compile(one_step_on_data)
else:
self.predict_function = one_step_on_data
@traceback_utils.filter_traceback
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
):
if not self.compiled:
raise ValueError(
"You must call `compile()` before calling `fit()`."
)
# Possibly cap epochs for debugging runs.
max_epochs = config.max_epochs()
if max_epochs and max_epochs < epochs:
warnings.warn("Limiting epochs to %d" % max_epochs)
epochs = max_epochs
# TODO: respect compiled trainable state
self._eval_epoch_iterator = None
if validation_split and validation_data is None:
# Create the validation data using the training data. Only supported
# for TF/numpy/jax arrays.
# TODO: Support torch tensors for validation data.
(
(x, y, sample_weight),
validation_data,
) = array_slicing.train_validation_split(
(x, y, sample_weight), validation_split=validation_split
)
if validation_data is not None:
(
val_x,
val_y,
val_sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(validation_data)
# Create an iterator that yields batches for one epoch.
epoch_iterator = TorchEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
steps_per_execution=self.steps_per_execution,
)
self._symbolic_build(iterator=epoch_iterator)
epoch_iterator.reset()
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=epochs,
steps=epoch_iterator.num_batches,
model=self,
)
self.stop_training = False
training_logs = {}
self.make_train_function()
callbacks.on_train_begin()
initial_epoch = self._initial_epoch or initial_epoch
for epoch in range(initial_epoch, epochs):
self.reset_metrics()
callbacks.on_epoch_begin(epoch)
# Switch the torch Module to training mode. Inform torch layers to
# do training behavior in case the user did not use `self.training`
# when implementing a custom layer with torch layers.
self.train()
logs = {}
for begin_step, end_step, data in epoch_iterator:
# Callbacks
callbacks.on_train_batch_begin(begin_step)
logs = self.train_function(data)
# Callbacks
callbacks.on_train_batch_end(end_step, logs)
if self.stop_training:
break
# Override with model metrics instead of last step logs if needed.
epoch_logs = dict(self._get_metrics_result_or_logs(logs))
# Switch the torch Module back to testing mode.
self.eval()
# Run validation.
if validation_data is not None and self._should_eval(
epoch, validation_freq
):
# Create TorchEpochIterator for evaluation and cache it.
if getattr(self, "_eval_epoch_iterator", None) is None:
self._eval_epoch_iterator = TorchEpochIterator(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps_per_execution=self.steps_per_execution,
steps_per_epoch=validation_steps,
shuffle=False,
)
val_logs = self.evaluate(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps=validation_steps,
callbacks=callbacks,
return_dict=True,
_use_cached_eval_dataset=True,
)
val_logs = {
f"val_{name}": val for name, val in val_logs.items()
}
epoch_logs.update(val_logs)
callbacks.on_epoch_end(epoch, epoch_logs)
training_logs = epoch_logs
if self.stop_training:
break
if (
isinstance(self.optimizer, optimizers_module.Optimizer)
and epochs > 0
):
self.optimizer.finalize_variable_values(self.trainable_weights)
# If _eval_epoch_iterator exists, delete it after all epochs are done.
if getattr(self, "_eval_epoch_iterator", None) is not None:
del self._eval_epoch_iterator
callbacks.on_train_end(logs=training_logs)
return self.history
@traceback_utils.filter_traceback
def evaluate(
self,
x=None,
y=None,
batch_size=None,
verbose="auto",
sample_weight=None,
steps=None,
callbacks=None,
return_dict=False,
**kwargs,
):
# TODO: respect compiled trainable state
use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False)
if kwargs:
raise ValueError(f"Arguments not recognized: {kwargs}")
if use_cached_eval_dataset:
epoch_iterator = self._eval_epoch_iterator
else:
# Create an iterator that yields batches of input/target data.
epoch_iterator = TorchEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
self._symbolic_build(iterator=epoch_iterator)
epoch_iterator.reset()
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
# Switch the torch Module back to testing mode.
self.eval()
self.make_test_function()
self.stop_evaluating = False
callbacks.on_test_begin()
logs = {}
self.reset_metrics()
for begin_step, end_step, data in epoch_iterator:
callbacks.on_test_batch_begin(begin_step)
logs = self.test_function(data)
callbacks.on_test_batch_end(end_step, logs)
if self.stop_evaluating:
break
logs = self._get_metrics_result_or_logs(logs)
callbacks.on_test_end(logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
@traceback_utils.filter_traceback
def predict(
self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
):
# Create an iterator that yields batches of input data.
epoch_iterator = TorchEpochIterator(
x=x,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
def append_to_outputs(batch_outputs, outputs):
if outputs is None:
outputs = tree.map_structure(
lambda batch_output: [batch_output],
batch_outputs,
)
else:
tree.map_structure_up_to(
batch_outputs,
lambda output, batch_output: output.append(batch_output),
outputs,
batch_outputs,
)
return outputs
# Switch the torch Module back to testing mode.
self.eval()
self.make_predict_function()
self.stop_predicting = False
callbacks.on_predict_begin()
outputs = None
for begin_step, end_step, data in epoch_iterator:
callbacks.on_predict_batch_begin(begin_step)
batch_outputs = self.predict_function(data)
outputs = append_to_outputs(batch_outputs, outputs)
callbacks.on_predict_batch_end(end_step, {"outputs": batch_outputs})
if self.stop_predicting:
break
callbacks.on_predict_end()
outputs = tree.map_structure(backend.convert_to_numpy, outputs)
return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs)
def train_on_batch(
self,
x,
y=None,
sample_weight=None,
class_weight=None,
return_dict=False,
):
self._assert_compile_called("train_on_batch")
if class_weight is not None:
if sample_weight is not None:
raise ValueError(
"Arguments `sample_weight` and `class_weight` "
"cannot be specified at the same time. "
f"Received: sample_weight={sample_weight}, "
f"class_weight={class_weight}"
)
sample_weight = data_adapter_utils.class_weight_to_sample_weights(
y, class_weight
)
data = (x, y, sample_weight)
# Maybe build model
self._symbolic_build(data_batch=data)
self.make_train_function()
logs = self.train_function([data])
logs = tree.map_structure(lambda x: np.array(x), logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def test_on_batch(
self,
x,
y=None,
sample_weight=None,
return_dict=False,
):
self._assert_compile_called("test_on_batch")
data = (x, y, sample_weight)
# Maybe build model
self._symbolic_build(data_batch=data)
self.make_test_function()
logs = self.test_function([data])
logs = tree.map_structure(lambda x: np.array(x), logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def predict_on_batch(self, x):
self.make_predict_function()
batch_outputs = self.predict_function([(x,)])
batch_outputs = tree.map_structure(
backend.convert_to_numpy, batch_outputs
)
return batch_outputs
| TorchTrainer |
python | PyCQA__pylint | tests/functional/u/unsubscriptable_value.py | {
"start": 2158,
"end": 2405
} | class ____:
def __init__(self):
self.ala = {i for i in range(10)}
self.bala = [i for i in range(10)]
self.portocala = None
def test_unsubscriptable(self):
self.bala[0]
self.portocala[0]
| AbstractClass |
python | wandb__wandb | wandb/sdk/internal/tb_watcher.py | {
"start": 12513,
"end": 16652
} | class ____:
"""Consume tfevents from a priority queue.
There should always only be one of these per run_manager. We wait for 10 seconds of
queued events to reduce the chance of multiple tfevent files triggering out of order
steps.
"""
def __init__(
self,
tbwatcher: TBWatcher,
queue: "PriorityQueue",
run_proto: "RunRecord",
settings: "SettingsStatic",
delay: int = 10,
) -> None:
self._tbwatcher = tbwatcher
self._queue = queue
self._thread = threading.Thread(target=self._thread_except_body)
self._shutdown = threading.Event()
self.tb_history = TBHistory()
self._delay = delay
# This is a bit of a hack to get file saving to work as it does in the user
# process. Since we don't have a real run object, we have to define the
# datatypes callback ourselves.
def datatypes_cb(fname: filesystem.GlobStr) -> None:
files: FilesDict = dict(files=[(fname, "now")])
self._tbwatcher._interface.publish_files(files)
# this is only used for logging artifacts
self._internal_run = internal_run.InternalRun(run_proto, settings, datatypes_cb)
self._internal_run._set_internal_run_interface(self._tbwatcher._interface)
def start(self) -> None:
self._start_time = time.time()
self._thread.start()
def finish(self) -> None:
self._delay = 0
self._shutdown.set()
self._thread.join()
while not self._queue.empty():
event = self._queue.get(True, 1)
if event:
self._handle_event(event, history=self.tb_history)
items = self.tb_history._get_and_reset()
for item in items:
self._save_row(
item,
)
def _thread_except_body(self) -> None:
try:
self._thread_body()
except Exception:
logger.exception("generic exception in TBEventConsumer thread")
raise
def _thread_body(self) -> None:
while True:
try:
event = self._queue.get(True, 1)
# Wait self._delay seconds from consumer start before logging events
if (
time.time() < self._start_time + self._delay
and not self._shutdown.is_set()
):
self._queue.put(event)
time.sleep(0.1)
continue
except queue.Empty:
event = None
if self._shutdown.is_set():
break
if event:
self._handle_event(event, history=self.tb_history)
items = self.tb_history._get_and_reset()
for item in items:
self._save_row(
item,
)
# flush uncommitted data
self.tb_history._flush()
items = self.tb_history._get_and_reset()
for item in items:
self._save_row(item)
def _handle_event(
self, event: "ProtoEvent", history: Optional["TBHistory"] = None
) -> None:
wandb.tensorboard._log( # type: ignore
event.event,
step=event.event.step,
namespace=event.namespace,
history=history,
)
def _save_row(self, row: "HistoryDict") -> None:
chart_keys = set()
for k, v in row.items():
if isinstance(v, CustomChart):
chart_keys.add(k)
v.set_key(k)
self._tbwatcher._interface.publish_config(
key=v.spec.config_key,
val=v.spec.config_value,
)
for k in chart_keys:
chart = row.pop(k)
if isinstance(chart, CustomChart):
row[chart.spec.table_key] = chart.table
self._tbwatcher._interface.publish_history(
self._internal_run,
row,
publish_step=False,
)
| TBEventConsumer |
python | jazzband__prettytable | tests/test_prettytable.py | {
"start": 37774,
"end": 40506
} | class ____:
def test_csv_output(self, helper_table: PrettyTable) -> None:
assert helper_table.get_csv_string(delimiter="\t", header=False) == (
"1\tvalue 1\tvalue2\tvalue3\r\n"
"4\tvalue 4\tvalue5\tvalue6\r\n"
"7\tvalue 7\tvalue8\tvalue9\r\n"
)
assert helper_table.get_csv_string() == (
",Field 1,Field 2,Field 3\r\n"
"1,value 1,value2,value3\r\n"
"4,value 4,value5,value6\r\n"
"7,value 7,value8,value9\r\n"
)
options = {"fields": ["Field 1", "Field 3"]}
assert helper_table.get_csv_string(**options) == (
"Field 1,Field 3\r\n"
"value 1,value3\r\n"
"value 4,value6\r\n"
"value 7,value9\r\n"
)
def test_paginate(city_data: PrettyTable) -> None:
expected_page_1 = """
+-----------+------+------------+-----------------+
| City name | Area | Population | Annual Rainfall |
+-----------+------+------------+-----------------+
| Adelaide | 1295 | 1158259 | 600.5 |
| Brisbane | 5905 | 1857594 | 1146.4 |
| Darwin | 112 | 120900 | 1714.7 |
| Hobart | 1357 | 205556 | 619.5 |
+-----------+------+------------+-----------------+""".strip()
expected_page_2 = """
+-----------+------+------------+-----------------+
| City name | Area | Population | Annual Rainfall |
+-----------+------+------------+-----------------+
| Sydney | 2058 | 4336374 | 1214.8 |
| Melbourne | 1566 | 3806092 | 646.9 |
| Perth | 5386 | 1554769 | 869.4 |
+-----------+------+------------+-----------------+""".strip()
paginated = city_data.paginate(page_length=4).strip()
assert paginated.startswith(expected_page_1)
assert "\f" in paginated
assert paginated.endswith(expected_page_2)
paginated = city_data.paginate(page_length=4, line_break="\n")
assert "\f" not in paginated
assert "\n" in paginated
def test_autoindex(city_data: PrettyTable) -> None:
"""Testing that a table with a custom index row is
equal to the one produced by the function
.add_autoindex()
"""
city_data.field_names = CITY_DATA_HEADER
city_data.add_autoindex(fieldname="Test")
table2 = PrettyTable()
table2.field_names = ["Test"] + CITY_DATA_HEADER
for idx, row in enumerate(CITY_DATA):
table2.add_row([idx + 1] + row)
assert str(city_data) == str(table2)
@pytest.fixture(scope="function")
def unpadded_pt() -> PrettyTable:
table = PrettyTable(header=False, padding_width=0)
table.add_row(list("abc"))
table.add_row(list("def"))
table.add_row(list("g.."))
return table
| TestCsvOutput |
python | chroma-core__chroma | chromadb/test/property/test_filtering.py | {
"start": 5318,
"end": 28351
} | class ____(WhereExpr):
"""
Wraps old-style where/where_document dicts for testing.
Converts where_document to use #document field and combines with where using $and.
"""
def __init__(self, where: Optional[Where] = None, where_document: Optional[WhereDocument] = None):
self.where = where
self.where_document = where_document
def _convert_where_document(self, where_doc: WhereDocument) -> Dict[str, Any]:
"""Convert where_document filters to use #document field."""
if not where_doc:
return {}
# Handle logical operators recursively
if "$and" in where_doc:
and_clauses = where_doc["$and"]
if isinstance(and_clauses, list):
return {"$and": [self._convert_where_document(clause) for clause in and_clauses]}
elif "$or" in where_doc:
or_clauses = where_doc["$or"]
if isinstance(or_clauses, list):
return {"$or": [self._convert_where_document(clause) for clause in or_clauses]}
# Handle document operators - convert to #document field
if "$contains" in where_doc:
return {"#document": {"$contains": where_doc["$contains"]}}
elif "$not_contains" in where_doc:
return {"#document": {"$not_contains": where_doc["$not_contains"]}}
if "$regex" in where_doc:
return {"#document": {"$regex": where_doc["$regex"]}}
elif "$not_regex" in where_doc:
return {"#document": {"$not_regex": where_doc["$not_regex"]}}
# Cast to dict for return
return cast(Dict[str, Any], where_doc)
def to_dict(self) -> Dict[str, Any]:
# Combine where and where_document into a single where clause
combined_where = None
# Build list of conditions to AND together
conditions = []
if self.where:
conditions.append(self.where)
if self.where_document:
# Convert where_document to use #document field
converted_doc_filter = self._convert_where_document(self.where_document)
if converted_doc_filter:
conditions.append(converted_doc_filter)
# Combine conditions with $and if needed
if len(conditions) == 1:
combined_where = conditions[0]
elif len(conditions) > 1:
combined_where = {"$and": conditions}
# Return the combined where clause directly
if combined_where:
return combined_where
return {}
def _search_with_filter(
collection: Collection,
filter: strategies.Filter,
query_embedding: Optional[Embedding] = None,
n_results: int = 10
) -> List[str]:
"""Use the search API to retrieve results with filters - test helper function."""
# Build Search object
search = Search()
# Add KNN if embedding provided
if query_embedding is not None:
search = search.rank(Knn(query=query_embedding)) # type: ignore[arg-type]
# Add filters using the LegacyWhereWrapper
if filter.get("where") or filter.get("where_document") or filter.get("ids"):
# Convert ids to list if it's a string
ids_val = filter.get("ids")
if isinstance(ids_val, str):
ids_val = [ids_val]
# Build the where clause
where_expr = None
# Add legacy where/where_document if present
if filter.get("where") or filter.get("where_document"):
wrapper = LegacyWhereWrapper(
where=filter.get("where"),
where_document=filter.get("where_document"),
)
if wrapper.to_dict(): # Only use if it has content
where_expr = wrapper
# Add ID filter if present
if ids_val:
id_expr = Key.ID.is_in(ids_val)
if where_expr:
where_expr = where_expr & id_expr # type: ignore[assignment]
else:
where_expr = id_expr
# Apply the where clause if we have one
if where_expr:
search = search.where(where_expr)
# Set limit and select only IDs
search = search.limit(n_results).select("id")
# Execute search and return IDs
result = collection.search(search)
return result["ids"][0] if result["ids"] else []
collection_st = st.shared(
strategies.collections(add_filterable_data=True, with_hnsw_params=True),
key="coll",
)
recordset_st = st.shared(
strategies.recordsets(collection_st, max_size=1000), key="recordset"
)
@settings(
deadline=90000,
suppress_health_check=[
HealthCheck.function_scoped_fixture,
HealthCheck.large_base_example,
HealthCheck.filter_too_much,
],
) # type: ignore
@given(
collection=collection_st,
record_set=recordset_st,
filters=st.lists(strategies.filters(collection_st, recordset_st), min_size=1),
should_compact=st.booleans(),
)
def test_filterable_metadata_get(
caplog,
client: ClientAPI,
collection: strategies.Collection,
record_set,
filters,
should_compact: bool,
) -> None:
caplog.set_level(logging.ERROR)
reset(client)
coll = client.create_collection(
name=collection.name,
metadata=collection.metadata, # type: ignore
embedding_function=collection.embedding_function,
)
initial_version = coll.get_model()["version"]
coll.add(**record_set)
if not NOT_CLUSTER_ONLY:
# Only wait for compaction if the size of the collection is
# some minimal size
if should_compact and len(invariants.wrap(record_set["ids"])) > 10:
# Wait for the model to be updated
wait_for_version_increase(client, collection.name, initial_version) # type: ignore
for filter in filters:
result_ids = coll.get(**filter)["ids"]
expected_ids = _filter_embedding_set(record_set, filter)
assert sorted(result_ids) == sorted(expected_ids)
@pytest.mark.skipif(
NOT_CLUSTER_ONLY,
reason="Search API only available in distributed mode"
)
@settings(
deadline=90000,
suppress_health_check=[
HealthCheck.function_scoped_fixture,
HealthCheck.large_base_example,
HealthCheck.filter_too_much,
],
) # type: ignore
@given(
collection=collection_st,
record_set=recordset_st,
filters=st.lists(strategies.filters(collection_st, recordset_st), min_size=1),
should_compact=st.booleans(),
)
def test_filterable_metadata_search(
caplog,
client: ClientAPI,
collection: strategies.Collection,
record_set,
filters,
should_compact: bool,
) -> None:
"""Test metadata filtering using search API endpoint."""
caplog.set_level(logging.ERROR)
reset(client)
coll = client.create_collection(
name=collection.name,
metadata=collection.metadata, # type: ignore
embedding_function=collection.embedding_function,
)
initial_version = coll.get_model()["version"]
coll.add(**record_set)
if should_compact and len(invariants.wrap(record_set["ids"])) > 10:
wait_for_version_increase(client, collection.name, initial_version) # type: ignore
for filter in filters:
# Use search API instead of get
result_ids = _search_with_filter(coll, filter, n_results=1000)
expected_ids = _filter_embedding_set(record_set, filter)
assert sorted(result_ids) == sorted(expected_ids)
@settings(
deadline=90000,
suppress_health_check=[
HealthCheck.function_scoped_fixture,
HealthCheck.large_base_example,
HealthCheck.filter_too_much,
],
) # type: ignore
@given(
collection=collection_st,
record_set=recordset_st,
filters=st.lists(strategies.filters(collection_st, recordset_st), min_size=1),
limit=st.integers(min_value=1, max_value=10),
offset=st.integers(min_value=0, max_value=10),
should_compact=st.booleans(),
)
# Repro of a former off-by-one error in distributed Chroma. Fixed in https://github.com/chroma-core/chroma/pull/3489.
@example(
collection=strategies.Collection(
name="test",
metadata={"test": "test"},
embedding_function=None,
id=uuid.uuid4(),
dimension=2,
dtype="float32",
known_metadata_keys={},
known_document_keywords=[],
),
record_set=strategies.RecordSet(
ids=[str(i) for i in range(11)],
embeddings=[np.random.rand(2).tolist() for _ in range(11)],
metadatas=[{"test": "test"} for _ in range(11)],
documents=None,
),
filters=[
strategies.Filter(
{
"where_document": {"$not_contains": "foo"},
"ids": None,
"where": None,
}
)
],
limit=10,
offset=10,
should_compact=True,
)
def test_filterable_metadata_get_limit_offset(
caplog,
client: ClientAPI,
collection: strategies.Collection,
record_set,
filters,
limit,
offset,
should_compact: bool,
) -> None:
caplog.set_level(logging.ERROR)
reset(client)
coll = client.create_collection(
name=collection.name,
metadata=collection.metadata, # type: ignore
embedding_function=collection.embedding_function,
)
initial_version = coll.get_model()["version"]
coll.add(**record_set)
if not NOT_CLUSTER_ONLY:
# Only wait for compaction if the size of the collection is
# some minimal size
if should_compact and len(invariants.wrap(record_set["ids"])) > 10:
# Wait for the model to be updated
wait_for_version_increase(client, collection.name, initial_version) # type: ignore
for filter in filters:
# add limit and offset to filter
filter["limit"] = limit
filter["offset"] = offset
result_ids = coll.get(**filter)["ids"]
expected_ids = _filter_embedding_set(record_set, filter)
if len(expected_ids) > 0:
collection_ids = coll.get(ids=expected_ids)["ids"]
offset_id_order = {id: index for index, id in enumerate(collection_ids)}
assert (
result_ids
== sorted(expected_ids, key=lambda id: offset_id_order[id])[
offset : offset + limit
]
)
@settings(
deadline=90000,
suppress_health_check=[
HealthCheck.function_scoped_fixture,
HealthCheck.large_base_example,
HealthCheck.filter_too_much,
],
)
@given(
collection=collection_st,
record_set=recordset_st,
filters=st.lists(
strategies.filters(collection_st, recordset_st, include_all_ids=True),
min_size=1,
),
should_compact=st.booleans(),
data=st.data(),
)
def test_filterable_metadata_query(
caplog: pytest.LogCaptureFixture,
client: ClientAPI,
collection: strategies.Collection,
record_set: strategies.RecordSet,
filters: List[strategies.Filter],
should_compact: bool,
data: st.DataObject,
) -> None:
caplog.set_level(logging.ERROR)
reset(client)
coll = client.create_collection(
name=collection.name,
metadata=collection.metadata, # type: ignore
embedding_function=collection.embedding_function,
)
initial_version = coll.get_model()["version"]
normalized_record_set = invariants.wrap_all(record_set)
coll.add(**record_set) # type: ignore[arg-type]
if not NOT_CLUSTER_ONLY:
# Only wait for compaction if the size of the collection is
# some minimal size
if should_compact and len(invariants.wrap(record_set["ids"])) > 10:
# Wait for the model to be updated
wait_for_version_increase(client, collection.name, initial_version) # type: ignore
total_count = len(normalized_record_set["ids"])
# Pick a random vector using Hypothesis data
random_query: Embedding
query_index = data.draw(st.integers(min_value=0, max_value=total_count - 1))
if collection.has_embeddings:
assert normalized_record_set["embeddings"] is not None
assert all(isinstance(e, list) for e in normalized_record_set["embeddings"])
# Use data.draw to select index
random_query = normalized_record_set["embeddings"][query_index]
else:
assert isinstance(normalized_record_set["documents"], list)
assert collection.embedding_function is not None
# Use data.draw to select index
random_query = collection.embedding_function(
[normalized_record_set["documents"][query_index]]
)[0]
for filter in filters:
result_ids = set(
coll.query(
query_embeddings=random_query,
n_results=total_count,
where=filter["where"],
where_document=filter["where_document"],
)["ids"][0]
)
expected_ids = set(
_filter_embedding_set(
cast(strategies.RecordSet, normalized_record_set), filter
)
)
assert len(result_ids.intersection(expected_ids)) == len(result_ids)
@pytest.mark.skipif(
NOT_CLUSTER_ONLY,
reason="Search API only available in distributed mode"
)
@settings(
deadline=90000,
suppress_health_check=[
HealthCheck.function_scoped_fixture,
HealthCheck.large_base_example,
HealthCheck.filter_too_much,
],
)
@given(
collection=collection_st,
record_set=recordset_st,
filters=st.lists(
strategies.filters(collection_st, recordset_st, include_all_ids=True),
min_size=1,
),
should_compact=st.booleans(),
data=st.data(),
)
def test_filterable_metadata_query_via_search(
caplog: pytest.LogCaptureFixture,
client: ClientAPI,
collection: strategies.Collection,
record_set: strategies.RecordSet,
filters: List[strategies.Filter],
should_compact: bool,
data: st.DataObject,
) -> None:
"""Test query-like filtering using search API endpoint."""
caplog.set_level(logging.ERROR)
reset(client)
coll = client.create_collection(
name=collection.name,
metadata=collection.metadata, # type: ignore
embedding_function=collection.embedding_function,
)
initial_version = coll.get_model()["version"]
normalized_record_set = invariants.wrap_all(record_set)
coll.add(**record_set) # type: ignore[arg-type]
if should_compact and len(invariants.wrap(record_set["ids"])) > 10:
wait_for_version_increase(client, collection.name, initial_version) # type: ignore
total_count = len(normalized_record_set["ids"])
# Pick a random query embedding
query_index = data.draw(st.integers(min_value=0, max_value=total_count - 1))
if collection.has_embeddings:
assert normalized_record_set["embeddings"] is not None
random_query = normalized_record_set["embeddings"][query_index]
else:
assert isinstance(normalized_record_set["documents"], list)
assert collection.embedding_function is not None
random_query = collection.embedding_function(
[normalized_record_set["documents"][query_index]]
)[0]
for filter in filters:
# Use search API with query embedding
result_ids = set(_search_with_filter(
coll,
filter,
query_embedding=random_query,
n_results=total_count
))
expected_ids = set(
_filter_embedding_set(
cast(strategies.RecordSet, normalized_record_set), filter
)
)
assert len(result_ids.intersection(expected_ids)) == len(result_ids)
def test_empty_filter(client: ClientAPI) -> None:
"""Test that a filter where no document matches returns an empty result"""
reset(client)
coll = client.create_collection(name="test")
test_ids: IDs = ["1", "2", "3"]
test_embeddings: Embeddings = [np.array([1, 1]), np.array([2, 2]), np.array([3, 3])]
test_query_embedding: Embedding = np.array([1, 2])
test_query_embeddings: Embeddings = [test_query_embedding, test_query_embedding]
coll.add(ids=test_ids, embeddings=test_embeddings)
res = coll.query(
query_embeddings=test_query_embedding,
where={"q": {"$eq": 4}}, # type: ignore[dict-item]
n_results=3,
include=["embeddings", "distances", "metadatas"],
)
assert res["ids"] == [[]]
if res["embeddings"] is not None:
assert cast(np.ndarray, res["embeddings"][0]).size == 0 # type: ignore
assert res["distances"] == [[]]
assert res["metadatas"] == [[]]
assert set(res["included"]) == set(["embeddings", "distances", "metadatas"])
res = coll.query(
query_embeddings=test_query_embeddings,
where={"test": "yes"},
n_results=3,
)
assert res["ids"] == [[], []]
assert res["embeddings"] is None
assert res["distances"] == [[], []]
assert res["metadatas"] == [[], []]
assert set(res["included"]) == set(["metadatas", "documents", "distances"])
def test_boolean_metadata(client: ClientAPI) -> None:
"""Test that metadata with boolean values is correctly filtered"""
reset(client)
coll = client.create_collection(name="test")
test_ids: IDs = ["1", "2", "3"]
test_embeddings: Embeddings = [np.array([1, 1]), np.array([2, 2]), np.array([3, 3])]
test_metadatas: Metadatas = [{"test": True}, {"test": False}, {"test": True}]
coll.add(ids=test_ids, embeddings=test_embeddings, metadatas=test_metadatas)
res = coll.get(where={"test": True})
assert res["ids"] == ["1", "3"]
def test_get_empty(client: ClientAPI) -> None:
"""Tests that calling get() with empty filters returns nothing"""
reset(client)
coll = client.create_collection(name="test")
test_ids: IDs = ["1", "2", "3"]
test_embeddings: Embeddings = [np.array([1, 1]), np.array([2, 2]), np.array([3, 3])]
test_metadatas: Metadatas = [{"test": 10}, {"test": 20}, {"test": 30}]
def check_empty_res(res: GetResult) -> None:
assert len(res["ids"]) == 0
assert res["embeddings"] is not None
assert len(res["embeddings"]) == 0
assert res["documents"] is not None
assert len(res["documents"]) == 0
assert res["metadatas"] is not None
coll.add(ids=test_ids, embeddings=test_embeddings, metadatas=test_metadatas)
res = coll.get(ids=["nope"], include=["embeddings", "metadatas", "documents"])
check_empty_res(res)
res = coll.get(
include=["embeddings", "metadatas", "documents"], where={"test": 100}
)
check_empty_res(res)
@settings(
deadline=90000,
suppress_health_check=[
HealthCheck.function_scoped_fixture,
HealthCheck.large_base_example,
],
)
@given(
collection=collection_st,
record_set=recordset_st,
n_results_st=st.integers(min_value=1, max_value=100),
should_compact=st.booleans(),
data=st.data(),
)
def test_query_ids_filter_property(
caplog: pytest.LogCaptureFixture,
client: ClientAPI,
collection: strategies.Collection,
record_set: strategies.RecordSet,
n_results_st: int,
should_compact: bool,
data: st.DataObject,
) -> None:
"""Property test for querying with only the ids filter."""
if (
client.get_settings().chroma_api_impl
== "chromadb.api.async_fastapi.AsyncFastAPI"
):
pytest.skip(
"Skipping test for async client due to potential resource/timeout issues"
)
caplog.set_level(logging.ERROR)
reset(client)
coll = client.create_collection(
name=collection.name,
metadata=collection.metadata, # type: ignore
embedding_function=collection.embedding_function,
)
initial_version = coll.get_model()["version"]
normalized_record_set = invariants.wrap_all(record_set)
if len(normalized_record_set["ids"]) == 0:
# Cannot add empty record set
return
coll.add(**record_set) # type: ignore[arg-type]
if not NOT_CLUSTER_ONLY:
if should_compact and len(normalized_record_set["ids"]) > 10:
wait_for_version_increase(client, collection.name, initial_version) # type: ignore
total_count = len(normalized_record_set["ids"])
n_results = min(n_results_st, total_count)
# Generate a random subset of ids to filter on using Hypothesis data
ids_to_query = data.draw(
st.lists(
st.sampled_from(normalized_record_set["ids"]),
min_size=0,
max_size=total_count,
unique=True,
)
)
# Pick a random query vector using Hypothesis data
random_query: Embedding
query_index = data.draw(st.integers(min_value=0, max_value=total_count - 1))
if collection.has_embeddings:
assert normalized_record_set["embeddings"] is not None
assert all(isinstance(e, list) for e in normalized_record_set["embeddings"])
# Use data.draw to select index
random_query = normalized_record_set["embeddings"][query_index]
else:
assert isinstance(normalized_record_set["documents"], list)
assert collection.embedding_function is not None
# Use data.draw to select index
random_query = collection.embedding_function(
[normalized_record_set["documents"][query_index]]
)[0]
# Perform the query with only the ids filter
result = coll.query(
query_embeddings=[random_query],
ids=ids_to_query,
n_results=n_results,
)
result_ids = set(result["ids"][0])
filter_ids_set = set(ids_to_query)
# The core assertion: all returned IDs must be within the filter set
assert result_ids.issubset(filter_ids_set)
# Also check that the number of results is reasonable
assert len(result_ids) <= n_results
assert len(result_ids) <= len(filter_ids_set)
def test_regex(client: ClientAPI) -> None:
"""Tests that regex works"""
reset(client)
coll = client.create_collection(name="test")
test_ids: IDs = ["1", "2", "3"]
test_documents: Documents = ["cat", "Cat", "CAT"]
test_embeddings: Embeddings = [np.array([1, 1]), np.array([2, 2]), np.array([3, 3])]
test_metadatas: Metadatas = [{"test": 10}, {"test": 20}, {"test": 30}]
coll.add(
ids=test_ids,
documents=test_documents,
embeddings=test_embeddings,
metadatas=test_metadatas,
)
res = coll.get(where_document={"$regex": "cat"})
assert res["ids"] == ["1"]
res = coll.get(where_document={"$regex": "(?i)cat"})
assert sorted(res["ids"]) == ["1", "2", "3"]
res = coll.get(
where={"test": {"$ne": 10}}, where_document={"$regex": "(?i)c(?-i)at"} # type: ignore[dict-item]
)
assert res["ids"] == ["2"]
| LegacyWhereWrapper |
python | wandb__wandb | wandb/vendor/pygments/lexers/jvm.py | {
"start": 812,
"end": 3724
} | class ____(RegexLexer):
"""
For `Java <http://www.sun.com/java/>`_ source code.
"""
name = 'Java'
aliases = ['java']
filenames = ['*.java']
mimetypes = ['text/x-java']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
# keywords: go before method names to avoid lexing "throw new XYZ"
# as a method signature
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while)\b',
Keyword),
# method names
(r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)' # return arguments
r'((?:[^\W\d]|\$)[\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'@[^\W\d][\w.]*', Name.Decorator),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(import(?:\s+static)?)(\s+)', bygroups(Keyword.Namespace, Text),
'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)((?:[^\W\d]|\$)[\w$]*)', bygroups(Operator, Name.Attribute)),
(r'^\s*([^\W\d]|\$)[\w$]*:', Name.Label),
(r'([^\W\d]|\$)[\w$]*', Name),
(r'([0-9][0-9_]*\.([0-9][0-9_]*)?|'
r'\.[0-9][0-9_]*)'
r'([eE][+\-]?[0-9][0-9_]*)?[fFdD]?|'
r'[0-9][eE][+\-]?[0-9][0-9_]*[fFdD]?|'
r'[0-9]([eE][+\-]?[0-9][0-9_]*)?[fFdD]|'
r'0[xX]([0-9a-fA-F][0-9a-fA-F_]*\.?|'
r'([0-9a-fA-F][0-9a-fA-F_]*)?\.[0-9a-fA-F][0-9a-fA-F_]*)'
r'[pP][+\-]?[0-9][0-9_]*[fFdD]?', Number.Float),
(r'0[xX][0-9a-fA-F][0-9a-fA-F_]*[lL]?', Number.Hex),
(r'0[bB][01][01_]*[lL]?', Number.Bin),
(r'0[0-7_]+[lL]?', Number.Oct),
(r'0|[1-9][0-9_]*[lL]?', Number.Integer),
(r'[~^*!%&\[\](){}<>|+=:;,./?-]', Operator),
(r'\n', Text)
],
'class': [
(r'([^\W\d]|\$)[\w$]*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
| JavaLexer |
python | django__django | tests/auth_tests/test_tokens.py | {
"start": 569,
"end": 7569
} | class ____(TestCase):
def test_make_token(self):
user = User.objects.create_user("tokentestuser", "test2@example.com", "testpw")
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
self.assertIs(p0.check_token(user, tk1), True)
def test_10265(self):
"""
The token generated for a user created in the same request
will work correctly.
"""
user = User.objects.create_user("comebackkid", "test3@example.com", "testpw")
user_reload = User.objects.get(username="comebackkid")
p0 = MockedPasswordResetTokenGenerator(datetime.now())
tk1 = p0.make_token(user)
tk2 = p0.make_token(user_reload)
self.assertEqual(tk1, tk2)
def test_token_with_different_email(self):
"""Updating the user email address invalidates the token."""
tests = [
(CustomEmailField, None),
(CustomEmailField, "test4@example.com"),
(User, "test4@example.com"),
]
for model, email in tests:
with self.subTest(model=model.__qualname__, email=email):
user = model.objects.create_user(
"changeemailuser",
email=email,
password="testpw",
)
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
self.assertIs(p0.check_token(user, tk1), True)
setattr(user, user.get_email_field_name(), "test4new@example.com")
user.save()
self.assertIs(p0.check_token(user, tk1), False)
def test_timeout(self):
"""The token is valid after n seconds, but no greater."""
# Uses a mocked version of PasswordResetTokenGenerator so we can change
# the value of 'now'.
user = User.objects.create_user("tokentestuser", "test2@example.com", "testpw")
now = datetime.now()
p0 = MockedPasswordResetTokenGenerator(now)
tk1 = p0.make_token(user)
p1 = MockedPasswordResetTokenGenerator(
now + timedelta(seconds=settings.PASSWORD_RESET_TIMEOUT)
)
self.assertIs(p1.check_token(user, tk1), True)
p2 = MockedPasswordResetTokenGenerator(
now + timedelta(seconds=(settings.PASSWORD_RESET_TIMEOUT + 1))
)
self.assertIs(p2.check_token(user, tk1), False)
with self.settings(PASSWORD_RESET_TIMEOUT=60 * 60):
p3 = MockedPasswordResetTokenGenerator(
now + timedelta(seconds=settings.PASSWORD_RESET_TIMEOUT)
)
self.assertIs(p3.check_token(user, tk1), True)
p4 = MockedPasswordResetTokenGenerator(
now + timedelta(seconds=(settings.PASSWORD_RESET_TIMEOUT + 1))
)
self.assertIs(p4.check_token(user, tk1), False)
def test_check_token_with_nonexistent_token_and_user(self):
user = User.objects.create_user("tokentestuser", "test2@example.com", "testpw")
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
self.assertIs(p0.check_token(None, tk1), False)
self.assertIs(p0.check_token(user, None), False)
def test_token_with_different_secret(self):
"""
A valid token can be created with a secret other than SECRET_KEY by
using the PasswordResetTokenGenerator.secret attribute.
"""
user = User.objects.create_user("tokentestuser", "test2@example.com", "testpw")
new_secret = "abcdefghijkl"
# Create and check a token with a different secret.
p0 = PasswordResetTokenGenerator()
p0.secret = new_secret
tk0 = p0.make_token(user)
self.assertIs(p0.check_token(user, tk0), True)
# Create and check a token with the default secret.
p1 = PasswordResetTokenGenerator()
self.assertEqual(p1.secret, settings.SECRET_KEY)
self.assertNotEqual(p1.secret, new_secret)
tk1 = p1.make_token(user)
# Tokens created with a different secret don't validate.
self.assertIs(p0.check_token(user, tk1), False)
self.assertIs(p1.check_token(user, tk0), False)
def test_token_with_different_secret_subclass(self):
class CustomPasswordResetTokenGenerator(PasswordResetTokenGenerator):
secret = "test-secret"
user = User.objects.create_user("tokentestuser", "test2@example.com", "testpw")
custom_password_generator = CustomPasswordResetTokenGenerator()
tk_custom = custom_password_generator.make_token(user)
self.assertIs(custom_password_generator.check_token(user, tk_custom), True)
default_password_generator = PasswordResetTokenGenerator()
self.assertNotEqual(
custom_password_generator.secret,
default_password_generator.secret,
)
self.assertEqual(default_password_generator.secret, settings.SECRET_KEY)
# Tokens created with a different secret don't validate.
tk_default = default_password_generator.make_token(user)
self.assertIs(custom_password_generator.check_token(user, tk_default), False)
self.assertIs(default_password_generator.check_token(user, tk_custom), False)
@override_settings(SECRET_KEY="")
def test_secret_lazy_validation(self):
default_token_generator = PasswordResetTokenGenerator()
msg = "The SECRET_KEY setting must not be empty."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
default_token_generator.secret
def test_check_token_secret_fallbacks(self):
user = User.objects.create_user("tokentestuser", "test2@example.com", "testpw")
p1 = PasswordResetTokenGenerator()
p1.secret = "secret"
tk = p1.make_token(user)
p2 = PasswordResetTokenGenerator()
p2.secret = "newsecret"
p2.secret_fallbacks = ["secret"]
self.assertIs(p1.check_token(user, tk), True)
self.assertIs(p2.check_token(user, tk), True)
@override_settings(
SECRET_KEY="secret",
SECRET_KEY_FALLBACKS=["oldsecret"],
)
def test_check_token_secret_key_fallbacks(self):
user = User.objects.create_user("tokentestuser", "test2@example.com", "testpw")
p1 = PasswordResetTokenGenerator()
p1.secret = "oldsecret"
tk = p1.make_token(user)
p2 = PasswordResetTokenGenerator()
self.assertIs(p2.check_token(user, tk), True)
@override_settings(
SECRET_KEY="secret",
SECRET_KEY_FALLBACKS=["oldsecret"],
)
def test_check_token_secret_key_fallbacks_override(self):
user = User.objects.create_user("tokentestuser", "test2@example.com", "testpw")
p1 = PasswordResetTokenGenerator()
p1.secret = "oldsecret"
tk = p1.make_token(user)
p2 = PasswordResetTokenGenerator()
p2.secret_fallbacks = []
self.assertIs(p2.check_token(user, tk), False)
| TokenGeneratorTest |
python | python__mypy | mypyc/irbuild/classdef.py | {
"start": 7287,
"end": 7968
} | class ____:
"""Create IR for a class definition.
This is an abstract base class.
"""
def __init__(self, builder: IRBuilder, cdef: ClassDef) -> None:
self.builder = builder
self.cdef = cdef
self.attrs_to_cache: list[tuple[Lvalue, RType]] = []
@abstractmethod
def add_method(self, fdef: FuncDef) -> None:
"""Add a method to the class IR"""
@abstractmethod
def add_attr(self, lvalue: NameExpr, stmt: AssignmentStmt) -> None:
"""Add an attribute to the class IR"""
@abstractmethod
def finalize(self, ir: ClassIR) -> None:
"""Perform any final operations to complete the class IR"""
| ClassBuilder |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 61428,
"end": 62840
} | class ____(DefinedFunction):
r"""
Returns the Legendre symbol `(a / p)`.
For an integer ``a`` and an odd prime ``p``, the Legendre symbol is
defined as
.. math ::
\genfrac(){}{}{a}{p} = \begin{cases}
0 & \text{if } p \text{ divides } a\\
1 & \text{if } a \text{ is a quadratic residue modulo } p\\
-1 & \text{if } a \text{ is a quadratic nonresidue modulo } p
\end{cases}
Examples
========
>>> from sympy.functions.combinatorial.numbers import legendre_symbol
>>> [legendre_symbol(i, 7) for i in range(7)]
[0, 1, 1, -1, 1, -1, -1]
>>> sorted(set([i**2 % 7 for i in range(7)]))
[0, 1, 2, 4]
See Also
========
sympy.ntheory.residue_ntheory.is_quad_residue, jacobi_symbol
"""
is_integer = True
is_prime = False
@classmethod
def eval(cls, a, p):
if a.is_integer is False:
raise TypeError("a should be an integer")
if p.is_integer is False:
raise TypeError("p should be an integer")
if p.is_prime is False or p.is_odd is False:
raise ValueError("p should be an odd prime integer")
if (a % p).is_zero is True:
return S.Zero
if a is S.One:
return S.One
if a.is_Integer is True and p.is_Integer is True:
return S(legendre(as_int(a), as_int(p)))
| legendre_symbol |
python | PyCQA__pylint | tests/functional/s/stop_iteration_inside_generator.py | {
"start": 3116,
"end": 4505
} | class ____:
def next(self):
return iter([1, 2, 3])
def some_gen(self):
for value in self.next():
yield value
SomeClassWithNext().some_gen()
def something_invalid():
raise Exception("cannot iterate this")
def invalid_object_passed_to_next():
yield next(something_invalid()) # [stop-iteration-return]
# pylint: disable=redefined-builtin,too-many-function-args
def safeiter(it):
"""Regression test for issue #7610 when ``next`` builtin is redefined"""
def next():
while True:
try:
return next(it)
except StopIteration:
raise
it = iter(it)
while True:
yield next()
def other_safeiter(it):
"""Regression test for issue #7610 when ``next`` builtin is redefined"""
def next(*things):
print(*things)
while True:
try:
return next(it)
except StopIteration:
raise
it = iter(it)
while True:
yield next(1, 2)
def data(filename):
"""
Ensure pylint doesn't crash if `next` is incorrectly called without args
See https://github.com/pylint-dev/pylint/issues/7828
"""
with open(filename, encoding="utf8") as file:
next() # attempt to skip header but this is incorrect code
for line in file:
yield line
| SomeClassWithNext |
python | pyqtgraph__pyqtgraph | pyqtgraph/widgets/MatplotlibWidget.py | {
"start": 281,
"end": 2210
} | class ____(QtWidgets.QWidget):
"""
Implements a Matplotlib figure inside a QWidget.
Use getFigure() and redraw() to interact with matplotlib.
Example::
mw = MatplotlibWidget()
subplot = mw.getFigure().add_subplot(111)
subplot.plot(x,y)
mw.draw()
"""
parent_default = None
figsize_default = (5.0, 4.0)
dpi_default = 100
@typing.overload
def __init__(self, figsize=(5.0, 4.0), dpi=100, parent=None):
pass
@typing.overload
def __init__(self, parent=None, figsize=(5.0, 4.0), dpi=100):
pass
def __init__(self, *args, **kwargs):
if (args and not isinstance(args[0], QtWidgets.QWidget)):
figsize = args[0] if len(args) > 0 \
else kwargs.get("figsize", MatplotlibWidget.figsize_default)
dpi = args[1] if len(args) > 1 \
else kwargs.get("dpi", MatplotlibWidget.dpi_default)
parent = args[2] if len(args) > 2 \
else kwargs.get("parent", MatplotlibWidget.parent_default)
else:
parent = args[0] if len(args) > 0 \
else kwargs.get("parent", MatplotlibWidget.parent_default)
figsize = args[1] if len(args) > 1 \
else kwargs.get("figsize", MatplotlibWidget.figsize_default)
dpi = args[2] if len(args) > 2 \
else kwargs.get("dpi", MatplotlibWidget.dpi_default)
super().__init__(parent)
self.fig = Figure(figsize, dpi=dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.vbox = QtWidgets.QVBoxLayout()
self.vbox.addWidget(self.toolbar)
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
def getFigure(self):
return self.fig
def draw(self):
self.canvas.draw()
| MatplotlibWidget |
python | walkccc__LeetCode | solutions/1915. Number of Wonderful Substrings/1915.py | {
"start": 0,
"end": 484
} | class ____:
def wonderfulSubstrings(self, word: str) -> int:
ans = 0
prefix = 0 # the binary prefix
count = [0] * 1024 # the binary prefix count
count[0] = 1 # the empty string ""
for c in word:
prefix ^= 1 << ord(c) - ord('a')
# All the letters occur even number of times.
ans += count[prefix]
# `c` occurs odd number of times.
ans += sum(count[prefix ^ 1 << i] for i in range(10))
count[prefix] += 1
return ans
| Solution |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 240464,
"end": 242913
} | class ____(TestCase):
# all these tests use the WRITEBACKIFCOPY mechanism
def test_argmax_with_out(self):
mat = np.eye(5)
out = np.empty(5, dtype="i2")
res = np.argmax(mat, 0, out=out)
assert_equal(res, range(5))
def test_argmin_with_out(self):
mat = -np.eye(5)
out = np.empty(5, dtype="i2")
res = np.argmin(mat, 0, out=out)
assert_equal(res, range(5))
@xpassIfTorchDynamo_np # (reason="XXX: place()")
def test_insert_noncontiguous(self):
a = np.arange(6).reshape(2, 3).T # force non-c-contiguous
# uses arr_insert
np.place(a, a > 2, [44, 55])
assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
# hit one of the failing paths
assert_raises(ValueError, np.place, a, a > 20, [])
def test_put_noncontiguous(self):
a = np.arange(6).reshape(2, 3).T # force non-c-contiguous
assert not a.flags["C_CONTIGUOUS"] # sanity check
np.put(a, [0, 2], [44, 55])
assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]]))
@xpassIfTorchDynamo_np # (reason="XXX: putmask()")
def test_putmask_noncontiguous(self):
a = np.arange(6).reshape(2, 3).T # force non-c-contiguous
# uses arr_putmask
np.putmask(a, a > 2, a**2)
assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]]))
def test_take_mode_raise(self):
a = np.arange(6, dtype="int")
out = np.empty(2, dtype="int")
np.take(a, [0, 2], out=out, mode="raise")
assert_equal(out, np.array([0, 2]))
def test_choose_mod_raise(self):
a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]])
out = np.empty((3, 3), dtype="int")
choices = [-10, 10]
np.choose(a, choices, out=out, mode="raise")
assert_equal(out, np.array([[10, -10, 10], [-10, 10, -10], [10, -10, 10]]))
@xpassIfTorchDynamo_np # (reason="XXX: ndarray.flat")
def test_flatiter__array__(self):
a = np.arange(9).reshape(3, 3)
b = a.T.flat
c = b.__array__()
# triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics
del c
def test_dot_out(self):
# if HAVE_CBLAS, will use WRITEBACKIFCOPY
a = np.arange(9, dtype=float).reshape(3, 3)
b = np.dot(a, a, out=a)
assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]]))
@instantiate_parametrized_tests
| TestWritebackIfCopy |
python | pallets__werkzeug | examples/cupoftee/application.py | {
"start": 2182,
"end": 3643
} | class ____:
def __init__(self, database, interval=120):
self.jinja_env = Environment(loader=PackageLoader("cupoftee"), autoescape=True)
self.interval = interval
self.db = Database(database)
self.server_browser = ServerBrowser(self)
self.updater = Thread(None, self.update_server_browser)
self.updater.daemon = True
self.updater.start()
def update_server_browser(self):
while 1:
if self.server_browser.sync():
wait = self.interval
else:
wait = self.interval // 2
time.sleep(wait)
def dispatch_request(self, request):
url_adapter = url_map.bind_to_environ(request.environ)
try:
endpoint, values = url_adapter.match()
page = pages[endpoint](self, request, url_adapter)
response = page.process(**values)
except NotFound:
page = MissingPage(self, request, url_adapter)
response = page.process()
except HTTPException as e:
return e
return response or page.get_response()
def __call__(self, environ, start_response):
request = Request(environ)
return self.dispatch_request(request)(environ, start_response)
def render_template(self, name, **context):
template = self.jinja_env.get_template(name)
return template.render(context)
from cupoftee.pages import MissingPage
| Cup |
python | coleifer__peewee | tests/models.py | {
"start": 155536,
"end": 158278
} | class ____(ModelTestCase):
requires = [CFile, CNote]
def setUp(self):
super(TestCompoundSelectModels, self).setUp()
def generate_ts():
i = [0]
def _inner():
i[0] += 1
return datetime.datetime(2018, 1, i[0])
return _inner
make_ts = generate_ts()
self.ts = lambda i: datetime.datetime(2018, 1, i)
with self.database.atomic():
for i, content in enumerate(('note-a', 'note-b', 'note-c'), 1):
CNote.create(id=i, content=content, timestamp=make_ts())
file_data = (
('peewee.txt', 'peewee orm'),
('walrus.txt', 'walrus redis toolkit'),
('huey.txt', 'huey task queue'))
for filename, data in file_data:
CFile.create(filename=filename, data=data, timestamp=make_ts())
def test_mix_models_with_model_row_type(self):
cast = 'CHAR' if IS_MYSQL else 'TEXT'
lhs = CNote.select(CNote.id.cast(cast).alias('id_text'),
CNote.content, CNote.timestamp)
rhs = CFile.select(CFile.filename, CFile.data, CFile.timestamp)
query = (lhs | rhs).order_by(SQL('timestamp')).limit(4)
data = [(n.id_text, n.content, n.timestamp) for n in query]
self.assertEqual(data, [
('1', 'note-a', self.ts(1)),
('2', 'note-b', self.ts(2)),
('3', 'note-c', self.ts(3)),
('peewee.txt', 'peewee orm', self.ts(4))])
def test_mixed_models_tuple_row_type(self):
cast = 'CHAR' if IS_MYSQL else 'TEXT'
lhs = CNote.select(CNote.id.cast(cast).alias('id'),
CNote.content, CNote.timestamp)
rhs = CFile.select(CFile.filename, CFile.data, CFile.timestamp)
query = (lhs | rhs).order_by(SQL('timestamp')).limit(5)
self.assertEqual(list(query.tuples()), [
('1', 'note-a', self.ts(1)),
('2', 'note-b', self.ts(2)),
('3', 'note-c', self.ts(3)),
('peewee.txt', 'peewee orm', self.ts(4)),
('walrus.txt', 'walrus redis toolkit', self.ts(5))])
def test_mixed_models_dict_row_type(self):
notes = CNote.select(CNote.content, CNote.timestamp)
files = CFile.select(CFile.filename, CFile.timestamp)
query = (notes | files).order_by(SQL('timestamp').desc()).limit(4)
self.assertEqual(list(query.dicts()), [
{'content': 'huey.txt', 'timestamp': self.ts(6)},
{'content': 'walrus.txt', 'timestamp': self.ts(5)},
{'content': 'peewee.txt', 'timestamp': self.ts(4)},
{'content': 'note-c', 'timestamp': self.ts(3)}])
| TestCompoundSelectModels |
python | Textualize__textual | tests/toggles/test_radioset.py | {
"start": 5677,
"end": 6980
} | class ____(App[None]):
def compose(self) -> ComposeResult:
self.selected = []
with RadioSet():
yield RadioButton("0", disabled=True)
yield RadioButton("1")
yield RadioButton("2", disabled=True)
yield RadioButton("3", disabled=True)
yield RadioButton("4")
yield RadioButton("5")
yield RadioButton("6", disabled=True)
yield RadioButton("7")
yield RadioButton("8", disabled=True)
def on_radio_set_changed(self, radio_set: RadioSet.Changed) -> None:
self.selected.append(str(radio_set.pressed.label))
async def test_keyboard_navigation_with_disabled_buttons():
"""Regression test for https://github.com/Textualize/textual/issues/3839."""
app = RadioSetDisabledButtonsApp()
async with app.run_test() as pilot:
await pilot.press("enter")
for _ in range(5):
await pilot.press("down")
await pilot.press("enter")
for _ in range(5):
await pilot.press("up")
await pilot.press("enter")
assert app.selected == [
"1",
"4",
"5",
"7",
"1",
"4",
"1",
"7",
"5",
"4",
"1",
]
| RadioSetDisabledButtonsApp |
python | ZoranPandovski__al-go-rithms | data_structures/B+tree/btree.py | {
"start": 26,
"end": 708
} | class ____():
def __init__(self, lf):
self.keys = []
self.children = []
self.next = None
self.leaf = lf
def kaatde(self):
mid = len(self.keys) // 2
mval = self.keys[mid]
nw_node = Node(self.leaf)
nw_node.keys = self.keys[mid:] if self.leaf else self.keys[mid+1:]
nw_node.children = self.children[mid:] if self.leaf else self.children[mid+1:]
self.keys = self.keys[:mid]
self.children = self.children[:mid] if self.leaf else self.children[:mid+1]
if self.leaf:
nw_node.next = self.next
self.next = nw_node
return mval, nw_node
| Node |
python | pytorch__pytorch | test/torch_np/test_reductions.py | {
"start": 16730,
"end": 18139
} | class ____(TestCase):
"""Run a set of generic tests to verify that cumsum/cumprod are sane."""
@parametrize("func", [np.cumsum, np.cumprod])
def test_bad_axis(self, func):
# Basic check of functionality
m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
assert_raises(TypeError, func, m, axis="foo")
assert_raises(np.AxisError, func, m, axis=3)
assert_raises(TypeError, func, m, axis=np.array([[1], [2]]))
assert_raises(TypeError, func, m, axis=1.5)
# TODO: add tests with np.int32(3) etc, when implemented
@parametrize("func", [np.cumsum, np.cumprod])
def test_array_axis(self, func):
a = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
assert_equal(func(a, axis=np.array(-1)), func(a, axis=-1))
with assert_raises(TypeError):
func(a, axis=np.array([1, 2]))
@parametrize("func", [np.cumsum, np.cumprod])
def test_axis_empty_generic(self, func):
a = np.array([[0, 0, 1], [1, 0, 1]])
assert_array_equal(func(a, axis=None), func(a.ravel(), axis=0))
@parametrize("func", [np.cumsum, np.cumprod])
def test_axis_bad_tuple(self, func):
# Basic check of functionality
m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
with assert_raises(TypeError):
func(m, axis=(1, 1))
if __name__ == "__main__":
run_tests()
| TestGenericCumSumProd |
python | django__django | tests/migrations/test_optimizer.py | {
"start": 281,
"end": 52246
} | class ____(OptimizerTestBase):
"""
Tests the migration optimizer.
"""
def test_none_app_label(self):
optimizer = MigrationOptimizer()
with self.assertRaisesMessage(TypeError, "app_label must be a str"):
optimizer.optimize([], None)
def test_single(self):
"""
The optimizer does nothing on a single operation,
        and does so in just one pass.
"""
self.assertOptimizesTo(
[migrations.DeleteModel("Foo")],
[migrations.DeleteModel("Foo")],
exact=1,
)
def test_create_delete_model(self):
"""
CreateModel and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_rename_model(self):
"""
CreateModel should absorb RenameModels.
"""
managers = [("objects", EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RenameModel("Foo", "Bar"),
],
[
migrations.CreateModel(
"Bar",
[("name", models.CharField(max_length=255))],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
)
],
)
def test_rename_model_self(self):
"""
RenameModels should absorb themselves.
"""
self.assertOptimizesTo(
[
migrations.RenameModel("Foo", "Baa"),
migrations.RenameModel("Baa", "Bar"),
],
[
migrations.RenameModel("Foo", "Bar"),
],
)
def test_create_alter_model_options(self):
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", fields=[]),
migrations.AlterModelOptions(
name="Foo", options={"verbose_name_plural": "Foozes"}
),
],
[
migrations.CreateModel(
"Foo", fields=[], options={"verbose_name_plural": "Foozes"}
),
],
)
def test_create_alter_model_managers(self):
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", fields=[]),
migrations.AlterModelManagers(
name="Foo",
managers=[
("objects", models.Manager()),
("things", models.Manager()),
],
),
],
[
migrations.CreateModel(
"Foo",
fields=[],
managers=[
("objects", models.Manager()),
("things", models.Manager()),
],
),
],
)
def test_create_alter_model_table(self):
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", fields=[]),
migrations.AlterModelTable(
name="foo",
table="foo",
),
],
[
migrations.CreateModel(
"Foo",
fields=[],
options={
"db_table": "foo",
},
),
],
)
def test_create_alter_model_table_comment(self):
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", fields=[]),
migrations.AlterModelTableComment(
name="foo",
table_comment="A lovely table.",
),
],
[
migrations.CreateModel(
"Foo",
fields=[],
options={
"db_table_comment": "A lovely table.",
},
),
],
)
def test_create_model_and_remove_model_options(self):
self.assertOptimizesTo(
[
migrations.CreateModel(
"MyModel",
fields=[],
options={"verbose_name": "My Model"},
),
migrations.AlterModelOptions("MyModel", options={}),
],
[migrations.CreateModel("MyModel", fields=[])],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"MyModel",
fields=[],
options={
"verbose_name": "My Model",
"verbose_name_plural": "My Model plural",
},
),
migrations.AlterModelOptions(
"MyModel",
options={"verbose_name": "My Model"},
),
],
[
migrations.CreateModel(
"MyModel",
fields=[],
options={"verbose_name": "My Model"},
),
],
)
def _test_create_alter_foo_delete_model(self, alter_foo):
"""
CreateModel, AlterModelTable, AlterUniqueTogether/AlterIndexTogether/
AlterOrderWithRespectTo, and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.AlterModelTable("Foo", "woohoo"),
alter_foo,
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_alter_unique_delete_model(self):
self._test_create_alter_foo_delete_model(
migrations.AlterUniqueTogether("Foo", [["a", "b"]])
)
def test_create_alter_index_delete_model(self):
self._test_create_alter_foo_delete_model(
migrations.AlterIndexTogether("Foo", [["a", "b"]])
)
def test_create_alter_owrt_delete_model(self):
self._test_create_alter_foo_delete_model(
migrations.AlterOrderWithRespectTo("Foo", "a")
)
def _test_alter_alter(self, alter_foo, alter_bar):
"""
Two AlterUniqueTogether/AlterIndexTogether/AlterOrderWithRespectTo
/AlterField should collapse into the second.
"""
self.assertOptimizesTo(
[
alter_foo,
alter_bar,
],
[
alter_bar,
],
)
def test_alter_alter_table_model(self):
self._test_alter_alter(
migrations.AlterModelTable("Foo", "a"),
migrations.AlterModelTable("Foo", "b"),
)
def test_alter_alter_unique_model(self):
self._test_alter_alter(
migrations.AlterUniqueTogether("Foo", [["a", "b"]]),
migrations.AlterUniqueTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_index_model(self):
self._test_alter_alter(
migrations.AlterIndexTogether("Foo", [["a", "b"]]),
migrations.AlterIndexTogether("Foo", [["a", "c"]]),
)
def test_alter_alter_owrt_model(self):
self._test_alter_alter(
migrations.AlterOrderWithRespectTo("Foo", "a"),
migrations.AlterOrderWithRespectTo("Foo", "b"),
)
def test_alter_alter_field(self):
self._test_alter_alter(
migrations.AlterField("Foo", "name", models.IntegerField()),
migrations.AlterField("Foo", "name", models.IntegerField(help_text="help")),
)
def test_optimize_through_create(self):
"""
We should be able to optimize away create/delete through a create or
delete of a different model, but only if the create operation does not
mention the model at all.
"""
# These should work
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Bar"),
migrations.DeleteModel("Foo"),
],
[],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
migrations.DeleteModel("Bar"),
],
[],
)
# Operations should be optimized if the FK references a model from the
# other app.
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]
),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel(
"Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]
),
],
app_label="otherapp",
)
# But it shouldn't work if a FK references a model with the same
# app_label.
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("other", models.ForeignKey("Foo", models.CASCADE))]
),
migrations.DeleteModel("Foo"),
],
)
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("other", models.ForeignKey("testapp.Foo", models.CASCADE))]
),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
# This should not work - bases should block it
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("size", models.IntegerField())], bases=("Foo",)
),
migrations.DeleteModel("Foo"),
],
)
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)
),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
# The same operations should be optimized if app_label and none of
# bases belong to that app.
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)
),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel(
"Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)
),
],
app_label="otherapp",
)
# But it shouldn't work if some of bases belongs to the specified app.
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar", [("size", models.IntegerField())], bases=("testapp.Foo",)
),
migrations.DeleteModel("Foo"),
],
app_label="testapp",
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Book", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Person", [("name", models.CharField(max_length=255))]
),
migrations.AddField(
"book",
"author",
models.ForeignKey("test_app.Person", models.CASCADE),
),
migrations.CreateModel(
"Review",
[("book", models.ForeignKey("test_app.Book", models.CASCADE))],
),
migrations.CreateModel(
"Reviewer", [("name", models.CharField(max_length=255))]
),
migrations.AddField(
"review",
"reviewer",
models.ForeignKey("test_app.Reviewer", models.CASCADE),
),
migrations.RemoveField("book", "author"),
migrations.DeleteModel("Person"),
],
[
migrations.CreateModel(
"Book", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Reviewer", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Review",
[
("book", models.ForeignKey("test_app.Book", models.CASCADE)),
(
"reviewer",
models.ForeignKey("test_app.Reviewer", models.CASCADE),
),
],
),
],
app_label="test_app",
)
def test_create_model_add_field(self):
"""
AddField should optimize into CreateModel.
"""
managers = [("objects", EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
migrations.AddField("Foo", "age", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_create_model_reordering(self):
"""
AddField optimizes into CreateModel if it's a FK to a model that's
between them (and there's no FK in the other direction), by changing
the order of the CreateModel operations.
"""
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField(
"Foo", "link", models.ForeignKey("migrations.Link", models.CASCADE)
),
],
[
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.CreateModel(
"Foo",
[
("name", models.CharField(max_length=255)),
("link", models.ForeignKey("migrations.Link", models.CASCADE)),
],
),
],
)
def test_create_model_reordering_circular_fk(self):
"""
CreateModel reordering behavior doesn't result in an infinite loop if
there are FKs in both directions.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Bar", [("url", models.TextField())]),
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.AddField(
"Bar", "foo_fk", models.ForeignKey("migrations.Foo", models.CASCADE)
),
migrations.AddField(
"Foo", "bar_fk", models.ForeignKey("migrations.Bar", models.CASCADE)
),
],
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel(
"Bar",
[
("url", models.TextField()),
("foo_fk", models.ForeignKey("migrations.Foo", models.CASCADE)),
],
),
migrations.AddField(
"Foo", "bar_fk", models.ForeignKey("migrations.Bar", models.CASCADE)
),
],
)
def test_create_model_no_reordering_for_unrelated_fk(self):
"""
CreateModel order remains unchanged if the later AddField operation
isn't a FK between them.
"""
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Link", [("url", models.TextField())]),
migrations.AddField(
"Other",
"link",
models.ForeignKey("migrations.Link", models.CASCADE),
),
],
)
def test_create_model_no_reordering_of_inherited_model(self):
"""
A CreateModel that inherits from another isn't reordered to avoid
moving it earlier than its parent CreateModel operation.
"""
self.assertOptimizesTo(
[
migrations.CreateModel(
"Other", [("foo", models.CharField(max_length=255))]
),
migrations.CreateModel(
"ParentModel", [("bar", models.CharField(max_length=255))]
),
migrations.CreateModel(
"ChildModel",
[("baz", models.CharField(max_length=255))],
bases=("migrations.parentmodel",),
),
migrations.AddField(
"Other",
"fk",
models.ForeignKey("migrations.ChildModel", models.CASCADE),
),
],
[
migrations.CreateModel(
"ParentModel", [("bar", models.CharField(max_length=255))]
),
migrations.CreateModel(
"ChildModel",
[("baz", models.CharField(max_length=255))],
bases=("migrations.parentmodel",),
),
migrations.CreateModel(
"Other",
[
("foo", models.CharField(max_length=255)),
(
"fk",
models.ForeignKey("migrations.ChildModel", models.CASCADE),
),
],
),
],
)
def test_create_model_add_field_not_through_m2m_through(self):
"""
AddField should NOT optimize into CreateModel if it's an M2M using a
through that's created between them.
"""
self.assertDoesNotOptimize(
[
migrations.CreateModel("Employee", []),
migrations.CreateModel("Employer", []),
migrations.CreateModel(
"Employment",
[
(
"employee",
models.ForeignKey("migrations.Employee", models.CASCADE),
),
(
"employment",
models.ForeignKey("migrations.Employer", models.CASCADE),
),
],
),
migrations.AddField(
"Employer",
"employees",
models.ManyToManyField(
"migrations.Employee",
through="migrations.Employment",
),
),
],
)
def test_create_model_alter_field(self):
"""
AlterField should optimize into CreateModel.
"""
managers = [("objects", EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
migrations.AlterField("Foo", "name", models.IntegerField()),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.IntegerField()),
],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_create_model_rename_field(self):
"""
RenameField should optimize into CreateModel.
"""
managers = [("objects", EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[("name", models.CharField(max_length=255))],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("title", models.CharField(max_length=255)),
],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_add_field_rename_field(self):
"""
RenameField should optimize into AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.AddField("Foo", "title", models.CharField(max_length=255)),
],
)
def test_alter_field_rename_field(self):
"""
RenameField should optimize to the other side of AlterField,
and into itself.
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
migrations.RenameField("Foo", "title", "nom"),
],
[
migrations.RenameField("Foo", "name", "nom"),
migrations.AlterField("Foo", "nom", models.CharField(max_length=255)),
],
)
def test_swapping_fields_names(self):
self.assertDoesNotOptimize(
[
migrations.CreateModel(
"MyModel",
[
("field_a", models.IntegerField()),
("field_b", models.IntegerField()),
],
),
migrations.RunPython(migrations.RunPython.noop),
migrations.RenameField("MyModel", "field_a", "field_c"),
migrations.RenameField("MyModel", "field_b", "field_a"),
migrations.RenameField("MyModel", "field_c", "field_b"),
],
)
def test_create_model_remove_field(self):
"""
RemoveField should optimize into CreateModel.
"""
managers = [("objects", EmptyManager())]
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
migrations.RemoveField("Foo", "age"),
],
[
migrations.CreateModel(
name="Foo",
fields=[
("name", models.CharField(max_length=255)),
],
options={"verbose_name": "Foo"},
bases=(UnicodeModel,),
managers=managers,
),
],
)
def test_add_field_alter_field(self):
"""
AlterField should optimize into AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AlterField("Foo", "age", models.FloatField(default=2.4)),
],
[
migrations.AddField(
"Foo", name="age", field=models.FloatField(default=2.4)
),
],
)
def test_add_field_delete_field(self):
"""
RemoveField should cancel AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[],
)
def test_alter_field_delete_field(self):
"""
RemoveField should absorb AlterField.
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[
migrations.RemoveField("Foo", "age"),
],
)
def _test_create_alter_foo_field(self, alter):
"""
CreateModel, AlterFooTogether/AlterOrderWithRespectTo followed by an
add/alter/rename field should optimize to CreateModel with options.
"""
option_value = getattr(alter, alter.option_name)
options = {alter.option_name: option_value}
# AddField
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
),
alter,
migrations.AddField("Foo", "c", models.IntegerField()),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
],
options=options,
),
],
)
# AlterField
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
),
alter,
migrations.AlterField("Foo", "b", models.CharField(max_length=255)),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.CharField(max_length=255)),
],
options=options,
),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
],
),
alter,
migrations.AlterField("Foo", "c", models.CharField(max_length=255)),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.CharField(max_length=255)),
],
options=options,
),
],
)
# RenameField
if isinstance(option_value, str):
renamed_options = {alter.option_name: "c"}
else:
renamed_options = {
alter.option_name: {
tuple("c" if value == "b" else value for value in item)
for item in option_value
}
}
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
),
alter,
migrations.RenameField("Foo", "b", "c"),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("c", models.IntegerField()),
],
options=renamed_options,
),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
),
alter,
migrations.RenameField("Foo", "b", "x"),
migrations.RenameField("Foo", "x", "c"),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("c", models.IntegerField()),
],
options=renamed_options,
),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
],
),
alter,
migrations.RenameField("Foo", "c", "d"),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("d", models.IntegerField()),
],
options=options,
),
],
)
# RemoveField
if isinstance(option_value, str):
removed_options = None
else:
removed_options = {
alter.option_name: {
tuple(value for value in item if value != "b")
for item in option_value
}
}
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
),
alter,
migrations.RemoveField("Foo", "b"),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
],
options=removed_options,
),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
("c", models.IntegerField()),
],
),
alter,
migrations.RemoveField("Foo", "c"),
],
[
migrations.CreateModel(
"Foo",
[
("a", models.IntegerField()),
("b", models.IntegerField()),
],
options=options,
),
],
)
def test_create_alter_unique_field(self):
self._test_create_alter_foo_field(
migrations.AlterUniqueTogether("Foo", [["a", "b"]])
)
def test_create_alter_index_field(self):
self._test_create_alter_foo_field(
migrations.AlterIndexTogether("Foo", [["a", "b"]])
)
def test_create_alter_owrt_field(self):
self._test_create_alter_foo_field(
migrations.AlterOrderWithRespectTo("Foo", "b")
)
def test_optimize_through_fields(self):
"""
Field-level 'through' checking works. This should manage to collapse
model Foo to nonexistence, and model Bar to a single IntegerField
called "width".
"""
self.assertOptimizesTo(
[
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AddField("Bar", "width", models.IntegerField()),
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RenameField("Bar", "size", "dimensions"),
migrations.RemoveField("Foo", "age"),
migrations.RenameModel("Foo", "Phou"),
migrations.RemoveField("Bar", "dimensions"),
migrations.RenameModel("Phou", "Fou"),
migrations.DeleteModel("Fou"),
],
[
migrations.CreateModel("Bar", [("width", models.IntegerField())]),
],
)
def test_optimize_elidable_operation(self):
elidable_operation = operations.base.Operation()
elidable_operation.elidable = True
self.assertOptimizesTo(
[
elidable_operation,
migrations.CreateModel(
"Foo", [("name", models.CharField(max_length=255))]
),
elidable_operation,
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
elidable_operation,
migrations.RenameModel("Foo", "Phou"),
migrations.DeleteModel("Bar"),
elidable_operation,
],
[
migrations.CreateModel(
"Phou", [("name", models.CharField(max_length=255))]
),
],
)
def test_rename_index(self):
self.assertOptimizesTo(
[
migrations.RenameIndex(
"Pony", new_name="mid_name", old_fields=("weight", "pink")
),
migrations.RenameIndex(
"Pony", new_name="new_name", old_name="mid_name"
),
],
[
migrations.RenameIndex(
"Pony", new_name="new_name", old_fields=("weight", "pink")
),
],
)
self.assertOptimizesTo(
[
migrations.RenameIndex(
"Pony", new_name="mid_name", old_name="old_name"
),
migrations.RenameIndex(
"Pony", new_name="new_name", old_name="mid_name"
),
],
[migrations.RenameIndex("Pony", new_name="new_name", old_name="old_name")],
)
self.assertDoesNotOptimize(
[
migrations.RenameIndex(
"Pony", new_name="mid_name", old_name="old_name"
),
migrations.RenameIndex(
"Pony", new_name="new_name", old_fields=("weight", "pink")
),
]
)
def test_add_rename_index(self):
tests = [
models.Index(fields=["weight", "pink"], name="mid_name"),
models.Index(Abs("weight"), name="mid_name"),
models.Index(
Abs("weight"), name="mid_name", condition=models.Q(weight__gt=0)
),
]
for index in tests:
with self.subTest(index=index):
renamed_index = index.clone()
renamed_index.name = "new_name"
self.assertOptimizesTo(
[
migrations.AddIndex("Pony", index),
migrations.RenameIndex(
"Pony", new_name="new_name", old_name="mid_name"
),
],
[
migrations.AddIndex("Pony", renamed_index),
],
)
self.assertDoesNotOptimize(
[
migrations.AddIndex("Pony", index),
migrations.RenameIndex(
"Pony", new_name="new_name", old_name="other_name"
),
],
)
def test_add_remove_index(self):
self.assertOptimizesTo(
[
migrations.AddIndex(
"Pony",
models.Index(
fields=["weight", "pink"], name="idx_pony_weight_pink"
),
),
migrations.RemoveIndex("Pony", "idx_pony_weight_pink"),
],
[],
)
def test_add_remove_constraint(self):
gt_constraint = models.CheckConstraint(
condition=models.Q(pink__gt=2), name="constraint_pony_pink_gt_2"
)
self.assertOptimizesTo(
[
migrations.AddConstraint("Pony", gt_constraint),
migrations.RemoveConstraint("Pony", gt_constraint.name),
],
[],
)
self.assertDoesNotOptimize(
[
migrations.AddConstraint("Pony", gt_constraint),
migrations.RemoveConstraint("Pony", "other_name"),
],
)
def test_multiple_alter_constraints(self):
gt_constraint_violation_msg_added = models.CheckConstraint(
condition=models.Q(pink__gt=2),
name="pink_gt_2",
violation_error_message="ERROR",
)
gt_constraint_violation_msg_altered = models.CheckConstraint(
condition=models.Q(pink__gt=2),
name="pink_gt_2",
violation_error_message="error",
)
self.assertOptimizesTo(
[
migrations.AlterConstraint(
"Pony", "pink_gt_2", gt_constraint_violation_msg_added
),
migrations.AlterConstraint(
"Pony", "pink_gt_2", gt_constraint_violation_msg_altered
),
],
[
migrations.AlterConstraint(
"Pony", "pink_gt_2", gt_constraint_violation_msg_altered
)
],
)
other_constraint_violation_msg = models.CheckConstraint(
condition=models.Q(weight__gt=3),
name="pink_gt_3",
violation_error_message="error",
)
self.assertDoesNotOptimize(
[
migrations.AlterConstraint(
"Pony", "pink_gt_2", gt_constraint_violation_msg_added
),
migrations.AlterConstraint(
"Pony", "pink_gt_3", other_constraint_violation_msg
),
]
)
def test_alter_remove_constraint(self):
self.assertOptimizesTo(
[
migrations.AlterConstraint(
"Pony",
"pink_gt_2",
models.CheckConstraint(
condition=models.Q(pink__gt=2), name="pink_gt_2"
),
),
migrations.RemoveConstraint("Pony", "pink_gt_2"),
],
[migrations.RemoveConstraint("Pony", "pink_gt_2")],
)
def test_add_alter_constraint(self):
constraint = models.CheckConstraint(
condition=models.Q(pink__gt=2), name="pink_gt_2"
)
constraint_with_error = models.CheckConstraint(
condition=models.Q(pink__gt=2),
name="pink_gt_2",
violation_error_message="error",
)
self.assertOptimizesTo(
[
migrations.AddConstraint("Pony", constraint),
migrations.AlterConstraint("Pony", "pink_gt_2", constraint_with_error),
],
[migrations.AddConstraint("Pony", constraint_with_error)],
)
def test_create_model_add_index(self):
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
("age", models.IntegerField()),
],
options={
"indexes": [models.Index(fields=["age"], name="idx_pony_age")],
},
),
migrations.AddIndex(
"Pony",
models.Index(fields=["weight"], name="idx_pony_weight"),
),
],
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
("age", models.IntegerField()),
],
options={
"indexes": [
models.Index(fields=["age"], name="idx_pony_age"),
models.Index(fields=["weight"], name="idx_pony_weight"),
],
},
),
],
)
def test_create_model_remove_index(self):
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
("age", models.IntegerField()),
],
options={
"indexes": [
models.Index(fields=["age"], name="idx_pony_age"),
models.Index(fields=["weight"], name="idx_pony_weight"),
],
},
),
migrations.RemoveIndex("Pony", "idx_pony_age"),
],
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
("age", models.IntegerField()),
],
options={
"indexes": [
models.Index(fields=["weight"], name="idx_pony_weight"),
],
},
),
],
)
def test_create_model_rename_index_no_old_fields(self):
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
("age", models.IntegerField()),
],
options={
"indexes": [models.Index(fields=["age"], name="idx_pony_age")],
},
),
migrations.RenameIndex(
"Pony", new_name="idx_pony_age_new", old_name="idx_pony_age"
),
],
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
("age", models.IntegerField()),
],
options={
"indexes": [models.Index(fields=["age"], name="idx_pony_age")],
},
),
migrations.RenameIndex(
"Pony", new_name="idx_pony_age_new", old_name="idx_pony_age"
),
],
)
def test_create_model_add_constraint(self):
gt_constraint = models.CheckConstraint(
condition=models.Q(weight__gt=0), name="pony_weight_gt_0"
)
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
],
),
migrations.AddConstraint("Pony", gt_constraint),
],
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
],
options={"constraints": [gt_constraint]},
),
],
)
def test_create_model_alter_constraint(self):
original_constraint = models.CheckConstraint(
condition=models.Q(weight__gt=0), name="pony_weight_gt_0"
)
altered_constraint = models.CheckConstraint(
condition=models.Q(weight__gt=0),
name="pony_weight_gt_0",
violation_error_message="incorrect weight",
)
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
],
options={
"constraints": [
original_constraint,
models.UniqueConstraint(
"weight", name="pony_weight_unique"
),
],
},
),
migrations.AlterConstraint(
"Pony", "pony_weight_gt_0", altered_constraint
),
],
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
],
options={
"constraints": [
models.UniqueConstraint(
"weight",
name="pony_weight_unique",
),
altered_constraint,
]
},
),
],
)
def test_create_model_remove_constraint(self):
self.assertOptimizesTo(
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
],
options={
"constraints": [
models.CheckConstraint(
condition=models.Q(weight__gt=0),
name="pony_weight_gt_0",
),
models.UniqueConstraint(
"weight", name="pony_weight_unique"
),
],
},
),
migrations.RemoveConstraint("Pony", "pony_weight_gt_0"),
],
[
migrations.CreateModel(
name="Pony",
fields=[
("weight", models.IntegerField()),
],
options={
"constraints": [
models.UniqueConstraint(
"weight", name="pony_weight_unique"
),
]
},
),
],
)
| OptimizerTests |
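Most assertions above boil down to calling MigrationOptimizer.optimize() on a list of operations. A hedged standalone sketch of the same idea (assumes a configured Django environment; the model and field names here are illustrative, not taken from the test file):
from django.db import migrations, models
from django.db.migrations.optimizer import MigrationOptimizer

ops = [
    migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
    migrations.AddField("Foo", "age", models.IntegerField()),
    migrations.DeleteModel("Foo"),
]
# CreateModel absorbs AddField, then CreateModel/DeleteModel cancel each other out.
print(MigrationOptimizer().optimize(ops, app_label="testapp"))   # []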
python | apache__airflow | providers/apache/kafka/src/airflow/providers/apache/kafka/hooks/produce.py | {
"start": 929,
"end": 1550
} | class ____(KafkaBaseHook):
"""
A hook for creating a Kafka Producer.
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
"""
def __init__(self, kafka_config_id=KafkaBaseHook.default_conn_name) -> None:
super().__init__(kafka_config_id=kafka_config_id)
def _get_client(self, config) -> Producer:
return Producer(config)
def get_producer(self) -> Producer:
"""Return a producer object for sending messages to Kafka."""
producer = self.get_conn
self.log.info("Producer %s", producer)
return producer
| KafkaProducerHook |
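A hedged usage sketch for the hook above (assumes an Airflow connection registered under the default "kafka_default" id and a reachable broker; produce() and flush() are standard confluent_kafka Producer calls; the topic and payload are illustrative):
from airflow.providers.apache.kafka.hooks.produce import KafkaProducerHook

hook = KafkaProducerHook()              # uses the "kafka_default" connection
producer = hook.get_producer()          # a confluent_kafka.Producer
producer.produce("demo-topic", key=b"k1", value=b"hello")
producer.flush()                        # block until outstanding messages are delivered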
python | ansible__ansible | .azure-pipelines/scripts/publish-codecov.py | {
"start": 657,
"end": 4702
} | class ____:
dry_run: bool
path: pathlib.Path
def parse_args() -> Args:
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--dry-run', action='store_true')
parser.add_argument('path', type=pathlib.Path)
args = parser.parse_args()
# Store arguments in a typed dataclass
fields = dataclasses.fields(Args)
kwargs = {field.name: getattr(args, field.name) for field in fields}
return Args(**kwargs)
def run(
*args: str | pathlib.Path,
dry_run: bool = False,
) -> None:
"""
Log and run given command.
The command is not actually executed if ``dry_run`` is truthy.
"""
cmd = [str(arg) for arg in args]
dry_prefix = '[would run] ' if dry_run else ''
print(f'==> {dry_prefix}{shlex.join(cmd)}', flush=True)
if not dry_run:
subprocess.run(cmd, check=True)
def install_codecov(dest: pathlib.Path, dry_run: bool = False) -> pathlib.Path:
"""Populate a transitively pinned venv with ``codecov-cli``."""
requirement_file = DEPS_DIR / 'codecov.in'
constraint_file = requirement_file.with_suffix('.txt')
venv_dir = dest / 'venv'
python_bin = venv_dir / 'bin' / 'python'
codecov_bin = venv_dir / 'bin' / 'codecovcli'
venv.create(venv_dir, with_pip=True)
run(
python_bin,
'-m',
'pip',
'install',
f'--constraint={constraint_file!s}',
f'--requirement={requirement_file!s}',
'--disable-pip-version-check',
dry_run=dry_run,
)
return codecov_bin
def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
processed = []
for file in directory.joinpath('reports').glob('coverage*.xml'):
name = file.stem.replace('coverage=', '')
# Get flags from name
flags = name.replace('-powershell', '').split('=') # Drop '-powershell' suffix
flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags] # Remove "-01" from stub files
processed.append(CoverageFile(name, file, flags))
return tuple(processed)
def upload_files(codecov_bin: pathlib.Path, config_file: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
for file in files:
cmd = [
codecov_bin,
'--disable-telem',
'--codecov-yml-path',
config_file,
'upload-process',
'--disable-search',
'--disable-file-fixes',
'--plugin',
'noop',
'--name',
file.name,
'--file',
file.path,
]
for flag in file.flags:
cmd.extend(['--flag', flag])
if dry_run:
cmd.append('--dry-run')
run(*cmd)
def report_upload_completion(
codecov_bin: pathlib.Path,
config_file: pathlib.Path,
dry_run: bool = False,
) -> None:
"""Notify Codecov backend that all reports we wanted are in."""
cmd = [
codecov_bin,
'--disable-telem',
f'--codecov-yml-path={config_file}',
'send-notifications',
]
run(*cmd, dry_run=dry_run)
def main() -> None:
args = parse_args()
with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
config_file = pathlib.Path(tmpdir) / 'config.yml'
# Refs:
# * https://docs.codecov.com/docs/codecovyml-reference#codecovnotifymanual_trigger
# * https://docs.codecov.com/docs/notifications#preventing-notifications-until-youre-ready-to-send-notifications
config_file.write_text('codecov:\n notify:\n manual_trigger: true')
codecov_bin = install_codecov(
pathlib.Path(tmpdir),
dry_run=args.dry_run,
)
files = process_files(args.path)
upload_files(codecov_bin, config_file, files, args.dry_run)
# Ref: https://docs.codecov.com/docs/cli-options#send-notifications
report_upload_completion(codecov_bin, config_file, args.dry_run)
if __name__ == '__main__':
main()
| Args |
python | facebook__pyre-check | client/commands/profile.py | {
"start": 1511,
"end": 2011
} | class ____(Event):
duration: int
def add_phase_duration_to_result(self, result: Dict[str, int]) -> None:
tags = self.metadata.tags
if PHASE_NAME in tags:
phase_name = tags[PHASE_NAME]
result[phase_name] = self.duration
if TRIGGERED_DEPENDENCIES in tags:
result[phase_name + ": triggered dependencies"] = int(
tags[TRIGGERED_DEPENDENCIES]
)
@dataclasses.dataclass(frozen=True)
| DurationEvent |
python | run-llama__llama_index | llama-index-core/llama_index/core/query_engine/graph_query_engine.py | {
"start": 568,
"end": 4827
} | class ____(BaseQueryEngine):
"""
Composable graph query engine.
This query engine can operate over a ComposableGraph.
It can take in custom query engines for its sub-indices.
Args:
graph (ComposableGraph): A ComposableGraph object.
custom_query_engines (Optional[Dict[str, BaseQueryEngine]]): A dictionary of
custom query engines.
recursive (bool): Whether to recursively query the graph.
**kwargs: additional arguments to be passed to the underlying index query
engine.
"""
def __init__(
self,
graph: ComposableGraph,
custom_query_engines: Optional[Dict[str, BaseQueryEngine]] = None,
recursive: bool = True,
**kwargs: Any,
) -> None:
"""Init params."""
self._graph = graph
self._custom_query_engines = custom_query_engines or {}
self._kwargs = kwargs
# additional configs
self._recursive = recursive
callback_manager = Settings.callback_manager
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> Dict[str, Any]:
"""Get prompt modules."""
return {}
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
@dispatcher.span
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
def _query_index(
self,
query_bundle: QueryBundle,
index_id: Optional[str] = None,
level: int = 0,
) -> RESPONSE_TYPE:
"""Query a single index."""
index_id = index_id or self._graph.root_id
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
# get query engine
if index_id in self._custom_query_engines:
query_engine = self._custom_query_engines[index_id]
else:
query_engine = self._graph.get_index(index_id).as_query_engine(
**self._kwargs
)
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = query_engine.retrieve(query_bundle)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
if self._recursive:
# do recursion here
nodes_for_synthesis = []
additional_source_nodes = []
for node_with_score in nodes:
node_with_score, source_nodes = self._fetch_recursive_nodes(
node_with_score, query_bundle, level
)
nodes_for_synthesis.append(node_with_score)
additional_source_nodes.extend(source_nodes)
response = query_engine.synthesize(
query_bundle, nodes_for_synthesis, additional_source_nodes
)
else:
response = query_engine.synthesize(query_bundle, nodes)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
def _fetch_recursive_nodes(
self,
node_with_score: NodeWithScore,
query_bundle: QueryBundle,
level: int,
) -> Tuple[NodeWithScore, List[NodeWithScore]]:
"""
Fetch nodes.
Uses existing node if it's not an index node.
Otherwise fetch response from corresponding index.
"""
if isinstance(node_with_score.node, IndexNode):
index_node = node_with_score.node
# recursive call
response = self._query_index(query_bundle, index_node.index_id, level + 1)
new_node = TextNode(text=str(response))
new_node_with_score = NodeWithScore(
node=new_node, score=node_with_score.score
)
return new_node_with_score, response.source_nodes
else:
return node_with_score, []
| ComposableGraphQueryEngine |
python | coleifer__peewee | peewee.py | {
"start": 160061,
"end": 160237
} | class ____(Field):
field_type = 'INT'
def adapt(self, value):
try:
return int(value)
except ValueError:
return value
| IntegerField |
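The adapt() fallback above can be exercised directly; a tiny illustration with arbitrary values:
from peewee import IntegerField

f = IntegerField()
print(f.adapt("42"))     # 42   -- numeric strings are coerced to int
print(f.adapt("n/a"))    # n/a  -- values that cannot be coerced pass through unchanged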
python | tensorflow__tensorflow | tensorflow/python/distribute/tpu_replicated_variable_test.py | {
"start": 1269,
"end": 6090
} | class ____(test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_tpu_replicated_variable_simple(self):
v0 = variables_lib.Variable([0], name='v0')
v1 = variables_lib.Variable([0], name='v1')
r = tpu_replicated_variable.TPUReplicatedVariable([v0, v1])
self.evaluate(variables_lib.global_variables_initializer())
self.assertEqual(r.variables[0], v0)
self.assertEqual(r.variables[1], v1)
self.assertEqual(r.shape.as_list(), [1])
self.assertEqual(r.dtype, v0.dtype)
self.check_replicated_variables_all_the_same(r)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_tpu_replicated_variable_update(self):
batch_size = 32
num_feature_in = 16
x = np.random.rand(batch_size, num_feature_in).astype(np.float32)
w_init = np.random.rand(batch_size, num_feature_in).astype(np.float32)
w0 = variables_lib.Variable(w_init, dtype=dtypes.float32, name='w0')
w1 = variables_lib.Variable(w_init, dtype=dtypes.float32, name='w1')
self.evaluate(variables_lib.global_variables_initializer())
w = tpu_replicated_variable.TPUReplicatedVariable([w0, w1])
# Make a copy of x so that `w` and `x` do not share the same buffer.
# See b/195972684.
self.evaluate(w.assign(x.copy()))
result = self.evaluate(w.read_value())
self.assertAllClose(result, x)
self.check_replicated_variables_all_the_same(w)
x1 = np.random.rand(batch_size, num_feature_in).astype(np.float32)
self.evaluate(w.assign_sub(x1))
result = self.evaluate(w.read_value())
self.assertAllClose(result, np.subtract(x, x1))
self.check_replicated_variables_all_the_same(w)
x2 = np.random.rand(batch_size, num_feature_in).astype(np.float32)
self.evaluate(w.assign(x.copy()))
self.evaluate(w.assign_add(x2))
result = self.evaluate(w.read_value())
self.assertAllClose(result, np.add(x, x2))
self.check_replicated_variables_all_the_same(w)
def check_replicated_variables_all_the_same(self, rv):
for v in rv.variables:
self.assertAllEqual(
self.evaluate(rv.variables[0].read_value()),
self.evaluate(v))
@combinations.generate(combinations.combine(
mode=['graph', 'eager'],
enable_async_ckpt=[True, False]
))
def test_tpu_replicated_variable_checkpoint(self, enable_async_ckpt):
batch_size = 4
num_feature_in = 2
# Initialize variables
x = np.random.rand(batch_size, num_feature_in).astype(np.float32)
w_init = np.random.rand(batch_size, num_feature_in).astype(np.float32)
w0 = variables_lib.Variable(w_init, dtype=dtypes.float32, name='w0')
w1 = variables_lib.Variable(w_init, dtype=dtypes.float32, name='w1')
self.evaluate(variables_lib.global_variables_initializer())
w = tpu_replicated_variable.TPUReplicatedVariable([w0, w1])
before_save = self.evaluate(w.read_value())
# Save w_init into checkpoint
ckpt = trackable_utils.Checkpoint(w=w)
ckpt_options = checkpoint_options.CheckpointOptions(
experimental_enable_async_checkpoint=enable_async_ckpt)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
with self.test_session():
save_path = ckpt.save(file_prefix=prefix, options=ckpt_options)
# Change values of w to x
self.evaluate(w.assign(x.copy()))
result = self.evaluate(w.read_value())
self.assertAllClose(result, x)
self.check_replicated_variables_all_the_same(w)
# Restore from the checkpoint
with self.test_session():
ckpt.restore(save_path).assert_consumed().run_restore_ops()
after_restore = self.evaluate(w.read_value())
self.check_replicated_variables_all_the_same(w)
self.assertAllClose(before_save, after_restore)
# Another round of saving/restoring to ensure that the logic of
# _copy_trackable_to_cpu works when a copy is already created in object_map.
y = np.random.rand(batch_size, num_feature_in).astype(np.float32)
z = np.random.rand(batch_size, num_feature_in).astype(np.float32)
self.evaluate(w.assign(y.copy())) # change from x to y
before_save = self.evaluate(w.read_value())
self.assertAllClose(before_save, y)
self.check_replicated_variables_all_the_same(w)
with self.test_session():
save_path = ckpt.save(file_prefix=prefix, options=ckpt_options)
self.evaluate(w.assign(z.copy())) # change from y to z
result = self.evaluate(w.read_value())
self.assertAllClose(result, z)
with self.test_session():
ckpt.restore(save_path).assert_consumed().run_restore_ops()
after_restore = self.evaluate(w.read_value())
self.check_replicated_variables_all_the_same(w)
self.assertAllClose(before_save, after_restore)
if __name__ == '__main__':
test.main()
| TPUReplicatedVariableTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol17.py | {
"start": 657,
"end": 817
} | class ____(Protocol[_T1, _T2, _T3]):
def m1(self, p0: _T1, p1: _T2, p2: _T3) -> _T1: ...
def m2(self) -> _T1: ...
def m3(self) -> _T2: ...
| Protocol2 |
python | openai__openai-python | src/openai/types/beta/realtime/session_update_event_param.py | {
"start": 916,
"end": 1080
} | class ____(TypedDict, total=False):
expires_after: SessionClientSecretExpiresAfter
"""Configuration for the ephemeral token expiration."""
| SessionClientSecret |
python | kamyu104__LeetCode-Solutions | Python/sum-of-subarray-minimums.py | {
"start": 75,
"end": 787
} | class ____(object):
def sumSubarrayMins(self, A):
"""
:type A: List[int]
:rtype: int
"""
M = 10**9 + 7
left, s1 = [0]*len(A), []
for i in xrange(len(A)):
count = 1
while s1 and s1[-1][0] > A[i]:
count += s1.pop()[1]
left[i] = count
s1.append([A[i], count])
right, s2 = [0]*len(A), []
for i in reversed(xrange(len(A))):
count = 1
while s2 and s2[-1][0] >= A[i]:
count += s2.pop()[1]
right[i] = count
s2.append([A[i], count])
return sum(a*l*r for a, l, r in itertools.izip(A, left, right)) % M
| Solution |
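The solution above is written for Python 2 (xrange, itertools.izip). A Python 3 re-expression of the same monotonic-stack counting idea, shown only for comparison with a small worked input (not the repository's code):
def sum_subarray_mins(A, MOD=10**9 + 7):
    # left[i]: number of subarrays ending at i in which A[i] is the minimum (strict on the left)
    left, s1 = [0] * len(A), []
    for i, x in enumerate(A):
        count = 1
        while s1 and s1[-1][0] > x:
            count += s1.pop()[1]
        left[i] = count
        s1.append((x, count))
    # right[i]: number of subarrays starting at i in which A[i] is the minimum
    # (non-strict on the right, which avoids double counting equal elements)
    right, s2 = [0] * len(A), []
    for i in reversed(range(len(A))):
        count = 1
        while s2 and s2[-1][0] >= A[i]:
            count += s2.pop()[1]
        right[i] = count
        s2.append((A[i], count))
    return sum(x * l * r for x, l, r in zip(A, left, right)) % MOD

print(sum_subarray_mins([3, 1, 2, 4]))   # 17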
python | ansible__ansible | lib/ansible/_internal/_errors/_captured.py | {
"start": 4075,
"end": 4384
} | class ____(AnsibleResultCapturedError):
"""An exception representing error detail captured in a module context and returned from an action's result dictionary."""
_default_message = 'Module failed.'
context = 'target'
@dataclasses.dataclass(**_messages._dataclass_kwargs)
| AnsibleModuleCapturedError |
python | numba__numba | numba/tests/test_parfors.py | {
"start": 47048,
"end": 81359
} | class ____(TestParforsBase):
""" Tests cpython, reduction and various parfors features"""
def test_arraymap(self):
def test_impl(a, x, y):
return a * x + y
self.check_variants(test_impl, lambda: self.gen_linspace_variants(3))
def test_0d_broadcast(self):
def test_impl():
X = np.array(1)
Y = np.ones((10, 12))
return np.sum(X + Y)
self.check(test_impl)
self.assertEqual(countParfors(test_impl, ()), 1)
def test_2d_parfor(self):
def test_impl():
X = np.ones((10, 12))
Y = np.zeros((10, 12))
return np.sum(X + Y)
self.check(test_impl)
self.assertEqual(countParfors(test_impl, ()), 1)
def test_nd_parfor(self):
def case1():
X = np.ones((10, 12))
Y = np.zeros((10, 12))
yield (X, Y)
data_gen = lambda: chain(case1(), self.gen_linspace_variants(2))
def test_impl(X, Y):
return np.sum(X + Y)
self.check_variants(test_impl, data_gen)
self.count_parfors_variants(test_impl, data_gen)
def test_np_func_direct_import(self):
from numpy import ones # import here becomes FreeVar
def test_impl(n):
A = ones(n)
return A[0]
n = 111
self.check(test_impl, n)
def test_size_assertion(self):
def test_impl(m, n):
A = np.ones(m)
B = np.ones(n)
return np.sum(A + B)
self.check(test_impl, 10, 10)
with self.assertRaises(AssertionError) as raises:
cfunc = njit(parallel=True)(test_impl)
cfunc(10, 9)
msg = "Sizes of A, B do not match"
self.assertIn(msg, str(raises.exception))
def test_cfg(self):
# from issue #2477
def test_impl(x, is_positive, N):
for i in numba.prange(2):
for j in range( i*N//2, (i+1)*N//2 ):
is_positive[j] = 0
if x[j] > 0:
is_positive[j] = 1
return is_positive
N = 100
x = np.random.rand(N)
is_positive = np.zeros(N)
self.check(test_impl, x, is_positive, N)
def test_reduce(self):
def test_impl(A):
init_val = 10
return reduce(lambda a,b: min(a, b), A, init_val)
n = 211
A = np.random.ranf(n)
self.check(test_impl, A)
A = np.random.randint(10, size=n).astype(np.int32)
self.check(test_impl, A)
# test checking the number of arguments for the reduce function
def test_impl():
g = lambda x: x ** 2
return reduce(g, np.array([1, 2, 3, 4, 5]), 2)
with self.assertTypingError():
self.check(test_impl)
# test checking reduction over bitarray masked arrays
n = 160
A = np.random.randint(10, size=n).astype(np.int32)
def test_impl(A):
return np.sum(A[A>=3])
self.check(test_impl, A)
# TODO: this should fuse
# self.assertTrue(countParfors(test_impl, (numba.float64[:],)) == 1)
def test_impl(A):
B = A[:,0]
return np.sum(A[B>=3,1])
self.check(test_impl, A.reshape((16,10)))
# TODO: this should also fuse
#self.assertTrue(countParfors(test_impl, (numba.float64[:,:],)) == 1)
def test_impl(A):
B = A[:,0]
return np.sum(A[B>=3,1:2])
self.check(test_impl, A.reshape((16,10)))
# this doesn't fuse due to mixed indices
self.assertEqual(countParfors(test_impl, (numba.float64[:,:],)), 2)
def test_impl(A):
min_val = np.amin(A)
return A - min_val
self.check(test_impl, A)
# this doesn't fuse due to use of reduction variable
self.assertEqual(countParfors(test_impl, (numba.float64[:],)), 2)
def test_use_of_reduction_var1(self):
def test_impl():
acc = 0
for i in prange(1):
acc = cmath.sqrt(acc)
return acc
# checks that invalid use of reduction variable is detected
msg = ("Use of reduction variable acc in an unsupported reduction function.")
with self.assertRaises(ValueError) as e:
pcfunc = self.compile_parallel(test_impl, ())
self.assertIn(msg, str(e.exception))
def test_unsupported_floordiv1(self):
def test_impl():
acc = 100
for i in prange(2):
acc //= 2
return acc
# checks that invalid use of ifloordiv reduction operator is detected
msg = ("Parallel floordiv reductions are not supported. "
"If all divisors are integers then a floordiv "
"reduction can in some cases be parallelized as "
"a multiply reduction followed by a floordiv of "
"the resulting product.")
with self.assertRaises(errors.NumbaValueError) as e:
pcfunc = self.compile_parallel(test_impl, ())
self.assertIn(msg, str(e.exception))
def test_unsupported_xor1(self):
def test_impl():
acc = 100
for i in prange(2):
acc ^= i + 2
return acc
msg = ("Use of reduction variable acc in an unsupported reduction function.")
with self.assertRaises(ValueError) as e:
pcfunc = self.compile_parallel(test_impl, ())
self.assertIn(msg, str(e.exception))
def test_parfor_array_access1(self):
# signed index of the prange generated by sum() should be replaced
# resulting in array A to be eliminated (see issue #2846)
def test_impl(n):
A = np.ones(n)
return A.sum()
n = 211
self.check(test_impl, n)
self.assertEqual(countArrays(test_impl, (types.intp,)), 0)
def test_parfor_array_access2(self):
# in this test, the prange index has the same name (i) in two loops
# thus, i has multiple definitions and is harder to replace
def test_impl(n):
A = np.ones(n)
m = 0
n = 0
for i in numba.prange(len(A)):
m += A[i]
for i in numba.prange(len(A)):
if m == n: # access in another block
n += A[i]
return m + n
n = 211
self.check(test_impl, n)
self.assertEqual(countNonParforArrayAccesses(test_impl, (types.intp,)), 0)
def test_parfor_array_access3(self):
def test_impl(n):
A = np.ones(n, np.int64)
m = 0
for i in numba.prange(len(A)):
m += A[i]
if m==2:
i = m
n = 211
with self.assertRaises(errors.UnsupportedRewriteError) as raises:
self.check(test_impl, n)
self.assertIn("Overwrite of parallel loop index", str(raises.exception))
@needs_blas
def test_parfor_array_access4(self):
# in this test, one index of a multi-dim access should be replaced
# np.dot parallel implementation produces this case
def test_impl(A, b):
return np.dot(A, b)
n = 211
d = 4
A = np.random.ranf((n, d))
b = np.random.ranf(d)
self.check(test_impl, A, b)
# make sure the parfor index is replaced in build_tuple of access to A
test_ir, tp = get_optimized_numba_ir(
test_impl, (types.Array(types.float64, 2, 'C'),
types.Array(types.float64, 1, 'C')))
# this code should have one basic block after optimization
self.assertTrue(len(test_ir.blocks) == 1 and 0 in test_ir.blocks)
block = test_ir.blocks[0]
parfor_found = False
parfor = None
for stmt in block.body:
if isinstance(stmt, numba.parfors.parfor.Parfor):
parfor_found = True
parfor = stmt
self.assertTrue(parfor_found)
build_tuple_found = False
# there should be only one build_tuple
for bl in parfor.loop_body.values():
for stmt in bl.body:
if (isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == 'build_tuple'):
build_tuple_found = True
self.assertTrue(parfor.index_var in stmt.value.items)
self.assertTrue(build_tuple_found)
def test_parfor_dtype_type(self):
# test array type replacement creates proper type
def test_impl(a):
for i in numba.prange(len(a)):
a[i] = a.dtype.type(0)
return a[4]
a = np.ones(10)
self.check(test_impl, a)
def test_parfor_array_access5(self):
# one dim is slice in multi-dim access
def test_impl(n):
X = np.ones((n, 3))
y = 0
for i in numba.prange(n):
y += X[i,:].sum()
return y
n = 211
self.check(test_impl, n)
self.assertEqual(countNonParforArrayAccesses(test_impl, (types.intp,)), 0)
@disabled_test # Test itself is problematic, see #3155
def test_parfor_hoist_setitem(self):
# Make sure that read of out is not hoisted.
def test_impl(out):
for i in prange(10):
out[0] = 2 * out[0]
return out[0]
out = np.ones(1)
self.check(test_impl, out)
@needs_blas
def test_parfor_generate_fuse(self):
# issue #2857
def test_impl(N, D):
w = np.ones(D)
X = np.ones((N, D))
Y = np.ones(N)
for i in range(3):
B = (-Y * np.dot(X, w))
return B
n = 211
d = 3
self.check(test_impl, n, d)
self.assertEqual(countArrayAllocs(test_impl, (types.intp, types.intp)), 4)
self.assertEqual(countParfors(test_impl, (types.intp, types.intp)), 4)
def test_ufunc_expr(self):
# issue #2885
def test_impl(A, B):
return np.bitwise_and(A, B)
A = np.ones(3, np.uint8)
B = np.ones(3, np.uint8)
B[1] = 0
self.check(test_impl, A, B)
def test_find_callname_intrinsic(self):
def test_impl(n):
A = unsafe_empty((n,))
for i in range(n):
A[i] = i + 2.0
return A
# the unsafe allocation should be found even though it is imported
# as a different name
self.assertEqual(countArrayAllocs(test_impl, (types.intp,)), 1)
def test_reduction_var_reuse(self):
# issue #3139
def test_impl(n):
acc = 0
for i in prange(n):
acc += 1
for i in prange(n):
acc += 2
return acc
self.check(test_impl, 16)
def test_non_identity_initial(self):
# issue #7344
def test_impl(A, cond):
s = 1
for i in prange(A.shape[0]):
if cond[i]:
s += 1
return s
self.check(test_impl, np.ones(10), np.ones(10).astype('bool'))
def test_if_not_else_reduction(self):
# issue #7344
def test_impl(A, cond):
s = 1
t = 10
for i in prange(A.shape[0]):
if cond[i]:
s += 1
t += 1
else:
s += 2
return s + t
self.check(test_impl, np.ones(10), np.ones(10).astype('bool'))
def test_two_d_array_reduction_reuse(self):
def test_impl(n):
shp = (13, 17)
size = shp[0] * shp[1]
result1 = np.zeros(shp, np.int_)
tmp = np.arange(size).reshape(shp)
for i in numba.prange(n):
result1 += tmp
for i in numba.prange(n):
result1 += tmp
return result1
self.check(test_impl, 100)
def test_one_d_array_reduction(self):
def test_impl(n):
result = np.zeros(1, np.int_)
for i in numba.prange(n):
result += np.array([i], np.int_)
return result
self.check(test_impl, 100)
def test_two_d_array_reduction(self):
def test_impl(n):
shp = (13, 17)
size = shp[0] * shp[1]
result1 = np.zeros(shp, np.int_)
tmp = np.arange(size).reshape(shp)
for i in numba.prange(n):
result1 += tmp
return result1
self.check(test_impl, 100)
def test_two_d_array_reduction_with_float_sizes(self):
# result1 is float32 and tmp is float64.
# Tests reduction with differing dtypes.
def test_impl(n):
shp = (2, 3)
result1 = np.zeros(shp, np.float32)
tmp = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).reshape(shp)
for i in numba.prange(n):
result1 += tmp
return result1
self.check(test_impl, 100)
def test_two_d_array_reduction_prod(self):
def test_impl(n):
shp = (13, 17)
result1 = 2 * np.ones(shp, np.int_)
tmp = 2 * np.ones_like(result1)
for i in numba.prange(n):
result1 *= tmp
return result1
self.check(test_impl, 100)
def test_three_d_array_reduction(self):
def test_impl(n):
shp = (3, 2, 7)
result1 = np.zeros(shp, np.int_)
for i in numba.prange(n):
result1 += np.ones(shp, np.int_)
return result1
self.check(test_impl, 100)
def test_preparfor_canonicalize_kws(self):
# test canonicalize_array_math typing for calls with kw args
def test_impl(A):
return A.argsort() + 1
n = 211
A = np.arange(n)
self.check(test_impl, A)
def test_preparfor_datetime64(self):
# test array.dtype transformation for datetime64
def test_impl(A):
return A.dtype
A = np.empty(1, np.dtype('datetime64[ns]'))
cpfunc = self.compile_parallel(test_impl, (numba.typeof(A),))
self.assertEqual(cpfunc.entry_point(A), test_impl(A))
def test_no_hoisting_with_member_function_call(self):
def test_impl(X):
n = X.shape[0]
acc = 0
for i in prange(n):
R = {1, 2, 3}
R.add(i)
tmp = 0
for x in R:
tmp += x
acc += tmp
return acc
self.check(test_impl, np.random.ranf(128))
def test_array_compare_scalar(self):
""" issue3671: X != 0 becomes an arrayexpr with operator.ne.
That is turned into a parfor by devectorizing. Make sure
the return type of the devectorized operator.ne
on integer types works properly.
"""
def test_impl():
X = np.zeros(10, dtype=np.int_)
return X != 0
self.check(test_impl)
def test_array_analysis_optional_def(self):
def test_impl(x, half):
size = len(x)
parr = x[0:size]
if half:
parr = x[0:size//2]
return parr.sum()
x = np.ones(20)
self.check(test_impl, x, True, check_scheduling=False)
def test_prange_side_effects(self):
def test_impl(a, b):
data = np.empty(len(a), dtype=np.float64)
size = len(data)
for i in numba.prange(size):
data[i] = a[i]
for i in numba.prange(size):
data[i] = data[i] + b[i]
return data
x = np.arange(10 ** 2, dtype=float)
y = np.arange(10 ** 2, dtype=float)
self.check(test_impl, x, y)
self.assertEqual(countParfors(test_impl,
(types.Array(types.float64, 1, 'C'),
types.Array(types.float64, 1, 'C'))), 1)
def test_tuple1(self):
def test_impl(a):
atup = (3, 4)
b = 7
for i in numba.prange(len(a)):
a[i] += atup[0] + atup[1] + b
return a
x = np.arange(10)
self.check(test_impl, x)
def test_tuple2(self):
def test_impl(a):
atup = a.shape
b = 7
for i in numba.prange(len(a)):
a[i] += atup[0] + b
return a
x = np.arange(10)
self.check(test_impl, x)
def test_tuple3(self):
def test_impl(a):
atup = (np.arange(10), 4)
b = 7
for i in numba.prange(len(a)):
a[i] += atup[0][5] + atup[1] + b
return a
x = np.arange(10)
self.check(test_impl, x)
def test_namedtuple1(self):
def test_impl(a):
antup = TestNamedTuple(part0=3, part1=4)
b = 7
for i in numba.prange(len(a)):
a[i] += antup.part0 + antup.part1 + b
return a
x = np.arange(10)
self.check(test_impl, x)
def test_namedtuple2(self):
TestNamedTuple2 = namedtuple('TestNamedTuple2', ('part0', 'part1'))
def test_impl(a):
antup = TestNamedTuple2(part0=3, part1=4)
b = 7
for i in numba.prange(len(a)):
a[i] += antup.part0 + antup.part1 + b
return a
x = np.arange(10)
self.check(test_impl, x)
def test_namedtuple3(self):
# issue5872: test that a.y[:] = 5 is not removed as
# deadcode.
TestNamedTuple3 = namedtuple('TestNamedTuple3', ['y'])
def test_impl(a):
a.y[:] = 5
def comparer(a, b):
np.testing.assert_almost_equal(a.y, b.y)
x = TestNamedTuple3(y=np.zeros(10))
self.check(test_impl, x, check_arg_equality=[comparer])
def test_inplace_binop(self):
def test_impl(a, b):
b += a
return b
X = np.arange(10) + 10
Y = np.arange(10) + 100
self.check(test_impl, X, Y)
self.assertEqual(countParfors(test_impl,
(types.Array(types.float64, 1, 'C'),
types.Array(types.float64, 1, 'C'))), 1)
def test_tuple_concat(self):
# issue5383
def test_impl(a):
n = len(a)
array_shape = n, n
indices = np.zeros(((1,) + array_shape + (1,)), dtype=np.uint64)
k_list = indices[0, :]
for i, g in enumerate(a):
k_list[i, i] = i
return k_list
x = np.array([1, 1])
self.check(test_impl, x)
def test_tuple_concat_with_reverse_slice(self):
# issue5383
def test_impl(a):
n = len(a)
array_shape = n, n
indices = np.zeros(((1,) + array_shape + (1,))[:-1],
dtype=np.uint64)
k_list = indices[0, :]
for i, g in enumerate(a):
k_list[i, i] = i
return k_list
x = np.array([1, 1])
self.check(test_impl, x)
def test_array_tuple_concat(self):
# issue6399
def test_impl(a):
S = (a,) + (a, a)
return S[0].sum()
x = np.ones((3,3))
self.check(test_impl, x)
def test_high_dimension1(self):
# issue6749
def test_impl(x):
return x * 5.0
x = np.ones((2, 2, 2, 2, 2, 15))
self.check(test_impl, x)
def test_tuple_arg(self):
def test_impl(x, sz):
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10, 5)
self.check(test_impl, np.empty(sz), sz)
def test_tuple_arg_not_whole_array(self):
def test_impl(x, sz):
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10, 5)
self.check(test_impl, np.zeros(sz), (10, 3))
def test_tuple_for_pndindex(self):
def test_impl(x):
sz = (10, 5)
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10, 5)
self.check(test_impl, np.zeros(sz))
def test_tuple_arg_literal(self):
def test_impl(x, first):
sz = (first, 5)
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10, 5)
self.check(test_impl, np.zeros(sz), 10)
def test_tuple_of_literal_nonliteral(self):
# This test has to be done manually as the self.check uses
# compile_isolated and one function cannot "see" the other
def test_impl(x, sz):
for i in numba.pndindex(sz):
x[i] = 1
return x
def call(x, fn):
return fn(x, (10, 3)) # Only want to iterate to the 3rd
get_input = lambda: np.zeros((10, 10))
expected = call(get_input(), test_impl)
def check(dec):
f1 = dec(test_impl)
f2 = njit(call) # no parallel semantics in the caller
got = f2(get_input(), f1)
self.assertPreciseEqual(expected, got)
for d in (njit, njit(parallel=True)):
check(d)
def test_tuple_arg_1d(self):
def test_impl(x, sz):
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10,)
self.check(test_impl, np.zeros(sz), sz)
def test_tuple_arg_1d_literal(self):
def test_impl(x):
sz = (10,)
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10,)
self.check(test_impl, np.zeros(sz))
def test_int_arg_pndindex(self):
def test_impl(x, sz):
for i in numba.pndindex(sz):
x[i] = 1
return x
self.check(test_impl, np.zeros((10, 10)), 3)
def test_prange_unknown_call1(self):
@register_jitable
def issue7854_proc(u, i, even, size):
for j in range((even + i + 1) % 2 + 1, size - 1, 2):
u[i, j] = u[i + 1, j] + 1
# issue7854
# Forbid fusion in unanalyzable call inside prange.
def test_impl(u, size):
for i in numba.prange(1, size - 1):
issue7854_proc(u, i, 0, size)
for i in numba.prange(1, size - 1):
issue7854_proc(u, i, 1, size)
return u
size = 4
u = np.zeros((size, size))
cptypes = (numba.float64[:, ::1], types.int64)
self.assertEqual(countParfors(test_impl, cptypes), 2)
self.check(test_impl, u, size)
def test_prange_index_calc1(self):
# Should forbid fusion due to cross-iteration dependency as
# detected by loop index calculation (i+1) as array index.
def test_impl(u, size):
for i in numba.prange(1, size - 1):
for j in range((i + 1) % 2 + 1, size - 1, 2):
u[i, j] = u[i + 1, j] + 1
for i in numba.prange(1, size - 1):
for j in range(i % 2 + 1, size - 1, 2):
u[i, j] = u[i + 1, j] + 1
return u
size = 4
u = np.zeros((size, size))
cptypes = (numba.float64[:, ::1], types.int64)
self.assertEqual(countParfors(test_impl, cptypes), 2)
self.check(test_impl, u, size)
def test_prange_reverse_order1(self):
# Testing if reversed loop index usage as array index
# prevents fusion.
def test_impl(a, b, size):
for i in numba.prange(size):
for j in range(size):
a[i, j] = b[i, j] + 1
for i in numba.prange(size):
for j in range(size):
b[j, i] = 3
return a[0, 0] + b[0, 0]
size = 10
a = np.zeros((size, size))
b = np.zeros((size, size))
cptypes = (numba.float64[:, ::1], numba.float64[:, ::1], types.int64)
self.assertEqual(countParfors(test_impl, cptypes), 2)
self.check(test_impl, a, b, size)
def test_prange_parfor_index_then_not(self):
# Testing if accessing an array first with a parfor index then
# without will prevent fusion.
def test_impl(a, size):
b = 0
for i in numba.prange(size):
a[i] = i
for i in numba.prange(size):
b += a[5]
return b
size = 10
a = np.zeros(size)
cptypes = (numba.float64[:], types.int64)
self.assertEqual(countParfors(test_impl, cptypes), 2)
self.check(test_impl, a, size)
def test_prange_parfor_index_const_tuple_fusion(self):
# Testing if accessing a tuple with prange index
# and later with a constant will not prevent fusion.
def test_impl(a, tup, size):
acc = 0
for i in numba.prange(size):
a[i] = i + tup[i]
for i in numba.prange(size):
acc += a[i] + tup[1]
return acc
size = 10
a = np.zeros(size)
b = tuple(a)
cptypes = (numba.float64[:],
types.containers.UniTuple(types.float64, size),
types.intp)
self.assertEqual(countParfors(test_impl, cptypes), 1)
self.check(test_impl, a, b, size)
def test_prange_non_parfor_index_then_opposite(self):
# Testing if accessing an array first without a parfor index then
# with will prevent fusion.
def test_impl(a, b, size):
for i in numba.prange(size):
b[i] = a[5]
for i in numba.prange(size):
a[i] = i
# Need this to stop previous prange from being optimized away.
b[0] += a[0]
return b
size = 10
a = np.zeros(size)
b = np.zeros(size)
cptypes = (numba.float64[:], numba.float64[:], types.int64)
self.assertEqual(countParfors(test_impl, cptypes), 2)
self.check(test_impl, a, b, size)
def test_prange_optional(self):
def test_impl(arr, pred=None):
for i in prange(1):
if pred is not None:
arr[i] = 0.0
arr = np.ones(10)
self.check(test_impl, arr, None,
check_arg_equality=[np.testing.assert_almost_equal,
lambda x, y: x == y])
self.assertEqual(arr.sum(), 10.0)
def test_untraced_value_tuple(self):
# This is a test for issue #6478.
def test_impl():
a = (1.2, 1.3)
return a[0]
with self.assertRaises(AssertionError) as raises:
self.check(test_impl)
self.assertIn("\'@do_scheduling\' not found", str(raises.exception))
def test_recursive_untraced_value_tuple(self):
# This is a test for issue #6478.
def test_impl():
a = ((1.2, 1.3),)
return a[0][0]
with self.assertRaises(AssertionError) as raises:
self.check(test_impl)
self.assertIn("\'@do_scheduling\' not found", str(raises.exception))
def test_untraced_value_parfor(self):
# This is a test for issue #6478.
def test_impl(arr):
a = (1.2, 1.3)
n1 = len(arr)
arr2 = np.empty(n1, np.float64)
for i in prange(n1):
arr2[i] = arr[i] * a[0]
n2 = len(arr2)
arr3 = np.empty(n2, np.float64)
for j in prange(n2):
arr3[j] = arr2[j] - a[1]
total = 0.0
n3 = len(arr3)
for k in prange(n3):
total += arr3[k]
return total + a[0]
arg = (types.Array(types.int64, 1, 'C'), )
self.assertEqual(countParfors(test_impl, arg), 1)
arr = np.arange(10, dtype=np.int64)
self.check(test_impl, arr)
def test_setitem_2d_one_replaced(self):
# issue7843
def test_impl(x):
count = 0
for n in range(x.shape[0]):
# Useless "if" necessary to trigger bug.
if n:
n
x[count, :] = 1
count += 1
return x
self.check(test_impl, np.zeros((3, 1)))
def test_1array_control_flow(self):
# issue8146
def test_impl(arr, flag1, flag2):
inv = np.arange(arr.size)
if flag1:
return inv.astype(np.float64)
if flag2:
ret = inv[inv]
else:
ret = inv[inv - 1]
return ret / arr.size
arr = np.arange(100)
self.check(test_impl, arr, True, False)
self.check(test_impl, arr, True, True)
self.check(test_impl, arr, False, False)
def test_2array_1_control_flow(self):
# issue8146
def test_impl(arr, l, flag):
inv1 = np.arange(arr.size)
inv2 = np.arange(l, arr.size + l)
if flag:
ret = inv1[inv1]
else:
ret = inv1[inv1 - 1]
return ret / inv2
arr = np.arange(100)
self.check(test_impl, arr, 10, True)
self.check(test_impl, arr, 10, False)
def test_2array_2_control_flow(self):
# issue8146
def test_impl(arr, l, flag):
inv1 = np.arange(arr.size)
inv2 = np.arange(l, arr.size + l)
if flag:
ret1 = inv1[inv1]
ret2 = inv2[inv1]
else:
ret1 = inv1[inv1 - 1]
ret2 = inv2[inv1 - 1]
return ret1 / ret2
arr = np.arange(100)
self.check(test_impl, arr, 10, True)
self.check(test_impl, arr, 10, False)
def test_issue8515(self):
# issue8515: an array is filled in the first prange and
# then accessed with c[i - 1] in the next prange which
# should prevent fusion with the previous prange.
def test_impl(n):
r = np.zeros(n, dtype=np.intp)
c = np.zeros(n, dtype=np.intp)
for i in prange(n):
for j in range(i):
c[i] += 1
for i in prange(n):
if i == 0:
continue
r[i] = c[i] - c[i - 1]
return r[1:]
self.check(test_impl, 15)
self.assertEqual(countParfors(test_impl, (types.int64, )), 2)
def test_issue9029(self):
# issue9029: too many parfors executed in one function
# overflowed the stack.
def test_impl(i1, i2):
N = 30
S = 3
a = np.empty((N,N))
# The stack should overflow if there are 30*30*2 (# of parfors)
# iterations.
for y in range(N):
for x in range(N):
values = np.ones(S)
v = values[0]
p2 = np.empty(S)
for i in prange(i1, i2):
p2[i] = 1
j = p2[0]
a[y,x] = v + j
return a
# We pass in 0 and 3 so that the function can't analyze the loop
# bounds on the prange to generate a signed loop whereas the
# np.ones will be an unsigned loop.
self.check(test_impl, 0, 3)
def test_fusion_no_side_effects(self):
def test_impl(a, b):
X = np.ones(100)
b = math.ceil(b)
Y = np.ones(100)
c = int(max(a, b))
return X + Y + c
self.check(test_impl, 3.7, 4.3)
self.assertEqual(countParfors(test_impl, (types.float64, types.float64)), 1)
def test_issue9256_lower_sroa_conflict(self):
@njit(parallel=True)
def def_in_loop(x):
c = 0
set_num_threads(1)
for i in prange(x):
c = i
return c
self.assertEqual(def_in_loop(10), def_in_loop.py_func(10))
def test_issue9256_lower_sroa_conflict_variant1(self):
def def_in_loop(x):
c = x
set_num_threads(1)
for _i in prange(x):
if c: # forces 3 SSA versions
d = x + 4
return c, d > 0
expected = def_in_loop(4)
self.assertEqual(expected, njit(parallel=False)(def_in_loop)(4))
self.assertEqual(expected, njit(parallel=True)(def_in_loop)(4))
def test_issue9256_lower_sroa_conflict_variant2(self):
def def_in_loop(x):
c = x
set_num_threads(1)
for _i in prange(x):
if c:
for _j in range(x): # forces 4 SSA versions
d = x + 4
return c, d > 0
expected = def_in_loop(4)
self.assertEqual(expected, njit(parallel=False)(def_in_loop)(4))
self.assertEqual(expected, njit(parallel=True)(def_in_loop)(4))
@needs_lapack # use of np.linalg.solve
@skip_ppc64le_invalid_ctr_loop
def test_issue9490_non_det_ssa_problem(self):
# Test modified to include https://github.com/numba/numba/issues/9581
# which is an issue with hoisting
cmd = [
sys.executable,
"-m",
"numba.tests.parfor_iss9490_usecase",
]
envs = {
**os.environ,
# Reproducer consistently fails with the following hashseed.
"PYTHONHASHSEED": "1",
# See https://github.com/numba/numba/issues/9501
# for details of why num-thread pinning is needed.
"NUMBA_NUM_THREADS": "1",
}
try:
subp.check_output(cmd, env=envs,
stderr=subp.STDOUT,
encoding='utf-8')
except subp.CalledProcessError as e:
msg = f"subprocess failed with output:\n{e.output}"
self.fail(msg=msg)
@skip_parfors_unsupported
| TestParfors |
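A minimal sketch of the numba.prange reduction pattern the TestParfors cases above exercise, assuming numba and numpy are available; the function below is illustrative only and not part of the test suite.

import numpy as np
from numba import njit, prange

@njit(parallel=True)
def scaled_sum(a):
    # The prange loop is a parfor candidate; the += reduction is combined across threads.
    total = 0.0
    for i in prange(a.shape[0]):
        total += a[i] * 2.0
    return total

assert scaled_sum(np.ones(10)) == 20.0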
python | django__django | tests/update/models.py | {
"start": 123,
"end": 357
} | class ____(models.Model):
name = models.CharField(max_length=20)
value = models.CharField(max_length=20)
another_value = models.CharField(max_length=20, blank=True)
is_active = models.BooleanField(default=True)
| DataPoint |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 38500,
"end": 39000
} | class ____(DictField):
"""A field that maps a name to a specified field type. Similar to
a DictField, except the 'value' of each item must match the specified
field type.
"""
def __init__(self, field=None, *args, **kwargs):
# XXX ValidationError raised outside the "validate" method.
if not isinstance(field, BaseField):
self.error("Argument to MapField constructor must be a valid field")
super().__init__(field=field, *args, **kwargs)
| MapField |
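A minimal usage sketch for the MapField above, assuming a mongoengine Document; the Recipe class and its field name are illustrative only.

import mongoengine as me

class Recipe(me.Document):
    # Every value stored under 'steps' must validate as a StringField; keys are arbitrary names.
    steps = me.MapField(field=me.StringField())

doc = Recipe(steps={'prep': 'chop the onions', 'cook': 'simmer for 20 minutes'})
assert doc.steps['cook'].startswith('simmer')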
python | scipy__scipy | scipy/linalg/tests/test_decomp_lu.py | {
"start": 11321,
"end": 12629
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(1682281250228846)
def test_lu(self):
a0 = self.rng.random((10, 10))
b = self.rng.random((10,))
for order in ['C', 'F']:
a = np.array(a0, order=order)
x1 = solve(a, b)
lu_a = lu_factor(a)
x2 = lu_solve(lu_a, b)
assert_allclose(x1, x2)
def test_check_finite(self):
a = self.rng.random((10, 10))
b = self.rng.random((10,))
x1 = solve(a, b)
lu_a = lu_factor(a, check_finite=False)
x2 = lu_solve(lu_a, b, check_finite=False)
assert_allclose(x1, x2)
@pytest.mark.parametrize('dt', [int, float, np.float32, complex, np.complex64])
@pytest.mark.parametrize('dt_b', [int, float, np.float32, complex, np.complex64])
def test_empty(self, dt, dt_b):
lu_and_piv = (np.empty((0, 0), dtype=dt), np.array([]))
b = np.asarray([], dtype=dt_b)
x = lu_solve(lu_and_piv, b)
assert x.shape == (0,)
m = lu_solve((np.eye(2, dtype=dt), [0, 1]), np.ones(2, dtype=dt_b))
assert x.dtype == m.dtype
b = np.empty((0, 0), dtype=dt_b)
x = lu_solve(lu_and_piv, b)
assert x.shape == (0, 0)
assert x.dtype == m.dtype
| TestLUSolve |
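A short sketch of the lu_factor/lu_solve pattern the tests above cover: factor once, then reuse the factorization for the actual solves.

import numpy as np
from scipy.linalg import lu_factor, lu_solve

a = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])
lu, piv = lu_factor(a)        # LU factorization with pivoting, computed once
x = lu_solve((lu, piv), b)    # cheap triangular solves against the stored factors
np.testing.assert_allclose(a @ x, b)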
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_shared.py | {
"start": 1547,
"end": 4431
} | class ____(BaseModel):
"""Base connection information for Azure Database for PostgreSQL connections.
:param application_name: Name of the application connecting to the database.
:type application_name: str
:param host: Hostname of the Azure Database for PostgreSQL server.
:type host: str | None
:param dbname: Name of the database to connect to.
:type dbname: str
:param port: Port number for the connection.
:type port: int
:param sslmode: SSL mode for the connection.
:type sslmode: SSLMode
"""
application_name: str = "azure-postgresql"
host: str | None = None
dbname: str = "postgres"
port: Annotated[NonNegativeInt, Field(le=65535)] = 5432
sslmode: SSLMode = SSLMode.require
def run_coroutine_in_sync(coroutine: Coroutine[Any, Any, R]) -> R:
def run_in_new_loop() -> R:
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
try:
return new_loop.run_until_complete(coroutine)
finally:
new_loop.close()
try:
loop = asyncio.get_running_loop()
except RuntimeError:
result = asyncio.run(coroutine)
else:
if threading.current_thread() is threading.main_thread():
if not loop.is_running():
result = loop.run_until_complete(coroutine)
else:
with ThreadPoolExecutor() as pool:
future = pool.submit(run_in_new_loop)
result = future.result()
else:
future = asyncio.run_coroutine_threadsafe(coroutine, loop)
result = future.result()
return result
def get_username_password(
credentials: BasicAuth | AccessToken,
) -> tuple[str, str]:
"""Get username and password from credentials.
:param credentials: BasicAuth for username/password or AccessToken for JWT token.
:type credentials: BasicAuth | AccessToken
:raises ValueError: User name not found in JWT token header
:raises TypeError: Invalid credentials type
:return: Tuple of username and password strings (plaintext).
:rtype: tuple[str, str]
"""
if isinstance(credentials, BasicAuth):
return credentials.username, credentials.password
elif isinstance(credentials, AccessToken):
token = credentials.token
_header, body_, _signature = token.split(".")
body = json.loads(
base64.b64decode(body_ + "=" * (4 - len(body_) % 4)).decode("utf-8")
)
username: str | None = body.get("upn", body.get("unique_name"))
if username is None:
raise ValueError("User name not found in JWT token header")
return username, token
else:
raise TypeError(
f"Invalid credentials type: {type(credentials)}. "
"Expected BasicAuth or TokenCredential."
)
| BaseConnectionInfo |
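A small sketch of the padding trick get_username_password relies on when decoding the JWT payload segment; the claims dict here is a made-up example.

import base64
import json

claims_in = {"upn": "user@example.com"}
# JWT segments are base64url-encoded with the trailing '=' padding stripped.
segment = base64.urlsafe_b64encode(json.dumps(claims_in).encode()).decode().rstrip("=")

# Restore the padding to a multiple of 4 before decoding ('-len % 4' adds 0 to 3 '=' characters).
padded = segment + "=" * (-len(segment) % 4)
claims_out = json.loads(base64.urlsafe_b64decode(padded))
assert claims_out["upn"] == "user@example.com"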
python | numba__numba | numba/core/typing/collections.py | {
"start": 839,
"end": 1111
} | class ____(AbstractTemplate):
key = operator.truth
def generic(self, args, kws):
assert not kws
(val,) = args
if isinstance(val, (types.Sequence)):
return signature(types.boolean, val)
@infer_global(operator.getitem)
| SequenceBool |
python | pypa__pipenv | pipenv/patched/pip/_internal/operations/freeze.py | {
"start": 832,
"end": 8919
} | class ____(NamedTuple):
requirement: str
comments: List[str]
def freeze(
requirement: Optional[List[str]] = None,
local_only: bool = False,
user_only: bool = False,
paths: Optional[List[str]] = None,
isolated: bool = False,
exclude_editable: bool = False,
skip: Container[str] = (),
) -> Generator[str, None, None]:
installations: Dict[str, FrozenRequirement] = {}
dists = get_environment(paths).iter_installed_distributions(
local_only=local_only,
skip=(),
user_only=user_only,
)
for dist in dists:
req = FrozenRequirement.from_dist(dist)
if exclude_editable and req.editable:
continue
installations[req.canonical_name] = req
if requirement:
# the options that don't get turned into an InstallRequirement
# should only be emitted once, even if the same option is in multiple
# requirements files, so we need to keep track of what has been emitted
# so that we don't emit it again if it's seen again
emitted_options: Set[str] = set()
# keep track of which files a requirement is in so that we can
# give an accurate warning if a requirement appears multiple times.
req_files: Dict[str, List[str]] = collections.defaultdict(list)
for req_file_path in requirement:
with open(req_file_path) as req_file:
for line in req_file:
if (
not line.strip()
or line.strip().startswith("#")
or line.startswith(
(
"-r",
"--requirement",
"-f",
"--find-links",
"-i",
"--index-url",
"--pre",
"--trusted-host",
"--process-dependency-links",
"--extra-index-url",
"--use-feature",
)
)
):
line = line.rstrip()
if line not in emitted_options:
emitted_options.add(line)
yield line
continue
if line.startswith("-e") or line.startswith("--editable"):
if line.startswith("-e"):
line = line[2:].strip()
else:
line = line[len("--editable") :].strip().lstrip("=")
line_req = install_req_from_editable(
line,
isolated=isolated,
)
else:
line_req = install_req_from_line(
COMMENT_RE.sub("", line).strip(),
isolated=isolated,
)
if not line_req.name:
logger.info(
"Skipping line in requirement file [%s] because "
"it's not clear what it would install: %s",
req_file_path,
line.strip(),
)
logger.info(
" (add #egg=PackageName to the URL to avoid"
" this warning)"
)
else:
line_req_canonical_name = canonicalize_name(line_req.name)
if line_req_canonical_name not in installations:
# either it's not installed, or it is installed
# but has been processed already
if not req_files[line_req.name]:
logger.warning(
"Requirement file [%s] contains %s, but "
"package %r is not installed",
req_file_path,
COMMENT_RE.sub("", line).strip(),
line_req.name,
)
else:
req_files[line_req.name].append(req_file_path)
else:
yield str(installations[line_req_canonical_name]).rstrip()
del installations[line_req_canonical_name]
req_files[line_req.name].append(req_file_path)
# Warn about requirements that were included multiple times (in a
# single requirements file or in different requirements files).
for name, files in req_files.items():
if len(files) > 1:
logger.warning(
"Requirement %s included multiple times [%s]",
name,
", ".join(sorted(set(files))),
)
yield ("## The following requirements were added by pip freeze:")
for installation in sorted(installations.values(), key=lambda x: x.name.lower()):
if installation.canonical_name not in skip:
yield str(installation).rstrip()
def _format_as_name_version(dist: BaseDistribution) -> str:
try:
dist_version = dist.version
except InvalidVersion:
# legacy version
return f"{dist.raw_name}==={dist.raw_version}"
else:
return f"{dist.raw_name}=={dist_version}"
def _get_editable_info(dist: BaseDistribution) -> _EditableInfo:
"""
Compute and return values (req, comments) for use in
FrozenRequirement.from_dist().
"""
editable_project_location = dist.editable_project_location
assert editable_project_location
location = os.path.normcase(os.path.abspath(editable_project_location))
from pipenv.patched.pip._internal.vcs import RemoteNotFoundError, RemoteNotValidError, vcs
vcs_backend = vcs.get_backend_for_dir(location)
if vcs_backend is None:
display = _format_as_name_version(dist)
logger.debug(
'No VCS found for editable requirement "%s" in: %r',
display,
location,
)
return _EditableInfo(
requirement=location,
comments=[f"# Editable install with no version control ({display})"],
)
vcs_name = type(vcs_backend).__name__
try:
req = vcs_backend.get_src_requirement(location, dist.raw_name)
except RemoteNotFoundError:
display = _format_as_name_version(dist)
return _EditableInfo(
requirement=location,
comments=[f"# Editable {vcs_name} install with no remote ({display})"],
)
except RemoteNotValidError as ex:
display = _format_as_name_version(dist)
return _EditableInfo(
requirement=location,
comments=[
f"# Editable {vcs_name} install ({display}) with either a deleted "
f"local remote or invalid URI:",
f"# '{ex.url}'",
],
)
except BadCommand:
logger.warning(
"cannot determine version of editable source in %s "
"(%s command not found in path)",
location,
vcs_backend.name,
)
return _EditableInfo(requirement=location, comments=[])
except InstallationError as exc:
logger.warning("Error when trying to get requirement for VCS system %s", exc)
else:
return _EditableInfo(requirement=req, comments=[])
logger.warning("Could not determine repository location of %s", location)
return _EditableInfo(
requirement=location,
comments=["## !! Could not determine repository location"],
)
@dataclass(frozen=True)
| _EditableInfo |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_cards.py | {
"start": 3477,
"end": 9128
} | class ____(TestCase):
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_cards_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_cards_response().with_record(_a_card()).with_record(_a_card()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 2
@HttpMocker()
def test_given_many_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_cards_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_cards_response().with_pagination().with_record(_a_card().with_id("last_record_id_from_first_page")).build(),
)
http_mocker.get(
_cards_request()
.with_starting_after("last_record_id_from_first_page")
.with_created_gte(_A_START_DATE)
.with_created_lte(_NOW)
.with_limit(100)
.build(),
_cards_response().with_record(_a_card()).with_record(_a_card()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 3
@HttpMocker()
def test_given_no_state_when_read_then_return_ignore_lookback(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_cards_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_cards_response().with_record(_a_card()).build(),
)
self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10))
# request matched http_mocker
@HttpMocker()
def test_when_read_then_add_cursor_field(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_cards_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_cards_response().with_record(_a_card()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10))
assert output.records[0].record.data["updated"] == output.records[0].record.data["created"]
@HttpMocker()
def test_given_slice_range_when_read_then_perform_multiple_requests(self, http_mocker: HttpMocker) -> None:
start_date = _NOW - timedelta(days=30)
slice_range = timedelta(days=20)
slice_datetime = start_date + slice_range
http_mocker.get(
_cards_request()
.with_created_gte(start_date)
.with_created_lte(slice_datetime - _AVOIDING_INCLUSIVE_BOUNDARIES)
.with_limit(100)
.build(),
_cards_response().build(),
)
http_mocker.get(
_cards_request().with_created_gte(slice_datetime).with_created_lte(_NOW).with_limit(100).build(),
_cards_response().build(),
)
self._read(_config().with_start_date(start_date).with_slice_range_in_days(slice_range.days))
# request matched http_mocker
@HttpMocker()
def test_given_http_status_400_when_read_then_stream_did_not_run(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_cards_request().with_any_query_params().build(),
a_response_with_status(400),
)
output = self._read(_config())
assert_stream_did_not_run(output, _STREAM_NAME, "Your account is not set up to use Issuing")
@HttpMocker()
def test_given_http_status_401_when_read_then_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_cards_request().with_any_query_params().build(),
a_response_with_status(401),
)
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
@HttpMocker()
def test_given_rate_limited_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_cards_request().with_any_query_params().build(),
[
a_response_with_status(429),
_cards_response().with_record(_a_card()).build(),
],
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_once_before_200_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_cards_request().with_any_query_params().build(),
[a_response_with_status(500), _cards_response().with_record(_a_card()).build()],
)
output = self._read(_config())
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_when_read_then_raise_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_cards_request().with_any_query_params().build(),
a_response_with_status(500),
)
with patch.object(HttpStatusErrorHandler, "max_retries", new=1):
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
def _read(self, config: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return _read(config, SyncMode.full_refresh, expecting_exception=expecting_exception)
@freezegun.freeze_time(_NOW.isoformat())
| FullRefreshTest |
python | matplotlib__matplotlib | lib/matplotlib/scale.py | {
"start": 11927,
"end": 12318
} | class ____(Transform):
input_dims = output_dims = 1
def __init__(self, base):
super().__init__()
self.base = base
def __str__(self):
return f"{type(self).__name__}(base={self.base})"
def transform_non_affine(self, values):
return np.power(self.base, values)
def inverted(self):
return LogTransform(self.base)
| InvertedLogTransform |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self1.py | {
"start": 770,
"end": 2251
} | class ____:
x: Self
def method1(self) -> Self:
return self
def method2(self, a: Self) -> None:
x: Self = a
y = Self
def method3(self: Self) -> Self:
# This should generate an error because Self doesn't accept a type arg.
y: Self[int]
return self
# This should generate an error because Self can't be used with
# methods that declare a non-Self type for "self".
def method4(self: T, a: Self) -> T:
# This should generate an error because Self can't be used with
# methods that declare a non-Self type for "self".
x: Self
return self
@classmethod
def method5(cls) -> type[Self]:
return cls
@classmethod
def method6(cls, a: Self) -> None: ...
@classmethod
def method7(cls: type[Self]) -> type[Self]:
return cls
# This should generate an error because Self can't be used with
# methods that declare a non-Self type for "self".
@classmethod
def method8(cls: type[T], a: Self) -> type[T]:
# This should generate an error because Self can't be used with
# methods that declare a non-Self type for "self".
x: Self
return cls
# This should generate an error because Self can't be used in
# a static method.
@staticmethod
def stat_method1(a: Self) -> None:
# This should generate an error because Self can't be used in
# a static method.
x: Self
| B |
python | readthedocs__readthedocs.org | readthedocs/profiles/views.py | {
"start": 2572,
"end": 2650
} | class ____(SettingsOverrideObject):
_default_class = LoginViewBase
| LoginView |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox18.py | {
"start": 315,
"end": 1579
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox18.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox("E9", "This is some text")
worksheet.insert_textbox(
"E19", "This is some text", {"align": {"vertical": "middle"}}
)
worksheet.insert_textbox(
"E29", "This is some text", {"align": {"vertical": "bottom"}}
)
worksheet.insert_textbox(
"E39",
"This is some text",
{"align": {"vertical": "top", "horizontal": "center"}},
)
worksheet.insert_textbox(
"E49",
"This is some text",
{"align": {"vertical": "middle", "horizontal": "center"}},
)
worksheet.insert_textbox(
"E59",
"This is some text",
{"align": {"vertical": "bottom", "horizontal": "center"}},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_snippets.py | {
"start": 15460,
"end": 15565
} | class ____:
def __fspath__(self):
return os.path.join(BASE, '_snippets')
| _PathLikeExampleObject |
python | wandb__wandb | wandb/vendor/pygments/lexers/d.py | {
"start": 418,
"end": 6980
} | class ____(RegexLexer):
"""
For D source.
.. versionadded:: 1.2
"""
name = 'D'
filenames = ['*.d', '*.di']
aliases = ['d']
mimetypes = ['text/x-dsrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# (r'\\\n', Text), # line continuations
# Comments
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nested_comment'),
# Keywords
(words((
'abstract', 'alias', 'align', 'asm', 'assert', 'auto', 'body',
'break', 'case', 'cast', 'catch', 'class', 'const', 'continue',
'debug', 'default', 'delegate', 'delete', 'deprecated', 'do', 'else',
'enum', 'export', 'extern', 'finally', 'final', 'foreach_reverse',
'foreach', 'for', 'function', 'goto', 'if', 'immutable', 'import',
'interface', 'invariant', 'inout', 'in', 'is', 'lazy', 'mixin',
'module', 'new', 'nothrow', 'out', 'override', 'package', 'pragma',
'private', 'protected', 'public', 'pure', 'ref', 'return', 'scope',
'shared', 'static', 'struct', 'super', 'switch', 'synchronized',
'template', 'this', 'throw', 'try', 'typedef', 'typeid', 'typeof',
'union', 'unittest', 'version', 'volatile', 'while', 'with',
'__gshared', '__traits', '__vector', '__parameters'),
suffix=r'\b'),
Keyword),
(words((
'bool', 'byte', 'cdouble', 'cent', 'cfloat', 'char', 'creal',
'dchar', 'double', 'float', 'idouble', 'ifloat', 'int', 'ireal',
'long', 'real', 'short', 'ubyte', 'ucent', 'uint', 'ulong',
'ushort', 'void', 'wchar'), suffix=r'\b'),
Keyword.Type),
(r'(false|true|null)\b', Keyword.Constant),
(words((
'__FILE__', '__MODULE__', '__LINE__', '__FUNCTION__', '__PRETTY_FUNCTION__'
'', '__DATE__', '__EOF__', '__TIME__', '__TIMESTAMP__', '__VENDOR__',
'__VERSION__'), suffix=r'\b'),
Keyword.Pseudo),
(r'macro\b', Keyword.Reserved),
(r'(string|wstring|dstring|size_t|ptrdiff_t)\b', Name.Builtin),
# FloatLiteral
# -- HexFloat
(r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
r'[pP][+\-]?[0-9_]+[fFL]?[i]?', Number.Float),
# -- DecimalFloat
(r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[fFL]?[i]?', Number.Float),
(r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[fFL]?[i]?', Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+', Number.Bin),
# -- Octal
(r'0[0-7_]+', Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+', Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)([LUu]|Lu|LU|uL|UL)?', Number.Integer),
# CharacterLiteral
(r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\&\w+;|.)'""",
String.Char),
# StringLiteral
# -- WysiwygString
(r'r"[^"]*"[cwd]?', String),
# -- AlternateWysiwygString
(r'`[^`]*`[cwd]?', String),
# -- DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"[cwd]?', String),
# -- EscapeSequence
(r"\\(['\"?\\abfnrtv]|x[0-9a-fA-F]{2}|[0-7]{1,3}"
r"|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}|&\w+;)",
String),
# -- HexString
(r'x"[0-9a-fA-F_\s]*"[cwd]?', String),
# -- DelimitedString
(r'q"\[', String, 'delimited_bracket'),
(r'q"\(', String, 'delimited_parenthesis'),
(r'q"<', String, 'delimited_angle'),
(r'q"\{', String, 'delimited_curly'),
(r'q"([a-zA-Z_]\w*)\n.*?\n\1"', String),
(r'q"(.).*?\1"', String),
# -- TokenString
(r'q\{', String, 'token_string'),
# Attributes
(r'@([a-zA-Z_]\w*)?', Name.Decorator),
# Tokens
(r'(~=|\^=|%=|\*=|==|!>=|!<=|!<>=|!<>|!<|!>|!=|>>>=|>>>|>>=|>>|>='
r'|<>=|<>|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.\.|\.\.|/=)'
r'|[/.&|\-+<>!()\[\]{}?,;:$=*%^~]', Punctuation),
# Identifier
(r'[a-zA-Z_]\w*', Name),
# Line
(r'#line\s.*\n', Comment.Special),
],
'nested_comment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
],
'token_string': [
(r'\{', Punctuation, 'token_string_nest'),
(r'\}', String, '#pop'),
include('root'),
],
'token_string_nest': [
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
include('root'),
],
'delimited_bracket': [
(r'[^\[\]]+', String),
(r'\[', String, 'delimited_inside_bracket'),
(r'\]"', String, '#pop'),
],
'delimited_inside_bracket': [
(r'[^\[\]]+', String),
(r'\[', String, '#push'),
(r'\]', String, '#pop'),
],
'delimited_parenthesis': [
(r'[^()]+', String),
(r'\(', String, 'delimited_inside_parenthesis'),
(r'\)"', String, '#pop'),
],
'delimited_inside_parenthesis': [
(r'[^()]+', String),
(r'\(', String, '#push'),
(r'\)', String, '#pop'),
],
'delimited_angle': [
(r'[^<>]+', String),
(r'<', String, 'delimited_inside_angle'),
(r'>"', String, '#pop'),
],
'delimited_inside_angle': [
(r'[^<>]+', String),
(r'<', String, '#push'),
(r'>', String, '#pop'),
],
'delimited_curly': [
(r'[^{}]+', String),
(r'\{', String, 'delimited_inside_curly'),
(r'\}"', String, '#pop'),
],
'delimited_inside_curly': [
(r'[^{}]+', String),
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
],
}
| DLexer |
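A brief usage sketch for the lexer above, importing the upstream pygments DLexer rather than the vendored copy; the D snippet is illustrative only.

from pygments import lex
from pygments.lexers.d import DLexer

source = 'void main() { int x = 0x2A; }'
tokens = list(lex(source, DLexer()))
# Each item is a (token_type, text) pair, e.g. (Token.Keyword.Type, 'void') or (Token.Number.Hex, '0x2A').
assert any(text == '0x2A' for _, text in tokens)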
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 5219,
"end": 5384
} | class ____(ArgumentError):
"""Raised when a dynamically-loaded module (usually a database dialect)
of a particular name cannot be located."""
| NoSuchModuleError |
python | mlflow__mlflow | mlflow/telemetry/events.py | {
"start": 8317,
"end": 8384
} | class ____(Event):
name: str = "ai_command_run"
| AiCommandRunEvent |
python | skorch-dev__skorch | skorch/probabilistic.py | {
"start": 19020,
"end": 25074
} | class ____(_GPRegressorPredictMixin, GPBase):
# pylint: disable=missing-docstring
__doc__ = get_exact_gp_regr_doc(NeuralNet.__doc__)
def __init__(
self,
module,
*args,
likelihood=gpytorch.likelihoods.GaussianLikelihood,
criterion=gpytorch.mlls.ExactMarginalLogLikelihood,
batch_size=-1,
**kwargs
):
super().__init__(
module,
*args,
criterion=criterion,
likelihood=likelihood,
batch_size=batch_size,
**kwargs
)
def initialize_module(self):
"""Initializes likelihood and module."""
# pylint: disable=attribute-defined-outside-init
# We need a custom implementation here because the module is initialized
# with likelihood as an argument, which would not be passed otherwise.
# We cannot use self.initialized_instance, since we need to know if
# likelihood was actually (re-)initialized or not.
likelihood = self.likelihood
ll_kwargs = self.get_params_for('likelihood')
module = self.module
module_kwargs = self.get_params_for('module')
initialized_ll = isinstance(likelihood, torch.nn.Module)
initialized_module = isinstance(module, torch.nn.Module)
initialized_both = initialized_ll and initialized_module
if initialized_ll and not ll_kwargs:
# likelihood already initialized and no params changed
self.likelihood_ = likelihood
else:
# likelihood needs to be initialized because it's not yet or because
# its arguments changed
if initialized_ll:
likelihood = type(likelihood)
self.likelihood_ = likelihood(**ll_kwargs)
# ExactGP requires likelihood to be passed
if 'likelihood' not in module_kwargs:
module_kwargs['likelihood'] = self.likelihood_
if initialized_both and not module_kwargs:
# module and likelihood were already initialized and no params changed
self.module_ = module
else:
# module needs to be initialized because it's not yet or because
# the likelihood and/or its arguments changed
if initialized_module:
module = type(module)
self.module_ = module(**module_kwargs)
if not isinstance(self.module_, gpytorch.models.ExactGP):
raise TypeError("{} requires 'module' to be a gpytorch.models.ExactGP."
.format(self.__class__.__name__))
return self
def fit(self, X, y=None, **fit_params):
"""Initialize and fit the module.
If the module was already initialized, by calling fit, the
module will be re-initialized (unless ``warm_start`` is True).
Parameters
----------
X : input data, compatible with skorch.dataset.Dataset
By default, you should be able to pass:
* numpy arrays
* torch tensors
* pandas DataFrame or Series
* scipy sparse CSR matrices
* a dictionary of the former three
* a list/tuple of the former three
* a Dataset
If this doesn't work with your data, you have to pass a
``Dataset`` that can deal with the data.
y : target data, compatible with skorch.dataset.Dataset
The same data types as for ``X`` are supported. If your X is
a Dataset that contains the target, ``y`` may be set to
None.
**fit_params : dict
Additional parameters passed to the ``forward`` method of
the module and to the ``self.train_split`` call.
"""
if not self.warm_start or not self.initialized_:
self.initialize()
# set training data of the ExactGP module
self.module_.set_train_data(
inputs=to_tensor(X, device=self.device),
targets=to_tensor(y, device=self.device),
strict=False,
)
self.partial_fit(X, y, **fit_params)
return self
gp_regr_doc_start = """Gaussian Process regressor
Use this for variational and approximate Gaussian process regression. This
implies that the module should by a :class:`~gpytorch.models.ApproximateGP`
module.
"""
gp_regr_module_text = """
Module : gpytorch.models.ApproximateGP (class or instance)
The GPyTorch module; in contrast to exact GP, the return distribution does
not need to be Gaussian.
"""
gp_regr_criterion_text = """
likelihood : gpytorch.likelihoods.GaussianLikelihood (class or instance)
The likelihood used for the exact GP regressor. Usually doesn't need to be
changed.
criterion : gpytorch.mlls.VariationalELBO
The objective function to learn the approximate posterior of the GP
regressor.
"""
def get_gp_regr_doc(doc):
"""Customizes the net docs to avoid duplication."""
# dedent/indent roundtrip required for consistent indention in both
# Python <3.13 and Python >=3.13
# Because <3.13 => no automatic dedent, but it is the case in >=3.13
indentation = " "
doc = textwrap.indent(textwrap.dedent(doc.split("\n", 5)[-1]), indentation)
params_start_idx = doc.find(' Parameters\n ----------')
doc = doc[params_start_idx:]
doc = gp_regr_doc_start + doc
pattern = re.compile(r'(\n\s+)(module .*\n)(\s.+|.){1,99}')
start, end = pattern.search(doc).span()
doc = doc[:start] + gp_regr_module_text + doc[end:]
pattern = re.compile(r'(\n\s+)(criterion .*\n)(\s.+|.){1,99}')
start, end = pattern.search(doc).span()
doc = doc[:start] + gp_regr_criterion_text + doc[end:]
pattern = re.compile(r'(\n\s+)(train_split .*\n)(\s.+|.){1,99}')
start, end = pattern.search(doc).span()
doc = doc[:start] + gp_regr_train_split_text + doc[end:]
doc = doc + gp_likelihood_attribute_text
return doc
| ExactGPRegressor |
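A condensed sketch of how the regressor above is typically wired up with a gpytorch ExactGP module, assuming skorch and gpytorch are installed; the module definition, kernel choice, and hyperparameters are illustrative.

import numpy as np
import gpytorch
from skorch.probabilistic import ExactGPRegressor

class RbfModule(gpytorch.models.ExactGP):
    def __init__(self, likelihood):
        # Training data is attached later by ExactGPRegressor.fit via set_train_data.
        super().__init__(train_inputs=None, train_targets=None, likelihood=likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))

X = np.linspace(0, 1, 20, dtype=np.float32).reshape(-1, 1)
y = np.sin(6 * X).ravel()
gpr = ExactGPRegressor(RbfModule, max_epochs=10, lr=0.1)
gpr.fit(X, y)
y_pred = gpr.predict(X)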
python | sqlalchemy__sqlalchemy | test/orm/test_cascade.py | {
"start": 27094,
"end": 29789
} | class ____(fixtures.MappedTest):
run_inserts = None
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30), nullable=False),
)
Table(
"addresses",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id"), nullable=False),
Column("email_address", String(50), nullable=False),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
Address, addresses, users, User = (
cls.classes.Address,
cls.tables.addresses,
cls.tables.users,
cls.classes.User,
)
cls.mapper_registry.map_imperatively(Address, addresses)
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"address": relationship(
Address,
backref=backref(
"user",
single_parent=True,
cascade="all, delete-orphan",
),
uselist=False,
)
},
)
def test_replace_attribute_no_flush(self):
# test [ticket:2921]
User, Address = self.classes.User, self.classes.Address
a1 = Address(email_address="some address")
u1 = User(name="u1", address=a1)
sess = fixture_session()
sess.add(u1)
sess.commit()
# in this case, u1.address has active history set, because
# this operation necessarily replaces the old object which must be
# loaded.
# the set operation requires that "u1" is unexpired, because the
# replace operation wants to load the
# previous value. The original test case for #2921 only included
# that the lazyload operation passed a no autoflush flag through
# to the operation, however in #5226 this has been enhanced to pass
# the no autoflush flag down through to the unexpire of the attributes
# as well, so that attribute unexpire can otherwise invoke autoflush.
assert "id" not in u1.__dict__
a2 = Address(email_address="asdf")
sess.add(a2)
u1.address = a2
| O2OSingleParentNoFlushTest |
python | doocs__leetcode | solution/1800-1899/1866.Number of Ways to Rearrange Sticks With K Sticks Visible/Solution2.py | {
"start": 0,
"end": 290
} | class ____:
def rearrangeSticks(self, n: int, k: int) -> int:
mod = 10**9 + 7
f = [1] + [0] * k
for i in range(1, n + 1):
for j in range(k, 0, -1):
f[j] = (f[j] * (i - 1) + f[j - 1]) % mod
f[0] = 0
return f[k]
| Solution |
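The rolling array above implements the unsigned Stirling numbers of the first kind: placing the shortest of i sticks anywhere but the front hides it, contributing (i - 1) * f[i-1][j], while placing it first makes it visible, contributing f[i-1][j-1]. Two quick checks using the Solution class above:

assert Solution().rearrangeSticks(3, 2) == 3   # e.g. [1,3,2], [2,3,1], [2,1,3]
assert Solution().rearrangeSticks(5, 5) == 1   # only the fully sorted order shows all sticks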
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_api_key_index.py | {
"start": 120,
"end": 1027
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-api-key-index"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
def test_org_admin_can_access(self) -> None:
self.get_success_response(self.organization.slug)
def test_member_no_access(self) -> None:
user = self.create_user("bar@example.com")
self.create_member(organization=self.organization, user=user, role="member")
self.login_as(user)
self.get_error_response(self.organization.slug, status_code=403)
def test_superuser_can_access(self) -> None:
admin_user = self.create_user("admin@example.com", is_superuser=True)
self.create_member(organization=self.organization, user=admin_user, role="admin")
self.login_as(admin_user, superuser=True)
self.get_success_response(self.organization.slug)
| OrganizationApiKeyIndex |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 137545,
"end": 140351
} | class ____:
def test_fancy_assign_ndarray(self):
np.random.seed(1234)
D = self.asdense(np.random.rand(5, 7))
S = self.spcreator(D)
X = np.random.rand(2, 3)
I = np.array([[1, 2, 3], [3, 4, 2]])
J = np.array([[5, 6, 3], [2, 3, 1]])
with check_remains_sorted(S):
S[I,J] = X
D[I,J] = X
assert_equal(S.toarray(), D)
I_bad = I + 5
J_bad = J + 7
C = [1, 2, 3]
with check_remains_sorted(S):
S[I,J] = C
D[I,J] = C
assert_equal(S.toarray(), D)
with check_remains_sorted(S):
S[I,J] = 3
D[I,J] = 3
assert_equal(S.toarray(), D)
assert_raises(IndexError, S.__setitem__, (I_bad,J), C)
assert_raises(IndexError, S.__setitem__, (I,J_bad), C)
def test_fancy_indexing_multidim_set(self):
n, m = (5, 10)
def _test_set_slice(i, j):
A = self.spcreator((n, m))
with check_remains_sorted(A), warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
A[i, j] = 1
B = self.asdense(np.zeros((n, m)))
B[i, j] = 1
assert_array_almost_equal(A.toarray(), B)
# [[[1, 2], [1, 2]], [1, 2]]
for i, j in [(np.array([[1, 2], [1, 3]]), [1, 3]),
(np.array([0, 4]), [[0, 3], [1, 2]]),
([[1, 2, 3], [0, 2, 4]], [[0, 4, 3], [4, 1, 2]])]:
_test_set_slice(i, j)
def test_fancy_assign_list(self):
np.random.seed(1234)
D = self.asdense(np.random.rand(5, 7))
S = self.spcreator(D)
X = np.random.rand(2, 3)
I = [[1, 2, 3], [3, 4, 2]]
J = [[5, 6, 3], [2, 3, 1]]
S[I,J] = X
D[I,J] = X
assert_equal(S.toarray(), D)
I_bad = [[ii + 5 for ii in i] for i in I]
J_bad = [[jj + 7 for jj in j] for j in J]
C = [1, 2, 3]
S[I,J] = C
D[I,J] = C
assert_equal(S.toarray(), D)
S[I,J] = 3
D[I,J] = 3
assert_equal(S.toarray(), D)
assert_raises(IndexError, S.__setitem__, (I_bad,J), C)
assert_raises(IndexError, S.__setitem__, (I,J_bad), C)
def test_fancy_assign_slice(self):
np.random.seed(1234)
D = self.asdense(np.random.rand(5, 7))
S = self.spcreator(D)
I = [1, 2, 3, 3, 4, 2]
J = [5, 6, 3, 2, 3, 1]
I_bad = [ii + 5 for ii in I]
J_bad = [jj + 7 for jj in J]
C1 = [1, 2, 3, 4, 5, 6, 7]
C2 = np.arange(5)[:, None]
assert_raises(IndexError, S.__setitem__, (I_bad, slice(None)), C1)
assert_raises(IndexError, S.__setitem__, (slice(None), J_bad), C2)
| _TestFancyMultidimAssign |
python | doocs__leetcode | solution/0900-0999/0993.Cousins in Binary Tree/Solution.py | {
"start": 192,
"end": 851
} | class ____:
def isCousins(self, root: Optional[TreeNode], x: int, y: int) -> bool:
q = deque([(root, None)])
depth = 0
p1 = p2 = None
d1 = d2 = None
while q:
for _ in range(len(q)):
node, parent = q.popleft()
if node.val == x:
p1, d1 = parent, depth
elif node.val == y:
p2, d2 = parent, depth
if node.left:
q.append((node.left, node))
if node.right:
q.append((node.right, node))
depth += 1
return p1 != p2 and d1 == d2
| Solution |
python | gevent__gevent | src/gevent/tests/test__server_pywsgi.py | {
"start": 2718,
"end": 2826
} | class ____(test__server.TestRawSpawn): # pylint:disable=too-many-ancestors
Settings = Settings
| TestRawSpawn |
python | huggingface__transformers | src/transformers/models/xmod/modeling_xmod.py | {
"start": 1937,
"end": 7914
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
past_key_values_length: int = 0,
) -> torch.Tensor:
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(
input_ids, self.padding_idx, past_key_values_length
)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when it's auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
# NOTE: We assume either pos ids to have bsz == 1 (broadcastable) or bsz == effective bsz (input_shape[0])
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->Xmod
| XmodEmbeddings |
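A worked example of the position-id arithmetic in create_position_ids_from_input_ids above, with made-up token ids and padding_idx = 1 (the pad token id used by XLM-R-style vocabularies):

import torch

input_ids = torch.tensor([[5, 6, 7, 1, 1]])      # last two slots are padding
padding_idx = 1
mask = input_ids.ne(padding_idx).int()           # [[1, 1, 1, 0, 0]]
position_ids = torch.cumsum(mask, dim=1) * mask + padding_idx
# Real tokens count up from padding_idx + 1 while padded slots stay at padding_idx.
assert position_ids.tolist() == [[2, 3, 4, 1, 1]]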
python | python-pillow__Pillow | src/PIL/Image.py | {
"start": 3560,
"end": 3713
} | class ____(IntEnum):
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2
QUAD = 3
MESH = 4
# resampling filters (also defined in Imaging.h)
| Transform |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_eks.py | {
"start": 1359,
"end": 2015
} | class ____:
def setup_method(self):
self.async_conn_patcher = patch("airflow.providers.amazon.aws.hooks.eks.EksHook.get_async_conn")
self.mock_async_conn = self.async_conn_patcher.start()
self.mock_client = AsyncMock()
self.mock_async_conn.return_value.__aenter__.return_value = self.mock_client
self.async_wait_patcher = patch(
"airflow.providers.amazon.aws.triggers.eks.async_wait", return_value=True
)
self.mock_async_wait = self.async_wait_patcher.start()
def teardown_method(self):
self.async_conn_patcher.stop()
self.async_wait_patcher.stop()
| TestEksTrigger |
python | wireservice__csvkit | csvkit/convert/fixed.py | {
"start": 4092,
"end": 5739
} | class ____:
"""
Extracts column, start, and length columns from schema rows. Once
instantiated, each time the instance is called with a row, a
``(column,start,length)`` tuple will be returned based on values in that
row and the constructor kwargs.
"""
REQUIRED_COLUMNS = [('column', None), ('start', int), ('length', int)]
start = None
length = None
column = None
one_based = None
def __init__(self, header):
"""
Constructs a schema row decoder.
"""
for p, val_type in self.REQUIRED_COLUMNS:
try:
if val_type:
setattr(self, p, val_type(header.index(p)))
else:
setattr(self, p, header.index(p))
except ValueError:
raise ValueError(f'A column named "{p}" must exist in the schema file.')
def __call__(self, row):
"""
Return a tuple (column, start, length) based on this instance's
parameters. If the first time this is called, the row's 'start'
value is 1, then all 'start' values including the first will be one
less than in the actual input data, to adjust for one-based
specifications. Values for 'start' and 'length' will be cast to
integers.
"""
if self.one_based is None:
self.one_based = int(row[self.start]) == 1
if self.one_based:
adjusted_start = int(row[self.start]) - 1
else:
adjusted_start = int(row[self.start])
return FixedWidthField(row[self.column], adjusted_start, int(row[self.length]))
| SchemaDecoder |
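A small sketch of how the decoder above adjusts one-based schemas, assuming csvkit is installed; the column names in the rows are illustrative.

from csvkit.convert.fixed import SchemaDecoder

decoder = SchemaDecoder(['column', 'start', 'length'])
first = decoder(['name', '1', '10'])   # a first 'start' of 1 marks the whole schema as one-based -> start becomes 0
second = decoder(['age', '11', '3'])   # later rows are shifted down by one as well -> start becomes 10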
python | OmkarPathak__pygorithm | tests/test_binary.py | {
"start": 3553,
"end": 5558
} | class ____(unittest.TestCase):
def test_ascii_to_base16(self):
array = ['54', '68', '65', '20', '51', '75', '69', '63', '6B', '20', '42', '72', '6F', '77', '6E', '20', '46',
'6F', '78', '20', '4A', '75', '6D', '70', '73', '20', '4F', '76', '65', '72', '20', '74', '68', '65',
'20', '4C', '61', '7A', '79', '20', '44', '6F', '67']
array_2 = ['77', '48', '40', '74', '20', '5F', '54', '2D', '68', '33', '20', '2F', '2F', '2D', '46', '3D', '7E',
'21', '63', '6B']
self.assertEqual(ascii.to_base16("The Quick Brown Fox Jumps Over the Lazy Dog"), array)
self.assertEqual(ascii.to_base16("wH@t _T-h3 //-F=~!ck"), array_2)
def test_ascii_to_base2(self):
array = ['01010100', '01101000', '01100101', '00100000', '01010001', '01110101', '01101001',
'01100011',
'01101011', '00100000', '01000010', '01110010', '01101111', '01110111', '01101110',
'00100000',
'01000110', '01101111', '01111000', '00100000', '01001010', '01110101', '01101101',
'01110000',
'01110011', '00100000', '01001111', '01110110', '01100101', '01110010', '00100000',
'01110100',
'01101000', '01100101', '00100000', '01001100', '01100001', '01111010', '01111001',
'00100000',
'01000100', '01101111', '01100111']
array_2 = ['01110111', '01001000', '01000000', '01110100', '00100000', '01011111', '01010100', '00101101',
'01101000',
'00110011', '00100000', '00101111', '00101111', '00101101', '01000110', '00111101', '01111110',
'00100001',
'01100011', '01101011']
self.assertEqual(ascii.to_base2("wH@t _T-h3 //-F=~!ck"), array_2)
self.assertEqual(ascii.to_base2("The Quick Brown Fox Jumps Over the Lazy Dog"), array)
if __name__ == '__main__':
unittest.main()
| TestASCII |
python | walkccc__LeetCode | solutions/1839. Longest Substring Of All Vowels in Order/1839.py | {
"start": 0,
"end": 377
} | class ____:
def longestBeautifulSubstring(self, word: str) -> int:
ans = 0
count = 1
l = 0
for r in range(1, len(word)):
curr = word[r]
prev = word[r - 1]
if curr >= prev:
if curr > prev:
count += 1
if count == 5:
ans = max(ans, r - l + 1)
else:
count = 1
l = r
return ans
| Solution |
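Two quick checks for the sliding-window Solution above; a window only counts once all five vowel groups appear in non-decreasing order, so the second string's best run is the trailing "aeiou".

assert Solution().longestBeautifulSubstring("aeiaaioaaaaeiiiiouuuooaauuaeiu") == 13   # "aaaaeiiiiouuu"
assert Solution().longestBeautifulSubstring("aeeeiiiioooauuuaeiou") == 5              # "aeiou"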
python | ApeWorX__ape | src/ape/utils/_github.py | {
"start": 1876,
"end": 9873
} | class ____:
# Generic git/github client attributes.
TOKEN_KEY = "GITHUB_ACCESS_TOKEN"
API_URL_PREFIX = "https://api.github.com"
git: GitProcessWrapper = GitProcessWrapper()
# ApeWorX-specific attributes.
ORGANIZATION_NAME = "ApeWorX"
FRAMEWORK_NAME = "ape"
_repo_cache: dict[str, dict] = {}
def __init__(self, session: Optional[Session] = None):
if session:
# NOTE: Mostly allowed for testing purposes.
self.__session = session
else:
headers = {"Content-Type": "application/json", "User-Agent": USER_AGENT}
if auth := os.environ[self.TOKEN_KEY] if self.TOKEN_KEY in os.environ else None:
headers["Authorization"] = f"token {auth}"
session = Session()
session.headers = {**session.headers, **headers}
adapter = HTTPAdapter(
max_retries=Retry(total=10, backoff_factor=1.0, status_forcelist=[403]),
)
session.mount("https://", adapter)
self.__session = session
@cached_property
def org(self) -> dict:
"""
Our organization on ``Github``.
"""
return self.get_organization(self.ORGANIZATION_NAME)
@cached_property
def available_plugins(self) -> set[str]:
return {
repo["name"].replace("-", "_")
for repo in self.get_org_repos()
if not repo.get("private", False) and repo["name"].startswith(f"{self.FRAMEWORK_NAME}-")
}
def get_org_repos(self) -> Iterator[dict]:
params = {"per_page": 100, "page": 1}
while True:
response = self._get(f"orgs/{self.ORGANIZATION_NAME}/repos", params=params)
repository_count = len(response)
if repository_count == 0:
break
yield from response
params["page"] += 1
def get_release(self, org_name: str, repo_name: str, version: str) -> dict:
if version == "latest":
return self.get_latest_release(org_name, repo_name)
def _try_get_release(vers):
try:
return self._get_release(org_name, repo_name, vers)
except Exception:
return None
if release := _try_get_release(version):
return release
else:
original_version = str(version)
# Try an alternative tag style
if version.startswith("v"):
version = version.lstrip("v")
else:
version = f"v{version}"
if release := _try_get_release(version):
return release
raise UnknownVersionError(original_version, repo_name)
def _get_release(self, org_name: str, repo_name: str, version: str) -> dict:
return self._get(f"repos/{org_name}/{repo_name}/releases/tags/{version}")
def get_repo(self, org_name: str, repo_name: str) -> dict:
repo_path = f"{org_name}/{repo_name}"
if repo_path not in self._repo_cache:
try:
self._repo_cache[repo_path] = self._get_repo(org_name, repo_name)
return self._repo_cache[repo_path]
except Exception as err:
raise ProjectError(f"Unknown repository '{repo_path}'") from err
return self._repo_cache[repo_path]
def _get_repo(self, org_name: str, repo_name: str) -> dict:
try:
return self._get(f"repos/{org_name}/{repo_name}")
except HTTPError as err:
if err.response.status_code == 404:
raise ProjectError(f"Unknown repository '{org_name}/{repo_name}'")
raise # The original HTTPError
def get_latest_release(self, org_name: str, repo_name: str) -> dict:
return self._get(f"repos/{org_name}/{repo_name}/releases/latest")
def get_organization(self, org_name: str) -> dict:
return self._get(f"orgs/{org_name}")
def clone_repo(
self,
org_name: str,
repo_name: str,
target_path: Union[str, Path],
branch: Optional[str] = None,
scheme: str = "https",
):
repo = self.get_repo(org_name, repo_name)
branch = branch or repo["default_branch"]
logger.info(f"Cloning branch '{branch}' from '{repo['name']}'.")
url = repo["git_url"]
if "ssh" in scheme or "git" in scheme:
url = url.replace("git://github.com/", "git@github.com:")
elif "http" in scheme:
url = url.replace("git://", "https://")
else:
raise ValueError(f"Scheme '{scheme}' not supported.")
target_path = Path(target_path)
target_path.parent.mkdir(parents=True, exist_ok=True)
if target_path.exists():
# Target repo cannot exist.
target_path = target_path / repo_name
self.git.clone(url, branch=branch, target_path=target_path)
def download_package(
self, org_name: str, repo_name: str, version: str, target_path: Union[Path, str]
):
target_path = Path(target_path) # Handles str
if not target_path or not target_path.is_dir():
raise ValueError(f"'target_path' must be a valid directory (got '{target_path}').")
release = self.get_release(org_name, repo_name, version)
description = f"Downloading {org_name}/{repo_name}@{version}"
release_content = stream_response(
release["zipball_url"], progress_bar_description=description
)
# Use temporary path to isolate a package when unzipping
with tempfile.TemporaryDirectory() as tmp:
temp_path = Path(tmp)
with zipfile.ZipFile(BytesIO(release_content)) as zf:
zf.extractall(temp_path)
# Copy the directory contents into the target path.
downloaded_packages = [f for f in temp_path.iterdir() if f.is_dir()]
if len(downloaded_packages) < 1:
raise CompilerError(f"Unable to download package at '{org_name}/{repo_name}'.")
package_path = temp_path / downloaded_packages[0]
for source_file in package_path.iterdir():
shutil.move(str(source_file), str(target_path))
def _get(self, url: str, params: Optional[dict] = None) -> Any:
return self._request("GET", url, params=params)
def _request(self, method: str, url: str, **kwargs) -> Any:
url = f"{self.API_URL_PREFIX}/{url}"
response = self.__session.request(method, url, **kwargs)
try:
response.raise_for_status()
except HTTPError as err:
if err.response.status_code == 401 and self.__session.headers.get("Authorization"):
token = self.__session.headers["Authorization"]
del self.__session.headers["Authorization"]
response = self.__session.request(method, url, **kwargs)
try:
response.raise_for_status() # Raise exception if the retry also fails
except HTTPError:
# Even without the Authorization token, the request still failed.
# Raise the original error in this case. Also, put back token just in case.
self.__session.headers["Authorization"] = token
raise err
else:
# The request failed with Authorization but succeeded without.
# Let the user know their token is likely expired.
logger.warning(
"Requests are not authorized! GITHUB_ACCESS_TOKEN is likely expired; "
"received 401 when attempted to use it. If you need GitHub authorization, "
"try resetting your token."
)
return response.json()
raise # Raise the error.
return response.json()
github_client = _GithubClient()
| _GithubClient |
python | google__jax | jax/_src/pallas/core.py | {
"start": 3784,
"end": 3962
} | class ____(AbstractSemaphoreTy):
name = "barrier_semaphore"
type = barrier_semaphore
Backend = Literal["mosaic_tpu", "triton", "mosaic_gpu"]
@runtime_checkable
| BarrierSemaphore |
python | neetcode-gh__leetcode | python/0020-valid-parentheses.py | {
"start": 0,
"end": 375
} | class ____:
def isValid(self, s: str) -> bool:
bracketMap = {")": "(", "]": "[", "}": "{"}
stack = []
for c in s:
if c not in bracketMap:
stack.append(c)
continue
if not stack or stack[-1] != bracketMap[c]:
return False
stack.pop()
return not stack
| Solution |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_delayed_workflow.py | {
"start": 38036,
"end": 38615
} | class ____(TestDelayedWorkflowBase):
def test_cleanup_redis(self) -> None:
self._push_base_events()
project_client = self.batch_client.for_project(self.project.id)
data = project_client.get_hash_data(batch_key=None)
assert set(data.keys()) == self.workflow_group_dcg_mapping
event_data = EventRedisData.from_redis_data(data, continue_on_error=False)
cleanup_redis_buffer(project_client, event_data.events.keys(), None)
data = project_client.get_hash_data(batch_key=None)
assert data == {}
| TestCleanupRedisBuffer |
python | viewflow__viewflow | viewflow/jsonstore.py | {
"start": 5733,
"end": 6045
} | class ____(JSONFieldMixin, fields.DateField):
def to_json(self, value):
if value:
assert isinstance(value, (datetime, date))
return value.strftime("%Y-%m-%d")
def from_json(self, value):
if value is not None:
return dateparse.parse_date(value)
| DateField |
python | allegroai__clearml | clearml/automation/optimization.py | {
"start": 9580,
"end": 11290
} | class ____(object):
class Field(object):
def __init__(self, limit: Optional[float] = None) -> ():
self.limit = limit
self.current = {}
def update(self, uid: Union[str, int], value: float) -> ():
if value is not None:
try:
self.current[uid] = float(value)
except (TypeError, ValueError):
pass
@property
def used(self) -> Optional[float]:
if self.limit is None or not self.current:
return None
return sum(self.current.values()) / float(self.limit)
def __init__(
self,
jobs_limit: Optional[int],
iterations_limit: Optional[int],
compute_time_limit: Optional[float],
) -> ():
self.jobs = self.Field(jobs_limit)
self.iterations = self.Field(iterations_limit)
self.compute_time = self.Field(compute_time_limit)
def to_dict(self) -> Mapping[str, Mapping[str, float]]:
# returned dict is Mapping[Union['jobs', 'iterations', 'compute_time'], Mapping[Union['limit', 'used'], float]]
current_budget = {}
jobs = self.jobs.used
current_budget["jobs"] = {"limit": self.jobs.limit, "used": jobs if jobs else 0}
iterations = self.iterations.used
current_budget["iterations"] = {
"limit": self.iterations.limit,
"used": iterations if iterations else 0,
}
compute_time = self.compute_time.used
current_budget["compute_time"] = {
"limit": self.compute_time.limit,
"used": compute_time if compute_time else 0,
}
return current_budget
| Budget |
python | tornadoweb__tornado | tornado/test/simple_httpclient_test.py | {
"start": 3270,
"end": 3775
} | class ____(RequestHandler):
def get(self):
if self.request.version.startswith("HTTP/1"):
# Emulate the old HTTP/1.0 behavior of returning a body with no
# content-length. Tornado handles content-length at the framework
# level so we have to go around it.
stream = self.detach()
stream.write(b"HTTP/1.0 200 OK\r\n\r\n" b"hello")
stream.close()
else:
self.finish("HTTP/1 required")
| NoContentLengthHandler |
python | pennersr__django-allauth | allauth/socialaccount/providers/github/views.py | {
"start": 285,
"end": 1918
} | class ____(OAuth2Adapter):
provider_id = "github"
settings = app_settings.PROVIDERS.get(provider_id, {})
if "GITHUB_URL" in settings:
web_url = settings.get("GITHUB_URL").rstrip("/")
api_url = "{0}/api/v3".format(web_url)
else:
web_url = "https://github.com"
api_url = "https://api.github.com"
access_token_url = "{0}/login/oauth/access_token".format(web_url)
authorize_url = "{0}/login/oauth/authorize".format(web_url)
profile_url = "{0}/user".format(api_url)
emails_url = "{0}/user/emails".format(api_url)
def complete_login(self, request, app, token, **kwargs):
headers = {"Authorization": "token {}".format(token.token)}
resp = (
get_adapter().get_requests_session().get(self.profile_url, headers=headers)
)
resp.raise_for_status()
extra_data = resp.json()
if app_settings.QUERY_EMAIL:
if emails := self.get_emails(headers):
extra_data["emails"] = emails
return self.get_provider().sociallogin_from_response(request, extra_data)
def get_emails(self, headers) -> Optional[list]:
resp = (
get_adapter().get_requests_session().get(self.emails_url, headers=headers)
)
# https://api.github.com/user/emails -- 404 is documented to occur.
if resp.status_code == HTTPStatus.NOT_FOUND:
return None
resp.raise_for_status()
return resp.json()
oauth2_login = OAuth2LoginView.adapter_view(GitHubOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(GitHubOAuth2Adapter)
| GitHubOAuth2Adapter |
python | redis__redis-py | redis/asyncio/connection.py | {
"start": 31812,
"end": 34809
} | class ____:
__slots__ = (
"keyfile",
"certfile",
"cert_reqs",
"include_verify_flags",
"exclude_verify_flags",
"ca_certs",
"ca_data",
"context",
"check_hostname",
"min_version",
"ciphers",
)
def __init__(
self,
keyfile: Optional[str] = None,
certfile: Optional[str] = None,
cert_reqs: Optional[Union[str, ssl.VerifyMode]] = None,
include_verify_flags: Optional[List["ssl.VerifyFlags"]] = None,
exclude_verify_flags: Optional[List["ssl.VerifyFlags"]] = None,
ca_certs: Optional[str] = None,
ca_data: Optional[str] = None,
check_hostname: bool = False,
min_version: Optional[TLSVersion] = None,
ciphers: Optional[str] = None,
):
if not SSL_AVAILABLE:
raise RedisError("Python wasn't built with SSL support")
self.keyfile = keyfile
self.certfile = certfile
if cert_reqs is None:
cert_reqs = ssl.CERT_NONE
elif isinstance(cert_reqs, str):
CERT_REQS = { # noqa: N806
"none": ssl.CERT_NONE,
"optional": ssl.CERT_OPTIONAL,
"required": ssl.CERT_REQUIRED,
}
if cert_reqs not in CERT_REQS:
raise RedisError(
f"Invalid SSL Certificate Requirements Flag: {cert_reqs}"
)
cert_reqs = CERT_REQS[cert_reqs]
self.cert_reqs = cert_reqs
self.include_verify_flags = include_verify_flags
self.exclude_verify_flags = exclude_verify_flags
self.ca_certs = ca_certs
self.ca_data = ca_data
self.check_hostname = (
check_hostname if self.cert_reqs != ssl.CERT_NONE else False
)
self.min_version = min_version
self.ciphers = ciphers
self.context: Optional[SSLContext] = None
def get(self) -> SSLContext:
if not self.context:
context = ssl.create_default_context()
context.check_hostname = self.check_hostname
context.verify_mode = self.cert_reqs
if self.include_verify_flags:
for flag in self.include_verify_flags:
context.verify_flags |= flag
if self.exclude_verify_flags:
for flag in self.exclude_verify_flags:
context.verify_flags &= ~flag
if self.certfile and self.keyfile:
context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile)
if self.ca_certs or self.ca_data:
context.load_verify_locations(cafile=self.ca_certs, cadata=self.ca_data)
if self.min_version is not None:
context.minimum_version = self.min_version
if self.ciphers is not None:
context.set_ciphers(self.ciphers)
self.context = context
return self.context
| RedisSSLContext |
python | langchain-ai__langchain | libs/partners/openai/tests/integration_tests/chat_models/test_responses_api.py | {
"start": 7279,
"end": 7321
} | class ____(BaseModel):
response: str
| Foo |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict21.py | {
"start": 202,
"end": 248
} | class ____(TypedDict):
v1: Required[int]
| TD1 |
python | wandb__wandb | wandb/sdk/artifacts/_generated/update_artifact.py | {
"start": 221,
"end": 299
} | class ____(GQLResult):
result: Optional[UpdateArtifactResult]
| UpdateArtifact |
python | google__python-fire | fire/test_components.py | {
"start": 2846,
"end": 3020
} | class ____:
def identity(self, bool_one=False, bool_two=False):
return bool_one, bool_two
def identity2(self, a=None, alpha=None):
return a, alpha
| SimilarArgNames |
python | great-expectations__great_expectations | docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/test_expect_column_values_to_be_in_set.py | {
"start": 476,
"end": 5985
} | class ____(gxe.ExpectColumnValuesToBeInSet):
value_set: List[str] = ["FR", "DE", "CH", "ES", "IT", "BE", "NL", "PL"]
# </snippet>
@pytest.mark.big
def test_expect_column_values_to_be_in_set_fail(
data_context_with_datasource_pandas_engine,
):
context: AbstractDataContext = data_context_with_datasource_pandas_engine
df = pd.DataFrame(
{
"a": [
"2021-01-01",
"2021-01-31",
"2021-02-28",
"2021-03-20",
"2021-02-21",
"2021-05-01",
"2021-06-18",
]
}
)
batch_request = RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": df},
batch_identifiers={"default_identifier_name": "my_identifier"},
)
validator = context.get_validator(
batch_request=batch_request,
create_expectation_suite_with_name="test",
)
result = validator.expect_column_values_to_be_in_set(
column="a", value_set=["2021-06-18"]
)
assert result.success is False
@pytest.mark.filesystem
def test_expect_column_values_in_set_pass(
data_context_with_datasource_pandas_engine,
):
context: AbstractDataContext = data_context_with_datasource_pandas_engine
df = pd.DataFrame(
{
"a": [
"2021-01-01",
"2021-01-31",
"2021-02-28",
"2021-03-20",
"2021-02-21",
"2021-05-01",
"2021-06-18",
]
}
)
batch_request = RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": df},
batch_identifiers={"default_identifier_name": "my_identifier"},
)
validator = context.get_validator(
batch_request=batch_request,
create_expectation_suite_with_name="test",
)
result = validator.expect_column_values_to_be_in_set(
column="a",
value_set=[
"2021-01-01",
"2021-01-31",
"2021-02-28",
"2021-03-20",
"2021-02-21",
"2021-05-01",
"2021-06-18",
],
)
assert result.success is True
@pytest.mark.big
def test_expect_column_values_country_fail(
data_context_with_datasource_pandas_engine,
):
context: AbstractDataContext = data_context_with_datasource_pandas_engine
df = pd.DataFrame(
{
"a": [
"2021-01-01",
"2021-01-31",
"2021-02-28",
"2021-03-20",
"2021-02-21",
"2021-05-01",
"2021-06-18",
]
}
)
batch_request = RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": df},
batch_identifiers={"default_identifier_name": "my_identifier"},
)
validator = context.get_validator(
batch_request=batch_request,
create_expectation_suite_with_name="test",
)
result = validator.expect_column_values_to_be_two_letter_country_code(column="a")
assert result.success is False
@pytest.mark.big
def test_expect_column_values_country_pass(
data_context_with_datasource_pandas_engine,
):
context: AbstractDataContext = data_context_with_datasource_pandas_engine
df = pd.DataFrame({"a": ["FR", "DE", "CH", "ES", "IT", "BE", "NL", "PL"]})
batch_request = RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": df},
batch_identifiers={"default_identifier_name": "my_identifier"},
)
validator = context.get_validator(
batch_request=batch_request,
create_expectation_suite_with_name="test",
)
result = validator.expect_column_values_to_be_two_letter_country_code(column="a")
assert result.success is True
@pytest.mark.big
def test_expect_column_values_to_be_in_set_invalid_set(
data_context_with_datasource_pandas_engine,
):
context: AbstractDataContext = data_context_with_datasource_pandas_engine
df = pd.DataFrame(
{
"a": [
"2021-01-01",
"2021-01-31",
"2021-02-28",
"2021-03-20",
"2021-02-21",
"2021-05-01",
"2021-06-18",
]
}
)
batch_request = RuntimeBatchRequest(
datasource_name="my_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="my_data_asset",
runtime_parameters={"batch_data": df},
batch_identifiers={"default_identifier_name": "my_identifier"},
)
validator = context.get_validator(
batch_request=batch_request,
create_expectation_suite_with_name="test",
)
with pytest.raises(pydantic.ValidationError):
_ = validator.expect_column_values_to_be_in_set(column="a", value_set="foo")
| ExpectColumnValuesToBeTwoLetterCountryCode |