language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | urllib3__urllib3 | test/with_dummyserver/test_connectionpool.py | {
"start": 57742,
"end": 58203
} | class ____(HypercornDummyServerTestCase):
def test_pool_size_redirect(self) -> None:
retries = Retry(
total=1, raise_on_status=False, status_forcelist=[404], redirect=True
)
with HTTPConnectionPool(
self.host, self.port, maxsize=10, retries=retries, block=True
) as pool:
pool.urlopen("GET", "/redirect", preload_content=False)
assert pool.num_connections == 1
| TestRedirectPoolSize |
python | getsentry__sentry | tests/sentry_plugins/heroku/test_plugin.py | {
"start": 795,
"end": 4680
} | class ____(TestCase):
"""
tests that when finish_release is called on a release hook,
we try to get the previous commits based on the version ref
and that we create `ReleaseHeadCommit`s for the version
"""
@patch("sentry.tasks.commits.fetch_commits")
def test_minimal(self, mock_fetch_commits: MagicMock) -> None:
project = self.create_project()
version = "bbee5b51f84611e4b14834363b8514c2"
data_list = [
{
"id": "c7155651831549cf8a5e47889fce17eb",
"message": "foo",
"author_email": "jane@example.com",
},
{
"id": "62de626b7c7cfb8e77efb4273b1a3df4123e6216",
"message": "hello",
"author_name": "Jess",
},
{
"id": "58de626b7c7cfb8e77efb4273b1a3df4123e6345",
"message": "bar",
"author_name": "Joe^^",
},
{
"id": "bbee5b51f84611e4b14834363b8514c2",
"message": "blah",
"author_email": "katie@example.com",
},
]
user = self.create_user(email="stebe@sentry.io")
repo = Repository.objects.create(
organization_id=project.organization_id, name=project.name, provider="dummy"
)
ProjectOption.objects.set_value(key="heroku:repository", project=project, value=repo.name)
for data in data_list:
Commit.objects.create(
key=data["id"], organization_id=self.project.organization_id, repository_id=repo.id
)
old_release = Release.objects.create(
version="a" * 40,
organization_id=project.organization_id,
date_added=timezone.now() - timedelta(minutes=30),
)
old_release.add_project(project)
ReleaseCommit.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release=old_release,
commit=Commit.objects.get(key="c7155651831549cf8a5e47889fce17eb"),
order=0,
)
ReleaseHeadCommit.objects.create(
organization_id=project.organization_id,
repository_id=repo.id,
release=old_release,
commit=Commit.objects.get(key="c7155651831549cf8a5e47889fce17eb"),
)
release_heads = ReleaseHeadCommit.objects.filter(
organization_id=project.organization_id,
repository_id=repo.id,
commit=Commit.objects.get(key="bbee5b51f84611e4b14834363b8514c2"),
)
assert len(release_heads) == 0
hook = HerokuReleaseHook(project)
hook.finish_release(version=version, owner_id=user.id)
release = Release.objects.get(projects=project, version=version)
new_release_heads = ReleaseHeadCommit.objects.filter(
organization_id=project.organization_id,
repository_id=repo.id,
release=release,
commit=Commit.objects.get(key="bbee5b51f84611e4b14834363b8514c2"),
)
assert len(new_release_heads) == 1
assert release.version == "bbee5b51f84611e4b14834363b8514c2"
deploy = Deploy.objects.filter(
organization_id=project.organization_id,
release=release,
environment_id=Environment.objects.get(
organization_id=project.organization_id, name="production"
).id,
)
assert len(deploy) == 1
mock_fetch_commits.apply_async.assert_called_with(
kwargs={
"release_id": release.id,
"user_id": user.id,
"refs": [{"commit": "bbee5b51f84611e4b14834363b8514c2", "repository": repo.name}],
"prev_release_id": old_release.id,
}
)
| SetRefsTest |
python | sympy__sympy | sympy/physics/units/prefixes.py | {
"start": 255,
"end": 6260
} | class ____(Expr):
"""
This class represent prefixes, with their name, symbol and factor.
Prefixes are used to create derived units from a given unit. They should
always be encapsulated into units.
The factor is constructed from a base (default is 10) to some power, and
it gives the total multiple or fraction. For example the kilometer km
is constructed from the meter (factor 1) and the kilo (10 to the power 3,
i.e. 1000). The base can be changed to allow e.g. binary prefixes.
A prefix multiplied by something will always return the product of this
other object times the factor, except if the other object:
- is a prefix and they can be combined into a new prefix;
- defines multiplication with prefixes (which is the case for the Unit
class).
"""
_op_priority = 13.0
is_commutative = True
def __new__(cls, name, abbrev, exponent, base=sympify(10), latex_repr=None):
name = sympify(name)
abbrev = sympify(abbrev)
exponent = sympify(exponent)
base = sympify(base)
obj = Expr.__new__(cls, name, abbrev, exponent, base)
obj._name = name
obj._abbrev = abbrev
obj._scale_factor = base**exponent
obj._exponent = exponent
obj._base = base
obj._latex_repr = latex_repr
return obj
@property
def name(self):
return self._name
@property
def abbrev(self):
return self._abbrev
@property
def scale_factor(self):
return self._scale_factor
def _latex(self, printer):
if self._latex_repr is None:
return r'\text{%s}' % self._abbrev
return self._latex_repr
@property
def base(self):
return self._base
def __str__(self):
return str(self._abbrev)
def __repr__(self):
if self.base == 10:
return "Prefix(%r, %r, %r)" % (
str(self.name), str(self.abbrev), self._exponent)
else:
return "Prefix(%r, %r, %r, %r)" % (
str(self.name), str(self.abbrev), self._exponent, self.base)
def __mul__(self, other):
from sympy.physics.units import Quantity
if not isinstance(other, (Quantity, Prefix)):
return super().__mul__(other)
fact = self.scale_factor * other.scale_factor
if isinstance(other, Prefix):
if fact == 1:
return S.One
# simplify prefix
for p in PREFIXES:
if PREFIXES[p].scale_factor == fact:
return PREFIXES[p]
return fact
return self.scale_factor * other
def __truediv__(self, other):
if not hasattr(other, "scale_factor"):
return super().__truediv__(other)
fact = self.scale_factor / other.scale_factor
if fact == 1:
return S.One
elif isinstance(other, Prefix):
for p in PREFIXES:
if PREFIXES[p].scale_factor == fact:
return PREFIXES[p]
return fact
return self.scale_factor / other
def __rtruediv__(self, other):
if other == 1:
for p in PREFIXES:
if PREFIXES[p].scale_factor == 1 / self.scale_factor:
return PREFIXES[p]
return other / self.scale_factor
def prefix_unit(unit, prefixes):
"""
Return a list of all units formed by unit and the given prefixes.
You can use the predefined PREFIXES or BIN_PREFIXES, but you can also
pass as argument a subdict of them if you do not want all prefixed units.
>>> from sympy.physics.units.prefixes import (PREFIXES,
... prefix_unit)
>>> from sympy.physics.units import m
>>> pref = {"m": PREFIXES["m"], "c": PREFIXES["c"], "d": PREFIXES["d"]}
>>> prefix_unit(m, pref) # doctest: +SKIP
[millimeter, centimeter, decimeter]
"""
from sympy.physics.units.quantities import Quantity
from sympy.physics.units import UnitSystem
prefixed_units = []
for prefix in prefixes.values():
quantity = Quantity(
"%s%s" % (prefix.name, unit.name),
abbrev=("%s%s" % (prefix.abbrev, unit.abbrev)),
is_prefixed=True,
)
UnitSystem._quantity_dimensional_equivalence_map_global[quantity] = unit
UnitSystem._quantity_scale_factors_global[quantity] = (prefix.scale_factor, unit)
prefixed_units.append(quantity)
return prefixed_units
yotta = Prefix('yotta', 'Y', 24)
zetta = Prefix('zetta', 'Z', 21)
exa = Prefix('exa', 'E', 18)
peta = Prefix('peta', 'P', 15)
tera = Prefix('tera', 'T', 12)
giga = Prefix('giga', 'G', 9)
mega = Prefix('mega', 'M', 6)
kilo = Prefix('kilo', 'k', 3)
hecto = Prefix('hecto', 'h', 2)
deca = Prefix('deca', 'da', 1)
deci = Prefix('deci', 'd', -1)
centi = Prefix('centi', 'c', -2)
milli = Prefix('milli', 'm', -3)
micro = Prefix('micro', 'mu', -6, latex_repr=r"\mu")
nano = Prefix('nano', 'n', -9)
pico = Prefix('pico', 'p', -12)
femto = Prefix('femto', 'f', -15)
atto = Prefix('atto', 'a', -18)
zepto = Prefix('zepto', 'z', -21)
yocto = Prefix('yocto', 'y', -24)
# https://physics.nist.gov/cuu/Units/prefixes.html
PREFIXES = {
'Y': yotta,
'Z': zetta,
'E': exa,
'P': peta,
'T': tera,
'G': giga,
'M': mega,
'k': kilo,
'h': hecto,
'da': deca,
'd': deci,
'c': centi,
'm': milli,
'mu': micro,
'n': nano,
'p': pico,
'f': femto,
'a': atto,
'z': zepto,
'y': yocto,
}
kibi = Prefix('kibi', 'Y', 10, 2)
mebi = Prefix('mebi', 'Y', 20, 2)
gibi = Prefix('gibi', 'Y', 30, 2)
tebi = Prefix('tebi', 'Y', 40, 2)
pebi = Prefix('pebi', 'Y', 50, 2)
exbi = Prefix('exbi', 'Y', 60, 2)
# https://physics.nist.gov/cuu/Units/binary.html
BIN_PREFIXES = {
'Ki': kibi,
'Mi': mebi,
'Gi': gibi,
'Ti': tebi,
'Pi': pebi,
'Ei': exbi,
}
| Prefix |
python | getsentry__sentry | src/sentry/integrations/api/serializers/models/external_actor.py | {
"start": 434,
"end": 549
} | class ____(TypedDict, total=False):
externalId: str
userId: str
teamId: str
| ExternalActorResponseOptional |
python | jina-ai__jina | tests/integration/deployments/executor.py | {
"start": 49,
"end": 292
} | class ____(Executor):
def __init__(self, arg='hello', **kwargs):
super().__init__(**kwargs)
self.arg = arg
@requests
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = self.arg
| DummyExecutor |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/lru_cache_test.py | {
"start": 1201,
"end": 2570
} | class ____(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, x):
bias = constant_op.constant(
np.random.randn(1, 10, 10, 1), dtype=dtypes.float32)
x = math_ops.add(x, bias)
x = nn.relu(x)
return array_ops.identity(x, name="output")
def GetParams(self):
dtype = dtypes.float32
input_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10, 2]],
[[2, 10, 10, 2]]]
expected_output_dims = [[[1, 10, 10, 2]], [[2, 10, 10, 2]], [[4, 10, 10,
2]],
[[2, 10, 10, 2]]]
return trt_test.TfTrtIntegrationTestParams(
graph_fn=self.GraphFn,
input_specs=[
tensor_spec.TensorSpec([None, 10, 10, 2], dtypes.float32, "input")
],
output_specs=[
tensor_spec.TensorSpec([None, 10, 10, 1], dtypes.float32, "output")
],
input_dims=input_dims,
expected_output_dims=expected_output_dims)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_000"]
def ShouldRunTest(self, run_params):
return (run_params.dynamic_engine and not trt_test.IsQuantizationMode(
run_params.precision_mode)), "test dynamic engine and non-INT8"
if __name__ == "__main__":
test.main()
| LRUCacheTest |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/in_airflow/task_proxy_operator.py | {
"start": 2372,
"end": 7623
} | class ____(BaseProxyTaskToDagsterOperator):
"""The default task proxying operator - which opens a blank session and expects the dagster URL to be set in the environment.
The dagster url is expected to be set in the environment as DAGSTER_URL.
This operator should not be instantiated directly - it is instantiated by :py:func:`proxying_to_dagster` if no
override operator is provided.
"""
def get_dagster_session(self, context: Context) -> requests.Session:
return requests.Session()
def get_dagster_url(self, context: Context) -> str:
return os.environ["DAGSTER_URL"]
def build_dagster_task(
original_task: BaseOperator,
dagster_operator_klass: type[BaseProxyTaskToDagsterOperator],
) -> BaseProxyTaskToDagsterOperator:
return instantiate_dagster_operator(original_task, dagster_operator_klass)
def instantiate_dagster_operator(
original_task: BaseOperator,
dagster_operator_klass: type[BaseProxyTaskToDagsterOperator],
) -> BaseProxyTaskToDagsterOperator:
"""Instantiates a DagsterOperator as a copy of the provided airflow task.
We attempt to copy as many of the original task's attributes as possible, while respecting
that attributes may change between airflow versions. In order to do this, we inspect the
arguments available to the BaseOperator constructor and copy over any of those arguments that
are available as attributes on the original task.
This approach has limitations:
- If the task attribute is transformed and stored on another property, it will not be copied.
- If the task attribute is transformed in a way that makes it incompatible with the constructor arg
and stored in the same property, that will attempt to be copied and potentiall break initialization.
In the future, if we hit problems with this, we may need to add argument overrides to ensure we either
attempt to include certain additional attributes, or exclude others. If this continues to be a problem
across airflow versions, it may be necessary to revise this approach to one that explicitly maps airflow
version to a set of expected arguments and attributes.
"""
base_operator_args, base_operator_args_with_defaults = get_params(BaseOperator.__init__)
init_kwargs = {}
ignore_args = [
# These don't make sense in context to copy
"kwargs",
"args",
"dag",
# The weight rule stored on the base operator is a private subclass of PriorityWeightStrategy,
# which satisfies the type signature of the constructor, but fails the validation process in
# the constructor. See https://github.com/apache/airflow/blob/2b15e9f26fee27b6c1fbc8167d0e0558198ffa7a/airflow/task/priority_strategy.py#L127
# for more details.
# We could likely add custom handling here to support the parameter.
# For now, we ignore it, as it's currently an experimental feature in Airflow.
"weight_rule",
]
for arg in base_operator_args:
if arg in ignore_args or getattr(original_task, arg, None) is None:
continue
init_kwargs[arg] = getattr(original_task, arg)
for kwarg, default in base_operator_args_with_defaults.items():
if kwarg in ignore_args or getattr(original_task, kwarg, None) is None:
continue
init_kwargs[kwarg] = getattr(original_task, kwarg, default)
# Make sure that the operator overrides take precedence.
return dagster_operator_klass(**init_kwargs)
def get_params(func: Callable[..., Any]) -> tuple[set[str], dict[str, Any]]:
"""Retrieves the args and kwargs from the signature of a given function or method.
For kwargs, default values are retrieved as well.
Args:
func (Callable[..., Any]): The function or method to inspect.
Returns:
Tuple[Set[str], Dict[str, Any]]:
- A set of argument names that do not have default values.
- A dictionary of keyword argument names and their default values.
"""
# Get the function's signature
sig = inspect.signature(func)
# Initialize sets for args without defaults and kwargs with defaults
args_with_defaults = {}
args = set()
# Iterate over function parameters
for name, param in sig.parameters.items():
if param.default is inspect.Parameter.empty and name != "self": # Exclude 'self'
args.add(name)
else:
if name != "self": # Exclude 'self'
args_with_defaults[name] = param.default
return args, args_with_defaults
def matched_dag_id_task_id(asset_node: Mapping[str, Any], dag_id: str, task_id: str) -> bool:
json_metadata_entries = {
entry["label"]: entry["jsonString"]
for entry in asset_node["metadataEntries"]
if entry["__typename"] == "JsonMetadataEntry"
}
mapping_entry = json_metadata_entries.get(TASK_MAPPING_METADATA_KEY)
if mapping_entry:
task_handle_dict_list = json.loads(mapping_entry)
for task_handle_dict in task_handle_dict_list:
if task_handle_dict["dag_id"] == dag_id and task_handle_dict["task_id"] == task_id:
return True
return False
| DefaultProxyTaskToDagsterOperator |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_values_not_to_be_all_null.py | {
"start": 1878,
"end": 6833
} | class ____(MulticolumnMapExpectation):
"""Expect the certain set of columns not to be null at the same time."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"only_for": ["pandas"],
"data": {
"no_nulls": [5, 6, 5, 12, -3],
"some_nulls": [np.nan, -3, np.nan, np.nan, -9],
"one_non_null": [np.nan, 2, np.nan, np.nan, np.nan],
"all_nulls": [np.nan, np.nan, np.nan, np.nan, np.nan],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column_list": ["no_nulls", "some_nulls"]},
"out": {
"success": True,
},
},
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column_list": ["some_nulls", "one_non_null"],
"mostly": 0.4,
},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column_list": ["some_nulls", "one_non_null", "all_nulls"],
"mostly": 1,
},
"out": {
"success": False,
},
},
],
},
{
"only_for": ["sqlite"],
"data": {
"no_nulls": [5, 6, 5, 12, -3],
"some_nulls": [None, -3, None, None, -9],
"one_non_null": [None, 2, None, None, None],
"all_nulls": [None, None, None, None, None],
"one_null": [None, 1, 1, 1, 1],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column_list": ["no_nulls", "some_nulls"]},
"out": {
"success": True,
},
},
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column_list": ["some_nulls", "one_non_null"],
"mostly": 0.4,
},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column_list": ["some_nulls", "one_non_null", "all_nulls"],
"mostly": 1,
},
"out": {
"success": False,
},
},
{
"title": "basic_negative_test_2",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column_list": ["all_nulls", "one_null"],
"mostly": 1,
},
"out": {
"success": False,
},
},
],
},
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "multicolumn_values.not_all_null"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"column_list",
"mostly",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {"ignore_row_if": "never"}
library_metadata = {
"tags": ["null_check"], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@liyusa",
"@itaise", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
ExpectMulticolumnValuesNotToBeAllNull().print_diagnostic_checklist()
| ExpectMulticolumnValuesNotToBeAllNull |
python | astropy__astropy | astropy/utils/iers/iers.py | {
"start": 6000,
"end": 18968
} | class ____(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
"""Cached table, returned if ``open`` is called without arguments."""
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ascii file holding IERS data,
for passing on to the ``read`` class methods (further optional
arguments that are available for some IERS subclasses can be added).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
IERS
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
# TODO: the below is really ugly and probably a bad idea. Instead,
# there should probably be an IERSBase class, which provides
# useful methods but cannot really be used on its own, and then
# *perhaps* an IERS class which provides best defaults. But for
# backwards compatibility, we use the IERS_B reader for IERS here.
if cls is IERS:
cls.iers_table = IERS_B.read(**kwargs)
else:
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
def mjd_utc(self, jd1, jd2=0.0):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or `~astropy.time.Time`
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO + mjd) + jd2
return mjd, utc
def ut1_utc(self, jd1, jd2=0.0, return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1, jd2, ["UT1_UTC"], self.ut1_utc_source if return_status else None
)
def dcip_xy(self, jd1, jd2=0.0, return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD (default 0., ignored if jd1 is Time)
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
D_x : `~astropy.units.Quantity` ['angle']
x component of CIP correction for the requested times.
D_y : `~astropy.units.Quantity` ['angle']
y component of CIP correction for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1,
jd2,
["dX_2000A", "dY_2000A"],
self.dcip_source if return_status else None,
)
def pm_xy(self, jd1, jd2=0.0, return_status=False):
"""Interpolate polar motions from IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
PM_x : `~astropy.units.Quantity` ['angle']
x component of polar motion for the requested times.
PM_y : `~astropy.units.Quantity` ['angle']
y component of polar motion for the requested times.
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1, jd2, ["PM_x", "PM_y"], self.pm_source if return_status else None
)
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""
Check that the indices from interpolation match those after clipping
to the valid table range. This method gets overridden in the IERS_Auto
class because it has different requirements.
"""
if np.any(indices_orig != indices_clipped):
if conf.iers_degraded_accuracy == "error":
msg = (
"(some) times are outside of range covered by IERS table. Cannot"
" convert with full accuracy. To allow conversion with degraded"
" accuracy set astropy.utils.iers.conf.iers_degraded_accuracy to"
' "warn" or "silent". For more information about setting this'
" configuration parameter or controlling its value globally, see"
" the Astropy configuration system documentation"
" https://docs.astropy.org/en/stable/config/index.html."
)
raise IERSRangeError(msg)
elif conf.iers_degraded_accuracy == "warn":
# No IERS data covering the time(s) and user requested a warning.
msg = (
"(some) times are outside of range covered by IERS table, "
"accuracy is degraded."
)
warn(msg, IERSDegradedAccuracyWarning)
# No IERS data covering the time(s) and user is OK with no warning.
def _interpolate(self, jd1, jd2, columns, source=None):
mjd, utc = self.mjd_utc(jd1, jd2)
# enforce array
is_scalar = not hasattr(mjd, "__array__") or mjd.ndim == 0
if is_scalar:
mjd = np.array([mjd])
utc = np.array([utc])
self._refresh_table_as_needed(mjd)
# For typical format, will always find a match (since MJD are integer)
# hence, important to define which side we will be; this ensures
# self['MJD'][i-1]<=mjd<self['MJD'][i]
i = np.searchsorted(self["MJD"].value, mjd, side="right")
# Get index to MJD at or just below given mjd, clipping to ensure we
# stay in range of table (status will be set below for those outside)
i1 = np.clip(i, 1, len(self) - 1)
i0 = i1 - 1
mjd_0, mjd_1 = self["MJD"][i0].value, self["MJD"][i1].value
results = []
for column in columns:
val_0, val_1 = self[column][i0], self[column][i1]
d_val = val_1 - val_0
if column == "UT1_UTC":
# Check & correct for possible leap second (correcting diff.,
# not 1st point, since jump can only happen right at 2nd point)
d_val -= d_val.round()
# Linearly interpolate (which is what TEMPO does for UT1-UTC, but
# may want to follow IERS gazette #13 for more precise
# interpolation and correction for tidal effects;
# https://maia.usno.navy.mil/iers-gaz13)
val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val
# Do not extrapolate outside range, instead just propagate last values.
val[i == 0] = self[column][0]
val[i == len(self)] = self[column][-1]
if is_scalar:
val = val[0]
results.append(val)
if source:
# Set status to source, using the routine passed in.
status = source(i1)
# Check for out of range
status[i == 0] = TIME_BEFORE_IERS_RANGE
status[i == len(self)] = TIME_BEYOND_IERS_RANGE
if is_scalar:
status = status[0]
results.append(status)
return results
else:
# Pass in initial to np.max to allow things to work for empty mjd.
self._check_interpolate_indices(i1, i, np.max(mjd, initial=50000))
return results[0] if len(results) == 1 else results
def _refresh_table_as_needed(self, mjd):
"""
Potentially update the IERS table in place depending on the requested
time values in ``mdj`` and the time span of the table. The base behavior
is not to update the table. ``IERS_Auto`` overrides this method.
"""
def ut1_utc_source(self, i):
"""Source for UT1-UTC. To be overridden by subclass."""
return np.zeros_like(i)
def dcip_source(self, i):
"""Source for CIP correction. To be overridden by subclass."""
return np.zeros_like(i)
def pm_source(self, i):
"""Source for polar motion. To be overridden by subclass."""
return np.zeros_like(i)
@property
def time_now(self):
"""
Property to provide the current time, but also allow for explicitly setting
the _time_now attribute for testing purposes.
"""
try:
return self._time_now
except Exception:
return Time.now()
def _convert_col_for_table(self, col):
# Fill masked columns with units to avoid dropped-mask warnings
# when converting to Quantity.
# TODO: Once we support masked quantities, we can drop this and
# in the code below replace b_bad with table['UT1_UTC_B'].mask, etc.
if getattr(col, "unit", None) is not None and isinstance(col, MaskedColumn):
col = col.filled(np.nan)
return super()._convert_col_for_table(col)
| IERS |
python | matplotlib__matplotlib | lib/mpl_toolkits/axes_grid1/anchored_artists.py | {
"start": 10060,
"end": 17164
} | class ____(AnchoredOffsetbox):
def __init__(self, transform, label_x, label_y, length=0.15,
fontsize=0.08, loc='upper left', angle=0, aspect_ratio=1,
pad=0.4, borderpad=0.4, frameon=False, color='w', alpha=1,
sep_x=0.01, sep_y=0, fontproperties=None, back_length=0.15,
head_width=10, head_length=15, tail_width=2,
text_props=None, arrow_props=None,
**kwargs):
"""
Draw two perpendicular arrows to indicate directions.
Parameters
----------
transform : `~matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`!matplotlib.axes.Axes.transAxes`.
label_x, label_y : str
Label text for the x and y arrows
length : float, default: 0.15
Length of the arrow, given in coordinates of *transform*.
fontsize : float, default: 0.08
Size of label strings, given in coordinates of *transform*.
loc : str, default: 'upper left'
Location of the arrow. Valid locations are
'upper left', 'upper center', 'upper right',
'center left', 'center', 'center right',
'lower left', 'lower center', 'lower right'.
For backward compatibility, numeric values are accepted as well.
See the parameter *loc* of `.Legend` for details.
angle : float, default: 0
The angle of the arrows in degrees.
aspect_ratio : float, default: 1
The ratio of the length of arrow_x and arrow_y.
Negative numbers can be used to change the direction.
pad : float, default: 0.4
Padding around the labels and arrows, in fraction of the font size.
borderpad : float, default: 0.4
Border padding, in fraction of the font size.
frameon : bool, default: False
If True, draw a box around the arrows and labels.
color : str, default: 'white'
Color for the arrows and labels.
alpha : float, default: 1
Alpha values of the arrows and labels
sep_x, sep_y : float, default: 0.01 and 0 respectively
Separation between the arrows and labels in coordinates of
*transform*.
fontproperties : `~matplotlib.font_manager.FontProperties`, optional
Font properties for the label text.
back_length : float, default: 0.15
Fraction of the arrow behind the arrow crossing.
head_width : float, default: 10
Width of arrow head, sent to `.ArrowStyle`.
head_length : float, default: 15
Length of arrow head, sent to `.ArrowStyle`.
tail_width : float, default: 2
Width of arrow tail, sent to `.ArrowStyle`.
text_props, arrow_props : dict
Properties of the text and arrows, passed to `.TextPath` and
`.FancyArrowPatch`.
**kwargs
Keyword arguments forwarded to `.AnchoredOffsetbox`.
Attributes
----------
arrow_x, arrow_y : `~matplotlib.patches.FancyArrowPatch`
Arrow x and y
text_path_x, text_path_y : `~matplotlib.text.TextPath`
Path for arrow labels
p_x, p_y : `~matplotlib.patches.PathPatch`
Patch for arrow labels
box : `~matplotlib.offsetbox.AuxTransformBox`
Container for the arrows and labels.
Notes
-----
If *prop* is passed as a keyword argument, but *fontproperties* is
not, then *prop* is assumed to be the intended *fontproperties*.
Using both *prop* and *fontproperties* is not supported.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpl_toolkits.axes_grid1.anchored_artists import (
... AnchoredDirectionArrows)
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.random.random((10, 10)))
>>> arrows = AnchoredDirectionArrows(ax.transAxes, '111', '110')
>>> ax.add_artist(arrows)
>>> fig.show()
Using several of the optional parameters, creating downward pointing
arrow and high contrast text labels.
>>> import matplotlib.font_manager as fm
>>> fontprops = fm.FontProperties(family='monospace')
>>> arrows = AnchoredDirectionArrows(ax.transAxes, 'East', 'South',
... loc='lower left', color='k',
... aspect_ratio=-1, sep_x=0.02,
... sep_y=-0.01,
... text_props={'ec':'w', 'fc':'k'},
... fontproperties=fontprops)
"""
if arrow_props is None:
arrow_props = {}
if text_props is None:
text_props = {}
arrowstyle = ArrowStyle("Simple",
head_width=head_width,
head_length=head_length,
tail_width=tail_width)
if fontproperties is None and 'prop' in kwargs:
fontproperties = kwargs.pop('prop')
if 'color' not in arrow_props:
arrow_props['color'] = color
if 'alpha' not in arrow_props:
arrow_props['alpha'] = alpha
if 'color' not in text_props:
text_props['color'] = color
if 'alpha' not in text_props:
text_props['alpha'] = alpha
t_start = transform
t_end = t_start + transforms.Affine2D().rotate_deg(angle)
self.box = AuxTransformBox(t_end)
length_x = length
length_y = length*aspect_ratio
self.arrow_x = FancyArrowPatch(
(0, back_length*length_y),
(length_x, back_length*length_y),
arrowstyle=arrowstyle,
shrinkA=0.0,
shrinkB=0.0,
**arrow_props)
self.arrow_y = FancyArrowPatch(
(back_length*length_x, 0),
(back_length*length_x, length_y),
arrowstyle=arrowstyle,
shrinkA=0.0,
shrinkB=0.0,
**arrow_props)
self.box.add_artist(self.arrow_x)
self.box.add_artist(self.arrow_y)
text_path_x = TextPath((
length_x+sep_x, back_length*length_y+sep_y), label_x,
size=fontsize, prop=fontproperties)
self.p_x = PathPatch(text_path_x, transform=t_start, **text_props)
self.box.add_artist(self.p_x)
text_path_y = TextPath((
length_x*back_length+sep_x, length_y*(1-back_length)+sep_y),
label_y, size=fontsize, prop=fontproperties)
self.p_y = PathPatch(text_path_y, **text_props)
self.box.add_artist(self.p_y)
super().__init__(loc, pad=pad, borderpad=borderpad, child=self.box,
frameon=frameon, **kwargs)
| AnchoredDirectionArrows |
python | ansible__ansible | lib/ansible/plugins/action/validate_argument_spec.py | {
"start": 358,
"end": 3937
} | class ____(ActionBase):
""" Validate an arg spec"""
TRANSFERS_FILES = False
_requires_connection = False
def get_args_from_task_vars(self, argument_spec, task_vars):
"""
Get any arguments that may come from `task_vars`.
Expand templated variables so we can validate the actual values.
:param argument_spec: A dict of the argument spec.
:param task_vars: A dict of task variables.
:returns: A dict of values that can be validated against the arg spec.
"""
args = {}
for argument_name, argument_attrs in argument_spec.items():
if argument_name in task_vars:
args[argument_name] = task_vars[argument_name]
args = self._templar.template(args)
return args
def run(self, tmp=None, task_vars=None):
"""
Validate an argument specification against a provided set of data.
The `validate_argument_spec` module expects to receive the arguments:
- argument_spec: A dict whose keys are the valid argument names, and
whose values are dicts of the argument attributes (type, etc).
- provided_arguments: A dict whose keys are the argument names, and
whose values are the argument value.
:param tmp: Deprecated. Do not use.
:param task_vars: A dict of task variables.
:return: An action result dict, including a 'argument_errors' key with a
list of validation errors found.
"""
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# This action can be called from anywhere, so pass in some info about what it is
# validating args for so the error results make some sense
result['validate_args_context'] = self._task.args.get('validate_args_context', {})
if 'argument_spec' not in self._task.args:
raise AnsibleError('"argument_spec" arg is required in args: %s' % self._task.args)
# Get the task var called argument_spec. This will contain the arg spec
# data dict (for the proper entry point for a role).
argument_spec_data = self._task.args.get('argument_spec')
# the values that were passed in and will be checked against argument_spec
provided_arguments = self._task.args.get('provided_arguments', {})
if not isinstance(argument_spec_data, dict):
raise AnsibleError('Incorrect type for argument_spec, expected dict and got %s' % type(argument_spec_data))
if not isinstance(provided_arguments, dict):
raise AnsibleError('Incorrect type for provided_arguments, expected dict and got %s' % type(provided_arguments))
args_from_vars = self.get_args_from_task_vars(argument_spec_data, task_vars)
validator = ArgumentSpecValidator(argument_spec_data)
validation_result = validator.validate(combine_vars(args_from_vars, provided_arguments), validate_role_argument_spec=True)
if validation_result.error_messages:
result['failed'] = True
result['msg'] = 'Validation of arguments failed:\n%s' % '\n'.join(validation_result.error_messages)
result['argument_spec_data'] = argument_spec_data
result['argument_errors'] = validation_result.error_messages
return result
result['changed'] = False
result['msg'] = 'The arg spec validation passed'
return result
| ActionModule |
python | rapidsai__cudf | python/cudf_polars/tests/dsl/test_traversal.py | {
"start": 739,
"end": 8183
} | class ____(TypedDict):
expr_mapper: ExprTransformer
IRTransformer: TypeAlias = GenericTransformer[ir.IR, ir.IR, State]
def make_expr(dt, n1, n2):
a1 = expr.Col(dt, n1)
a2 = expr.Col(dt, n2)
return expr.BinOp(dt, plc.binaryop.BinaryOperator.MUL, a1, a2)
def test_traversal_unique():
dt = DataType(pl.Int8())
e1 = make_expr(dt, "a", "a")
unique_exprs = list(traversal([e1]))
assert len(unique_exprs) == 2
assert set(unique_exprs) == {expr.Col(dt, "a"), e1}
assert unique_exprs == [e1, expr.Col(dt, "a")]
e2 = make_expr(dt, "a", "b")
unique_exprs = list(traversal([e2]))
assert len(unique_exprs) == 3
assert set(unique_exprs) == {expr.Col(dt, "a"), expr.Col(dt, "b"), e2}
assert unique_exprs == [e2, expr.Col(dt, "a"), expr.Col(dt, "b")]
e3 = make_expr(dt, "b", "a")
unique_exprs = list(traversal([e3]))
assert len(unique_exprs) == 3
assert set(unique_exprs) == {expr.Col(dt, "a"), expr.Col(dt, "b"), e3}
assert unique_exprs == [e3, expr.Col(dt, "b"), expr.Col(dt, "a")]
def test_post_traversal_unique():
dt = DataType(pl.Int8())
e1 = make_expr(dt, "a", "a")
unique_exprs = list(post_traversal([e1]))
assert unique_exprs == [expr.Col(dt, "a"), e1]
e2 = make_expr(dt, "a", "b")
unique_exprs = list(post_traversal([e2]))
assert unique_exprs == [expr.Col(dt, "a"), expr.Col(dt, "b"), e2]
e3 = make_expr(dt, "b", "a")
unique_exprs = list(post_traversal([e3]))
assert unique_exprs == [expr.Col(dt, "b"), expr.Col(dt, "a"), e3]
def test_post_traversal_multi():
dt = DataType(pl.Int8())
e1 = make_expr(dt, "a", "a")
e2 = make_expr(dt, "a", "b")
e3 = make_expr(dt, "b", "a")
unique_exprs = list(post_traversal([e1, e2, e3]))
assert len(unique_exprs) == 5
assert unique_exprs == [
expr.Col(dt, "b"),
expr.Col(dt, "a"),
e3,
e2,
e1,
]
def rename(e, rec):
mapping = rec.state["mapping"]
if isinstance(e, expr.Col) and e.name in mapping:
return type(e)(e.dtype, mapping[e.name])
return reuse_if_unchanged(e, rec)
def test_caching_visitor():
dt = DataType(pl.Int8())
e1 = make_expr(dt, "a", "b")
mapper = CachingVisitor(rename, state={"mapping": {"b": "c"}})
renamed = mapper(e1)
assert renamed == make_expr(dt, "a", "c")
assert len(mapper.cache) == 3
e2 = make_expr(dt, "a", "a")
mapper = CachingVisitor(rename, state={"mapping": {"b": "c"}})
renamed = mapper(e2)
assert renamed == make_expr(dt, "a", "a")
assert len(mapper.cache) == 2
mapper = CachingVisitor(rename, state={"mapping": {"a": "c"}})
renamed = mapper(e2)
assert renamed == make_expr(dt, "c", "c")
assert len(mapper.cache) == 2
def test_noop_visitor():
dt = DataType(pl.Int8())
e1 = make_expr(dt, "a", "b")
mapper = make_recursive(rename, state={"mapping": {"b": "c"}})
renamed = mapper(e1)
assert renamed == make_expr(dt, "a", "c")
e2 = make_expr(dt, "a", "a")
mapper = make_recursive(rename, state={"mapping": {"b": "c"}})
renamed = mapper(e2)
assert renamed == make_expr(dt, "a", "a")
mapper = make_recursive(rename, state={"mapping": {"a": "c"}})
renamed = mapper(e2)
assert renamed == make_expr(dt, "c", "c")
def test_rewrite_ir_node():
df = pl.LazyFrame({"a": [1, 2, 1], "b": [1, 3, 4]})
q = df.group_by("a").agg(pl.col("b").sum()).sort("b")
t = Translator(q._ldf.visit(), pl.GPUEngine())
orig = t.translate_ir()
new_df = pl.DataFrame({"a": [1, 1, 2], "b": [-1, -2, -4]})
def replace_df(node, rec):
if isinstance(node, ir.DataFrameScan):
return ir.DataFrameScan(
node.schema,
new_df._df,
node.projection,
)
return reuse_if_unchanged(node, rec)
mapper = CachingVisitor(replace_df, state={})
new = mapper(orig)
result = new.evaluate(
cache={},
timer=None,
context=IRExecutionContext.from_config_options(t.config_options),
).to_polars()
expect = pl.DataFrame({"a": [2, 1], "b": [-4, -3]})
assert_frame_equal(result, expect)
def test_rewrite_scan_node(tmp_path):
left = pl.LazyFrame({"a": [1, 2, 3], "b": [1, 3, 4]})
right = pl.DataFrame({"a": [1, 4, 2], "c": [1, 2, 3]})
right.write_parquet(tmp_path / "right.pq")
right_s = pl.scan_parquet(tmp_path / "right.pq")
q = left.join(right_s, on="a", how="inner")
def replace_scan(node, rec):
if isinstance(node, ir.Scan):
return ir.DataFrameScan(
node.schema,
right._df,
node.with_columns,
)
return reuse_if_unchanged(node, rec)
mapper = CachingVisitor(replace_scan, state={})
t = Translator(q._ldf.visit(), pl.GPUEngine())
orig = t.translate_ir()
new = mapper(orig)
result = new.evaluate(
cache={},
timer=None,
context=IRExecutionContext.from_config_options(t.config_options),
).to_polars()
expect = q.collect()
assert_frame_equal(result, expect, check_row_order=False)
def test_rewrite_names_and_ops():
df = pl.LazyFrame({"a": [1, 2, 3], "b": [3, 4, 5], "c": [5, 6, 7], "d": [7, 9, 8]})
q = df.select(pl.col("a") - (pl.col("b") + pl.col("c") * 2), pl.col("d")).sort("d")
# We will replace a -> d, c -> d, and addition with multiplication
expect = (
df.select(
(pl.col("d") - (pl.col("b") * pl.col("d") * 2)).alias("a"), pl.col("d")
)
.sort("d")
.collect()
)
t = Translator(q._ldf.visit(), pl.GPUEngine())
qir = t.translate_ir()
@singledispatch
def _transform(e: expr.Expr, fn: ExprTransformer) -> expr.Expr:
raise NotImplementedError("Unhandled")
@_transform.register
def _(e: expr.Col, fn: ExprTransformer):
# We've added an extra key to the state, so ignore this type error.
mapping = fn.state["mapping"] # type: ignore[typeddict-item]
if e.name in mapping:
return type(e)(e.dtype, mapping[e.name])
return e
@_transform.register
def _(e: expr.BinOp, fn: ExprTransformer):
if e.op == plc.binaryop.BinaryOperator.ADD:
return type(e)(
e.dtype, plc.binaryop.BinaryOperator.MUL, *map(fn, e.children)
)
return reuse_if_unchanged(e, fn)
_transform.register(expr.Expr)(reuse_if_unchanged)
@singledispatch
def _rewrite(node: ir.IR, fn: IRTransformer) -> ir.IR:
raise NotImplementedError("Unhandled")
@_rewrite.register
def _(node: ir.Select, fn: IRTransformer):
expr_mapper = fn.state["expr_mapper"]
return type(node)(
node.schema,
[expr.NamedExpr(e.name, expr_mapper(e.value)) for e in node.exprs],
node.should_broadcast,
fn(node.children[0]),
)
_rewrite.register(ir.IR)(reuse_if_unchanged)
rewriter = CachingVisitor(
_rewrite,
state={
"expr_mapper": CachingVisitor(
_transform, state={"mapping": {"a": "d", "c": "d"}}
)
},
)
new_ir = rewriter(qir)
got = new_ir.evaluate(
cache={},
timer=None,
context=IRExecutionContext.from_config_options(t.config_options),
).to_polars()
assert_frame_equal(expect, got)
| State |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 20849,
"end": 30004
} | class ____(TestCase):
def test_pk_relations(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RelationalModel
fields = '__all__'
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
foreign_key = PrimaryKeyRelatedField(queryset=ForeignKeyTargetModel.objects.all())
one_to_one = PrimaryKeyRelatedField(queryset=OneToOneTargetModel.objects.all(), validators=[<UniqueValidator(queryset=RelationalModel.objects.all())>])
many_to_many = PrimaryKeyRelatedField(allow_empty=False, many=True, queryset=ManyToManyTargetModel.objects.all())
through = PrimaryKeyRelatedField(many=True, read_only=True)
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_nested_relations(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RelationalModel
depth = 1
fields = '__all__'
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
foreign_key = NestedSerializer(read_only=True):
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
one_to_one = NestedSerializer(read_only=True):
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
many_to_many = NestedSerializer(many=True, read_only=True):
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
through = NestedSerializer(many=True, read_only=True):
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_hyperlinked_relations(self):
class TestSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = RelationalModel
fields = '__all__'
expected = dedent("""
TestSerializer():
url = HyperlinkedIdentityField(view_name='relationalmodel-detail')
foreign_key = HyperlinkedRelatedField(queryset=ForeignKeyTargetModel.objects.all(), view_name='foreignkeytargetmodel-detail')
one_to_one = HyperlinkedRelatedField(queryset=OneToOneTargetModel.objects.all(), validators=[<UniqueValidator(queryset=RelationalModel.objects.all())>], view_name='onetoonetargetmodel-detail')
many_to_many = HyperlinkedRelatedField(allow_empty=False, many=True, queryset=ManyToManyTargetModel.objects.all(), view_name='manytomanytargetmodel-detail')
through = HyperlinkedRelatedField(many=True, read_only=True, view_name='throughtargetmodel-detail')
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_nested_hyperlinked_relations(self):
class TestSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = RelationalModel
depth = 1
fields = '__all__'
expected = dedent("""
TestSerializer():
url = HyperlinkedIdentityField(view_name='relationalmodel-detail')
foreign_key = NestedSerializer(read_only=True):
url = HyperlinkedIdentityField(view_name='foreignkeytargetmodel-detail')
name = CharField(max_length=100)
one_to_one = NestedSerializer(read_only=True):
url = HyperlinkedIdentityField(view_name='onetoonetargetmodel-detail')
name = CharField(max_length=100)
many_to_many = NestedSerializer(many=True, read_only=True):
url = HyperlinkedIdentityField(view_name='manytomanytargetmodel-detail')
name = CharField(max_length=100)
through = NestedSerializer(many=True, read_only=True):
url = HyperlinkedIdentityField(view_name='throughtargetmodel-detail')
name = CharField(max_length=100)
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_nested_hyperlinked_relations_starred_source(self):
class TestSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = RelationalModel
depth = 1
fields = '__all__'
extra_kwargs = {
'url': {
'source': '*',
}}
expected = dedent("""
TestSerializer():
url = HyperlinkedIdentityField(source='*', view_name='relationalmodel-detail')
foreign_key = NestedSerializer(read_only=True):
url = HyperlinkedIdentityField(view_name='foreignkeytargetmodel-detail')
name = CharField(max_length=100)
one_to_one = NestedSerializer(read_only=True):
url = HyperlinkedIdentityField(view_name='onetoonetargetmodel-detail')
name = CharField(max_length=100)
many_to_many = NestedSerializer(many=True, read_only=True):
url = HyperlinkedIdentityField(view_name='manytomanytargetmodel-detail')
name = CharField(max_length=100)
through = NestedSerializer(many=True, read_only=True):
url = HyperlinkedIdentityField(view_name='throughtargetmodel-detail')
name = CharField(max_length=100)
""")
self.maxDiff = None
self.assertEqual(repr(TestSerializer()), expected)
def test_nested_unique_together_relations(self):
class TestSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = UniqueTogetherModel
depth = 1
fields = '__all__'
expected = dedent("""
TestSerializer():
url = HyperlinkedIdentityField(view_name='uniquetogethermodel-detail')
foreign_key = NestedSerializer(read_only=True):
url = HyperlinkedIdentityField(view_name='foreignkeytargetmodel-detail')
name = CharField(max_length=100)
one_to_one = NestedSerializer(read_only=True):
url = HyperlinkedIdentityField(view_name='onetoonetargetmodel-detail')
name = CharField(max_length=100)
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_pk_reverse_foreign_key(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeyTargetModel
fields = ('id', 'name', 'reverse_foreign_key')
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
reverse_foreign_key = PrimaryKeyRelatedField(many=True, queryset=RelationalModel.objects.all())
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_pk_reverse_one_to_one(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = OneToOneTargetModel
fields = ('id', 'name', 'reverse_one_to_one')
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
reverse_one_to_one = PrimaryKeyRelatedField(queryset=RelationalModel.objects.all())
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_pk_reverse_many_to_many(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = ManyToManyTargetModel
fields = ('id', 'name', 'reverse_many_to_many')
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
reverse_many_to_many = PrimaryKeyRelatedField(many=True, queryset=RelationalModel.objects.all())
""")
self.assertEqual(repr(TestSerializer()), expected)
def test_pk_reverse_through(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = ThroughTargetModel
fields = ('id', 'name', 'reverse_through')
expected = dedent("""
TestSerializer():
id = IntegerField(label='ID', read_only=True)
name = CharField(max_length=100)
reverse_through = PrimaryKeyRelatedField(many=True, read_only=True)
""")
self.assertEqual(repr(TestSerializer()), expected)
| TestRelationalFieldMappings |
python | getsentry__sentry | src/sentry/web/frontend/debug/charts/debug_chart_renderer.py | {
"start": 10629,
"end": 14501
} | class ____(View):
def get(self, request: HttpRequest) -> HttpResponse:
ret = []
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_TOTAL_PERIOD, discover_total_period
),
"title": "Slack Discover total period",
}
)
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_TOTAL_PERIOD, discover_multi_y_axis
),
"title": "Slack Discover total period multi y axis",
}
)
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_TOTAL_PERIOD, discover_empty
),
"title": "Slack Discover total period empty",
}
)
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_TOTAL_DAILY, discover_total_daily
),
"title": "Discover total daily",
}
)
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_TOTAL_DAILY, discover_total_daily_multi
),
"title": "Discover total daily multi",
}
)
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_TOTAL_DAILY, discover_empty
),
"title": "Discover total daily empty",
}
)
ret.append(
{
"chart": charts.generate_chart(ChartType.SLACK_DISCOVER_TOP5_PERIOD, discover_top5),
"title": "Slack Discover top5 period",
}
)
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_TOP5_PERIOD, discover_empty
),
"title": "Slack Discover top5 empty",
}
)
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_TOP5_PERIOD_LINE, discover_top5
),
"title": "Slack Discover top5 period line",
}
)
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_TOP5_PERIOD_LINE, discover_empty
),
"title": "Slack Discover top5 period line empty",
}
)
ret.append(
{
"chart": charts.generate_chart(ChartType.SLACK_DISCOVER_TOP5_DAILY, discover_top5),
"title": "Slack Discover top5 daily",
}
)
ret.append(
{
"chart": charts.generate_chart(ChartType.SLACK_DISCOVER_TOP5_DAILY, discover_empty),
"title": "Slack Discover top5 daily empty",
}
)
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_PREVIOUS_PERIOD, discover_total_period
),
"title": "Slack Discover previous period",
}
)
ret.append(
{
"chart": charts.generate_chart(
ChartType.SLACK_DISCOVER_PREVIOUS_PERIOD, discover_multi_y_axis
),
"title": "Slack Discover previous period multi y axis",
}
)
return MailPreview(
html_template="sentry/debug/chart-renderer.html",
text_template="sentry/debug/chart-renderer.txt",
context={"charts": ret},
).render(request)
| DebugChartRendererView |
python | huggingface__transformers | src/transformers/generation/logits_process.py | {
"start": 85224,
"end": 86239
} | class ____(LogitsProcessor):
r"""
[`LogitsProcessor`] that removes all `nan` and `inf` values to avoid the generation method to fail. Note that using
the logits processor should only be used if necessary since it can slow down the generation method.
This logits processor has no `generate` example, as there shouldn't be a correct combination of flags that warrants
its use.
"""
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# set all nan values to 0.0
scores_processed = torch.where(scores != scores, 0.0, scores)
# set all +/-inf values to max/min possible value
scores_processed = torch.where(scores == float("inf"), torch.finfo(scores.dtype).max, scores_processed)
scores_processed = torch.where(scores == -float("inf"), torch.finfo(scores.dtype).min, scores_processed)
return scores_processed
| InfNanRemoveLogitsProcessor |
python | matplotlib__matplotlib | lib/matplotlib/patheffects.py | {
"start": 13321,
"end": 18390
} | class ____(AbstractPathEffect):
"""
A line-based PathEffect which draws a path with a ticked style.
This line style is frequently used to represent constraints in
optimization. The ticks may be used to indicate that one side
of the line is invalid or to represent a closed boundary of a
domain (i.e. a wall or the edge of a pipe).
The spacing, length, and angle of ticks can be controlled.
This line style is sometimes referred to as a hatched line.
See also the :doc:`/gallery/misc/tickedstroke_demo` example.
"""
def __init__(self, offset=(0, 0),
spacing=10.0, angle=45.0, length=np.sqrt(2),
**kwargs):
"""
Parameters
----------
offset : (float, float), default: (0, 0)
The (x, y) offset to apply to the path, in points.
spacing : float, default: 10.0
The spacing between ticks in points.
angle : float, default: 45.0
The angle between the path and the tick in degrees. The angle
is measured as if you were an ant walking along the curve, with
zero degrees pointing directly ahead, 90 to your left, -90
to your right, and 180 behind you. To change side of the ticks,
change sign of the angle.
length : float, default: 1.414
The length of the tick relative to spacing.
Recommended length = 1.414 (sqrt(2)) when angle=45, length=1.0
when angle=90 and length=2.0 when angle=60.
**kwargs
Extra keywords are stored and passed through to
:meth:`!AbstractPathEffect._update_gc`.
Examples
--------
See :doc:`/gallery/misc/tickedstroke_demo`.
"""
super().__init__(offset)
self._spacing = spacing
self._angle = angle
self._length = length
self._gc = kwargs
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
"""Draw the path with updated gc."""
# Do not modify the input! Use copy instead.
gc0 = renderer.new_gc()
gc0.copy_properties(gc)
gc0 = self._update_gc(gc0, self._gc)
trans = affine + self._offset_transform(renderer)
theta = -np.radians(self._angle)
trans_matrix = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
# Convert spacing parameter to pixels.
spacing_px = renderer.points_to_pixels(self._spacing)
# Transform before evaluation because to_polygons works at resolution
# of one -- assuming it is working in pixel space.
transpath = affine.transform_path(tpath)
# Evaluate path to straight line segments that can be used to
# construct line ticks.
polys = transpath.to_polygons(closed_only=False)
for p in polys:
x = p[:, 0]
y = p[:, 1]
# Can not interpolate points or draw line if only one point in
# polyline.
if x.size < 2:
continue
# Find distance between points on the line
ds = np.hypot(x[1:] - x[:-1], y[1:] - y[:-1])
# Build parametric coordinate along curve
s = np.concatenate(([0.0], np.cumsum(ds)))
s_total = s[-1]
num = int(np.ceil(s_total / spacing_px)) - 1
# Pick parameter values for ticks.
s_tick = np.linspace(spacing_px/2, s_total - spacing_px/2, num)
# Find points along the parameterized curve
x_tick = np.interp(s_tick, s, x)
y_tick = np.interp(s_tick, s, y)
# Find unit vectors in local direction of curve
delta_s = self._spacing * .001
u = (np.interp(s_tick + delta_s, s, x) - x_tick) / delta_s
v = (np.interp(s_tick + delta_s, s, y) - y_tick) / delta_s
# Normalize slope into unit slope vector.
n = np.hypot(u, v)
mask = n == 0
n[mask] = 1.0
uv = np.array([u / n, v / n]).T
uv[mask] = np.array([0, 0]).T
# Rotate and scale unit vector into tick vector
dxy = np.dot(uv, trans_matrix) * self._length * spacing_px
# Build tick endpoints
x_end = x_tick + dxy[:, 0]
y_end = y_tick + dxy[:, 1]
# Interleave ticks to form Path vertices
xyt = np.empty((2 * num, 2), dtype=x_tick.dtype)
xyt[0::2, 0] = x_tick
xyt[1::2, 0] = x_end
xyt[0::2, 1] = y_tick
xyt[1::2, 1] = y_end
# Build up vector of Path codes
codes = np.tile([Path.MOVETO, Path.LINETO], num)
# Construct and draw resulting path
h = Path(xyt, codes)
# Transform back to data space during render
renderer.draw_path(gc0, h, affine.inverted() + trans, rgbFace)
gc0.restore()
withTickedStroke = _subclass_with_normal(effect_class=TickedStroke)
| TickedStroke |
python | getsentry__sentry | tests/sentry/codecov/endpoints/test_sync_repos.py | {
"start": 220,
"end": 5780
} | class ____(APITestCase):
endpoint_name = "sentry-api-0-repositories-sync"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(email="user@example.com")
self.organization = self.create_organization(owner=self.user)
self.integration = self.create_integration(
organization=self.organization,
external_id="1234",
name="testowner",
provider="github",
)
self.login_as(user=self.user)
def reverse_url(self):
"""Custom reverse URL method to handle required URL parameters"""
return reverse(
self.endpoint_name,
kwargs={
"organization_id_or_slug": self.organization.slug,
"owner": self.integration.id,
},
)
@patch("sentry.codecov.endpoints.sync_repos.sync_repos.CodecovApiClient")
def test_post_calls_api(self, mock_codecov_client_class) -> None:
"""Test that starts repository sync process"""
mock_graphql_response = {
"data": {
"syncRepos": {
"isSyncing": True,
}
}
}
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
url = self.reverse_url()
response = self.client.post(url, data={})
mock_codecov_client_class.assert_called_once_with(git_provider_org="testowner")
mock_codecov_client_instance.query.assert_called_once_with(query=ANY, variables={})
assert response.status_code == 200
assert response.data["isSyncing"] is True
@patch("sentry.codecov.endpoints.sync_repos.sync_repos.CodecovApiClient")
def test_get_calls_api(self, mock_codecov_client_class) -> None:
"""Test that gets sync status"""
mock_graphql_response = {
"data": {
"me": {
"isSyncing": True,
}
}
}
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
url = self.reverse_url()
response = self.client.get(url, data={})
mock_codecov_client_class.assert_called_once_with(git_provider_org="testowner")
mock_codecov_client_instance.query.assert_called_once_with(query=ANY, variables={})
assert response.status_code == 200
assert response.data["isSyncing"] is True
@patch("sentry.codecov.endpoints.sync_repos.serializers.sentry_sdk.capture_exception")
@patch("sentry.codecov.endpoints.sync_repos.serializers.logger.exception")
def test_serializer_exception_handling(self, mock_logger, mock_capture_exception):
malformed_response = {"wrong_key": "value"}
serializer = SyncReposSerializer(context={"http_method": "POST"})
with pytest.raises(KeyError):
serializer.to_representation(malformed_response)
mock_capture_exception.assert_called_once()
mock_logger.assert_called_once()
@patch("sentry.codecov.endpoints.sync_repos.sync_repos.CodecovApiClient")
def test_scope_map_enforcement(self, mock_codecov_client_class) -> None:
"""Test that the scope map permissions are properly enforced"""
# Mock the API response for POST request
mock_graphql_response = {
"data": {
"syncRepos": {
"isSyncing": True,
}
}
}
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
# Create a user with only org:read permission
user_with_read_only = self.create_user("readonly@test.com")
self.create_member(
user=user_with_read_only,
organization=self.organization,
role="member", # member role has org:read
)
# Create a user with org:write permission
user_with_write = self.create_user("write@test.com")
self.create_member(
user=user_with_write,
organization=self.organization,
role="admin", # admin role has org:write
)
# Create a user with no permissions
user_without_permissions = self.create_user("noperms@test.com")
# Don't add them to the organization
url = self.reverse_url()
# Test that user with org:read can access the endpoint
self.login_as(user_with_read_only)
response = self.client.post(url, data={})
assert response.status_code == 200
# Test that user with org:write can access the endpoint
self.login_as(user_with_write)
response = self.client.post(url, data={})
assert response.status_code == 200
# Test that user without permissions cannot access the endpoint
self.login_as(user_without_permissions)
response = self.client.post(url, data={})
assert response.status_code == 403
| SyncReposEndpointTest |
python | keras-team__keras | keras/src/optimizers/optimizer_sparse_test.py | {
"start": 3734,
"end": 11333
} | class ____(testing.TestCase):
@parameterized.named_parameters(TEST_CASES)
def test_sparse_gradients(
self,
optimizer_class,
init_kwargs={},
expect_model_sparse_variable_updates=False,
expect_optimizer_sparse_variable_updates=False,
):
# This test verifies that:
# - Optimizers use Keras ops everywhere instead of native operators
# (e.g. `ops.add()` instead of `+`) where sparse gradients are handled
# - The used ops handle sparse gradients
# - Optimizers use `self.assign/assign_add/assign_sub` instead of
# calling the method on the variable directly. Otherwise, the sparse
# updates are densified before being applied.
# - For some optimizers, a sparse gradient actually results in a sparse
# variable update as per `expect_model_sparse_variable_updates` and
# `expect_optimizer_sparse_variable_updates`
model_variable = backend.Variable(initializer="ones", shape=(5, 10))
optimizer = optimizer_class(**init_kwargs)
# Mocking "tensorflow.Variable" won't work as it gets substituted with
# the resource variable class.
if backend.backend() == "tensorflow":
import tensorflow as tf
grad = tf.IndexedSlices(0.5 * ops.ones((3, 10)), (0, 2, 4), (5, 10))
sparse_class = tf.IndexedSlices
variable_class = model_variable._value.__class__
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
grad = jax_sparse.BCOO(
(0.5 * ops.ones((3, 10)), ((0,), (2,), (4,))), shape=(5, 10)
)
sparse_class = jax_sparse.JAXSparse
variable_class = model_variable.__class__
else:
self.fail(f"Sparse is unsupported with backend {backend.backend()}")
optimizer_to_patch = (
optimizer.inner_optimizer
if isinstance(optimizer, optimizers.LossScaleOptimizer)
else optimizer
)
model_sparse_variable_updates = False
optimizer_sparse_variable_updates = False
def mock_optimizer_assign(variable, value):
nonlocal model_sparse_variable_updates
nonlocal optimizer_sparse_variable_updates
if isinstance(variable, backend.Variable):
variable = variable._value
if isinstance(value, sparse_class):
if variable is model_variable._value:
model_sparse_variable_updates = True
elif any(variable is v._value for v in optimizer.variables):
optimizer_sparse_variable_updates = True
def mock_variable_assign(variable, value):
# Make an exception for scalar variables
if len(variable.shape):
pytest.fail(
"Optimizer is calling `assign`, `assign_add` or "
"`assign_sub` directly on a variable. Use "
"`self.assign/assign_add/assign_sub(variable, value)` "
"instead to support sparse updates."
)
# patch "_apply_weight_decay" to exclude this special case.
# patch the optimizer "assign" methods to detect sparse updates.
# patch the tf.Variable "assign" methods to detect direct assign calls.
with (
mock.patch.object(
optimizer_to_patch, "_apply_weight_decay", autospec=True
),
mock.patch.object(
optimizer_to_patch, "assign", autospec=True
) as optimizer_assign,
mock.patch.object(
optimizer_to_patch, "assign_add", autospec=True
) as optimizer_assign_add,
mock.patch.object(
optimizer_to_patch, "assign_sub", autospec=True
) as optimizer_assign_sub,
mock.patch.object(
variable_class, "assign", autospec=True
) as variable_assign,
mock.patch.object(
variable_class, "assign_add", autospec=True
) as variable_assign_add,
mock.patch.object(
variable_class, "assign_sub", autospec=True
) as variable_assign_sub,
):
optimizer_assign.side_effect = mock_optimizer_assign
optimizer_assign_add.side_effect = mock_optimizer_assign
optimizer_assign_sub.side_effect = mock_optimizer_assign
variable_assign.side_effect = mock_variable_assign
variable_assign_add.side_effect = mock_variable_assign
variable_assign_sub.side_effect = mock_variable_assign
optimizer.apply([grad], [model_variable])
self.assertEqual(
model_sparse_variable_updates, expect_model_sparse_variable_updates
)
self.assertEqual(
optimizer_sparse_variable_updates,
expect_optimizer_sparse_variable_updates,
)
@parameterized.named_parameters(TEST_CASES)
def test_sparse_correctness(
self, optimizer_class, init_kwargs={}, **kwargs
):
# This test verifies that applying a sparse gradient gives the same
# numerical results as the same dense gradient.
optimizer_sparse = optimizer_class(**init_kwargs)
optimizer_dense = optimizer_class(**init_kwargs)
var_sparse = backend.Variable(initializer="ones", shape=(5, 3, 2))
var_dense = backend.Variable(initializer="ones", shape=(5, 3, 2))
stateless = backend.backend() == "jax"
if stateless:
optimizer_sparse.build([var_sparse])
optimizer_dense.build([var_dense])
optimizer_sparse_vars = optimizer_sparse.variables
optimizer_dense_vars = optimizer_dense.variables
var_sparse_values = [var_sparse.value]
var_dense_values = [var_dense.value]
for i in range(5):
if backend.backend() == "tensorflow":
import tensorflow as tf
grad_sparse = tf.IndexedSlices(
values=ops.ones((3, 3, 2)) * (10.0 - i),
indices=(0, 2, 4),
dense_shape=(5, 3, 2),
)
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
grad_sparse = jax_sparse.BCOO(
(ops.ones((3, 3, 2)) * (10.0 - i), ((0,), (2,), (4,))),
shape=(5, 3, 2),
)
else:
self.fail(
f"Sparse is unsupported with backend {backend.backend()}"
)
grad_dense = ops.convert_to_tensor(grad_sparse, sparse=False)
if stateless:
(
var_sparse_values,
optimizer_sparse_vars,
) = optimizer_sparse.stateless_apply(
optimizer_sparse_vars, [grad_sparse], var_sparse_values
)
(
var_dense_values,
optimizer_dense_vars,
) = optimizer_dense.stateless_apply(
optimizer_dense_vars, [grad_dense], var_dense_values
)
self.assertAllClose(var_sparse_values[0], var_dense_values[0])
else:
optimizer_sparse.apply([grad_sparse], [var_sparse])
optimizer_dense.apply([grad_dense], [var_dense])
self.assertAllClose(var_sparse.value, var_dense.value)
| OptimizerSparseTest |
python | pytorch__pytorch | torch/nn/modules/padding.py | {
"start": 7541,
"end": 7969
} | class ____(Module):
__constants__ = ["padding", "value"]
value: float
padding: Sequence[int]
def __init__(self, value: float) -> None:
super().__init__()
self.value = value
def forward(self, input: Tensor) -> Tensor:
return F.pad(input, self.padding, "constant", self.value)
def extra_repr(self) -> str:
return f"padding={self.padding}, value={self.value}"
| _ConstantPadNd |
python | lxml__lxml | doc/s5/ep2008/atom.py | {
"start": 12688,
"end": 13211
} | class ____(_EntryElement):
"""
For ``<category>`` elements.
"""
term = _attr_element_property('term')
scheme = _attr_element_property('scheme', None)
label = _attr_element_property('label', None)
def as_string(self):
"""
Returns the string representation of the category, using the
GData convention of ``{scheme}term``
"""
if self.scheme is not None:
return '{%s}%s' % (self.scheme, self.term)
else:
return self.term
| Category |
python | tensorflow__tensorflow | tensorflow/python/training/momentum.py | {
"start": 1008,
"end": 7785
} | class ____(optimizer.Optimizer):
"""Optimizer that implements the Momentum algorithm.
Computes (if `use_nesterov = False`):
```
accumulation = momentum * accumulation + gradient
variable -= learning_rate * accumulation
```
Note that in the dense version of this algorithm, `accumulation` is updated
and applied regardless of a gradient's value, whereas the sparse version (when
the gradient is an `IndexedSlices`, typically because of `tf.gather` or an
embedding) only updates variable slices and corresponding `accumulation` terms
when that part of the variable was used in the forward pass.
@compatibility(TF2)
tf.compat.v1.train.MomentumOptimizer is compatible with eager mode and
`tf.function`.
When eager execution is enabled, `learning_rate`,`momentum`, can each be a
callable that takes no arguments and returns the actual value to use. This
can be useful for changing these values across different invocations of
optimizer functions.
To switch to native TF2 style, please directly use
[`tf.keras.optimizers.SGD`]
(https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/SGD)
with the `momentum` argument.
#### Structural mapping to native TF2
Before:
```python
optimizer = tf.compat.v1.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum,
use_nesterov=use_nesterov)
```
After:
```python
optimizer = tf.keras.optimizers.SGD(
learning_rate=learning_rate,
momentum=momentum,
nesterov=use_nesterov)
```
#### How to map arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| ------------------ | ------------- | ------------------------------- |
| `learning_rate` | `learning_rate`| Be careful of setting |
: : : learning_rate tensor value computed from the global step. :
: : : In TF1 this was usually meant to imply a dynamic learning rate and :
: : : would recompute in each step. In TF2 (eager + function) it will :
: : : treat it as a scalar value that only gets computed once instead of :
: : : a symbolic placeholder to be computed each time. :
| `momentum` | `momentum` | - |
| `use_locking` | - | Not applicable in TF2. |
| `use_nesterov` | `nesterov` | - |
#### Before & after usage example
Before:
```python
x = tf.Variable([1,2,3], dtype=tf.float32)
grad = tf.constant([0.1, 0.2, 0.3])
optimizer = tf.compat.v1.train.MomentumOptimizer(
learning_rate=0.001,
momentum=0.9,
use_nesterov=False)
optimizer.apply_gradients(zip([grad], [x]))
```
After:
```python
x = tf.Variable([1,2,3], dtype=tf.float32)
grad = tf.constant([0.1, 0.2, 0.3])
optimizer = tf.keras.optimizers.SGD(
learning_rate=0.001,
momentum=0.9,
nesterov=False)
optimizer.apply_gradients(zip([grad], [x]))
```
@end_compatibility
"""
def __init__(self, learning_rate, momentum,
use_locking=False, name="Momentum", use_nesterov=False):
"""Construct a new Momentum optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
momentum: A `Tensor` or a floating point value. The momentum.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Momentum".
use_nesterov: If `True` use Nesterov Momentum.
See (Sutskever et al., 2013).
This implementation always computes gradients at the value of the
variable(s) passed to the optimizer. Using Nesterov Momentum makes the
variable(s) track the values called `theta_t + mu*v_t` in the paper.
This implementation is an approximation of the original formula, valid
for high values of momentum. It will compute the "adjusted gradient"
in NAG by assuming that the new gradient will be estimated by the
current average gradient plus the product of momentum and the change
in the average gradient.
References:
On the importance of initialization and momentum in deep learning:
[Sutskever et al., 2013]
(http://proceedings.mlr.press/v28/sutskever13.html)
([pdf](http://proceedings.mlr.press/v28/sutskever13.pdf))
"""
super(MomentumOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._momentum = momentum
self._use_nesterov = use_nesterov
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, "momentum", self._name)
def _prepare(self):
learning_rate = self._learning_rate
if callable(learning_rate):
learning_rate = learning_rate()
self._learning_rate_tensor = ops.convert_to_tensor(learning_rate,
name="learning_rate")
momentum = self._momentum
if callable(momentum):
momentum = momentum()
self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum")
def _apply_dense(self, grad, var):
mom = self.get_slot(var, "momentum")
return gen_training_ops.apply_momentum(
var, mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_dense(self, grad, var):
mom = self.get_slot(var, "momentum")
return gen_training_ops.resource_apply_momentum(
var.handle, mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
grad,
math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)
def _apply_sparse(self, grad, var):
mom = self.get_slot(var, "momentum")
return gen_training_ops.sparse_apply_momentum(
var, mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.values, grad.indices,
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_sparse(self, grad, var, indices):
mom = self.get_slot(var, "momentum")
return gen_training_ops.resource_sparse_apply_momentum(
var.handle, mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
grad, indices,
math_ops.cast(self._momentum_tensor, grad.dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)
| MomentumOptimizer |
python | fastapi__sqlmodel | docs_src/tutorial/many_to_many/tutorial003.py | {
"start": 120,
"end": 525
} | class ____(SQLModel, table=True):
team_id: Optional[int] = Field(
default=None, foreign_key="team.id", primary_key=True
)
hero_id: Optional[int] = Field(
default=None, foreign_key="hero.id", primary_key=True
)
is_training: bool = False
team: "Team" = Relationship(back_populates="hero_links")
hero: "Hero" = Relationship(back_populates="team_links")
| HeroTeamLink |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/datatree.py | {
"start": 2510,
"end": 3406
} | class ____:
"""Represents a transition where multiple choices can be made as to what
to drawn."""
constraints: ChoiceConstraintsT
choice_type: ChoiceTypeT
children: dict[ChoiceT, "TreeNode"] = field(repr=False)
@property
def max_children(self) -> int:
max_children = compute_max_children(self.choice_type, self.constraints)
assert max_children > 0
return max_children
def _repr_pretty_(self, p: "RepresentationPrinter", cycle: bool) -> None:
assert cycle is False
for i, (value, child) in enumerate(self.children.items()):
if i > 0:
p.break_()
p.text(
_node_pretty(self.choice_type, value, self.constraints, forced=False)
)
with p.indent(2):
p.break_()
p.pretty(child)
@dataclass(slots=True, frozen=True)
| Branch |
python | ray-project__ray | python/ray/data/_internal/datasource/iceberg_datasource.py | {
"start": 8660,
"end": 18522
} | class ____(Datasource):
"""
Iceberg datasource to read Iceberg tables into a Ray Dataset. This module heavily
uses PyIceberg to read iceberg tables. All the routines in this class override
`ray.data.Datasource`.
"""
def __init__(
self,
table_identifier: str,
row_filter: Union[str, "BooleanExpression"] = None,
selected_fields: Tuple[str, ...] = ("*",),
snapshot_id: Optional[int] = None,
scan_kwargs: Optional[Dict[str, Any]] = None,
catalog_kwargs: Optional[Dict[str, Any]] = None,
):
"""
Initialize an IcebergDatasource.
Args:
table_identifier: Fully qualified table identifier (i.e.,
"db_name.table_name")
row_filter: A PyIceberg BooleanExpression to use to filter the data *prior*
to reading
selected_fields: Which columns from the data to read, passed directly to
PyIceberg's load functions
snapshot_id: Optional snapshot ID for the Iceberg table
scan_kwargs: Optional arguments to pass to PyIceberg's Table.scan()
function
catalog_kwargs: Optional arguments to use when setting up the Iceberg
catalog
"""
# Initialize parent class to set up predicate pushdown mixin
super().__init__()
_check_import(self, module="pyiceberg", package="pyiceberg")
from pyiceberg.expressions import AlwaysTrue
self._scan_kwargs = scan_kwargs if scan_kwargs is not None else {}
self._catalog_kwargs = catalog_kwargs if catalog_kwargs is not None else {}
if "name" in self._catalog_kwargs:
self._catalog_name = self._catalog_kwargs.pop("name")
else:
self._catalog_name = "default"
self.table_identifier = table_identifier
self._row_filter = row_filter if row_filter is not None else AlwaysTrue()
# Convert selected_fields to projection_map (identity mapping if specified)
# Note: Empty tuple () means no columns, None/"*" means all columns
if selected_fields is None or selected_fields == ("*",):
self._projection_map = None
else:
self._projection_map = {col: col for col in selected_fields}
if snapshot_id:
self._scan_kwargs["snapshot_id"] = snapshot_id
self._plan_files = None
self._table = None
def _get_catalog(self) -> "Catalog":
from pyiceberg import catalog
return catalog.load_catalog(self._catalog_name, **self._catalog_kwargs)
@property
def table(self) -> "Table":
"""
Return the table reference from the catalog
"""
if self._table is None:
catalog = self._get_catalog()
self._table = catalog.load_table(self.table_identifier)
return self._table
@property
def plan_files(self) -> List["FileScanTask"]:
"""
Return the plan files specified by this query
"""
# Calculate and cache the plan_files if they don't already exist
if self._plan_files is None:
data_scan = self._get_data_scan()
self._plan_files = data_scan.plan_files()
return self._plan_files
def _get_combined_filter(self) -> "BooleanExpression":
"""Get the combined filter including both row_filter and pushed-down predicates."""
combined_filter = self._row_filter
if self._predicate_expr is not None:
# Convert Ray Data expression to PyIceberg expression using internal visitor
visitor = _IcebergExpressionVisitor()
iceberg_filter = visitor.visit(self._predicate_expr)
# Combine with existing row_filter using AND
from pyiceberg.expressions import AlwaysTrue, And
if not isinstance(combined_filter, AlwaysTrue):
combined_filter = And(combined_filter, iceberg_filter)
else:
combined_filter = iceberg_filter
return combined_filter
def _get_data_scan(self) -> "DataScan":
# Get the combined filter
combined_filter = self._get_combined_filter()
# Convert back to tuple for PyIceberg API (None -> ("*",))
data_columns = self._get_data_columns()
selected_fields = ("*",) if data_columns is None else tuple(data_columns)
data_scan = self.table.scan(
row_filter=combined_filter,
selected_fields=selected_fields,
**self._scan_kwargs,
)
return data_scan
def estimate_inmemory_data_size(self) -> Optional[int]:
# Approximate the size by using the plan files - this will not
# incorporate the deletes, but that's a reasonable approximation
# task
return sum(task.file.file_size_in_bytes for task in self.plan_files)
def supports_predicate_pushdown(self) -> bool:
"""Returns True to indicate this datasource supports predicate pushdown."""
return True
def supports_projection_pushdown(self) -> bool:
"""Returns True to indicate this datasource supports projection pushdown."""
return True
@staticmethod
def _distribute_tasks_into_equal_chunks(
plan_files: Iterable["FileScanTask"], n_chunks: int
) -> List[List["FileScanTask"]]:
"""
Implement a greedy knapsack algorithm to distribute the files in the scan
across tasks, based on their file size, as evenly as possible
"""
chunks = [list() for _ in range(n_chunks)]
chunk_sizes = [(0, chunk_id) for chunk_id in range(n_chunks)]
heapq.heapify(chunk_sizes)
# From largest to smallest, add the plan files to the smallest chunk one at a
# time
for plan_file in sorted(
plan_files, key=lambda f: f.file.file_size_in_bytes, reverse=True
):
smallest_chunk = heapq.heappop(chunk_sizes)
chunks[smallest_chunk[1]].append(plan_file)
heapq.heappush(
chunk_sizes,
(
smallest_chunk[0] + plan_file.file.file_size_in_bytes,
smallest_chunk[1],
),
)
return chunks
def get_read_tasks(
self, parallelism: int, per_task_row_limit: Optional[int] = None
) -> List[ReadTask]:
from pyiceberg.io import pyarrow as pyi_pa_io
from pyiceberg.manifest import DataFileContent
# Get the PyIceberg scan
data_scan = self._get_data_scan()
# Get the plan files in this query
plan_files = self.plan_files
# Get the projected schema for this scan, given all the row filters,
# snapshot ID, etc.
projected_schema = data_scan.projection()
# Get the arrow schema, to set in the metadata
pya_schema = pyi_pa_io.schema_to_pyarrow(projected_schema)
# Set the n_chunks to the min of the number of plan files and the actual
# requested n_chunks, so that there are no empty tasks
if parallelism > len(list(plan_files)):
parallelism = len(list(plan_files))
logger.warning(
f"Reducing the parallelism to {parallelism}, as that is the"
"number of files"
)
# Get required properties for reading tasks - table IO, table metadata,
# row filter, case sensitivity,limit and projected schema to pass
# them directly to `_get_read_task` to avoid capture of `self` reference
# within the closure carrying substantial overhead invoking these tasks
#
# See https://github.com/ray-project/ray/issues/49107 for more context
table_io = self.table.io
table_metadata = self.table.metadata
row_filter = self._get_combined_filter()
case_sensitive = self._scan_kwargs.get("case_sensitive", True)
limit = self._scan_kwargs.get("limit")
get_read_task = partial(
_get_read_task,
table_io=table_io,
table_metadata=table_metadata,
row_filter=row_filter,
case_sensitive=case_sensitive,
limit=limit,
schema=projected_schema,
column_rename_map=self.get_column_renames(),
)
read_tasks = []
# Chunk the plan files based on the requested parallelism
for chunk_tasks in IcebergDatasource._distribute_tasks_into_equal_chunks(
plan_files, parallelism
):
unique_deletes: Set[DataFile] = set(
itertools.chain.from_iterable(
[task.delete_files for task in chunk_tasks]
)
)
# Get a rough estimate of the number of deletes by just looking at
# position deletes. Equality deletes are harder to estimate, as they
# can delete multiple rows.
position_delete_count = sum(
delete.record_count
for delete in unique_deletes
if delete.content == DataFileContent.POSITION_DELETES
)
metadata = BlockMetadata(
num_rows=sum(task.file.record_count for task in chunk_tasks)
- position_delete_count,
size_bytes=sum(task.length for task in chunk_tasks),
input_files=[task.file.file_path for task in chunk_tasks],
exec_stats=None,
)
read_tasks.append(
ReadTask(
read_fn=lambda tasks=chunk_tasks: get_read_task(tasks),
metadata=metadata,
schema=pya_schema,
per_task_row_limit=per_task_row_limit,
)
)
return read_tasks
| IcebergDatasource |
python | pandas-dev__pandas | pandas/tests/arrays/categorical/test_missing.py | {
"start": 276,
"end": 4840
} | class ____:
def test_isna(self):
exp = np.array([False, False, True])
cat = Categorical(["a", "b", np.nan])
res = cat.isna()
tm.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = list(range(10))
labels = np.random.default_rng(2).integers(0, 10, 20)
labels[::5] = -1
msg = "Constructing a Categorical with a dtype and values containing"
with tm.assert_produces_warning(Pandas4Warning, match=msg):
cat = Categorical(labels, categories)
repr(cat)
tm.assert_numpy_array_equal(isna(cat), labels == -1)
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
c[1] = np.nan
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
def test_set_dtype_nans(self):
c = Categorical(["a", "b", np.nan])
result = c._set_dtype(CategoricalDtype(["a", "c"]), copy=True)
tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype="int8"))
def test_set_item_nan(self):
cat = Categorical([1, 2, 3])
cat[1] = np.nan
exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize("named", [True, False])
def test_fillna_iterable_category(self, named):
# https://github.com/pandas-dev/pandas/issues/21097
if named:
Point = collections.namedtuple("Point", "x y")
else:
Point = lambda *args: args # tuple
cat = Categorical(np.array([Point(0, 0), Point(0, 1), None], dtype=object))
result = cat.fillna(Point(0, 0))
expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
tm.assert_categorical_equal(result, expected)
# Case where the Point is not among our categories; we want ValueError,
# not NotImplementedError GH#41914
cat = Categorical(np.array([Point(1, 0), Point(0, 1), None], dtype=object))
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(TypeError, match=msg):
cat.fillna(Point(0, 0))
def test_fillna_array(self):
# accept Categorical or ndarray value if it holds appropriate values
cat = Categorical(["A", "B", "C", None, None])
other = cat.fillna("C")
result = cat.fillna(other)
tm.assert_categorical_equal(result, other)
assert isna(cat[-1]) # didn't modify original inplace
other = np.array(["A", "B", "C", "B", "A"])
result = cat.fillna(other)
expected = Categorical(["A", "B", "C", "B", "A"], dtype=cat.dtype)
tm.assert_categorical_equal(result, expected)
assert isna(cat[-1]) # didn't modify original inplace
@pytest.mark.parametrize(
"a1, a2, categories",
[
(["a", "b", "c"], [np.nan, "a", "b"], ["a", "b", "c"]),
([1, 2, 3], [np.nan, 1, 2], [1, 2, 3]),
],
)
def test_compare_categorical_with_missing(self, a1, a2, categories):
# GH 28384
cat_type = CategoricalDtype(categories)
# !=
result = Series(a1, dtype=cat_type) != Series(a2, dtype=cat_type)
expected = Series(a1) != Series(a2)
tm.assert_series_equal(result, expected)
# ==
result = Series(a1, dtype=cat_type) == Series(a2, dtype=cat_type)
expected = Series(a1) == Series(a2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"na_value, dtype",
[
(pd.NaT, "datetime64[s]"),
(None, "float64"),
(np.nan, "float64"),
(pd.NA, "float64"),
],
)
def test_categorical_only_missing_values_no_cast(self, na_value, dtype):
# GH#44900
result = Categorical([na_value, na_value])
tm.assert_index_equal(result.categories, Index([], dtype=dtype))
| TestCategoricalMissing |
python | dagster-io__dagster | python_modules/dagster/dagster/components/lib/executable_component/script_utils.py | {
"start": 520,
"end": 1814
} | class ____(OpSpec):
type: Literal["script"] = "script"
path: str
args: Optional[Union[list[str], str]] = None
@staticmethod
def with_script_stem_as_default_name(
script_runner_spec: "ScriptSpec",
) -> "ScriptSpec":
return script_runner_spec.model_copy(
update={
"name": script_runner_spec.name
if script_runner_spec.name
else Path(script_runner_spec.path).stem
}
)
def invoke_runner(
*, context: Union["AssetExecutionContext", "AssetCheckExecutionContext"], command: list[str]
) -> Sequence["PipesExecutionResult"]:
from dagster._core.pipes.subprocess import PipesSubprocessClient
return (
PipesSubprocessClient()
.run(context=context.op_execution_context, command=command)
.get_results()
)
def get_cmd(script_runner_exe: list[str], spec: ScriptSpec, path: str) -> list[str]:
abs_path = spec.path if os.path.isabs(spec.path) else os.path.join(path, spec.path)
if isinstance(spec.args, str):
return [*script_runner_exe, abs_path, *shlex.split(spec.args)]
elif isinstance(spec.args, list):
return [*script_runner_exe, abs_path, *spec.args]
else:
return [*script_runner_exe, abs_path]
| ScriptSpec |
python | django__django | tests/queries/tests.py | {
"start": 86802,
"end": 87932
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.n1 = Note.objects.create(note="n1", misc="foo", id=1)
e1 = ExtraInfo.objects.create(info="e1", note=cls.n1)
cls.a2 = Author.objects.create(name="a2", num=2002, extra=e1)
def test_ticket8597(self):
# Regression tests for case-insensitive comparisons
item_ab = Item.objects.create(
name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1
)
item_xy = Item.objects.create(
name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1
)
self.assertSequenceEqual(
Item.objects.filter(name__iexact="A_b"),
[item_ab],
)
self.assertSequenceEqual(
Item.objects.filter(name__iexact="x%Y"),
[item_xy],
)
self.assertSequenceEqual(
Item.objects.filter(name__istartswith="A_b"),
[item_ab],
)
self.assertSequenceEqual(
Item.objects.filter(name__iendswith="A_b"),
[item_ab],
)
| ComparisonTests |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 9618,
"end": 9793
} | class ____(SQLAlchemyError):
"""SQLAlchemy was asked to do something it can't do.
This error generally corresponds to runtime state errors.
"""
| InvalidRequestError |
python | mlflow__mlflow | mlflow/cli/genai_eval_utils.py | {
"start": 1108,
"end": 1470
} | class ____:
"""
Container for evaluation results for a single trace.
This dataclass provides structured access to trace evaluation data,
replacing dict-based access for better type safety.
"""
trace_id: str
"""The trace ID"""
assessments: list[Assessment]
"""List of Assessment objects for this trace"""
@dataclass
| EvalResult |
python | pypa__pipenv | pipenv/vendor/tomlkit/items.py | {
"start": 41594,
"end": 45932
} | class ____(AbstractTable):
"""
A table literal.
"""
def __init__(
self,
value: container.Container,
trivia: Trivia,
is_aot_element: bool,
is_super_table: bool | None = None,
name: str | None = None,
display_name: str | None = None,
) -> None:
super().__init__(value, trivia)
self.name = name
self.display_name = display_name
self._is_aot_element = is_aot_element
self._is_super_table = is_super_table
@property
def discriminant(self) -> int:
return 9
def __copy__(self) -> Table:
return type(self)(
self._value.copy(),
self._trivia.copy(),
self._is_aot_element,
self._is_super_table,
self.name,
self.display_name,
)
def append(self, key: Key | str | None, _item: Any) -> Table:
"""
Appends a (key, item) to the table.
"""
if not isinstance(_item, Item):
_item = item(_item, _parent=self)
self._value.append(key, _item)
if isinstance(key, Key):
key = next(iter(key)).key
_item = self._value[key]
if key is not None:
dict.__setitem__(self, key, _item)
m = re.match(r"(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
if not m:
return self
indent = m.group(1)
if not isinstance(_item, Whitespace):
m = re.match("(?s)^([^ ]*)(.*)$", _item.trivia.indent)
if not m:
_item.trivia.indent = indent
else:
_item.trivia.indent = m.group(1) + indent + m.group(2)
return self
def raw_append(self, key: Key | str | None, _item: Any) -> Table:
"""Similar to :meth:`append` but does not copy indentation."""
if not isinstance(_item, Item):
_item = item(_item)
self._value.append(key, _item, validate=False)
if isinstance(key, Key):
key = next(iter(key)).key
_item = self._value[key]
if key is not None:
dict.__setitem__(self, key, _item)
return self
def is_aot_element(self) -> bool:
"""True if the table is the direct child of an AOT element."""
return self._is_aot_element
def is_super_table(self) -> bool:
"""A super table is the intermediate parent of a nested table as in [a.b.c].
If true, it won't appear in the TOML representation."""
if self._is_super_table is not None:
return self._is_super_table
if not self:
return False
# If the table has children and all children are tables, then it is a super table.
for k, child in self.items():
if not isinstance(k, Key):
k = SingleKey(k)
index = self.value._map[k]
if isinstance(index, tuple):
return False
real_key = self.value.body[index][0]
if (
not isinstance(child, (Table, AoT))
or real_key is None
or real_key.is_dotted()
):
return False
return True
def as_string(self) -> str:
return self._value.as_string()
# Helpers
def indent(self, indent: int) -> Table:
"""Indent the table with given number of spaces."""
super().indent(indent)
m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
if not m:
indent_str = ""
else:
indent_str = m.group(1)
for _, item in self._value.body:
if not isinstance(item, Whitespace):
item.trivia.indent = indent_str + item.trivia.indent
return self
def invalidate_display_name(self):
"""Call ``invalidate_display_name`` on the contained tables"""
self.display_name = None
for child in self.values():
if hasattr(child, "invalidate_display_name"):
child.invalidate_display_name()
def _getstate(self, protocol: int = 3) -> tuple:
return (
self._value,
self._trivia,
self._is_aot_element,
self._is_super_table,
self.name,
self.display_name,
)
| Table |
python | kamyu104__LeetCode-Solutions | Python/recover-binary-search-tree.py | {
"start": 693,
"end": 1861
} | class ____(object):
# @param root, a tree node
# @return a tree node
def recoverTree(self, root):
return self.MorrisTraversal(root)
def MorrisTraversal(self, root):
if root is None:
return
broken = [None, None]
pre, cur = None, root
while cur:
if cur.left is None:
self.detectBroken(broken, pre, cur)
pre = cur
cur = cur.right
else:
node = cur.left
while node.right and node.right != cur:
node = node.right
if node.right is None:
node.right =cur
cur = cur.left
else:
self.detectBroken(broken, pre, cur)
node.right = None
pre = cur
cur = cur.right
broken[0].val, broken[1].val = broken[1].val, broken[0].val
return root
def detectBroken(self, broken, pre, cur):
if pre and pre.val > cur.val:
if broken[0] is None:
broken[0] = pre
broken[1] = cur
| Solution |
python | django__django | tests/user_commands/utils.py | {
"start": 52,
"end": 699
} | class ____:
def __init__(self, test, shutil_which_result="nonexistent"):
self.stdout = StringIO()
self.stderr = StringIO()
self.test = test
self.shutil_which_result = shutil_which_result
def __enter__(self):
self.mocker = mock.patch(
"django.core.management.utils.shutil.which",
return_value=self.shutil_which_result,
)
self.mocker.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.mocker.stop()
self.test.assertIn("Formatters failed to launch", self.stderr.getvalue())
| AssertFormatterFailureCaughtContext |
python | doocs__leetcode | solution/1400-1499/1420.Build Array Where You Can Find The Maximum Exactly K Comparisons/Solution.py | {
"start": 0,
"end": 724
} | class ____:
def numOfArrays(self, n: int, m: int, k: int) -> int:
if k == 0:
return 0
dp = [[[0] * (m + 1) for _ in range(k + 1)] for _ in range(n + 1)]
mod = 10**9 + 7
for i in range(1, m + 1):
dp[1][1][i] = 1
for i in range(2, n + 1):
for c in range(1, min(k + 1, i + 1)):
for j in range(1, m + 1):
dp[i][c][j] = dp[i - 1][c][j] * j
for j0 in range(1, j):
dp[i][c][j] += dp[i - 1][c - 1][j0]
dp[i][c][j] %= mod
ans = 0
for i in range(1, m + 1):
ans += dp[n][k][i]
ans %= mod
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/examples/adding_an_op/zero_out_2_test.py | {
"start": 917,
"end": 1812
} | class ____(tf.test.TestCase):
def test(self):
result = zero_out_op_2.zero_out([5, 4, 3, 2, 1])
self.assertAllEqual(result, [5, 0, 0, 0, 0])
def test_2d(self):
result = zero_out_op_2.zero_out([[6, 5, 4], [3, 2, 1]])
self.assertAllEqual(result, [[6, 0, 0], [0, 0, 0]])
def test_grad(self):
x = tf.constant([5, 4, 3, 2, 1], dtype=tf.float32)
theoretical, numerical = tf.test.compute_gradient(zero_out_op_2.zero_out,
tuple([x]))
self.assertAllClose(theoretical, numerical)
def test_grad_2d(self):
x = tf.constant([[6, 5, 4], [3, 2, 1]], dtype=tf.float32)
theoretical, numerical = tf.test.compute_gradient(zero_out_op_2.zero_out,
tuple([x]))
self.assertAllClose(theoretical, numerical)
if __name__ == '__main__':
tf.test.main()
| ZeroOut2Test |
python | numpy__numpy | numpy/distutils/command/build_ext.py | {
"start": 833,
"end": 32979
class ____ (old_build_ext):
    """numpy.distutils version of the distutils ``build_ext`` command.
    Extends the standard command with Fortran 77/90 and C++ compiler
    handling, per-extension language detection and linker selection,
    CPU baseline/dispatch optimization via ``CCompilerOpt``, parallel
    builds, and MSVC/gfortran interoperability workarounds.
    """
    description = "build C/C++/F extensions (compile/link to build directory)"
    user_options = old_build_ext.user_options + [
        ('fcompiler=', None,
         "specify the Fortran compiler type"),
        ('parallel=', 'j',
         "number of parallel jobs"),
        ('warn-error', None,
         "turn all warnings into errors (-Werror)"),
        ('cpu-baseline=', None,
         "specify a list of enabled baseline CPU optimizations"),
        ('cpu-dispatch=', None,
         "specify a list of dispatched CPU optimizations"),
        ('disable-optimization', None,
         "disable CPU optimized code(dispatch,simd,fast...)"),
        ('simd-test=', None,
         "specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
    ]
    help_options = old_build_ext.help_options + [
        ('help-fcompiler', None, "list available Fortran compilers",
         show_fortran_compilers),
    ]
    boolean_options = old_build_ext.boolean_options + ['warn-error', 'disable-optimization']
    def initialize_options(self):
        """Reset all command options to their unset defaults."""
        old_build_ext.initialize_options(self)
        self.fcompiler = None
        self.parallel = None
        self.warn_error = None
        self.cpu_baseline = None
        self.cpu_dispatch = None
        self.disable_optimization = None
        self.simd_test = None
    def finalize_options(self):
        """Validate options and inherit unset values from the 'build' command."""
        if self.parallel:
            try:
                self.parallel = int(self.parallel)
            except ValueError as e:
                raise ValueError("--parallel/-j argument must be an integer") from e
        # Ensure that self.include_dirs and self.distribution.include_dirs
        # refer to the same list object. finalize_options will modify
        # self.include_dirs, but self.distribution.include_dirs is used
        # during the actual build.
        # self.include_dirs is None unless paths are specified with
        # --include-dirs.
        # The include paths will be passed to the compiler in the order:
        # numpy paths, --include-dirs paths, Python include path.
        if isinstance(self.include_dirs, str):
            self.include_dirs = self.include_dirs.split(os.pathsep)
        incl_dirs = self.include_dirs or []
        if self.distribution.include_dirs is None:
            self.distribution.include_dirs = []
        self.include_dirs = self.distribution.include_dirs
        self.include_dirs.extend(incl_dirs)
        old_build_ext.finalize_options(self)
        self.set_undefined_options('build',
                                   ('parallel', 'parallel'),
                                   ('warn_error', 'warn_error'),
                                   ('cpu_baseline', 'cpu_baseline'),
                                   ('cpu_dispatch', 'cpu_dispatch'),
                                   ('disable_optimization', 'disable_optimization'),
                                   ('simd_test', 'simd_test')
                                   )
        CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test
    def run(self):
        """Build all configured extensions.
        Runs build_src (and build_clib if needed), initializes the C,
        C++ and Fortran compilers plus the CPU optimization machinery
        (unless disabled), builds every extension, and copies generated
        extra DLLs next to the packages on Windows.
        """
        if not self.extensions:
            return
        # Make sure that extension sources are complete.
        self.run_command('build_src')
        if self.distribution.has_c_libraries():
            if self.inplace:
                if self.distribution.have_run.get('build_clib'):
                    log.warn('build_clib already run, it is too late to '
                             'ensure in-place build of build_clib')
                    build_clib = self.distribution.get_command_obj(
                        'build_clib')
                else:
                    build_clib = self.distribution.get_command_obj(
                        'build_clib')
                    build_clib.inplace = 1
                    build_clib.ensure_finalized()
                    build_clib.run()
                    self.distribution.have_run['build_clib'] = 1
            else:
                self.run_command('build_clib')
                build_clib = self.get_finalized_command('build_clib')
            self.library_dirs.append(build_clib.build_clib)
        else:
            build_clib = None
        # Not including C libraries to the list of
        # extension libraries automatically to prevent
        # bogus linking commands. Extensions must
        # explicitly specify the C libraries that they use.
        from distutils.ccompiler import new_compiler
        from numpy.distutils.fcompiler import new_fcompiler
        compiler_type = self.compiler
        # Initialize C compiler:
        self.compiler = new_compiler(compiler=compiler_type,
                                     verbose=self.verbose,
                                     dry_run=self.dry_run,
                                     force=self.force)
        self.compiler.customize(self.distribution)
        self.compiler.customize_cmd(self)
        if self.warn_error:
            self.compiler.compiler.append('-Werror')
            self.compiler.compiler_so.append('-Werror')
        self.compiler.show_customization()
        if not self.disable_optimization:
            dispatch_hpath = os.path.join("numpy", "distutils", "include", "npy_cpu_dispatch_config.h")
            dispatch_hpath = os.path.join(self.get_finalized_command("build_src").build_src, dispatch_hpath)
            opt_cache_path = os.path.abspath(
                os.path.join(self.build_temp, 'ccompiler_opt_cache_ext.py')
            )
            if hasattr(self, "compiler_opt"):
                # By default `CCompilerOpt` update the cache at the exit of
                # the process, which may lead to duplicate building
                # (see build_extension()/force_rebuild) if run() called
                # multiple times within the same os process/thread without
                # giving the chance the previous instances of `CCompilerOpt`
                # to update the cache.
                self.compiler_opt.cache_flush()
            self.compiler_opt = new_ccompiler_opt(
                compiler=self.compiler, dispatch_hpath=dispatch_hpath,
                cpu_baseline=self.cpu_baseline, cpu_dispatch=self.cpu_dispatch,
                cache_path=opt_cache_path
            )
            def report(copt):
                # Emit the optimization summary once, at interpreter exit.
                log.info("\n########### EXT COMPILER OPTIMIZATION ###########")
                log.info(copt.report(full=True))
            import atexit
            atexit.register(report, self.compiler_opt)
        # Setup directory for storing generated extra DLL files on Windows
        self.extra_dll_dir = os.path.join(self.build_temp, '.libs')
        if not os.path.isdir(self.extra_dll_dir):
            os.makedirs(self.extra_dll_dir)
        # Create mapping of libraries built by build_clib:
        clibs = {}
        if build_clib is not None:
            for libname, build_info in build_clib.libraries or []:
                if libname in clibs and clibs[libname] != build_info:
                    log.warn('library %r defined more than once,'
                             ' overwriting build_info\n%s... \nwith\n%s...'
                             % (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
                clibs[libname] = build_info
        # .. and distribution libraries:
        for libname, build_info in self.distribution.libraries or []:
            if libname in clibs:
                # build_clib libraries have a precedence before distribution ones
                continue
            clibs[libname] = build_info
        # Determine if C++/Fortran 77/Fortran 90 compilers are needed.
        # Update extension libraries, library_dirs, and macros.
        all_languages = set()
        for ext in self.extensions:
            ext_languages = set()
            c_libs = []
            c_lib_dirs = []
            macros = []
            for libname in ext.libraries:
                if libname in clibs:
                    binfo = clibs[libname]
                    c_libs += binfo.get('libraries', [])
                    c_lib_dirs += binfo.get('library_dirs', [])
                    for m in binfo.get('macros', []):
                        if m not in macros:
                            macros.append(m)
                for l in clibs.get(libname, {}).get('source_languages', []):
                    ext_languages.add(l)
            if c_libs:
                new_c_libs = ext.libraries + c_libs
                log.info('updating extension %r libraries from %r to %r'
                         % (ext.name, ext.libraries, new_c_libs))
                ext.libraries = new_c_libs
                ext.library_dirs = ext.library_dirs + c_lib_dirs
            if macros:
                log.info('extending extension %r defined_macros with %r'
                         % (ext.name, macros))
                ext.define_macros = ext.define_macros + macros
            # determine extension languages
            if has_f_sources(ext.sources):
                ext_languages.add('f77')
            if has_cxx_sources(ext.sources):
                ext_languages.add('c++')
            l = ext.language or self.compiler.detect_language(ext.sources)
            if l:
                ext_languages.add(l)
            # reset language attribute for choosing proper linker
            #
            # When we build extensions with multiple languages, we have to
            # choose a linker. The rules here are:
            # 1. if there is Fortran code, always prefer the Fortran linker,
            # 2. otherwise prefer C++ over C,
            # 3. Users can force a particular linker by using
            #        `language='c'` # or 'c++', 'f90', 'f77'
            #    in their config.add_extension() calls.
            if 'c++' in ext_languages:
                ext_language = 'c++'
            else:
                ext_language = 'c'  # default
            has_fortran = False
            if 'f90' in ext_languages:
                ext_language = 'f90'
                has_fortran = True
            elif 'f77' in ext_languages:
                ext_language = 'f77'
                has_fortran = True
            if not ext.language or has_fortran:
                if l and l != ext_language and ext.language:
                    log.warn('resetting extension %r language from %r to %r.' %
                             (ext.name, l, ext_language))
                ext.language = ext_language
            # global language
            all_languages.update(ext_languages)
        need_f90_compiler = 'f90' in all_languages
        need_f77_compiler = 'f77' in all_languages
        need_cxx_compiler = 'c++' in all_languages
        # Initialize C++ compiler:
        if need_cxx_compiler:
            self._cxx_compiler = new_compiler(compiler=compiler_type,
                                              verbose=self.verbose,
                                              dry_run=self.dry_run,
                                              force=self.force)
            compiler = self._cxx_compiler
            compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
            compiler.customize_cmd(self)
            compiler.show_customization()
            self._cxx_compiler = compiler.cxx_compiler()
        else:
            self._cxx_compiler = None
        # Initialize Fortran 77 compiler:
        if need_f77_compiler:
            ctype = self.fcompiler
            self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
                                               verbose=self.verbose,
                                               dry_run=self.dry_run,
                                               force=self.force,
                                               requiref90=False,
                                               c_compiler=self.compiler)
            fcompiler = self._f77_compiler
            if fcompiler:
                ctype = fcompiler.compiler_type
                fcompiler.customize(self.distribution)
            if fcompiler and fcompiler.get_version():
                fcompiler.customize_cmd(self)
                fcompiler.show_customization()
            else:
                self.warn('f77_compiler=%s is not available.' %
                          (ctype))
                self._f77_compiler = None
        else:
            self._f77_compiler = None
        # Initialize Fortran 90 compiler:
        if need_f90_compiler:
            ctype = self.fcompiler
            self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
                                               verbose=self.verbose,
                                               dry_run=self.dry_run,
                                               force=self.force,
                                               requiref90=True,
                                               c_compiler=self.compiler)
            fcompiler = self._f90_compiler
            if fcompiler:
                ctype = fcompiler.compiler_type
                fcompiler.customize(self.distribution)
            if fcompiler and fcompiler.get_version():
                fcompiler.customize_cmd(self)
                fcompiler.show_customization()
            else:
                self.warn('f90_compiler=%s is not available.' %
                          (ctype))
                self._f90_compiler = None
        else:
            self._f90_compiler = None
        # Build extensions
        self.build_extensions()
        # Copy over any extra DLL files
        # FIXME: In the case where there are more than two packages,
        # we blindly assume that both packages need all of the libraries,
        # resulting in a larger wheel than is required. This should be fixed,
        # but it's so rare that I won't bother to handle it.
        pkg_roots = {
            self.get_ext_fullname(ext.name).split('.')[0]
            for ext in self.extensions
        }
        for pkg_root in pkg_roots:
            shared_lib_dir = os.path.join(pkg_root, '.libs')
            if not self.inplace:
                shared_lib_dir = os.path.join(self.build_lib, shared_lib_dir)
            for fn in os.listdir(self.extra_dll_dir):
                if not os.path.isdir(shared_lib_dir):
                    os.makedirs(shared_lib_dir)
                if not fn.lower().endswith('.dll'):
                    continue
                runtime_lib = os.path.join(self.extra_dll_dir, fn)
                copy_file(runtime_lib, shared_lib_dir)
    def swig_sources(self, sources, extensions=None):
        """Return ``sources`` unchanged (SWIG is handled by build_src)."""
        # Do nothing. Swig sources have been handled in build_src command.
        return sources
    def build_extension(self, ext):
        """Compile and link a single extension module ``ext``.
        Splits sources by language (C/C++/F77/F90 and CPU-dispatchable),
        compiles each group with the appropriate compiler, then links
        with the linker chosen from the extension's language.
        """
        sources = ext.sources
        if sources is None or not is_sequence(sources):
            raise DistutilsSetupError(
                ("in 'ext_modules' option (extension '%s'), "
                 "'sources' must be present and must be "
                 "a list of source filenames") % ext.name)
        sources = list(sources)
        if not sources:
            return
        fullname = self.get_ext_fullname(ext.name)
        if self.inplace:
            modpath = fullname.split('.')
            package = '.'.join(modpath[0:-1])
            base = modpath[-1]
            build_py = self.get_finalized_command('build_py')
            package_dir = build_py.get_package_dir(package)
            ext_filename = os.path.join(package_dir,
                                        self.get_ext_filename(base))
        else:
            ext_filename = os.path.join(self.build_lib,
                                        self.get_ext_filename(fullname))
        depends = sources + ext.depends
        force_rebuild = self.force
        if not self.disable_optimization and not self.compiler_opt.is_cached():
            log.debug("Detected changes on compiler optimizations")
            force_rebuild = True
        if not (force_rebuild or newer_group(depends, ext_filename, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)
        extra_args = ext.extra_compile_args or []
        extra_cflags = getattr(ext, 'extra_c_compile_args', None) or []
        extra_cxxflags = getattr(ext, 'extra_cxx_compile_args', None) or []
        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            macros.append((undef,))
        c_sources, cxx_sources, f_sources, fmodule_sources = \
            filter_sources(ext.sources)
        if self.compiler.compiler_type == 'msvc':
            if cxx_sources:
                # Needed to compile kiva.agg._agg extension.
                extra_args.append('/Zm1000')
                extra_cflags += extra_cxxflags
            # this hack works around the msvc compiler attributes
            # problem, msvc uses its own convention :(
            c_sources += cxx_sources
            cxx_sources = []
        # Set Fortran/C++ compilers for compilation and linking.
        if ext.language == 'f90':
            fcompiler = self._f90_compiler
        elif ext.language == 'f77':
            fcompiler = self._f77_compiler
        else:  # in case ext.language is c++, for instance
            fcompiler = self._f90_compiler or self._f77_compiler
        if fcompiler is not None:
            fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(
                ext, 'extra_f77_compile_args') else []
            fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(
                ext, 'extra_f90_compile_args') else []
        cxx_compiler = self._cxx_compiler
        # check for the availability of required compilers
        if cxx_sources and cxx_compiler is None:
            raise DistutilsError("extension %r has C++ sources"
                                 "but no C++ compiler found" % (ext.name))
        if (f_sources or fmodule_sources) and fcompiler is None:
            raise DistutilsError("extension %r has Fortran sources "
                                 "but no Fortran compiler found" % (ext.name))
        if ext.language in ['f77', 'f90'] and fcompiler is None:
            self.warn("extension %r has Fortran libraries "
                      "but no Fortran linker found, using default linker" % (ext.name))
        if ext.language == 'c++' and cxx_compiler is None:
            self.warn("extension %r has C++ libraries "
                      "but no C++ linker found, using default linker" % (ext.name))
        kws = {'depends': ext.depends}
        output_dir = self.build_temp
        include_dirs = ext.include_dirs + get_numpy_include_dirs()
        # filtering C dispatch-table sources when optimization is not disabled,
        # otherwise treated as normal sources.
        copt_c_sources = []
        copt_cxx_sources = []
        copt_baseline_flags = []
        copt_macros = []
        if not self.disable_optimization:
            bsrc_dir = self.get_finalized_command("build_src").build_src
            dispatch_hpath = os.path.join("numpy", "distutils", "include")
            dispatch_hpath = os.path.join(bsrc_dir, dispatch_hpath)
            include_dirs.append(dispatch_hpath)
            # copt_build_src = None if self.inplace else bsrc_dir
            # Always generate the generated config files and
            # dispatch-able sources inside the build directory,
            # even if the build option `inplace` is enabled.
            # This approach prevents conflicts with Meson-generated
            # config headers. Since `spin build --clean` will not remove
            # these headers, they might overwrite the generated Meson headers,
            # causing compatibility issues. Maintaining separate directories
            # ensures compatibility between distutils dispatch config headers
            # and Meson headers, avoiding build disruptions.
            # See gh-24450 for more details.
            copt_build_src = bsrc_dir
            for _srcs, _dst, _ext in (
                ((c_sources,), copt_c_sources, ('.dispatch.c',)),
                ((c_sources, cxx_sources), copt_cxx_sources,
                    ('.dispatch.cpp', '.dispatch.cxx'))
            ):
                for _src in _srcs:
                    _dst += [
                        _src.pop(_src.index(s))
                        for s in _src[:] if s.endswith(_ext)
                    ]
            copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
        else:
            copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
        c_objects = []
        if copt_cxx_sources:
            log.info("compiling C++ dispatch-able sources")
            c_objects += self.compiler_opt.try_dispatch(
                copt_cxx_sources,
                output_dir=output_dir,
                src_dir=copt_build_src,
                macros=macros + copt_macros,
                include_dirs=include_dirs,
                debug=self.debug,
                extra_postargs=extra_args + extra_cxxflags,
                ccompiler=cxx_compiler,
                **kws
            )
        if copt_c_sources:
            log.info("compiling C dispatch-able sources")
            c_objects += self.compiler_opt.try_dispatch(
                copt_c_sources,
                output_dir=output_dir,
                src_dir=copt_build_src,
                macros=macros + copt_macros,
                include_dirs=include_dirs,
                debug=self.debug,
                extra_postargs=extra_args + extra_cflags,
                **kws)
        if c_sources:
            log.info("compiling C sources")
            c_objects += self.compiler.compile(
                c_sources,
                output_dir=output_dir,
                macros=macros + copt_macros,
                include_dirs=include_dirs,
                debug=self.debug,
                extra_postargs=(extra_args + copt_baseline_flags +
                                extra_cflags),
                **kws)
        if cxx_sources:
            log.info("compiling C++ sources")
            c_objects += cxx_compiler.compile(
                cxx_sources,
                output_dir=output_dir,
                macros=macros + copt_macros,
                include_dirs=include_dirs,
                debug=self.debug,
                extra_postargs=(extra_args + copt_baseline_flags +
                                extra_cxxflags),
                **kws)
        extra_postargs = []
        f_objects = []
        if fmodule_sources:
            log.info("compiling Fortran 90 module sources")
            module_dirs = ext.module_dirs[:]
            module_build_dir = os.path.join(
                self.build_temp, os.path.dirname(
                    self.get_ext_filename(fullname)))
            self.mkpath(module_build_dir)
            if fcompiler.module_dir_switch is None:
                existing_modules = glob('*.mod')
            extra_postargs += fcompiler.module_options(
                module_dirs, module_build_dir)
            f_objects += fcompiler.compile(fmodule_sources,
                                           output_dir=self.build_temp,
                                           macros=macros,
                                           include_dirs=include_dirs,
                                           debug=self.debug,
                                           extra_postargs=extra_postargs,
                                           depends=ext.depends)
            if fcompiler.module_dir_switch is None:
                # Compiler drops .mod files in the cwd; move only the ones
                # this compilation created into the module build directory.
                for f in glob('*.mod'):
                    if f in existing_modules:
                        continue
                    t = os.path.join(module_build_dir, f)
                    if os.path.abspath(f) == os.path.abspath(t):
                        continue
                    if os.path.isfile(t):
                        os.remove(t)
                    try:
                        self.move_file(f, module_build_dir)
                    except DistutilsFileError:
                        log.warn('failed to move %r to %r' %
                                 (f, module_build_dir))
        if f_sources:
            log.info("compiling Fortran sources")
            f_objects += fcompiler.compile(f_sources,
                                           output_dir=self.build_temp,
                                           macros=macros,
                                           include_dirs=include_dirs,
                                           debug=self.debug,
                                           extra_postargs=extra_postargs,
                                           depends=ext.depends)
        if f_objects and not fcompiler.can_ccompiler_link(self.compiler):
            unlinkable_fobjects = f_objects
            objects = c_objects
        else:
            unlinkable_fobjects = []
            objects = c_objects + f_objects
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []
        libraries = self.get_libraries(ext)[:]
        library_dirs = ext.library_dirs[:]
        linker = self.compiler.link_shared_object
        # Always use system linker when using MSVC compiler.
        if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
            # expand libraries with fcompiler libraries as we are
            # not using fcompiler linker
            self._libs_with_msvc_and_fortran(
                fcompiler, libraries, library_dirs)
            if ext.runtime_library_dirs:
                # gcc adds RPATH to the link. On windows, copy the dll into
                # self.extra_dll_dir instead.
                for d in ext.runtime_library_dirs:
                    for f in glob(d + '/*.dll'):
                        copy_file(f, self.extra_dll_dir)
                ext.runtime_library_dirs = []
        elif ext.language in ['f77', 'f90'] and fcompiler is not None:
            linker = fcompiler.link_shared_object
        if ext.language == 'c++' and cxx_compiler is not None:
            linker = cxx_compiler.link_shared_object
        if fcompiler is not None:
            objects, libraries = self._process_unlinkable_fobjects(
                objects, libraries,
                fcompiler, library_dirs,
                unlinkable_fobjects)
        linker(objects, ext_filename,
               libraries=libraries,
               library_dirs=library_dirs,
               runtime_library_dirs=ext.runtime_library_dirs,
               extra_postargs=extra_args,
               export_symbols=self.get_export_symbols(ext),
               debug=self.debug,
               build_temp=self.build_temp,
               target_lang=ext.language)
    def _add_dummy_mingwex_sym(self, c_sources):
        """Compile the gfortran/VS2003 hack source into a static helper
        library ``_gfortran_workaround`` placed in the build_clib dir."""
        build_src = self.get_finalized_command("build_src").build_src
        build_clib = self.get_finalized_command("build_clib").build_clib
        objects = self.compiler.compile([os.path.join(build_src,
                                                      "gfortran_vs2003_hack.c")],
                                        output_dir=self.build_temp)
        self.compiler.create_static_lib(
            objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
    def _process_unlinkable_fobjects(self, objects, libraries,
                                     fcompiler, library_dirs,
                                     unlinkable_fobjects):
        """Expand '.fobjects'/'.cobjects' fake static libraries into real
        object lists and wrap Fortran objects the C compiler cannot link.
        Returns the updated ``(objects, libraries)`` pair (input lists are
        copied, not mutated).
        """
        libraries = list(libraries)
        objects = list(objects)
        unlinkable_fobjects = list(unlinkable_fobjects)
        # Expand possible fake static libraries to objects;
        # make sure to iterate over a copy of the list as
        # "fake" libraries will be removed as they are
        # encountered
        for lib in libraries[:]:
            for libdir in library_dirs:
                fake_lib = os.path.join(libdir, lib + '.fobjects')
                if os.path.isfile(fake_lib):
                    # Replace fake static library
                    libraries.remove(lib)
                    with open(fake_lib) as f:
                        unlinkable_fobjects.extend(f.read().splitlines())
                    # Expand C objects
                    c_lib = os.path.join(libdir, lib + '.cobjects')
                    with open(c_lib) as f:
                        objects.extend(f.read().splitlines())
        # Wrap unlinkable objects to a linkable one
        if unlinkable_fobjects:
            fobjects = [os.path.abspath(obj) for obj in unlinkable_fobjects]
            wrapped = fcompiler.wrap_unlinkable_objects(
                    fobjects, output_dir=self.build_temp,
                    extra_dll_dir=self.extra_dll_dir)
            objects.extend(wrapped)
        return objects, libraries
    def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
                                    c_library_dirs):
        """Adapt library lists in place so MSVC can link against
        g77/gfortran-built static libs (copies ``lib<name>.a`` to
        ``<name>.lib`` in build_temp where needed)."""
        if fcompiler is None:
            return
        for libname in c_libraries:
            if libname.startswith('msvc'):
                continue
            fileexists = False
            for libdir in c_library_dirs or []:
                libfile = os.path.join(libdir, '%s.lib' % (libname))
                if os.path.isfile(libfile):
                    fileexists = True
                    break
            if fileexists:
                continue
            # make g77-compiled static libs available to MSVC
            fileexists = False
            for libdir in c_library_dirs:
                libfile = os.path.join(libdir, 'lib%s.a' % (libname))
                if os.path.isfile(libfile):
                    # copy libname.a file to name.lib so that MSVC linker
                    # can find it
                    libfile2 = os.path.join(self.build_temp, libname + '.lib')
                    copy_file(libfile, libfile2)
                    if self.build_temp not in c_library_dirs:
                        c_library_dirs.append(self.build_temp)
                    fileexists = True
                    break
            if fileexists:
                continue
            log.warn('could not find library %r in directories %s'
                     % (libname, c_library_dirs))
        # Always use system linker when using MSVC compiler.
        f_lib_dirs = []
        for dir in fcompiler.library_dirs:
            # correct path when compiling in Cygwin but with normal Win
            # Python
            if dir.startswith('/usr/lib'):
                try:
                    dir = subprocess.check_output(['cygpath', '-w', dir])
                except (OSError, subprocess.CalledProcessError):
                    pass
                else:
                    dir = filepath_from_subprocess_output(dir)
            f_lib_dirs.append(dir)
        c_library_dirs.extend(f_lib_dirs)
        # make g77-compiled static libs available to MSVC
        for lib in fcompiler.libraries:
            if not lib.startswith('msvc'):
                c_libraries.append(lib)
                p = combine_paths(f_lib_dirs, 'lib' + lib + '.a')
                if p:
                    dst_name = os.path.join(self.build_temp, lib + '.lib')
                    if not os.path.isfile(dst_name):
                        copy_file(p[0], dst_name)
                    if self.build_temp not in c_library_dirs:
                        c_library_dirs.append(self.build_temp)
    def get_source_files(self):
        """Return the list of source files for all extensions."""
        self.check_extensions_list(self.extensions)
        filenames = []
        for ext in self.extensions:
            filenames.extend(get_ext_source_files(ext))
        return filenames
    def get_outputs(self):
        """Return the output paths of the built extension modules."""
        self.check_extensions_list(self.extensions)
        outputs = []
        for ext in self.extensions:
            if not ext.sources:
                continue
            fullname = self.get_ext_fullname(ext.name)
            outputs.append(os.path.join(self.build_lib,
                                        self.get_ext_filename(fullname)))
        return outputs
| build_ext |
python | PrefectHQ__prefect | src/prefect/events/actions.py | {
"start": 787,
"end": 1928
class ____(Action):
    """Base class for Actions that operate on Deployments and need to infer them from
    events"""

    source: Literal["selected", "inferred"] = Field(
        "selected",
        description=(
            "Whether this Action applies to a specific selected "
            "deployment (given by `deployment_id`), or to a deployment that is "
            "inferred from the triggering event. If the source is 'inferred', "
            "the `deployment_id` may not be set. If the source is 'selected', the "
            "`deployment_id` must be set."
        ),
    )
    deployment_id: Optional[UUID] = Field(
        None, description="The identifier of the deployment"
    )

    @model_validator(mode="after")
    def selected_deployment_requires_id(self):
        """Enforce that 'selected' comes with a deployment_id and 'inferred' does not."""
        has_id = bool(self.deployment_id)
        if self.source == "selected" and not has_id:
            raise ValueError("deployment_id is required")
        if self.source != "selected" and has_id:
            raise ValueError("deployment_id is not allowed")
        return self
| DeploymentAction |
python | ray-project__ray | rllib/algorithms/tests/test_env_runner_failures.py | {
"start": 1015,
"end": 1565
class ____:
    """Remote counter service that survives restarts."""

    def __init__(self):
        # Start from an empty tally.
        self.reset()

    def _key(self, eval, worker_index, vector_index):
        """Build the tally key for one (eval, worker, vector) slot."""
        return f"{eval}:{worker_index}:{vector_index}"

    def increment(self, eval, worker_index, vector_index):
        """Bump the count for the given slot by one."""
        slot = self._key(eval, worker_index, vector_index)
        self.counter[slot] += 1

    def get(self, eval, worker_index, vector_index):
        """Return the current count for the given slot (0 if unseen)."""
        slot = self._key(eval, worker_index, vector_index)
        return self.counter[slot]

    def reset(self):
        """Discard all counts."""
        self.counter = defaultdict(int)
| Counter |
python | conda__conda | conda/exceptions.py | {
"start": 40153,
"end": 40321
class ____(CondaError):
    """Error type for conda environment operations; delegates to CondaError."""

    def __init__(self, message: str, *args, **kwargs):
        # Coerce to a plain string before handing off to CondaError.
        msg = str(message)
        super().__init__(msg, *args, **kwargs)
| CondaEnvException |
python | tensorflow__tensorflow | tensorflow/compiler/tests/image_ops_jit_compile_test.py | {
"start": 1192,
"end": 2314
class ____(xla_test.XLATestCase):
  """Checks that image-op gradients can be XLA-compiled."""

  def testGradImageResize(self):
    """Tests that the gradient of image.resize is compilable."""
    with ops.device("device:{}:0".format(self.device)):
      img_width = 2048
      var = variables.Variable(array_ops.ones(1, dtype=dtypes.float32))

      def model(inputs):
        scaled = var * inputs
        return image_ops.resize_images(
            scaled,
            size=[img_width, img_width],
            method=image_ops.ResizeMethod.BILINEAR)

      def train(inputs, targets):
        with backprop.GradientTape() as tape:
          predictions = model(inputs)
          loss_value = math_ops.reduce_mean((targets - predictions)**2)
        grads = tape.gradient(loss_value, [var])
        return grads

      compiled_train = def_function.function(train, jit_compile=True)
      half_size = array_ops.zeros((1, img_width // 2, img_width // 2, 1),
                                  dtype=dtypes.float32)
      full_size = array_ops.zeros((1, img_width, img_width, 1),
                                  dtype=dtypes.float32)
      # Eager and jit-compiled gradients must agree.
      self.assertAllClose(train(half_size, full_size),
                          compiled_train(half_size, full_size))
# When run directly, enable eager execution before starting the tests.
if __name__ == "__main__":
  ops.enable_eager_execution()
  test.main()
| ImageOpsTest |
python | PrefectHQ__prefect | tests/server/schemas/test_core.py | {
"start": 4035,
"end": 5572
class ____:
    class OldTaskRunPolicy(PrefectBaseModel):
        # Schemas ignore extras during normal execution, but raise errors
        # during tests if not explicitly ignored.
        model_config = ConfigDict(extra="ignore")
        max_retries: int = 0
        retry_delay_seconds: float = 0

    async def test_task_run_policy_is_backwards_compatible(self):
        """Clients at version 2.1.1 and earlier required `max_retries` and
        `retry_delay_seconds` on TaskRunPolicy; both are now deprecated.
        Verify an old-style client schema can still load a new policy.
        Remove once the deprecated properties are removed.
        """
        fresh_policy = schemas.core.TaskRunPolicy()
        # Loading into the old schema must not raise.
        self.OldTaskRunPolicy(**fresh_policy.model_dump())

    async def test_flow_run_policy_populates_new_properties_from_deprecated(self):
        """Clients at version 2.1.1 and earlier required `max_retries` and
        `retry_delay_seconds` on TaskRunPolicy; both are now deprecated.
        Verify new servers populate the replacement fields from old-style
        payloads. Remove once the deprecated properties are removed.
        """
        legacy = self.OldTaskRunPolicy(max_retries=1, retry_delay_seconds=2)
        migrated = schemas.core.TaskRunPolicy(**legacy.model_dump())
        assert migrated.retries == 1
        assert migrated.retry_delay == 2
| TestTaskRunPolicy |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 11149,
"end": 12704
class ____(TypedDict, total=False):
    # Schema dict for a "plain" serializer function. Only `type` and
    # `function` are required (marked with `Required[...]`); the remaining
    # keys fall back to the defaults noted beside each field.
    type: Required[Literal['function-plain']]
    function: Required[SerializerFunction]
    is_field_serializer: bool  # default False
    info_arg: bool  # default False
    return_schema: CoreSchema  # if omitted, AnySchema is used
    when_used: WhenUsed  # default: 'always'
def plain_serializer_function_ser_schema(
    function: SerializerFunction,
    *,
    is_field_serializer: bool | None = None,
    info_arg: bool | None = None,
    return_schema: CoreSchema | None = None,
    when_used: WhenUsed = 'always',
) -> PlainSerializerFunctionSerSchema:
    """
    Build a ``function-plain`` serialization schema wrapping ``function``,
    which may be either a "general" or a "field" serializer.

    Args:
        function: The callable used to serialize the value.
        is_field_serializer: Whether this serializes a field, i.e. the function
            takes ``model`` as its first argument and ``info`` includes ``field_name``.
        info_arg: Whether the function accepts an ``info`` argument.
        return_schema: Optional schema used to serialize the return value.
        when_used: When the serializer should run; defaults to ``'always'``.
    """
    # 'always' is the default defined on the rust side, so omit it to avoid
    # extra elements in the schema.
    effective_when_used = None if when_used == 'always' else when_used
    return _dict_not_none(
        type='function-plain',
        function=function,
        is_field_serializer=is_field_serializer,
        info_arg=info_arg,
        return_schema=return_schema,
        when_used=effective_when_used,
    )
| PlainSerializerFunctionSerSchema |
python | keras-team__keras | keras/src/initializers/random_initializers.py | {
"start": 12655,
"end": 14292
class ____(VarianceScaling):
    """The Glorot normal initializer, also called Xavier normal initializer.

    Samples come from a truncated normal distribution centered on 0 with
    `stddev = sqrt(2 / (fan_in + fan_out))`, where `fan_in` and `fan_out`
    are the number of input and output units in the weight tensor.

    Examples:

    >>> # Standalone usage:
    >>> initializer = GlorotNormal()
    >>> values = initializer(shape=(2, 2))

    >>> # Usage in a Keras layer:
    >>> initializer = GlorotNormal()
    >>> layer = Dense(3, kernel_initializer=initializer)

    Args:
        seed: A Python integer or instance of `keras.backend.SeedGenerator`.
            Used to make the behavior of the initializer deterministic. An
            integer or `None` (unseeded) produces the same random values
            across calls; pass a `keras.backend.SeedGenerator` instance to
            get different values per call.

    Reference:
        - [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    """

    def __init__(self, seed=None):
        # Glorot/Xavier: unit scale averaged over fan-in and fan-out,
        # drawn from a truncated normal distribution.
        super().__init__(
            scale=1.0,
            mode="fan_avg",
            distribution="truncated_normal",
            seed=seed,
        )

    def get_config(self):
        """Return the serializable configuration (only the seed)."""
        config = {
            "seed": serialization_lib.serialize_keras_object(self._init_seed)
        }
        return config
@keras_export(
[
"keras.initializers.LecunNormal",
"keras.initializers.lecun_normal",
]
)
| GlorotNormal |
python | davidhalter__jedi | jedi/api/environment.py | {
"start": 4489,
"end": 4559
class ____(_SameEnvironmentMixin, Environment):
    # All behavior comes from the mixin and the Environment base class;
    # this class only combines them under one name.
    pass
| SameEnvironment |
python | joke2k__faker | faker/providers/phone_number/uz_UZ/__init__.py | {
"start": 49,
"end": 369
class ____(PhoneNumberProvider):
    # Uzbek (uz_UZ) phone number formats: the +998 country code followed by
    # a two-digit operator code and a seven-digit subscriber number, in
    # several common punctuation variants. Each '#' is replaced with a
    # random digit when a number is generated.
    formats = (
        "+998 (##) ###-##-##",
        "+998 (##) ### ## ##",
        "+998 (##) ### ####",
        "+998 (##) ###-####",
        "+998 ## ###-##-##",
        "+998 ## ### ## ##",
        "+998 ## ### ####",
        "+998 ## ###-####",
        "+998#########",
    )
python | realpython__materials | python-protocol/animals_v1.py | {
"start": 197,
"end": 280
class ____(Animal):
    def bark(self):
        """Print a short barking message naming this animal."""
        message = f"{self.name} is barking."
        print(message)
| Dog |
python | openai__openai-python | src/openai/types/beta/threads/image_url_delta_block.py | {
"start": 267,
"end": 484
class ____(BaseModel):
    # Streaming delta for an "image_url" content part of a thread message;
    # `image_url` may be absent in a given delta chunk.
    index: int
    """The index of the content part in the message."""
    type: Literal["image_url"]
    """Always `image_url`."""
    image_url: Optional[ImageURLDelta] = None
| ImageURLDeltaBlock |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/dtlink5/package.py | {
"start": 217,
"end": 453
class ____(Package):
    """Simple package which acts as a link dependency"""
    # Placeholder metadata for Spack's mock repository: the URL and md5
    # are fake and exist only to exercise the packaging machinery.
    homepage = "http://www.example.com"
    url = "http://www.example.com/dtlink5-1.0.tar.gz"
    version("1.0", md5="0123456789abcdef0123456789abcdef")
| Dtlink5 |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-jira-issue/llama_index/tools/jira_issue/base.py | {
"start": 168,
"end": 10296
} | class ____(BaseToolSpec):
"""Atlassian Jira Issue Tool Spec."""
spec_functions = [
"search_issues",
"create_issue",
"add_comment_to_issue",
"update_issue_summary",
"update_issue_assignee",
"update_issue_status",
"update_issue_due_date",
"delete_issue",
]
def __init__(
self,
email: str = os.environ.get("JIRA_ACCOUNT_EMAIL", ""),
api_key: Optional[str] = os.environ.get("JIRA_API_KEY", ""),
server_url: Optional[str] = os.environ.get("JIRA_SERVER_URL", ""),
) -> None:
if email and api_key and server_url:
self.jira = JIRA(
basic_auth=(email, api_key),
server=server_url,
)
else:
raise Exception("Please provide Jira credentials to continue.")
def search_issues(self, jql_str: str) -> Dict[str, Any]:
"""
Search for JIRA issues using JQL.
Args:
jql_str (str): JQL query string to search for issues.
Returns:
Dict[str, Any]: A dictionary containing the search results or error message.
"""
try:
issues = self.jira.search_issues(jql_str)
if issues:
return {
"error": False,
"message": "Issues found",
"issues": [
{
"key": issue.key,
"summary": issue.fields.summary,
"status": issue.fields.status.name,
"assignee": issue.fields.assignee.displayName
if issue.fields.assignee
else None,
}
for issue in issues
],
}
else:
return {
"error": True,
"message": "No issues found.",
}
except Exception as e:
return {
"error": True,
"message": f"Failed to search issues: {e!s}",
}
def create_issue(
self,
project_key: str = "KAN",
summary: str = "New Issue",
description: Optional[str] = None,
issue_type: Literal["Task", "Bug", "Epic"] = "Task",
) -> Dict[str, Any]:
"""
Create a new JIRA issue.
Args:
project_key (str): The key of the project to create the issue in (default is "KAN").
summary (str): The summary of the new issue (default is "New Issue").
description (Optional[str]): The description of the new issue.
issue_type (str): The type of the issue to create, can be "Task", "Bug", or "Epic" (default is "Task").
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
new_issue = self.jira.create_issue(
project=project_key,
summary=summary,
description=description,
issuetype={"name": issue_type},
)
return {
"error": False,
"message": f"Issue {new_issue.key} created successfully.",
"issue_key": new_issue.key,
}
except Exception as e:
return {
"error": True,
"message": f"Failed to create new issue: {e!s}",
}
def add_comment_to_issue(self, issue_key: str, comment: str) -> Dict[str, Any]:
"""
Add a comment to a JIRA issue.
Args:
issue_key (str): The key of the JIRA issue to comment on.
comment (str): The comment text to add.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
issue = self.jira.issue(issue_key)
self.jira.add_comment(issue, comment)
return {"error": False, "message": f"Comment added to issue {issue_key}."}
except Exception as e:
return {
"error": True,
"message": f"Failed to add comment to issue {issue_key}: {e!s}",
}
def update_issue_summary(
self, issue_key: str, new_summary: str, notify: bool = False
) -> Dict[str, Any]:
"""
Update the summary of a JIRA issue.
Args:
issue_key (str): The key of the JIRA issue to update.
new_summary (str): The new summary text for the issue.
notify (bool): Whether to email watchers of the issue about the update.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
issue = self.jira.issue(issue_key)
issue.update(summary=new_summary, notify=notify)
return {"error": False, "message": f"Issue {issue_key} summary updated."}
except Exception as e:
return {
"error": True,
"message": f"Failed to update issue {issue_key}: {e!s}",
}
def update_issue_assignee(self, issue_key, assignee_full_name):
"""
Update the assignee of the Jira issue using the assignee's full name.
Args:
issue_key (str): The key of the Jira issue to update.
assignee_full_name (str): The full name of the user to assign the issue to.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
# Search for users by display name
users = self.jira.search_users(query=assignee_full_name)
# Find exact match for the full name
target_user = None
for user in users:
if user.displayName.lower() == assignee_full_name.lower():
target_user = user
break
if not target_user:
return {
"error": True,
"message": f"User with full name '{assignee_full_name}' not found",
}
# Get the issue
issue = self.jira.issue(issue_key)
issue.update(assignee={"accountId": target_user.accountId})
return {
"error": False,
"message": f"Issue {issue_key} successfully assigned to {assignee_full_name}",
}
except Exception as e:
return {
"error": True,
"message": f"An error occurred while updating the assignee: {e!s}",
}
def update_issue_status(
self, issue_key: str, new_status: Literal["To Do", "In Progress", "Done"]
) -> Dict[str, Any]:
"""
Update the status of a JIRA issue.
Args:
issue_key (str): The key of the JIRA issue to update.
new_status (str): The new status to set for the issue.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
issue = self.jira.issue(issue_key)
transitions = self.jira.transitions(issue)
transition_id = next(
(t["id"] for t in transitions if t["name"] == new_status), None
)
if transition_id:
self.jira.transition_issue(issue, transition_id)
return {
"error": False,
"message": f"Issue {issue_key} status updated to {new_status}.",
}
else:
available_statuses = [t["name"] for t in transitions]
return {
"error": True,
"message": f"Status '{new_status}' not available for issue {issue_key}. Available transitions: {available_statuses}",
}
except Exception as e:
return {
"error": True,
"message": f"Failed to update status for issue {issue_key}: {e!s}",
}
def update_issue_due_date(
self, issue_key: str, due_date: Optional[str] = None
) -> Dict[str, Any]:
"""
Update the due date of a JIRA issue.
Args:
issue_key (str): The key of the JIRA issue to update.
due_date (Optional[str]): The new due date in 'YYYY-MM-DD' format.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
if due_date:
try:
from datetime import datetime
datetime.strptime(due_date, "%Y-%m-%d")
except ValueError:
return {
"error": True,
"message": "Invalid date format. Use YYYY-MM-DD.",
}
try:
issue = self.jira.issue(issue_key)
issue.update(duedate=due_date)
return {
"error": False,
"message": f"Issue {issue_key} due date {'updated' if due_date else 'cleared'}.",
}
except Exception as e:
return {
"error": True,
"message": f"Failed to update due date for issue {issue_key}: {e!s}",
}
def delete_issue(self, issue_key: str) -> Dict[str, Any]:
"""
Delete a JIRA issue.
Args:
issue_key (str): The key of the JIRA issue to delete.
Returns:
Dict[str, Any]: A dictionary indicating success or failure of the operation.
"""
try:
issue = self.jira.issue(issue_key)
issue.delete()
return {
"error": False,
"message": f"Issue {issue_key} deleted successfully.",
}
except Exception as e:
return {
"error": True,
"message": f"Failed to delete issue {issue_key}: {e!s}",
}
| JiraIssueToolSpec |
python | ray-project__ray | python/ray/tests/accelerators/test_rbln.py | {
"start": 972,
"end": 2749
} | class ____:
def test_get_resource_name(self):
assert RBLNAcceleratorManager.get_resource_name() == "RBLN"
def test_get_visible_accelerator_ids_env_var(self):
assert (
RBLNAcceleratorManager.get_visible_accelerator_ids_env_var()
== RBLN_RT_VISIBLE_DEVICES_ENV_VAR
)
def test_get_current_process_visible_accelerator_ids(self):
os.environ[RBLN_RT_VISIBLE_DEVICES_ENV_VAR] = "0,1,2,3"
assert RBLNAcceleratorManager.get_current_process_visible_accelerator_ids() == [
"0",
"1",
"2",
"3",
]
os.environ[RBLN_RT_VISIBLE_DEVICES_ENV_VAR] = ""
assert (
RBLNAcceleratorManager.get_current_process_visible_accelerator_ids() == []
)
os.environ.pop(RBLN_RT_VISIBLE_DEVICES_ENV_VAR)
assert (
RBLNAcceleratorManager.get_current_process_visible_accelerator_ids() is None
)
def test_get_current_node_num_accelerators(self):
assert RBLNAcceleratorManager.get_current_node_num_accelerators() == 4
def test_get_current_node_accelerator_type(self):
assert RBLNAcceleratorManager.get_current_node_accelerator_type() == "RBLN-CA02"
def test_set_current_process_visible_accelerator_ids(self):
RBLNAcceleratorManager.set_current_process_visible_accelerator_ids(["0", "1"])
assert os.environ[RBLN_RT_VISIBLE_DEVICES_ENV_VAR] == "0,1"
os.environ[NOSET_RBLN_RT_VISIBLE_DEVICES_ENV_VAR] = "1"
RBLNAcceleratorManager.set_current_process_visible_accelerator_ids(["2", "3"])
assert os.environ[RBLN_RT_VISIBLE_DEVICES_ENV_VAR] == "0,1"
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| TestRBLNAcceleratorManager |
python | pyinstaller__pyinstaller | PyInstaller/depend/imphookapi.py | {
"start": 1130,
"end": 8031
} | class ____:
"""
Metadata communicating changes made by the current **pre-safe import module hook** (i.e., hook run immediately
_before_ a call to `ModuleGraph._safe_import_module()` recursively adding the hooked module, package,
or C extension and all transitive imports thereof to the module graph) back to PyInstaller.
Pre-safe import module hooks _must_ define a `pre_safe_import_module()` function accepting an instance of this
class, whose attributes describe the subsequent `ModuleGraph._safe_import_module()` call creating the hooked
module's graph node.
Each pre-safe import module hook is run _only_ on the first attempt to create the hooked module's graph node and
then subsequently ignored. If this hook successfully creates that graph node, the subsequent
`ModuleGraph._safe_import_module()` call will observe this fact and silently return without attempting to
recreate that graph node.
Pre-safe import module hooks are typically used to create graph nodes for **runtime modules** (i.e.,
modules dynamically defined at runtime). Most modules are physically defined in external `.py`-suffixed scripts.
Some modules, however, are dynamically defined at runtime (e.g., `six.moves`, dynamically defined by the
physically defined `six.py` module). However, `ModuleGraph` only parses `import` statements residing in external
scripts. `ModuleGraph` is _not_ a full-fledged, Turing-complete Python interpreter and hence has no means of
parsing `import` statements performed by runtime modules existing only in-memory.
'With great power comes great responsibility.'
Attributes (Immutable)
----------------------------
The following attributes are **immutable** (i.e., read-only). For safety, any attempts to change these attributes
_will_ result in a raised exception:
module_graph : PyiModuleGraph
Current module graph.
parent_package : Package
Graph node for the package providing this module _or_ `None` if this module is a top-level module.
Attributes (Mutable)
-----------------------------
The following attributes are editable.
module_basename : str
Unqualified name of the module to be imported (e.g., `text`).
module_name : str
Fully-qualified name of this module (e.g., `email.mime.text`).
"""
def __init__(self, module_graph, module_basename, module_name, parent_package):
self._module_graph = module_graph
self.module_basename = module_basename
self.module_name = module_name
self._parent_package = parent_package
# Immutable properties. No corresponding setters are defined.
@property
def module_graph(self):
"""
Current module graph.
"""
return self._module_graph
@property
def parent_package(self):
"""
Parent Package of this node.
"""
return self._parent_package
def add_runtime_module(self, module_name):
"""
Add a graph node representing a non-package Python module with the passed name dynamically defined at runtime.
Most modules are statically defined on-disk as standard Python files. Some modules, however, are dynamically
defined in-memory at runtime (e.g., `gi.repository.Gst`, dynamically defined by the statically defined
`gi.repository.__init__` module).
This method adds a graph node representing such a runtime module. Since this module is _not_ a package,
all attempts to import submodules from this module in `from`-style import statements (e.g., the `queue`
submodule in `from six.moves import queue`) will be silently ignored. To circumvent this, simply call
`add_runtime_package()` instead.
Parameters
----------
module_name : str
Fully-qualified name of this module (e.g., `gi.repository.Gst`).
Examples
----------
This method is typically called by `pre_safe_import_module()` hooks, e.g.:
def pre_safe_import_module(api):
api.add_runtime_module(api.module_name)
"""
self._module_graph.add_module(RuntimeModule(module_name))
def add_runtime_package(self, package_name):
"""
Add a graph node representing a non-namespace Python package with the passed name dynamically defined at
runtime.
Most packages are statically defined on-disk as standard subdirectories containing `__init__.py` files. Some
packages, however, are dynamically defined in-memory at runtime (e.g., `six.moves`, dynamically defined by
the statically defined `six` module).
This method adds a graph node representing such a runtime package. All attributes imported from this package
in `from`-style import statements that are submodules of this package (e.g., the `queue` submodule in `from
six.moves import queue`) will be imported rather than ignored.
Parameters
----------
package_name : str
Fully-qualified name of this package (e.g., `six.moves`).
Examples
----------
This method is typically called by `pre_safe_import_module()` hooks, e.g.:
def pre_safe_import_module(api):
api.add_runtime_package(api.module_name)
"""
self._module_graph.add_module(RuntimePackage(package_name))
def add_alias_module(self, real_module_name, alias_module_name):
"""
Alias the source module to the target module with the passed names.
This method ensures that the next call to findNode() given the target module name will resolve this alias.
This includes importing and adding a graph node for the source module if needed as well as adding a reference
from the target to the source module.
Parameters
----------
real_module_name : str
Fully-qualified name of the **existing module** (i.e., the module being aliased).
alias_module_name : str
Fully-qualified name of the **non-existent module** (i.e., the alias to be created).
"""
self._module_graph.alias_module(real_module_name, alias_module_name)
def append_package_path(self, directory):
"""
Modulegraph does a good job at simulating Python's, but it cannot handle packagepath `__path__` modifications
packages make at runtime.
Therefore there is a mechanism whereby you can register extra paths in this map for a package, and it will be
honored.
Parameters
----------
directory : str
Absolute or relative path of the directory to be appended to this package's `__path__` attribute.
"""
self._module_graph.append_package_path(self.module_name, directory)
| PreSafeImportModuleAPI |
python | django__django | tests/defer/models.py | {
"start": 418,
"end": 648
} | class ____(models.Model):
name = models.CharField(max_length=50)
value = models.CharField(max_length=50)
related = models.OneToOneField(
Secondary, models.CASCADE, related_name="primary_o2o"
)
| PrimaryOneToOne |
python | etianen__django-reversion | tests/test_app/tests/test_views.py | {
"start": 487,
"end": 777
} | class ____(LoginMixin, TestModelMixin, TestBase):
def testCreateRevisionUser(self):
response = self.client.post("/test-app/create-revision/")
obj = TestModel.objects.get(pk=response.content)
self.assertSingleRevision((obj,), user=self.user)
| CreateRevisionUserTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/refurb/FURB118.py | {
"start": 2314,
"end": 2574
} | class ____:
z: Callable = lambda self, other: self == other
# Lambdas wrapped in function calls could also still be method definitions!
# To avoid false positives, we shouldn't flag any of these either:
from typing import final, override, no_type_check
| Baz |
python | pydantic__pydantic | tests/benchmarks/shared.py | {
"start": 1383,
"end": 1542
} | class ____(BaseModel):
field1: Union[str, int, float]
field2: list[dict[str, Union[int, float]]]
field3: Optional[list[Union[str, int]]]
| ComplexModel |
python | django__django | django/db/models/query.py | {
"start": 106810,
"end": 111746
} | class ____:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db, fetch_mode):
self.db = db
self.fetch_mode = fetch_mode
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - local_setter, remote_setter: Methods to set cached values on
# the object being populated and on the remote object. Usually
# these are Field.set_cached_value() methods.
select_fields = klass_info["select_fields"]
from_parent = klass_info["from_parent"]
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start : self.cols_end]
]
self.reorder_for_init = None
else:
attname_indexes = {
select[idx][0].target.attname: idx for idx in select_fields
}
model_init_attnames = (
f.attname for f in klass_info["model"]._meta.concrete_fields
)
self.init_list = [
attname for attname in model_init_attnames if attname in attname_indexes
]
self.reorder_for_init = operator.itemgetter(
*[attname_indexes[attname] for attname in self.init_list]
)
self.model_cls = klass_info["model"]
# A primary key must have all of its constituents not-NULL as
# NULL != NULL and thus NULL cannot be referenced through a foreign
# relationship. Therefore checking for a single member of the primary
# key is enough to determine if the referenced object exists or not.
self.pk_idx = self.init_list.index(self.model_cls._meta.pk_fields[0].attname)
self.related_populators = get_related_populators(
klass_info, select, self.db, fetch_mode
)
self.local_setter = klass_info["local_setter"]
self.remote_setter = klass_info["remote_setter"]
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start : self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(
self.db,
self.init_list,
obj_data,
fetch_mode=self.fetch_mode,
)
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
self.local_setter(from_obj, obj)
if obj is not None:
self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db, fetch_mode):
iterators = []
related_klass_infos = klass_info.get("related_klass_infos", [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db, fetch_mode)
iterators.append(rel_cls)
return iterators
| RelatedPopulator |
python | walkccc__LeetCode | solutions/3184. Count Pairs That Form a Complete Day I/3184.py | {
"start": 0,
"end": 219
} | class ____:
def countCompleteDayPairs(self, hours: list[int]) -> int:
ans = 0
count = [0] * 24
for hour in hours:
ans += count[(24 - hour % 24) % 24]
count[hour % 24] += 1
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/deberta/modeling_deberta.py | {
"start": 31339,
"end": 32462
} | class ____(nn.Module):
"""https://github.com/microsoft/DeBERTa/blob/master/DeBERTa/deberta/bert.py#L270"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=True)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# note that the input embeddings must be passed as an argument
def forward(self, hidden_states, word_embeddings):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(
hidden_states
) # original used MaskedLayerNorm, but passed no mask. This is equivalent.
hidden_states = torch.matmul(hidden_states, word_embeddings.weight.t()) + self.bias
return hidden_states
| DebertaLMPredictionHead |
python | streamlit__streamlit | lib/streamlit/cursor.py | {
"start": 4542,
"end": 6062
} | class ____(Cursor):
def __init__(
self,
root_container: int,
parent_path: tuple[int, ...] = (),
index: int = 0,
**props: Any,
) -> None:
"""A locked pointer to a location in the app.
LockedCursors always point to the same location, even when you call
get_locked_cursor() on them.
Parameters
----------
root_container: int
The root container this cursor lives in.
parent_path: tuple of ints
The full path of this cursor, consisting of the IDs of all ancestors. The
0th item is the topmost ancestor.
index: int
**props: any
Anything else you want to store in this cursor. This is a temporary
measure that will go away when we implement improved return values
for elements.
"""
self._root_container = root_container
self._index = index
self._parent_path = parent_path
self._props = props
@property
def root_container(self) -> int:
return self._root_container
@property
def parent_path(self) -> tuple[int, ...]:
return self._parent_path
@property
def index(self) -> int:
return self._index
@property
def is_locked(self) -> bool:
return True
def get_locked_cursor(self, **props: Any) -> LockedCursor:
self._props = props
return self
@property
def props(self) -> Any:
return self._props
| LockedCursor |
python | pytorch__pytorch | test/distributed/test_c10d_nccl.py | {
"start": 7832,
"end": 8527
} | class ____(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
super().setUp()
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
def tearDown(self):
pass
@requires_nccl()
@skip_but_pass_in_sandcastle_if(TEST_CUDA, "GPUs are available, skipping test")
def test_init_no_gpus(self):
store = c10d.FileStore(self.file.name, self.world_size)
with self.assertRaisesRegex(
ValueError, "ProcessGroupNCCL is only supported with GPUs, no GPUs found!"
):
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
| ProcessGroupNCCLNoGPUTest |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/events.py | {
"start": 3679,
"end": 4700
} | class ____(FileSystemEvent):
"""
File system event representing any kind of file system movement.
"""
event_type = EVENT_TYPE_MOVED
def __init__(self, src_path, dest_path):
super(FileSystemMovedEvent, self).__init__(src_path)
self._dest_path = dest_path
@property
def dest_path(self):
"""The destination path of the move event."""
return self._dest_path
# Used for hashing this as an immutable object.
@property
def key(self):
return (self.event_type, self.src_path, self.dest_path, self.is_directory)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r, "
"dest_path=%(dest_path)r, "
"is_directory=%(is_directory)s>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path,
is_directory=self.is_directory))
# File events.
| FileSystemMovedEvent |
python | wandb__wandb | wandb/sdk/artifacts/storage_handlers/gcs_handler.py | {
"start": 928,
"end": 1469
} | class ____(Exception):
"""Raised when we try to download a GCS folder."""
def _handle_import_error(exc: ImportError) -> Never:
# We handle the ImportError this way for continuity/backward compatibility, but
# consider a future, albeit breaking, change that just raises a proper `ImportError`.
logger.exception(f"Error importing optional module {exc.name!r}")
raise wandb.Error(
"gs:// references require the google-cloud-storage library, run pip install wandb[gcp]"
)
@pydantic_dataclass
| _GCSIsADirectoryError |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/loop_with_variable_type_illegal_cases_test.py | {
"start": 3302,
"end": 7655
} | class ____(reference_test_base.TestCase, parameterized.TestCase):
def test_while_with_variable_py_type(self):
with self.assertRaisesRegex(
NotImplementedError,
re.compile(
r'.*condition of while loop started as non\-Tensor,'
r' then changed to Tensor.*', re.DOTALL)):
tf.function(while_with_variable_py_type)()
def test_while_with_variable_dtype(self):
with self.assertRaisesRegex(
TypeError,
"'n' has dtype int32 before the loop, but dtype float32 after"):
tf.function(while_with_variable_dtype)()
def test_while_with_variable_dtype_and_early_stopping(self):
with self.assertRaisesRegex(
TypeError,
"'n' has dtype int32 before the loop, but dtype float32 after"):
tf.function(while_with_variable_dtype_and_early_stopping)()
@parameterized.parameters(
(tf.constant,),
(_tf_range,),
(_dataset,),
(_dataset_iterator,),
(_distributed_dataset,),
)
def test_for_with_variable_dtype(self, type_):
l = type_([1, 2, 3])
with self.assertRaisesRegex(
TypeError,
"'n' has dtype int32 before the loop, but dtype float32 after"):
tf.function(for_with_variable_dtype)(l)
# Note: distributed datasets don't allow early stopping.
@parameterized.parameters(
(tf.constant,),
(_tf_range,),
(_dataset,),
(_dataset_iterator,),
)
def test_for_with_variable_dtype_and_early_stopping(self, type_):
l = type_([1, 2, 3])
with self.assertRaisesRegex(
TypeError,
"'n' has dtype int32 before the loop, but dtype float32 after"):
tf.function(for_with_variable_dtype_and_early_stopping)(l)
def test_while_with_variable_shape(self):
with self.assertRaisesRegex(
ValueError,
r"'t' has shape \(1,\) before the loop, but shape \(2,\) after"):
tf.function(while_with_variable_shape)()
# Note: datasets do allow variable shape.
@parameterized.parameters(
(tf.constant,),
(_tf_range,),
(_dataset_iterator,),
(_distributed_dataset,),
)
def test_for_with_variable_shape(self, type_):
l = type_([1, 2, 3])
with self.assertRaisesRegex(
ValueError,
r"'t' has shape \(1,\) before the loop, but shape \(2,\) after"):
tf.function(for_with_variable_shape)(l)
def test_while_with_shape_erasure(self):
with self.assertRaisesRegex(
ValueError,
r"'t' has shape \(1,\) before the loop, but shape \(None,\) after"):
tf.function(while_with_shape_erasure)()
# Note: datasets do allow variable shape.
@parameterized.parameters(
(tf.constant,),
(_tf_range,),
(_dataset_iterator,),
(_distributed_dataset,),
)
def test_for_with_shape_erasure(self, type_):
l = type_([1, 2, 3])
with self.assertRaisesRegex(
ValueError,
r"'t' has shape \(1,\) before the loop, but shape \(None,\) after"):
tf.function(for_with_shape_erasure)(l)
def test_while_with_shape_invariant_violation(self):
with self.assertRaisesRegex(
ValueError,
r"'t' has shape \(None,\) after one iteration, which does not conform"):
tf.function(while_with_shape_invariant_violation)()
# Note: dataset loops ignore shape invariants.
@parameterized.parameters(
(tf.constant,),
(_tf_range,),
(_dataset_iterator,),
(_distributed_dataset,),
)
def test_for_with_shape_invariant_violation(self, type_):
l = type_([1, 2, 3])
with self.assertRaisesRegex(
ValueError,
r"'t' has shape \(None,\) after one iteration, which does not conform"):
tf.function(for_with_shape_invariant_violation)(l)
def test_while_with_variable_structure(self):
with self.assertRaisesRegex(
TypeError,
"'s' does not have the same nested structure"):
tf.function(while_with_variable_structure)()
@parameterized.parameters(
(tf.constant,),
(_tf_range,),
(_dataset,),
(_dataset_iterator,),
(_distributed_dataset,),
)
def test_for_with_variable_structure(self, type_):
l = type_([1, 2, 3])
with self.assertRaisesRegex(
TypeError,
"'s' does not have the same nested structure"):
tf.function(for_with_variable_structure)(l)
if __name__ == '__main__':
tf.test.main()
| ReferenceTest |
python | joblib__joblib | joblib/memory.py | {
"start": 10500,
"end": 10855
} | class ____(NotMemorizedFunc):
async def call_and_shelve(self, *args, **kwargs):
return NotMemorizedResult(await self.func(*args, **kwargs))
###############################################################################
# class `MemorizedFunc`
###############################################################################
| AsyncNotMemorizedFunc |
python | ray-project__ray | doc/source/serve/doc_code/streaming_tutorial.py | {
"start": 488,
"end": 3109
} | class ____:
def __init__(self, model_id: str):
self.loop = asyncio.get_running_loop()
self.model_id = model_id
self.model = AutoModelForCausalLM.from_pretrained(self.model_id)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
# __textbot_constructor_end__
# __textbot_logic_start__
@fastapi_app.post("/")
def handle_request(self, prompt: str) -> StreamingResponse:
logger.info(f'Got prompt: "{prompt}"')
streamer = TextIteratorStreamer(
self.tokenizer, timeout=0, skip_prompt=True, skip_special_tokens=True
)
self.loop.run_in_executor(None, self.generate_text, prompt, streamer)
return StreamingResponse(
self.consume_streamer(streamer), media_type="text/plain"
)
def generate_text(self, prompt: str, streamer: TextIteratorStreamer):
input_ids = self.tokenizer([prompt], return_tensors="pt").input_ids
self.model.generate(input_ids, streamer=streamer, max_length=10000)
async def consume_streamer(self, streamer: TextIteratorStreamer):
while True:
try:
for token in streamer:
logger.info(f'Yielding token: "{token}"')
yield token
break
except Empty:
# The streamer raises an Empty exception if the next token
# hasn't been generated yet. `await` here to yield control
# back to the event loop so other coroutines can run.
await asyncio.sleep(0.001)
# __textbot_logic_end__
# __textbot_bind_start__
app = Textbot.bind("microsoft/DialoGPT-small")
# __textbot_bind_end__
serve.run(app)
chunks = []
# __stream_client_start__
import requests
prompt = "Tell me a story about dogs."
response = requests.post(f"http://localhost:8000/?prompt={prompt}", stream=True)
response.raise_for_status()
for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
print(chunk, end="")
# Dogs are the best.
# __stream_client_end__
chunks.append(chunk)
# Check that streaming is happening.
assert chunks == ["Dogs ", "are ", "the ", "best."]
# __chatbot_setup_start__
import asyncio
import logging
from queue import Empty
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from ray import serve
logger = logging.getLogger("ray.serve")
# __chatbot_setup_end__
# __chatbot_constructor_start__
fastapi_app = FastAPI()
@serve.deployment
@serve.ingress(fastapi_app)
| Textbot |
python | davidhalter__jedi | test/completion/classes.py | {
"start": 6122,
"end": 6176
} | class ____():
def ret(self, b):
return b
| Base |
python | huggingface__transformers | src/transformers/models/aimv2/configuration_aimv2.py | {
"start": 10323,
"end": 13654
} | class ____(PreTrainedConfig):
r"""
[`Aimv2Config`] is the configuration class to store the configuration of a [`Aimv2Model`]. It is used to
instantiate a AIMv2 model according to the specified arguments, defining the text model and vision model configs.
Instantiating a configuration with the defaults will yield a similar configuration to that of the AIMv2
[apple/aimv2-large-patch14-224-lit](https://huggingface.co/apple/aimv2-large-patch14-224-lit) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`Aimv2TextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`Aimv2VisionConfig`].
projection_dim (`int`, *optional*, defaults to 512):
Dimensionality of text and vision projection layers.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The initial value of the *logit_scale* parameter.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import Aimv2Config, Aimv2Model
>>> # Initializing a Aimv2Config with apple/aimv2-large-patch14-224-lit style configuration
>>> configuration = Aimv2Config()
>>> # Initializing a Aimv2Model (with random weights) from the apple/aimv2-large-patch14-224-lit style configuration
>>> model = Aimv2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a Aimv2Config from a Aimv2TextConfig and a Aimv2VisionConfig
>>> from transformers import Aimv2TextConfig, Aimv2VisionConfig
>>> # Initializing a AIMv2Text and AIMv2Vision configuration
>>> config_text = Aimv2TextConfig()
>>> config_vision = Aimv2VisionConfig()
>>> config = Aimv2Config(text_config=config_text, vision_config=config_vision)
```"""
model_type = "aimv2"
sub_configs = {"text_config": Aimv2TextConfig, "vision_config": Aimv2VisionConfig}
def __init__(
self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
):
self.projection_dim = projection_dim
self.logit_scale_init_value = logit_scale_init_value
self.max_logit_scale = 100.0
if text_config is None:
text_config = Aimv2TextConfig()
logger.info("`text_config` is `None`. Initializing the `Aimv2TextConfig` with default values.")
elif isinstance(text_config, dict):
text_config = Aimv2TextConfig(**text_config)
if vision_config is None:
vision_config = Aimv2VisionConfig()
logger.info("`vision_config` is `None`. initializing the `Aimv2VisionConfig` with default values.")
elif isinstance(vision_config, dict):
vision_config = Aimv2VisionConfig(**vision_config)
self.text_config = text_config
self.vision_config = vision_config
super().__init__(**kwargs)
__all__ = ["Aimv2Config", "Aimv2VisionConfig", "Aimv2TextConfig"]
| Aimv2Config |
python | psf__black | tests/data/cases/annotations.py | {
"start": 28,
"end": 263
} | class ____:
def foo(self):
if True:
content_ids: Mapping[
str, Optional[ContentId]
] = self.publisher_content_store.store_config_contents(files)
# output
# regression test for #1765
| Foo |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/dataframeeditor.py | {
"start": 65284,
"end": 67901
} | class ____(QAbstractTableModel, SpyderFontsMixin):
"""
Data Frame level class.
This class is used to represent index levels in the DataFrameEditor. When
using MultiIndex, this model creates labels for the index/header as Index i
for each section in the index/header
Based on the gtabview project (Level4ExtModel).
For more information please see:
https://github.com/wavexx/gtabview/blob/master/gtabview/viewer.py
"""
def __init__(self, model):
super().__init__()
self.model = model
self._background = QColor(SpyderPalette.COLOR_BACKGROUND_2)
def rowCount(self, index=None):
"""Get number of rows (number of levels for the header)."""
return max(1, self.model.header_shape[0])
def columnCount(self, index=None):
"""Get the number of columns (number of levels for the index)."""
return max(1, self.model.header_shape[1])
def headerData(self, section, orientation, role):
"""
Get the text to put in the header of the levels of the indexes.
By default it returns 'Index i', where i is the section in the index
"""
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return Qt.AlignCenter
else:
return int(Qt.AlignRight | Qt.AlignVCenter)
if role != Qt.DisplayRole and role != Qt.ToolTipRole:
return None
if self.model.header_shape[0] <= 1 and orientation == Qt.Horizontal:
if self.model.name(1, section):
return self.model.name(1, section)
return _('Index')
elif self.model.header_shape[0] <= 1:
return None
elif self.model.header_shape[1] <= 1 and orientation == Qt.Vertical:
return None
return _('Index') + ' ' + str(section)
def data(self, index, role):
"""Get the information of the levels."""
if not index.isValid():
return None
if role == Qt.FontRole:
return self.get_font(SpyderFontType.Interface)
label = ''
if index.column() == self.model.header_shape[1] - 1:
label = str(self.model.name(0, index.row()))
elif index.row() == self.model.header_shape[0] - 1:
label = str(self.model.name(1, index.column()))
if role == Qt.DisplayRole and label:
return label
elif role == Qt.BackgroundRole:
return self._background
elif role == Qt.BackgroundRole:
return self._palette.window()
return None
| DataFrameLevelModel |
python | pyodide__pyodide | src/py/_pyodide/_core_docs.py | {
"start": 40028,
"end": 40359
} | class ____(JsCallable[P, T], Generic[P, T]):
"""A JavaScript handle for a Python function which can be called at most
once.
After it is called, the reference to the underlying Python object is
released and attempting to call it again will raise an `Error`.
"""
def destroy(self):
pass
| JsOnceCallable |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/sparse/sparse_csr_matrix_ops.py | {
"start": 1687,
"end": 8914
} | class ____(
collections.namedtuple("DenseShapeAndType", ("shape", "dtype"))):
pass
def _get_handle_data(tensor):
return resource_variable_ops.get_eager_safe_handle_data(tensor)
def _create_handle_data_proto(shape_proto, dtype_enum):
"""Create handle data based on shape and dtype protos."""
variant_shape_and_type_data = \
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData()
variant_shape_and_type_data.is_set = True
# NOTE(ebrevdo): shape_and_type lacks append() in some versions of protobuf.
variant_shape_and_type_data.shape_and_type.extend([
cpp_shape_inference_pb2.CppShapeInferenceResult.HandleShapeAndType(
shape=shape_proto, dtype=dtype_enum)
])
return variant_shape_and_type_data
def _make_handle_data(tensor):
"""Create handle data based on tensor shape and dtype."""
return _create_handle_data_proto(tensor.shape.as_proto(),
tensor.dtype.as_datatype_enum)
def get_shape_and_type(matrix):
"""Return matrix's shape and type if available."""
handle_data = getattr(matrix, "_handle_data", None)
if handle_data is None:
return None
if len(handle_data.shape_and_type) != 1:
raise ValueError(
"shape_and_type array in _handle_data must have length one, but saw: %d"
% len(handle_data.shape_and_type))
return handle_data.shape_and_type[0]
def dense_shape_and_type(matrix):
"""Get dense shape and dtype of the tf.Tensor containing the matrix.
Args:
matrix: A `tf.Tensor` of type `tf.variant` storing a sparse matrix.
Returns:
An instance of `ShapeAndType` with properties `shape` (a `tf.TensorShape`)
and `dtype` (a `tf.DType`).
Raises:
TypeError: if `matrix` is not a tensor or its dtype is not variant.
ValueError: if `matrix` lacks static handle data containing the dense
shape and dtype.
"""
if not isinstance(matrix, tensor_lib.Tensor):
raise TypeError("matrix should be a tensor, but saw: %s" % (matrix,))
if matrix.dtype != dtypes.variant:
raise TypeError(
"expected matrix to be type tf.variant, but saw: %s" % (matrix.dtype,))
handle_data = _get_handle_data(matrix)
if not handle_data or not handle_data.is_set:
raise ValueError("matrix has missing handle data: %s" % (matrix,))
if len(handle_data.shape_and_type) != 1:
raise ValueError("len(matrix.handle_data.shape_and_type) != 1: '%s'" %
(handle_data.shape_and_type,))
return DenseShapeAndType(
tensor_shape.TensorShape(handle_data.shape_and_type[0].shape),
dtypes.DType(handle_data.shape_and_type[0].dtype))
def matmul_shape_inference(a, b, c, transpose_a, transpose_b, adjoint_a,
adjoint_b):
"""Helper function for matmul to set the result matrix's handle data."""
c_handle = getattr(c, "_handle_data", None)
a_shape_and_type = get_shape_and_type(a)
b_shape_and_type = get_shape_and_type(b)
if (c_handle is None and a_shape_and_type is not None and
b_shape_and_type is not None):
transpose_a = transpose_a or adjoint_a
transpose_b = transpose_b or adjoint_b
a_shape = a_shape_and_type.shape
b_shape = b_shape_and_type.shape
rank = len(a_shape.dim)
# Creates the output shape.
c_rows = a_shape.dim[rank - (1 if transpose_a else 2)].size
c_cols = b_shape.dim[rank - (2 if transpose_b else 1)].size
c_shape = tensor_shape.TensorShape(a_shape)
c_shape = tensor_shape.TensorShape(c_shape[:rank - 2] + [c_rows, c_cols])
c_handle = _create_handle_data_proto(c_shape.as_proto(),
a_shape_and_type.dtype)
return c_handle
def matmul(a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False,
name=None):
"""Perform a sparse matrix matmul between `a` and `b`.
Performs a contraction between `a` and `b` along the two innermost dimensions.
If both `a` and `b` are instances of `SparseMatrix`, returns a new instance
of `SparseMatrix` (same type as `a`). If one is not an instance of
`SparseMatrix`, returns a dense `Tensor`:
```
c = opA(a) . opB(b)
```
where `opA` (resp. `opB`) is the transpose or hermitian transpose depending
on the values of `transpose_a` (resp. `transpose_b`) and `adjoint_a`
(resp. `adjoint_b`).
Args:
a: `Tensor` or `SparseMatrix`, having rank `2` or `3`.
b: `Tensor` or `SparseMatrix`, having rank `2` or `3`.
transpose_a: Python `bool`.
transpose_b: Python `bool`.
adjoint_a: Python `bool`.
adjoint_b: Python `bool`.
name: Optional name to use when creating ops.
Returns:
A `SparseMatrix` if both `a` and `b` are instances of `SparseMatrix`,
otherwise a dense `Tensor`.
"""
if not isinstance(a, SparseMatrix) and not isinstance(b, SparseMatrix):
return math_ops.matmul(
a,
b,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b,
name=name)
# pylint: disable=protected-access
a_matrix = a._matrix if isinstance(a, SparseMatrix) else a
b_matrix = b._matrix if isinstance(b, SparseMatrix) else b
with ops.name_scope(name, "SparseMatrixMatMul", [a_matrix, b_matrix]):
if isinstance(a, SparseMatrix) and isinstance(b, SparseMatrix):
if not (isinstance(a, type(b)) or isinstance(b, type(a))):
raise TypeError("SparseMatrix types don't inherit from each other: "
"%s and %s" % (type(a), type(b)))
c = sm_ops.sparse_matrix_sparse_mat_mul(
a_matrix,
b_matrix,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b,
type=a.dtype)
# In eager mode, shape inference functions are not called, and the output
# shape is not set. We have to infer the output shape here.
# TODO(penporn): Set this from the C++ kernel instead.
c_handle = matmul_shape_inference(a_matrix, b_matrix, c, transpose_a,
transpose_b, adjoint_a, adjoint_b)
return a._from_matrix(c, handle_data=c_handle)
elif isinstance(a, SparseMatrix):
return sm_ops.sparse_matrix_mat_mul(
a_matrix,
b,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
else:
# opA(A) . opB(B) = t(nopB(B) . nopA(A))
if not adjoint_a and not adjoint_b:
return sm_ops.sparse_matrix_mat_mul(
b_matrix,
a,
transpose_a=not transpose_b,
transpose_b=not transpose_a,
transpose_output=True)
elif not transpose_a and not transpose_b:
return sm_ops.sparse_matrix_mat_mul(
b_matrix,
a,
adjoint_a=not adjoint_b,
adjoint_b=not adjoint_a,
transpose_output=True,
conjugate_output=True)
else:
return sm_ops.sparse_matrix_mat_mul(
b_matrix,
math_ops.conj(a),
transpose_output=True,
conjugate_output=adjoint_b)
| DenseShapeAndType |
python | getsentry__sentry | src/sentry/analytics/events/weekly_report.py | {
"start": 75,
"end": 266
} | class ____(analytics.Event):
organization_id: int
user_id: int | None = None
notification_uuid: str
user_project_count: int
analytics.register(WeeklyReportSent)
| WeeklyReportSent |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B015.py | {
"start": 222,
"end": 266
} | class ____:
1 == 1
print(1 == 1)
| TestClass |
python | networkx__networkx | networkx/readwrite/text.py | {
"start": 1310,
"end": 1450
} | class ____(UtfBaseGlyphs):
last: str = "└─╼ "
mid: str = "├─╼ "
backedge: str = "╾"
vertical_edge: str = "╽"
| UtfDirectedGlyphs |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 118301,
"end": 120590
} | class ____(TypedDict, total=False):
type: Required[Literal['dataclass-args']]
dataclass_name: Required[str]
fields: Required[list[DataclassField]]
computed_fields: list[ComputedField]
collect_init_only: bool # default: False
ref: str
metadata: dict[str, Any]
serialization: SerSchema
extra_behavior: ExtraBehavior
def dataclass_args_schema(
dataclass_name: str,
fields: list[DataclassField],
*,
computed_fields: list[ComputedField] | None = None,
collect_init_only: bool | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
extra_behavior: ExtraBehavior | None = None,
) -> DataclassArgsSchema:
"""
Returns a schema for validating dataclass arguments, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
field_a = core_schema.dataclass_field(
name='a', schema=core_schema.str_schema(), kw_only=False
)
field_b = core_schema.dataclass_field(
name='b', schema=core_schema.bool_schema(), kw_only=False
)
schema = core_schema.dataclass_args_schema('Foobar', [field_a, field_b])
v = SchemaValidator(schema)
assert v.validate_python({'a': 'hello', 'b': True}) == ({'a': 'hello', 'b': True}, None)
```
Args:
dataclass_name: The name of the dataclass being validated
fields: The fields to use for the dataclass
computed_fields: Computed fields to use when serializing the dataclass
collect_init_only: Whether to collect init only fields into a dict to pass to `__post_init__`
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
extra_behavior: How to handle extra fields
"""
return _dict_not_none(
type='dataclass-args',
dataclass_name=dataclass_name,
fields=fields,
computed_fields=computed_fields,
collect_init_only=collect_init_only,
ref=ref,
metadata=metadata,
serialization=serialization,
extra_behavior=extra_behavior,
)
| DataclassArgsSchema |
python | kubernetes-client__python | kubernetes/client/models/v1_runtime_class_list.py | {
"start": 383,
"end": 6949
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1RuntimeClass]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1RuntimeClassList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1RuntimeClassList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1RuntimeClassList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1RuntimeClassList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1RuntimeClassList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1RuntimeClassList. # noqa: E501
items is a list of schema objects. # noqa: E501
:return: The items of this V1RuntimeClassList. # noqa: E501
:rtype: list[V1RuntimeClass]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1RuntimeClassList.
items is a list of schema objects. # noqa: E501
:param items: The items of this V1RuntimeClassList. # noqa: E501
:type: list[V1RuntimeClass]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1RuntimeClassList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1RuntimeClassList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1RuntimeClassList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1RuntimeClassList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1RuntimeClassList. # noqa: E501
:return: The metadata of this V1RuntimeClassList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1RuntimeClassList.
:param metadata: The metadata of this V1RuntimeClassList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1RuntimeClassList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1RuntimeClassList):
return True
return self.to_dict() != other.to_dict()
| V1RuntimeClassList |
python | PyCQA__pylint | tests/functional/i/init_subclass_classmethod.py | {
"start": 223,
"end": 261
} | class ____(PluginBase):
pass
| Plugin1 |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 34413,
"end": 36211
} | class ____(LstsqCases):
def test_rcond(self):
a = np.array([[0., 1., 0., 1., 2., 0.],
[0., 2., 0., 0., 1., 0.],
[1., 0., 1., 0., 0., 4.],
[0., 0., 0., 2., 3., 0.]]).T
b = np.array([1, 0, 0, 0, 0, 0])
x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
assert_(rank == 4)
x, residuals, rank, s = linalg.lstsq(a, b)
assert_(rank == 3)
x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
assert_(rank == 3)
@pytest.mark.parametrize(["m", "n", "n_rhs"], [
(4, 2, 2),
(0, 4, 1),
(0, 4, 2),
(4, 0, 1),
(4, 0, 2),
(4, 2, 0),
(0, 0, 0)
])
def test_empty_a_b(self, m, n, n_rhs):
a = np.arange(m * n).reshape(m, n)
b = np.ones((m, n_rhs))
x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
if m == 0:
assert_((x == 0).all())
assert_equal(x.shape, (n, n_rhs))
assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))
if m > n and n_rhs > 0:
# residuals are exactly the squared norms of b's columns
r = b - np.dot(a, x)
assert_almost_equal(residuals, (r * r).sum(axis=-2))
assert_equal(rank, min(m, n))
assert_equal(s.shape, (min(m, n),))
def test_incompatible_dims(self):
# use modified version of docstring example
x = np.array([0, 1, 2, 3])
y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
A = np.vstack([x, np.ones(len(x))]).T
with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
linalg.lstsq(A, y, rcond=None)
@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
| TestLstsq |
python | huggingface__transformers | src/transformers/cache_utils.py | {
"start": 6016,
"end": 9539
} | class ____(DynamicLayer):
"""
A cache layer that grows dynamically as more tokens are generated, up until the sliding window size.
It stores the key and value states as tensors of shape `[batch_size, num_heads, min(seq_len, sliding_window), head_dim]`.
"""
is_sliding = True
def __init__(self, sliding_window: int):
super().__init__()
self.sliding_window = sliding_window
self.cumulative_length = 0
self._sliding_window_tensor = torch.tensor(self.sliding_window, dtype=torch.long)
def lazy_initialization(self, key_states: torch.Tensor) -> None:
super().lazy_initialization(key_states)
self._sliding_window_tensor = self._sliding_window_tensor.to(self.device)
def update(
self,
key_states: torch.Tensor,
value_states: torch.Tensor,
cache_kwargs: Optional[dict[str, Any]] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Update the key and value caches in-place, and return the necessary keys and value states.
Args:
key_states (`torch.Tensor`): The new key states to cache.
value_states (`torch.Tensor`): The new value states to cache.
cache_kwargs (`dict[str, Any]`, *optional*): Additional arguments for the cache.
Returns:
tuple[`torch.Tensor`, `torch.Tensor`]: The key and value states.
"""
# Lazy initialization
if not self.is_initialized:
self.lazy_initialization(key_states)
self.cumulative_length += key_states.shape[-2]
# Compute the full states
full_key_states = torch.cat([self.keys, key_states], dim=-2)
full_value_states = torch.cat([self.values, value_states], dim=-2)
# Only cache the last `self.sliding_window - 1` tokens (or all of them if lower than that)
self.keys = full_key_states[:, :, -self.sliding_window + 1 :, :]
self.values = full_value_states[:, :, -self.sliding_window + 1 :, :]
# Return the full states
return full_key_states, full_value_states
def get_mask_sizes(self, cache_position: torch.Tensor) -> tuple[int, int]:
"""Return the length and offset of the cache, used to generate the attention mask"""
query_length = cache_position.shape[0]
is_full = self.cumulative_length >= self.sliding_window
kv_offset = max(self.cumulative_length - self.sliding_window + 1, 0)
if is_full:
kv_length = self.sliding_window - 1 + query_length
else:
kv_length = self.cumulative_length + query_length
return kv_length, kv_offset
def get_seq_length(self) -> int:
"""Returns the sequence length of the cached states."""
return self.cumulative_length
def get_max_cache_shape(self) -> int:
"""Return the maximum cache shape of the cache"""
return self.sliding_window
def crop(self, max_length: int) -> None:
"""
Crop the past key values up to a new `max_length` in terms of tokens. `max_length` can also be
negative to remove `max_length` tokens.
"""
if self.get_seq_length() >= self.sliding_window:
raise ValueError(
"Cannot `crop` a `DynamicSlidingWindowLayer` after it has seen more tokens than its"
"sliding window (otherwise some states are lost)"
)
super().crop(max_length)
self.cumulative_length = self.keys.shape[-2]
| DynamicSlidingWindowLayer |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/variant_values_override/package.py | {
"start": 201,
"end": 368
} | class ____(VariantValues):
"""Test variant value validation with multiple definitions."""
variant("v", default="baz", values=["bar", "baz"])
| VariantValuesOverride |
python | getsentry__sentry | src/sentry/db/models/fields/slug.py | {
"start": 302,
"end": 418
} | class ____(SlugField):
default_validators = [*SlugField.default_validators, org_slug_validator]
| SentryOrgSlugField |
python | jazzband__django-model-utils | tests/test_fields/test_field_tracker.py | {
"start": 30877,
"end": 32968
} | class ____(FieldTrackerTests):
tracked_class: type[ModelTracked | TrackedAbstract] = ModelTracked
instance: ModelTracked
def test_cache_compatible(self) -> None:
cache.set('key', self.instance)
instance = cache.get('key')
instance.number = 1
instance.name = 'cached'
instance.save()
self.assertChanged()
instance.number = 2
self.assertHasChanged(number=True)
def test_pre_save_changed(self) -> None:
self.assertChanged()
self.instance.name = 'new age'
self.assertChanged()
self.instance.number = 8
self.assertChanged()
self.instance.name = ''
self.assertChanged()
self.instance.mutable = [1, 2, 3]
self.assertChanged()
def test_first_save(self) -> None:
self.assertHasChanged(name=True, number=True, mutable=True)
self.assertPrevious(name=None, number=None, mutable=None)
self.assertCurrent(name='', number=None, id=None, mutable=None)
self.assertChanged()
self.instance.name = 'retro'
self.instance.number = 4
self.instance.mutable = [1, 2, 3]
self.assertHasChanged(name=True, number=True, mutable=True)
self.assertPrevious(name=None, number=None, mutable=None)
self.assertCurrent(name='retro', number=4, id=None, mutable=[1, 2, 3])
self.assertChanged()
self.instance.save(update_fields=[])
self.assertHasChanged(name=True, number=True, mutable=True)
self.assertPrevious(name=None, number=None, mutable=None)
self.assertCurrent(name='retro', number=4, id=None, mutable=[1, 2, 3])
self.assertChanged()
with self.assertRaises(ValueError):
self.instance.save(update_fields=['number'])
def test_pre_save_has_changed(self) -> None:
self.assertHasChanged(name=True, number=True)
self.instance.name = 'new age'
self.assertHasChanged(name=True, number=True)
self.instance.number = 7
self.assertHasChanged(name=True, number=True)
| ModelTrackerTests |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/core/components/airflow_instance/component.py | {
"start": 2662,
"end": 3375
} | class ____(Resolvable):
by_key: ResolvedAssetKey
def resolve_mapped_asset(context: ResolutionContext, model) -> Union[AssetKey, AssetSpec]:
if isinstance(model, InAirflowAsset.model()):
return InAirflowAsset.resolve_from_model(context, model).spec
elif isinstance(model, InDagsterAssetRef.model()):
return InDagsterAssetRef.resolve_from_model(context, model).by_key
else:
raise ValueError(f"Unsupported asset type: {type(model)}")
ResolvedMappedAsset: TypeAlias = Annotated[
Union[AssetKey, AssetSpec],
Resolver(
resolve_mapped_asset,
model_field_type=Union[InAirflowAsset.model(), InDagsterAssetRef.model()],
),
]
@dataclass
| InDagsterAssetRef |
python | astropy__astropy | astropy/units/tests/test_structured.py | {
"start": 27156,
"end": 28041
} | class ____(StructuredTestBaseWithUnits):
def setup_class(self):
super().setup_class()
class PositionVelocity(u.SpecificTypeQuantity):
_equivalent_unit = self.pv_unit
self.PositionVelocity = PositionVelocity
def test_init(self):
pv = self.PositionVelocity(self.pv, self.pv_unit)
assert isinstance(pv, self.PositionVelocity)
assert type(pv["p"]) is u.Quantity
assert_array_equal(pv["p"], self.pv["p"] << self.pv_unit["p"])
pv2 = self.PositionVelocity(self.pv, "AU,AU/day")
assert_array_equal(pv2["p"], self.pv["p"] << u.AU)
def test_error_on_non_equivalent_unit(self):
with pytest.raises(u.UnitsError):
self.PositionVelocity(self.pv, "AU")
with pytest.raises(u.UnitsError):
self.PositionVelocity(self.pv, "AU,yr")
| TestStructuredSpecificTypeQuantity |
python | huggingface__transformers | src/transformers/models/unispeech/modeling_unispeech.py | {
"start": 26020,
"end": 29125
} | class ____(nn.Module):
"""
Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH
GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information.
"""
def __init__(self, config):
super().__init__()
self.num_groups = config.num_codevector_groups
self.num_vars = config.num_codevectors_per_group
if config.codevector_dim % self.num_groups != 0:
raise ValueError(
f"`config.codevector_dim {config.codevector_dim} must be divisible "
f"by `config.num_codevector_groups` {self.num_groups} for concatenation"
)
# storage for codebook variables (codewords)
self.codevectors = nn.Parameter(
torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
)
self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)
# can be decayed for training
self.temperature = 2
@staticmethod
def _compute_perplexity(probs):
marginal_probs = probs.mean(dim=0)
perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
return perplexity
def forward(self, hidden_states):
batch_size, sequence_length, hidden_size = hidden_states.shape
# project to codevector dim
hidden_states = self.weight_proj(hidden_states)
hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
if self.training:
# sample code vector probs via gumbel in differentiateable way
codevector_probs = nn.functional.gumbel_softmax(
hidden_states.float(), tau=self.temperature, hard=True
).type_as(hidden_states)
# compute perplexity
codevector_soft_dist = torch.softmax(
hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
)
perplexity = self._compute_perplexity(codevector_soft_dist)
else:
# take argmax in non-differentiable way
# comptute hard codevector distribution (one hot)
codevector_idx = hidden_states.argmax(dim=-1)
codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
-1, codevector_idx.view(-1, 1), 1.0
)
codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
perplexity = self._compute_perplexity(codevector_probs)
codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
# use probs to retrieve codevectors
codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
return codevectors, perplexity
@auto_docstring
| UniSpeechGumbelVectorQuantizer |
python | gevent__gevent | src/gevent/_config.py | {
"start": 17946,
"end": 18257
} | class ____(AresSettingMixin, Setting):
document = True
name = 'ares_timeout'
default = None
environment_key = 'GEVENTARES_TIMEOUT'
desc = """\
.. deprecated:: 1.3a2
Prefer the :attr:`resolver_timeout` setting. If both are set,
the results are not defined.
"""
| AresTimeout |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/dagster_type.py | {
"start": 23770,
"end": 25078
} | class ____(DagsterType):
def __init__(self, inner_type: DagsterType):
key = "List." + inner_type.key
self.inner_type = inner_type
super(ListType, self).__init__(
key=key,
name=None,
kind=DagsterTypeKind.LIST,
type_check_fn=self.type_check_method,
loader=_create_list_input_schema(inner_type),
typing_type=t.List[inner_type.typing_type],
)
@property
def display_name(self):
return "[" + self.inner_type.display_name + "]"
def type_check_method(self, context, value):
value_check = _fail_if_not_of_type(value, list, "list")
if not value_check.success:
return value_check
for item in value:
item_check = self.inner_type.type_check(context, item)
if not item_check.success:
return item_check
return TypeCheck(success=True)
@property
def inner_types(self):
return [self.inner_type] + self.inner_type.inner_types # pyright: ignore[reportOperatorIssue]
@property
def type_param_keys(self):
return [self.inner_type.key]
@property
def supports_fan_in(self):
return True
def get_inner_type_for_fan_in(self):
return self.inner_type
| ListType |
python | scipy__scipy | scipy/io/matlab/_mio5_params.py | {
"start": 6625,
"end": 7448
} | class ____(np.ndarray):
"""Subclass of ndarray to signal this is a matlab object.
This is a simple subclass of :class:`numpy.ndarray` meant to be used
by :func:`scipy.io.loadmat` and should not be instantiated directly.
"""
def __new__(cls, input_array, classname=None):
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attribute to the created instance
obj.classname = classname
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self,obj):
# reset the attribute from passed original object
self.classname = getattr(obj, 'classname', None)
# We do not need to return anything
| MatlabObject |
python | python-openxml__python-docx | src/docx/oxml/section.py | {
"start": 768,
"end": 1455
} | class ____(BaseOxmlElement):
"""`w:hdr` and `w:ftr`, the root element for header and footer part respectively."""
add_p: Callable[[], CT_P]
p_lst: List[CT_P]
tbl_lst: List[CT_Tbl]
_insert_tbl: Callable[[CT_Tbl], CT_Tbl]
p = ZeroOrMore("w:p", successors=())
tbl = ZeroOrMore("w:tbl", successors=())
@property
def inner_content_elements(self) -> List[CT_P | CT_Tbl]:
"""Generate all `w:p` and `w:tbl` elements in this header or footer.
Elements appear in document order. Elements shaded by nesting in a `w:ins` or
other "wrapper" element will not be included.
"""
return self.xpath("./w:p | ./w:tbl")
| CT_HdrFtr |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_auth_views.py | {
"start": 273,
"end": 903
} | class ____(View):
def get(self, request: HttpRequest) -> HttpResponse:
auth_identity = {"id": "bar@example.com", "email": "bar@example.com"}
return render_to_response(
"sentry/auth-confirm-identity.html",
context={
"existing_user": User(email="foo@example.com"),
"identity": auth_identity,
"login_form": None,
"identity_display_name": auth_identity["email"],
"identity_identifier": auth_identity["id"],
},
request=request,
)
@internal_region_silo_view
| DebugAuthConfirmIdentity |
python | tiangolo__fastapi | docs_src/path_operation_configuration/tutorial002.py | {
"start": 109,
"end": 580
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
tags: Set[str] = set()
@app.post("/items/", response_model=Item, tags=["items"])
async def create_item(item: Item):
return item
@app.get("/items/", tags=["items"])
async def read_items():
return [{"name": "Foo", "price": 42}]
@app.get("/users/", tags=["users"])
async def read_users():
return [{"username": "johndoe"}]
| Item |
python | joke2k__faker | faker/providers/internet/ja_JP/__init__.py | {
"start": 90,
"end": 523
} | class ____(InternetProvider):
user_name_formats = (
"{{last_romanized_name}}.{{first_romanized_name}}",
"{{first_romanized_name}}.{{last_romanized_name}}",
"{{first_romanized_name}}##",
"?{{last_romanized_name}}",
)
tlds = ("com", "com", "com", "net", "org", "jp", "jp", "jp")
@slugify
def domain_word(self) -> str:
return self.generator.format("last_romanized_name")
| Provider |
python | kamyu104__LeetCode-Solutions | Python/lexicographically-smallest-palindrome.py | {
"start": 38,
"end": 233
} | class ____(object):
def makeSmallestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
return "".join(min(s[i], s[~i]) for i in xrange(len(s)))
| Solution |
python | ray-project__ray | python/ray/serve/_private/common.py | {
"start": 767,
"end": 1149
} | class ____:
name: str
app_name: str = SERVE_DEFAULT_APP_NAME
def to_replica_actor_class_name(self):
return f"ServeReplica:{self.app_name}:{self.name}"
def __str__(self):
return f"Deployment(name='{self.name}', app='{self.app_name}')"
def __repr__(self):
return str(self)
@PublicAPI(stability="alpha")
@dataclass(frozen=True)
| DeploymentID |
python | numpy__numpy | numpy/distutils/tests/test_fcompiler_intel.py | {
"start": 517,
"end": 785
} | class ____:
def test_32bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
for vs, version in intel_32bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
| TestIntelFCompilerVersions |
python | RaRe-Technologies__gensim | gensim/test/test_fasttext.py | {
"start": 1543,
"end": 44878
} | class ____(unittest.TestCase):
def setUp(self):
self.test_model_file = datapath('lee_fasttext.bin')
self.test_model = gensim.models.fasttext.load_facebook_model(self.test_model_file)
self.test_new_model_file = datapath('lee_fasttext_new.bin')
def test_training(self):
model = FT_gensim(vector_size=12, min_count=1, hs=1, negative=0, seed=42, workers=1, bucket=BUCKET)
model.build_vocab(sentences)
self.model_sanity(model)
model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs)
sims = model.wv.most_similar('graph', topn=10)
self.assertEqual(model.wv.vectors.shape, (12, 12))
self.assertEqual(len(model.wv), 12)
self.assertEqual(model.wv.vectors_vocab.shape[1], 12)
self.assertEqual(model.wv.vectors_ngrams.shape[1], 12)
self.model_sanity(model)
# test querying for "most similar" by vector
graph_vector = model.wv.get_vector('graph', norm=True)
sims2 = model.wv.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# build vocab and train in one step; must be the same as above
model2 = FT_gensim(sentences, vector_size=12, min_count=1, hs=1, negative=0, seed=42, workers=1, bucket=BUCKET)
self.models_equal(model, model2)
# verify oov-word vector retrieval
invocab_vec = model.wv['minors'] # invocab word
self.assertEqual(len(invocab_vec), 12)
oov_vec = model.wv['minor'] # oov word
self.assertEqual(len(oov_vec), 12)
def test_fast_text_train_parameters(self):
model = FT_gensim(vector_size=12, min_count=1, hs=1, negative=0, seed=42, workers=1, bucket=BUCKET)
model.build_vocab(corpus_iterable=sentences)
self.assertRaises(TypeError, model.train, corpus_file=11111, total_examples=1, epochs=1)
self.assertRaises(TypeError, model.train, corpus_iterable=11111, total_examples=1, epochs=1)
self.assertRaises(
TypeError, model.train, corpus_iterable=sentences, corpus_file='test', total_examples=1, epochs=1)
self.assertRaises(TypeError, model.train, corpus_iterable=None, corpus_file=None, total_examples=1, epochs=1)
self.assertRaises(TypeError, model.train, corpus_file=sentences, total_examples=1, epochs=1)
def test_training_fromfile(self):
with temporary_file('gensim_fasttext.tst') as corpus_file:
utils.save_as_line_sentence(sentences, corpus_file)
model = FT_gensim(vector_size=12, min_count=1, hs=1, negative=0, seed=42, workers=1, bucket=BUCKET)
model.build_vocab(corpus_file=corpus_file)
self.model_sanity(model)
model.train(corpus_file=corpus_file, total_words=model.corpus_total_words, epochs=model.epochs)
sims = model.wv.most_similar('graph', topn=10)
self.assertEqual(model.wv.vectors.shape, (12, 12))
self.assertEqual(len(model.wv), 12)
self.assertEqual(model.wv.vectors_vocab.shape[1], 12)
self.assertEqual(model.wv.vectors_ngrams.shape[1], 12)
self.model_sanity(model)
# test querying for "most similar" by vector
graph_vector = model.wv.get_vector('graph', norm=True)
sims2 = model.wv.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
# verify oov-word vector retrieval
invocab_vec = model.wv['minors'] # invocab word
self.assertEqual(len(invocab_vec), 12)
oov_vec = model.wv['minor'] # oov word
self.assertEqual(len(oov_vec), 12)
def models_equal(self, model, model2):
self.assertEqual(len(model.wv), len(model2.wv))
self.assertEqual(model.wv.bucket, model2.wv.bucket)
self.assertTrue(np.allclose(model.wv.vectors_vocab, model2.wv.vectors_vocab))
self.assertTrue(np.allclose(model.wv.vectors_ngrams, model2.wv.vectors_ngrams))
self.assertTrue(np.allclose(model.wv.vectors, model2.wv.vectors))
if model.hs:
self.assertTrue(np.allclose(model.syn1, model2.syn1))
if model.negative:
self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
most_common_word = max(model.wv.key_to_index, key=lambda word: model.wv.get_vecattr(word, 'count'))[0]
self.assertTrue(np.allclose(model.wv[most_common_word], model2.wv[most_common_word]))
def test_persistence(self):
tmpf = get_tmpfile('gensim_fasttext.tst')
model = FT_gensim(sentences, min_count=1, bucket=BUCKET)
model.save(tmpf)
self.models_equal(model, FT_gensim.load(tmpf))
# test persistence of the KeyedVectors of a model
wv = model.wv
wv.save(tmpf)
loaded_wv = FastTextKeyedVectors.load(tmpf)
self.assertTrue(np.allclose(wv.vectors_ngrams, loaded_wv.vectors_ngrams))
self.assertEqual(len(wv), len(loaded_wv))
def test_persistence_fromfile(self):
with temporary_file('gensim_fasttext1.tst') as corpus_file:
utils.save_as_line_sentence(sentences, corpus_file)
tmpf = get_tmpfile('gensim_fasttext.tst')
model = FT_gensim(corpus_file=corpus_file, min_count=1, bucket=BUCKET)
model.save(tmpf)
self.models_equal(model, FT_gensim.load(tmpf))
# test persistence of the KeyedVectors of a model
wv = model.wv
wv.save(tmpf)
loaded_wv = FastTextKeyedVectors.load(tmpf)
self.assertTrue(np.allclose(wv.vectors_ngrams, loaded_wv.vectors_ngrams))
self.assertEqual(len(wv), len(loaded_wv))
def model_sanity(self, model):
self.model_structural_sanity(model)
# TODO: add semantic tests, where appropriate
def model_structural_sanity(self, model):
"""Check a model for basic self-consistency, necessary properties & property
correspondences, but no semantic tests."""
self.assertEqual(model.wv.vectors.shape, (len(model.wv), model.vector_size))
self.assertEqual(model.wv.vectors_vocab.shape, (len(model.wv), model.vector_size))
self.assertEqual(model.wv.vectors_ngrams.shape, (model.wv.bucket, model.vector_size))
self.assertLessEqual(len(model.wv.vectors_ngrams_lockf), len(model.wv.vectors_ngrams))
self.assertLessEqual(len(model.wv.vectors_vocab_lockf), len(model.wv.index_to_key))
self.assertTrue(np.isfinite(model.wv.vectors_ngrams).all(), "NaN in ngrams")
self.assertTrue(np.isfinite(model.wv.vectors_vocab).all(), "NaN in vectors_vocab")
if model.negative:
self.assertTrue(np.isfinite(model.syn1neg).all(), "NaN in syn1neg")
if model.hs:
self.assertTrue(np.isfinite(model.syn1).all(), "NaN in syn1neg")
def test_load_fasttext_format(self):
try:
model = gensim.models.fasttext.load_facebook_model(self.test_model_file)
except Exception as exc:
self.fail('Unable to load FastText model from file %s: %s' % (self.test_model_file, exc))
vocab_size, model_size = 1762, 10
self.assertEqual(model.wv.vectors.shape, (vocab_size, model_size))
self.assertEqual(len(model.wv), vocab_size, model_size)
self.assertEqual(model.wv.vectors_ngrams.shape, (model.wv.bucket, model_size))
expected_vec = [
-0.57144,
-0.0085561,
0.15748,
-0.67855,
-0.25459,
-0.58077,
-0.09913,
1.1447,
0.23418,
0.060007
] # obtained using ./fasttext print-word-vectors lee_fasttext_new.bin
actual_vec = model.wv["hundred"]
self.assertTrue(np.allclose(actual_vec, expected_vec, atol=1e-4))
# vector for oov words are slightly different from original FastText due to discarding unused ngrams
# obtained using a modified version of ./fasttext print-word-vectors lee_fasttext_new.bin
expected_vec_oov = [
-0.21929,
-0.53778,
-0.22463,
-0.41735,
0.71737,
-1.59758,
-0.24833,
0.62028,
0.53203,
0.77568
]
actual_vec_oov = model.wv["rejection"]
self.assertTrue(np.allclose(actual_vec_oov, expected_vec_oov, atol=1e-4))
self.assertEqual(model.min_count, 5)
self.assertEqual(model.window, 5)
self.assertEqual(model.epochs, 5)
self.assertEqual(model.negative, 5)
self.assertEqual(model.sample, 0.0001)
self.assertEqual(model.wv.bucket, 1000)
self.assertEqual(model.wv.max_n, 6)
self.assertEqual(model.wv.min_n, 3)
self.assertEqual(model.wv.vectors.shape, (len(model.wv), model.vector_size))
self.assertEqual(model.wv.vectors_ngrams.shape, (model.wv.bucket, model.vector_size))
def test_load_fasttext_new_format(self):
try:
new_model = gensim.models.fasttext.load_facebook_model(self.test_new_model_file)
except Exception as exc:
self.fail('Unable to load FastText model from file %s: %s' % (self.test_new_model_file, exc))
vocab_size, model_size = 1763, 10
self.assertEqual(new_model.wv.vectors.shape, (vocab_size, model_size))
self.assertEqual(len(new_model.wv), vocab_size, model_size)
self.assertEqual(new_model.wv.vectors_ngrams.shape, (new_model.wv.bucket, model_size))
expected_vec = [
-0.025627,
-0.11448,
0.18116,
-0.96779,
0.2532,
-0.93224,
0.3929,
0.12679,
-0.19685,
-0.13179
] # obtained using ./fasttext print-word-vectors lee_fasttext_new.bin
actual_vec = new_model.wv["hundred"]
self.assertTrue(np.allclose(actual_vec, expected_vec, atol=1e-4))
# vector for oov words are slightly different from original FastText due to discarding unused ngrams
# obtained using a modified version of ./fasttext print-word-vectors lee_fasttext_new.bin
expected_vec_oov = [
-0.49111,
-0.13122,
-0.02109,
-0.88769,
-0.20105,
-0.91732,
0.47243,
0.19708,
-0.17856,
0.19815
]
actual_vec_oov = new_model.wv["rejection"]
self.assertTrue(np.allclose(actual_vec_oov, expected_vec_oov, atol=1e-4))
self.assertEqual(new_model.min_count, 5)
self.assertEqual(new_model.window, 5)
self.assertEqual(new_model.epochs, 5)
self.assertEqual(new_model.negative, 5)
self.assertEqual(new_model.sample, 0.0001)
self.assertEqual(new_model.wv.bucket, 1000)
self.assertEqual(new_model.wv.max_n, 6)
self.assertEqual(new_model.wv.min_n, 3)
self.assertEqual(new_model.wv.vectors.shape, (len(new_model.wv), new_model.vector_size))
self.assertEqual(new_model.wv.vectors_ngrams.shape, (new_model.wv.bucket, new_model.vector_size))
def test_load_model_supervised(self):
with self.assertRaises(NotImplementedError):
gensim.models.fasttext.load_facebook_model(datapath('pang_lee_polarity_fasttext.bin'))
def test_load_model_with_non_ascii_vocab(self):
model = gensim.models.fasttext.load_facebook_model(datapath('non_ascii_fasttext.bin'))
self.assertTrue(u'který' in model.wv)
try:
model.wv[u'který']
except UnicodeDecodeError:
self.fail('Unable to access vector for utf8 encoded non-ascii word')
def test_load_model_non_utf8_encoding(self):
model = gensim.models.fasttext.load_facebook_model(datapath('cp852_fasttext.bin'), encoding='cp852')
self.assertTrue(u'který' in model.wv)
try:
model.wv[u'který']
except KeyError:
self.fail('Unable to access vector for cp-852 word')
def test_oov_similarity(self):
word = 'someoovword'
most_similar = self.test_model.wv.most_similar(word)
top_neighbor, top_similarity = most_similar[0]
v1 = self.test_model.wv[word]
v2 = self.test_model.wv[top_neighbor]
top_similarity_direct = self.test_model.wv.cosine_similarities(v1, v2.reshape(1, -1))[0]
self.assertAlmostEqual(top_similarity, top_similarity_direct, places=6)
def test_n_similarity(self):
# In vocab, sanity check
self.assertTrue(np.allclose(self.test_model.wv.n_similarity(['the', 'and'], ['and', 'the']), 1.0))
self.assertEqual(
self.test_model.wv.n_similarity(['the'], ['and']), self.test_model.wv.n_similarity(['and'], ['the']))
# Out of vocab check
self.assertTrue(np.allclose(self.test_model.wv.n_similarity(['night', 'nights'], ['nights', 'night']), 1.0))
self.assertEqual(
self.test_model.wv.n_similarity(['night'], ['nights']),
self.test_model.wv.n_similarity(['nights'], ['night'])
)
def test_similarity(self):
# In vocab, sanity check
self.assertTrue(np.allclose(self.test_model.wv.similarity('the', 'the'), 1.0))
self.assertEqual(self.test_model.wv.similarity('the', 'and'), self.test_model.wv.similarity('and', 'the'))
# Out of vocab check
self.assertTrue(np.allclose(self.test_model.wv.similarity('nights', 'nights'), 1.0))
self.assertEqual(
self.test_model.wv.similarity('night', 'nights'), self.test_model.wv.similarity('nights', 'night'))
def test_most_similar(self):
# In vocab, sanity check
self.assertEqual(len(self.test_model.wv.most_similar(positive=['the', 'and'], topn=5)), 5)
self.assertEqual(self.test_model.wv.most_similar('the'), self.test_model.wv.most_similar(positive=['the']))
# Out of vocab check
self.assertEqual(len(self.test_model.wv.most_similar(['night', 'nights'], topn=5)), 5)
self.assertEqual(
self.test_model.wv.most_similar('nights'), self.test_model.wv.most_similar(positive=['nights']))
def test_most_similar_cosmul(self):
# In vocab, sanity check
self.assertEqual(len(self.test_model.wv.most_similar_cosmul(positive=['the', 'and'], topn=5)), 5)
self.assertEqual(
self.test_model.wv.most_similar_cosmul('the'),
self.test_model.wv.most_similar_cosmul(positive=['the']))
# Out of vocab check
self.assertEqual(len(self.test_model.wv.most_similar_cosmul(['night', 'nights'], topn=5)), 5)
self.assertEqual(
self.test_model.wv.most_similar_cosmul('nights'),
self.test_model.wv.most_similar_cosmul(positive=['nights']))
self.assertEqual(
self.test_model.wv.most_similar_cosmul('the', 'and'),
self.test_model.wv.most_similar_cosmul(positive=['the'], negative=['and']))
def test_lookup(self):
# In vocab, sanity check
self.assertTrue('night' in self.test_model.wv.key_to_index)
self.assertTrue(np.allclose(self.test_model.wv['night'], self.test_model.wv[['night']]))
# Out of vocab check
self.assertFalse('nights' in self.test_model.wv.key_to_index)
self.assertTrue(np.allclose(self.test_model.wv['nights'], self.test_model.wv[['nights']]))
def test_contains(self):
# In vocab, sanity check
self.assertTrue('night' in self.test_model.wv.key_to_index)
self.assertTrue('night' in self.test_model.wv)
# Out of vocab check
self.assertFalse(self.test_model.wv.has_index_for('nights'))
self.assertFalse('nights' in self.test_model.wv.key_to_index)
self.assertTrue('nights' in self.test_model.wv)
@unittest.skipIf(POT_EXT is False, "POT not installed")
def test_wm_distance(self):
doc = ['night', 'payment']
oov_doc = ['nights', 'forests', 'payments']
dist = self.test_model.wv.wmdistance(doc, oov_doc)
self.assertNotEqual(float('inf'), dist)
def test_cbow_neg_training(self):
model_gensim = FT_gensim(
vector_size=48, sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=5,
min_count=5, epochs=10, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
sorted_vocab=1, workers=1, min_alpha=0.0, bucket=BUCKET)
lee_data = LineSentence(datapath('lee_background.cor'))
model_gensim.build_vocab(lee_data)
orig0 = np.copy(model_gensim.wv.vectors[0])
model_gensim.train(lee_data, total_examples=model_gensim.corpus_count, epochs=model_gensim.epochs)
self.assertFalse((orig0 == model_gensim.wv.vectors[0]).all()) # vector should vary after training
sims_gensim = model_gensim.wv.most_similar('night', topn=10)
sims_gensim_words = [word for (word, distance) in sims_gensim] # get similar words
expected_sims_words = [
u'night.',
u'night,',
u'eight',
u'fight',
u'month',
u'hearings',
u'Washington',
u'remains',
u'overnight',
u'running']
overlaps = set(sims_gensim_words).intersection(expected_sims_words)
overlap_count = len(overlaps)
self.assertGreaterEqual(
overlap_count, 2,
"only %i overlap in expected %s & actual %s" % (overlap_count, expected_sims_words, sims_gensim_words))
def test_cbow_neg_training_fromfile(self):
with temporary_file('gensim_fasttext.tst') as corpus_file:
model_gensim = FT_gensim(
vector_size=48, sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=5,
min_count=5, epochs=10, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
sorted_vocab=1, workers=1, min_alpha=0.0, bucket=BUCKET)
lee_data = LineSentence(datapath('lee_background.cor'))
utils.save_as_line_sentence(lee_data, corpus_file)
model_gensim.build_vocab(corpus_file=corpus_file)
orig0 = np.copy(model_gensim.wv.vectors[0])
model_gensim.train(corpus_file=corpus_file,
total_words=model_gensim.corpus_total_words,
epochs=model_gensim.epochs)
self.assertFalse((orig0 == model_gensim.wv.vectors[0]).all()) # vector should vary after training
sims_gensim = model_gensim.wv.most_similar('night', topn=10)
sims_gensim_words = [word for (word, distance) in sims_gensim] # get similar words
expected_sims_words = [
u'night.',
u'night,',
u'eight',
u'fight',
u'month',
u'hearings',
u'Washington',
u'remains',
u'overnight',
u'running']
overlaps = set(sims_gensim_words).intersection(expected_sims_words)
overlap_count = len(overlaps)
self.assertGreaterEqual(
overlap_count, 2,
"only %i overlap in expected %s & actual %s" % (overlap_count, expected_sims_words, sims_gensim_words))
def test_sg_neg_training(self):
model_gensim = FT_gensim(
vector_size=48, sg=1, cbow_mean=1, alpha=0.025, window=5, hs=0, negative=5,
min_count=5, epochs=10, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
sorted_vocab=1, workers=1, min_alpha=0.0, bucket=BUCKET * 4)
lee_data = LineSentence(datapath('lee_background.cor'))
model_gensim.build_vocab(lee_data)
orig0 = np.copy(model_gensim.wv.vectors[0])
model_gensim.train(lee_data, total_examples=model_gensim.corpus_count, epochs=model_gensim.epochs)
self.assertFalse((orig0 == model_gensim.wv.vectors[0]).all()) # vector should vary after training
sims_gensim = model_gensim.wv.most_similar('night', topn=10)
sims_gensim_words = [word for (word, distance) in sims_gensim] # get similar words
expected_sims_words = [
u'night.',
u'night,',
u'eight',
u'overnight',
u'overnight.',
u'month',
u'land',
u'firm',
u'singles',
u'death']
overlaps = set(sims_gensim_words).intersection(expected_sims_words)
overlap_count = len(overlaps)
self.assertGreaterEqual(
overlap_count, 2,
"only %i overlap in expected %s & actual %s" % (overlap_count, expected_sims_words, sims_gensim_words))
def test_sg_neg_training_fromfile(self):
with temporary_file('gensim_fasttext.tst') as corpus_file:
model_gensim = FT_gensim(
vector_size=48, sg=1, cbow_mean=1, alpha=0.025, window=5, hs=0, negative=5,
min_count=5, epochs=10, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
sorted_vocab=1, workers=1, min_alpha=0.0, bucket=BUCKET * 4)
lee_data = LineSentence(datapath('lee_background.cor'))
utils.save_as_line_sentence(lee_data, corpus_file)
model_gensim.build_vocab(corpus_file=corpus_file)
orig0 = np.copy(model_gensim.wv.vectors[0])
model_gensim.train(corpus_file=corpus_file,
total_words=model_gensim.corpus_total_words,
epochs=model_gensim.epochs)
self.assertFalse((orig0 == model_gensim.wv.vectors[0]).all()) # vector should vary after training
sims_gensim = model_gensim.wv.most_similar('night', topn=10)
sims_gensim_words = [word for (word, distance) in sims_gensim] # get similar words
expected_sims_words = [
u'night.',
u'night,',
u'eight',
u'overnight',
u'overnight.',
u'month',
u'land',
u'firm',
u'singles',
u'death']
overlaps = set(sims_gensim_words).intersection(expected_sims_words)
overlap_count = len(overlaps)
self.assertGreaterEqual(
overlap_count, 2,
"only %i overlap in expected %s & actual %s" % (overlap_count, expected_sims_words, sims_gensim_words))
def test_online_learning(self):
model_hs = FT_gensim(sentences, vector_size=12, min_count=1, seed=42, hs=1, negative=0, bucket=BUCKET)
self.assertEqual(len(model_hs.wv), 12)
self.assertEqual(model_hs.wv.get_vecattr('graph', 'count'), 3)
model_hs.build_vocab(new_sentences, update=True) # update vocab
self.assertEqual(len(model_hs.wv), 14)
self.assertEqual(model_hs.wv.get_vecattr('graph', 'count'), 4)
self.assertEqual(model_hs.wv.get_vecattr('artificial', 'count'), 4)
def test_online_learning_fromfile(self):
with temporary_file('gensim_fasttext1.tst') as corpus_file, \
temporary_file('gensim_fasttext2.tst') as new_corpus_file:
utils.save_as_line_sentence(sentences, corpus_file)
utils.save_as_line_sentence(new_sentences, new_corpus_file)
model_hs = FT_gensim(
corpus_file=corpus_file, vector_size=12, min_count=1, seed=42, hs=1, negative=0, bucket=BUCKET)
self.assertTrue(len(model_hs.wv), 12)
self.assertTrue(model_hs.wv.get_vecattr('graph', 'count'), 3)
model_hs.build_vocab(corpus_file=new_corpus_file, update=True) # update vocab
self.assertEqual(len(model_hs.wv), 14)
self.assertTrue(model_hs.wv.get_vecattr('graph', 'count'), 4)
self.assertTrue(model_hs.wv.get_vecattr('artificial', 'count'), 4)
def test_online_learning_after_save(self):
tmpf = get_tmpfile('gensim_fasttext.tst')
model_neg = FT_gensim(sentences, vector_size=12, min_count=0, seed=42, hs=0, negative=5, bucket=BUCKET)
model_neg.save(tmpf)
model_neg = FT_gensim.load(tmpf)
self.assertTrue(len(model_neg.wv), 12)
model_neg.build_vocab(new_sentences, update=True) # update vocab
model_neg.train(new_sentences, total_examples=model_neg.corpus_count, epochs=model_neg.epochs)
self.assertEqual(len(model_neg.wv), 14)
def test_online_learning_through_ft_format_saves(self):
tmpf = get_tmpfile('gensim_ft_format.tst')
model = FT_gensim(sentences, vector_size=12, min_count=0, seed=42, hs=0, negative=5, bucket=BUCKET)
gensim.models.fasttext.save_facebook_model(model, tmpf)
model_reload = gensim.models.fasttext.load_facebook_model(tmpf)
self.assertTrue(len(model_reload.wv), 12)
self.assertEqual(len(model_reload.wv), len(model_reload.wv.vectors))
self.assertEqual(len(model_reload.wv), len(model_reload.wv.vectors_vocab))
model_reload.build_vocab(new_sentences, update=True) # update vocab
model_reload.train(new_sentences, total_examples=model_reload.corpus_count, epochs=model_reload.epochs)
self.assertEqual(len(model_reload.wv), 14)
self.assertEqual(len(model_reload.wv), len(model_reload.wv.vectors))
self.assertEqual(len(model_reload.wv), len(model_reload.wv.vectors_vocab))
tmpf2 = get_tmpfile('gensim_ft_format2.tst')
gensim.models.fasttext.save_facebook_model(model_reload, tmpf2)
def test_online_learning_after_save_fromfile(self):
with temporary_file('gensim_fasttext1.tst') as corpus_file, \
temporary_file('gensim_fasttext2.tst') as new_corpus_file:
utils.save_as_line_sentence(sentences, corpus_file)
utils.save_as_line_sentence(new_sentences, new_corpus_file)
tmpf = get_tmpfile('gensim_fasttext.tst')
model_neg = FT_gensim(
corpus_file=corpus_file, vector_size=12, min_count=0, seed=42, hs=0, negative=5, bucket=BUCKET)
model_neg.save(tmpf)
model_neg = FT_gensim.load(tmpf)
self.assertTrue(len(model_neg.wv), 12)
model_neg.build_vocab(corpus_file=new_corpus_file, update=True) # update vocab
model_neg.train(corpus_file=new_corpus_file, total_words=model_neg.corpus_total_words,
epochs=model_neg.epochs)
self.assertEqual(len(model_neg.wv), 14)
def online_sanity(self, model):
terro, others = [], []
for line in list_corpus:
if 'terrorism' in line:
terro.append(line)
else:
others.append(line)
self.assertTrue(all('terrorism' not in line for line in others))
model.build_vocab(others)
start_vecs = model.wv.vectors_vocab.copy()
model.train(others, total_examples=model.corpus_count, epochs=model.epochs)
# checks that `vectors_vocab` has been changed by training
self.assertFalse(np.all(np.equal(start_vecs, model.wv.vectors_vocab)))
# checks that `vectors` is different from `vectors_vocab`
self.assertFalse(np.all(np.equal(model.wv.vectors, model.wv.vectors_vocab)))
self.assertFalse('terrorism' in model.wv.key_to_index)
model.build_vocab(terro, update=True) # update vocab
self.assertTrue(model.wv.vectors_ngrams.dtype == 'float32')
self.assertTrue('terrorism' in model.wv.key_to_index)
orig0_all = np.copy(model.wv.vectors_ngrams)
model.train(terro, total_examples=len(terro), epochs=model.epochs)
self.assertFalse(np.allclose(model.wv.vectors_ngrams, orig0_all))
sim = model.wv.n_similarity(['war'], ['terrorism'])
assert abs(sim) > 0.6
def test_sg_hs_online(self):
model = FT_gensim(sg=1, window=2, hs=1, negative=0, min_count=3, epochs=1, seed=42, workers=1, bucket=BUCKET)
self.online_sanity(model)
def test_sg_neg_online(self):
model = FT_gensim(sg=1, window=2, hs=0, negative=5, min_count=3, epochs=1, seed=42, workers=1, bucket=BUCKET)
self.online_sanity(model)
def test_cbow_hs_online(self):
model = FT_gensim(
sg=0, cbow_mean=1, alpha=0.05, window=2, hs=1, negative=0, min_count=3, epochs=1, seed=42, workers=1,
bucket=BUCKET,
)
self.online_sanity(model)
def test_cbow_neg_online(self):
model = FT_gensim(
sg=0, cbow_mean=1, alpha=0.05, window=2, hs=0, negative=5,
min_count=5, epochs=1, seed=42, workers=1, sample=0, bucket=BUCKET
)
self.online_sanity(model)
def test_get_vocab_word_vecs(self):
model = FT_gensim(vector_size=12, min_count=1, seed=42, bucket=BUCKET)
model.build_vocab(sentences)
original_syn0_vocab = np.copy(model.wv.vectors_vocab)
model.wv.adjust_vectors()
self.assertTrue(np.all(np.equal(model.wv.vectors_vocab, original_syn0_vocab)))
def test_persistence_word2vec_format(self):
"""Test storing/loading the model in word2vec format."""
tmpf = get_tmpfile('gensim_fasttext_w2v_format.tst')
model = FT_gensim(sentences, min_count=1, vector_size=12, bucket=BUCKET)
model.wv.save_word2vec_format(tmpf, binary=True)
loaded_model_kv = KeyedVectors.load_word2vec_format(tmpf, binary=True)
self.assertEqual(len(model.wv), len(loaded_model_kv))
self.assertTrue(np.allclose(model.wv['human'], loaded_model_kv['human']))
def test_bucket_ngrams(self):
model = FT_gensim(vector_size=12, min_count=1, bucket=20)
model.build_vocab(sentences)
self.assertEqual(model.wv.vectors_ngrams.shape, (20, 12))
model.build_vocab(new_sentences, update=True)
self.assertEqual(model.wv.vectors_ngrams.shape, (20, 12))
def test_estimate_memory(self):
model = FT_gensim(sg=1, hs=1, vector_size=12, negative=5, min_count=3, bucket=BUCKET)
model.build_vocab(sentences)
report = model.estimate_memory()
self.assertEqual(report['vocab'], 2800)
self.assertEqual(report['syn0_vocab'], 192)
self.assertEqual(report['syn1'], 192)
self.assertEqual(report['syn1neg'], 192)
# TODO: these fixed numbers for particular implementation generations encumber changes without real QA
# perhaps instead verify reports' total is within some close factor of a deep-audit of actual memory used?
self.assertEqual(report['syn0_ngrams'], model.vector_size * np.dtype(np.float32).itemsize * BUCKET)
self.assertEqual(report['buckets_word'], 688)
self.assertEqual(report['total'], 484064)
def obsolete_testLoadOldModel(self):
"""Test loading fasttext models from previous version"""
model_file = 'fasttext_old'
model = FT_gensim.load(datapath(model_file))
self.assertTrue(model.wv.vectors.shape == (12, 100))
self.assertTrue(len(model.wv) == 12)
self.assertTrue(len(model.wv.index_to_key) == 12)
self.assertIsNone(model.corpus_total_words)
self.assertTrue(model.syn1neg.shape == (len(model.wv), model.vector_size))
self.assertTrue(model.wv.vectors_lockf.shape == (12, ))
self.assertTrue(model.cum_table.shape == (12, ))
self.assertEqual(model.wv.vectors_vocab.shape, (12, 100))
self.assertEqual(model.wv.vectors_ngrams.shape, (2000000, 100))
# Model stored in multiple files
model_file = 'fasttext_old_sep'
model = FT_gensim.load(datapath(model_file))
self.assertTrue(model.wv.vectors.shape == (12, 100))
self.assertTrue(len(model.wv) == 12)
self.assertTrue(len(model.wv.index_to_key) == 12)
self.assertIsNone(model.corpus_total_words)
self.assertTrue(model.syn1neg.shape == (len(model.wv), model.vector_size))
self.assertTrue(model.wv.vectors_lockf.shape == (12, ))
self.assertTrue(model.cum_table.shape == (12, ))
self.assertEqual(model.wv.vectors_vocab.shape, (12, 100))
self.assertEqual(model.wv.vectors_ngrams.shape, (2000000, 100))
def test_vectors_for_all_with_inference(self):
"""Test vectors_for_all can infer new vectors."""
words = [
'responding',
'approached',
'chairman',
'an out-of-vocabulary word',
'another out-of-vocabulary word',
]
vectors_for_all = self.test_model.wv.vectors_for_all(words)
expected = 5
predicted = len(vectors_for_all)
assert expected == predicted
expected = self.test_model.wv['responding']
predicted = vectors_for_all['responding']
assert np.allclose(expected, predicted)
smaller_distance = np.linalg.norm(
vectors_for_all['an out-of-vocabulary word']
- vectors_for_all['another out-of-vocabulary word']
)
greater_distance = np.linalg.norm(
vectors_for_all['an out-of-vocabulary word']
- vectors_for_all['responding']
)
assert greater_distance > smaller_distance
def test_vectors_for_all_without_inference(self):
"""Test vectors_for_all does not infer new vectors when prohibited."""
words = [
'responding',
'approached',
'chairman',
'an out-of-vocabulary word',
'another out-of-vocabulary word',
]
vectors_for_all = self.test_model.wv.vectors_for_all(words, allow_inference=False)
expected = 3
predicted = len(vectors_for_all)
assert expected == predicted
expected = self.test_model.wv['responding']
predicted = vectors_for_all['responding']
assert np.allclose(expected, predicted)
def test_negative_ns_exp(self):
"""The model should accept a negative ns_exponent as a valid value."""
model = FT_gensim(sentences, ns_exponent=-1, min_count=1, workers=1)
tmpf = get_tmpfile('fasttext_negative_exp.tst')
model.save(tmpf)
loaded_model = FT_gensim.load(tmpf)
loaded_model.train(sentences, total_examples=model.corpus_count, epochs=1)
assert loaded_model.ns_exponent == -1, loaded_model.ns_exponent
@pytest.mark.parametrize('shrink_windows', [True, False])
def test_cbow_hs_training(shrink_windows):
    """End-to-end CBOW + hierarchical-softmax training on the Lee corpus."""
    # hs=1/negative=0 selects hierarchical softmax; sg=0 selects CBOW.
    model_gensim = FT_gensim(
        vector_size=48, sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0,
        min_count=5, epochs=10, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
        sorted_vocab=1, workers=1, min_alpha=0.0, bucket=BUCKET, shrink_windows=shrink_windows)

    lee_data = LineSentence(datapath('lee_background.cor'))
    model_gensim.build_vocab(lee_data)
    # Snapshot one vector before training so we can prove training moved it.
    orig0 = np.copy(model_gensim.wv.vectors[0])
    model_gensim.train(lee_data, total_examples=model_gensim.corpus_count, epochs=model_gensim.epochs)
    assert not (orig0 == model_gensim.wv.vectors[0]).all()  # vector should vary after training

    sims_gensim = model_gensim.wv.most_similar('night', topn=10)
    sims_gensim_words = [word for (word, distance) in sims_gensim]  # get similar words
    expected_sims_words = [
        u'night,',
        u'night.',
        u'rights',
        u'kilometres',
        u'in',
        u'eight',
        u'according',
        u'flights',
        u'during',
        u'comes']
    # Neighbour lists are noisy across platforms/threads, so only require a
    # small overlap with the reference list rather than exact equality.
    overlaps = set(sims_gensim_words).intersection(expected_sims_words)
    overlap_count = len(overlaps)
    message = f"only {overlap_count} overlap in expected {expected_sims_words} & actual {sims_gensim_words}"
    assert overlap_count >= 2, message
@pytest.mark.parametrize('shrink_windows', [True, False])
def test_cbow_hs_training_fromfile(shrink_windows):
    """Same as test_cbow_hs_training, but feeds the corpus via corpus_file."""
    with temporary_file('gensim_fasttext.tst') as corpus_file:
        # hs=1/negative=0 selects hierarchical softmax; sg=0 selects CBOW.
        model_gensim = FT_gensim(
            vector_size=48, sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0,
            min_count=5, epochs=10, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
            sorted_vocab=1, workers=1, min_alpha=0.0, bucket=BUCKET * 4, shrink_windows=shrink_windows)

        # Materialize the corpus on disk for the corpus_file code path.
        lee_data = LineSentence(datapath('lee_background.cor'))
        utils.save_as_line_sentence(lee_data, corpus_file)

        model_gensim.build_vocab(corpus_file=corpus_file)
        # Snapshot one vector before training so we can prove training moved it.
        orig0 = np.copy(model_gensim.wv.vectors[0])
        model_gensim.train(corpus_file=corpus_file,
                           total_words=model_gensim.corpus_total_words,
                           epochs=model_gensim.epochs)
        assert not (orig0 == model_gensim.wv.vectors[0]).all()  # vector should vary after training

        sims_gensim = model_gensim.wv.most_similar('night', topn=10)
        sims_gensim_words = [word for (word, distance) in sims_gensim]  # get similar words
        expected_sims_words = [
            u'night,',
            u'night.',
            u'rights',
            u'kilometres',
            u'in',
            u'eight',
            u'according',
            u'flights',
            u'during',
            u'comes']
        # Neighbour lists are noisy across platforms/threads, so only require
        # a small overlap with the reference list rather than exact equality.
        overlaps = set(sims_gensim_words).intersection(expected_sims_words)
        overlap_count = len(overlaps)
        message = f"only {overlap_count} overlap in expected {expected_sims_words} & actual {sims_gensim_words}"
        assert overlap_count >= 2, message
@pytest.mark.parametrize('shrink_windows', [True, False])
def test_sg_hs_training(shrink_windows):
    """End-to-end skip-gram + hierarchical-softmax training on the Lee corpus."""
    # hs=1/negative=0 selects hierarchical softmax; sg=1 selects skip-gram.
    model_gensim = FT_gensim(
        vector_size=48, sg=1, cbow_mean=1, alpha=0.025, window=5, hs=1, negative=0,
        min_count=5, epochs=10, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
        sorted_vocab=1, workers=1, min_alpha=0.0, bucket=BUCKET, shrink_windows=shrink_windows)

    lee_data = LineSentence(datapath('lee_background.cor'))
    model_gensim.build_vocab(lee_data)
    # Snapshot one vector before training so we can prove training moved it.
    orig0 = np.copy(model_gensim.wv.vectors[0])
    model_gensim.train(lee_data, total_examples=model_gensim.corpus_count, epochs=model_gensim.epochs)
    assert not (orig0 == model_gensim.wv.vectors[0]).all()  # vector should vary after training

    sims_gensim = model_gensim.wv.most_similar('night', topn=10)
    sims_gensim_words = [word for (word, distance) in sims_gensim]  # get similar words
    expected_sims_words = [
        u'night,',
        u'night.',
        u'eight',
        u'nine',
        u'overnight',
        u'crew',
        u'overnight.',
        u'manslaughter',
        u'north',
        u'flight']
    # Neighbour lists are noisy across platforms/threads, so only require a
    # small overlap with the reference list rather than exact equality.
    overlaps = set(sims_gensim_words).intersection(expected_sims_words)
    overlap_count = len(overlaps)
    message = f"only {overlap_count} overlap in expected {expected_sims_words} & actual {sims_gensim_words}"
    assert overlap_count >= 2, message
@pytest.mark.parametrize('shrink_windows', [True, False])
def test_sg_hs_training_fromfile(shrink_windows):
    """Same as test_sg_hs_training, but feeds the corpus via corpus_file."""
    with temporary_file('gensim_fasttext.tst') as corpus_file:
        # hs=1/negative=0 selects hierarchical softmax; sg=1 selects skip-gram.
        model_gensim = FT_gensim(
            vector_size=48, sg=1, cbow_mean=1, alpha=0.025, window=5, hs=1, negative=0,
            min_count=5, epochs=10, batch_words=1000, word_ngrams=1, sample=1e-3, min_n=3, max_n=6,
            sorted_vocab=1, workers=1, min_alpha=0.0, bucket=BUCKET, shrink_windows=shrink_windows)

        # Materialize the corpus on disk for the corpus_file code path.
        lee_data = LineSentence(datapath('lee_background.cor'))
        utils.save_as_line_sentence(lee_data, corpus_file)

        model_gensim.build_vocab(corpus_file=corpus_file)
        # Snapshot one vector before training so we can prove training moved it.
        orig0 = np.copy(model_gensim.wv.vectors[0])
        model_gensim.train(corpus_file=corpus_file,
                           total_words=model_gensim.corpus_total_words,
                           epochs=model_gensim.epochs)
        assert not (orig0 == model_gensim.wv.vectors[0]).all()  # vector should vary after training

        sims_gensim = model_gensim.wv.most_similar('night', topn=10)
        sims_gensim_words = [word for (word, distance) in sims_gensim]  # get similar words
        expected_sims_words = [
            u'night,',
            u'night.',
            u'eight',
            u'nine',
            u'overnight',
            u'crew',
            u'overnight.',
            u'manslaughter',
            u'north',
            u'flight']
        # Neighbour lists are noisy across platforms/threads, so only require
        # a small overlap with the reference list rather than exact equality.
        overlaps = set(sims_gensim_words).intersection(expected_sims_words)
        overlap_count = len(overlaps)
        message = f"only {overlap_count} overlap in expected {expected_sims_words} & actual {sims_gensim_words}"
        assert overlap_count >= 2, message
# Single-sentence toy corpus shared by train_gensim() / load_native() below.
with open(datapath('toy-data.txt')) as fin:
    TOY_SENTENCES = [fin.read().strip().split(' ')]
def train_gensim(bucket=100, min_count=5):
    """Train a small FastText model on the toy corpus.

    The hyperparameters deliberately mirror those used by load_native(),
    so the two models are directly comparable.
    """
    model = FT_gensim(
        bucket=bucket, vector_size=5, alpha=0.05, workers=1,
        sample=0.0001, min_count=min_count,
    )
    model.build_vocab(TOY_SENTENCES)
    model.train(TOY_SENTENCES, total_examples=len(TOY_SENTENCES), epochs=model.epochs)
    return model
def load_native():
    """Load the reference model produced by Facebook's fasttext binary.

    The binary file was trained using:

        ./fasttext cbow -input toy-data.txt -output toy-model -bucket 100 -dim 5
    """
    path = datapath('toy-model.bin')
    model = gensim.models.fasttext.load_facebook_model(path)
    return model
def load_vec(fin):
    """Yield (word, float32 vector) pairs from an open .vec file handle.

    The first line (the "rows dims" shape header) is skipped; each
    subsequent line is parsed as a word followed by its float components.
    """
    fin.readline()  # discard the header line holding the array shape
    for record in fin:
        word, *components = record.strip().split(u' ')
        yield word, np.array([float(c) for c in components], dtype=np.float32)
def compare_wv(a, b, t):
    """Assert two KeyedVectors agree on per-word counts and matrix shapes."""
    def word_counts(kv):
        return {word: kv.get_vecattr(word, 'count') for word in kv.key_to_index}

    t.assertEqual(word_counts(a), word_counts(b))
    #
    # We do not compare most matrices directly, because they will never
    # be equal unless many conditions are strictly controlled.
    #
    t.assertEqual(a.vectors.shape, b.vectors.shape)
    # t.assertTrue(np.allclose(a.vectors, b.vectors))
    t.assertEqual(a.vectors_vocab.shape, b.vectors_vocab.shape)
    # t.assertTrue(np.allclose(a.vectors_vocab, b.vectors_vocab))
def compare_nn(a, b, t):
    """Assert the hidden layers of two models have matching shapes."""
    # Only the output-layer shape can be compared robustly here; the lockf
    # arrays below match only if match_gensim=True was used in init_post_load.
    t.assertEqual(a.syn1neg.shape, b.syn1neg.shape)
    # t.assertEqual(a.vectors_ngrams_lockf.shape, b.vectors_ngrams_lockf.shape)
    # t.assertTrue(np.allclose(a.vectors_ngrams_lockf, b.vectors_ngrams_lockf))
    # t.assertEqual(a.vectors_vocab_lockf.shape, b.vectors_vocab_lockf.shape)
    # t.assertTrue(np.allclose(a.vectors_vocab_lockf, b.vectors_vocab_lockf))
def compare_vocabulary(a, b, t):
    """Assert two models share identical vocabulary-building settings."""
    for attr in (
        'max_vocab_size', 'min_count', 'sample', 'sorted_vocab',
        'null_word', 'raw_vocab', 'max_final_vocab', 'ns_exponent',
    ):
        t.assertEqual(getattr(a, attr), getattr(b, attr))
    # cum_table holds floats, so compare with a tolerance instead of ==.
    t.assertTrue(np.allclose(a.cum_table, b.cum_table))
| TestFastTextModel |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/descriptors.py | {
"start": 26127,
"end": 26398
} | class ____(AOTOutput):
"""For cases when you don't actually care about descriptor propagation, do not use under normal
circumstances."""
idx: int
def expr(self) -> str:
return f"__dummy{self.idx}"
@dataclasses.dataclass(frozen=True)
| DummyAOTOutput |
python | davidhalter__jedi | jedi/inference/gradual/stub_value.py | {
"start": 3343,
"end": 3385
} | class ____(ValueWrapper):
pass
| VersionInfo |
python | huggingface__transformers | tests/models/blip_2/test_modeling_blip_2.py | {
"start": 59529,
"end": 74843
} | class ____(unittest.TestCase):
def setUp(self):
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_inference_opt(self):
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", dtype=torch.float16).to(
torch_device
)
# prepare image
image = prepare_img()
inputs = processor(images=image, return_tensors="pt").to(torch_device, dtype=torch.float16)
predictions = model.generate(**inputs)
generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()
# Test output
expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118] # fmt: skip
self.assertEqual(predictions[0].tolist(), expected_ids)
self.assertEqual("a woman sitting on the beach with a dog", generated_text)
# image and context
prompt = "Question: which city is this? Answer:"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16)
# max_length for BLIP includes prompt length from now on, use max_new_tokens
predictions = model.generate(**inputs, max_new_tokens=11)
generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()
# Test output
expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 45641, 35, 61, 343, 16, 42, 116, 31652, 35, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118] # fmt: skip
self.assertEqual(predictions[0].tolist(), expected_ids)
self.assertEqual(generated_text, "Question: which city is this? Answer: it's not a city, it's a beach")
def test_inference_interpolate_pos_encoding(self):
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", dtype=torch.float16).to(
torch_device
)
processor.image_processor.size = {"height": 500, "width": 500}
image = prepare_img()
inputs = processor(images=image, return_tensors="pt").to(torch_device)
predictions = model.generate(**inputs, interpolate_pos_encoding=True)
generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()
expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 102, 693, 8, 2335, 15, 5, 4105, 50118] # fmt: skip
self.assertEqual(predictions[0].tolist(), expected_ids)
self.assertEqual(generated_text, "a woman and dog on the beach")
def test_inference_opt_batched_beam_search(self):
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", dtype=torch.float16).to(
torch_device
)
# prepare image
image = prepare_img()
inputs = processor(images=[image, image], return_tensors="pt").to(torch_device, dtype=torch.float16)
predictions = model.generate(**inputs, num_beams=2)
# Test output (in this case, slightly different from greedy search)
expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118] # fmt: skip
self.assertEqual(predictions[0].tolist(), expected_ids)
self.assertEqual(predictions[1].tolist(), expected_ids)
def test_inference_t5(self):
processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xl", dtype=torch.float16).to(
torch_device
)
# prepare image
image = prepare_img()
inputs = processor(images=image, return_tensors="pt").to(torch_device, dtype=torch.float16)
predictions = model.generate(**inputs)
generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()
expectations = Expectations(
{
("xpu", 3): [
[0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1],
"a woman is playing with her dog on the beach",
],
("cuda", 7): [
[0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1],
"a woman is playing with her dog on the beach",
],
}
)
expected_outputs = expectations.get_expectation()
# Test output
self.assertEqual(predictions[0].tolist(), expected_outputs[0])
self.assertEqual(expected_outputs[1], generated_text)
# image and context
prompt = "Question: which city is this? Answer:"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16)
predictions = model.generate(**inputs)
generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()
expectations = Expectations(
{
("xpu", 3): [
[0, 3, 7, 152, 2515, 11389, 3523, 1],
"san francisco",
],
("cuda", 7): [
[0, 3, 7, 152, 2515, 11389, 3523, 1],
"san francisco",
],
}
)
expected_outputs = expectations.get_expectation()
# Test output
self.assertEqual(predictions[0].tolist(), expected_outputs[0])
self.assertEqual(generated_text, expected_outputs[1])
def test_inference_t5_batched_beam_search(self):
processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xl", dtype=torch.float16).to(
torch_device
)
# prepare image
image = prepare_img()
inputs = processor(images=[image, image], return_tensors="pt").to(torch_device, dtype=torch.float16)
predictions = model.generate(**inputs, num_beams=2)
expectations = Expectations(
{
("xpu", 3): [
[0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1],
[0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1],
],
("cuda", 7): [
[0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1],
[0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1],
],
}
)
expected_predictions = expectations.get_expectation()
# Test output (in this case, slightly different from greedy search)
self.assertEqual(predictions[0].tolist(), expected_predictions[0])
self.assertEqual(predictions[1].tolist(), expected_predictions[1])
@require_torch_multi_accelerator
def test_inference_opt_multi_accelerator(self):
processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained(
"Salesforce/blip2-opt-2.7b", dtype=torch.float16, device_map="balanced"
)
# prepare image
image = prepare_img()
inputs = processor(images=image, return_tensors="pt").to(0, dtype=torch.float16)
predictions = model.generate(**inputs)
generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()
# Test output
expected_ids = [2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118]
self.assertEqual(predictions[0].tolist(), [50265] * 32 + expected_ids) # 50265 is the img token id
self.assertEqual("a woman sitting on the beach with a dog", generated_text)
# image and context
prompt = "Question: which city is this? Answer:"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(0, dtype=torch.float16)
predictions = model.generate(**inputs, max_new_tokens=11)
generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()
# Test output
expected_ids = [2, 45641, 35, 61, 343, 16, 42, 116, 31652, 35, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118]
self.assertEqual(predictions[0].tolist(), [50265] * 32 + expected_ids) # 50265 is the img token id
self.assertEqual(generated_text, "Question: which city is this? Answer: it's not a city, it's a beach")
@require_torch_multi_accelerator
def test_inference_t5_multi_accelerator(self):
processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl")
device_map = {
"query_tokens": 0,
"vision_model": 0,
"language_model": 1,
"language_projection": 0,
"qformer": 0,
}
model = Blip2ForConditionalGeneration.from_pretrained(
"Salesforce/blip2-flan-t5-xl", dtype=torch.float16, device_map=device_map
)
# prepare image
image = prepare_img()
inputs = processor(images=image, return_tensors="pt").to(f"{torch_device}:0", dtype=torch.float16)
predictions = model.generate(**inputs)
generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()
# Test output
expected_ids_and_text = Expectations(
{
("cuda", None): (
[0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1],
"a woman is playing with her dog on the beach",
),
("rocm", (9, 5)): (
[0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1],
"a woman is playing with her dog on the beach",
),
}
).get_expectation()
self.assertEqual(predictions[0].tolist(), expected_ids_and_text[0])
self.assertEqual(generated_text, expected_ids_and_text[1])
# image and context
prompt = "Question: which city is this? Answer:"
inputs = processor(images=image, text=prompt, return_tensors="pt").to(f"{torch_device}:0", dtype=torch.float16)
predictions = model.generate(**inputs)
generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()
# Test output
expected_ids_and_text = Expectations(
{
("cuda", None): ([0, 3, 7, 152, 2515, 11389, 3523, 1], "san francisco"),
("rocm", (9, 5)): ([0, 3, 7, 152, 2515, 11389, 3523, 1], "san francisco"),
}
).get_expectation()
self.assertEqual(predictions[0].tolist(), expected_ids_and_text[0])
self.assertEqual(generated_text, expected_ids_and_text[1])
@require_torch_accelerator
def test_inference_itm(self):
model_name = "Salesforce/blip2-itm-vit-g"
processor = Blip2Processor.from_pretrained(model_name)
model = Blip2ForImageTextRetrieval.from_pretrained(model_name).to(torch_device)
image = prepare_img()
text = "A woman and her dog sitting in a beach"
inputs = processor(images=image, text=text, return_tensors="pt").to(torch_device)
# forward pass
out_itm = model(**inputs, use_image_text_matching_head=True)
out = model(**inputs)
# verify
expected_scores = torch.Tensor([[0.0238, 0.9762]])
torch.testing.assert_close(torch.nn.Softmax()(out_itm[0].cpu()), expected_scores, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(out[0].cpu(), torch.Tensor([[0.4406]]), rtol=1e-3, atol=1e-3)
@require_torch_accelerator
@require_torch_fp16
def test_inference_itm_fp16(self):
model_name = "Salesforce/blip2-itm-vit-g"
processor = Blip2Processor.from_pretrained(model_name)
model = Blip2ForImageTextRetrieval.from_pretrained(model_name, dtype=torch.float16).to(torch_device)
image = prepare_img()
text = "A woman and her dog sitting in a beach"
inputs = processor(images=image, text=text, return_tensors="pt").to(torch_device, dtype=torch.float16)
# forward pass
out_itm = model(**inputs, use_image_text_matching_head=True)
out = model(**inputs)
# verify
expected_scores = torch.Tensor([[0.0239, 0.9761]])
torch.testing.assert_close(torch.nn.Softmax()(out_itm[0].cpu().float()), expected_scores, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(out[0].cpu().float(), torch.Tensor([[0.4406]]), rtol=1e-3, atol=1e-3)
@require_torch_accelerator
@require_torch_fp16
def test_inference_vision_with_projection_fp16(self):
model_name = "Salesforce/blip2-itm-vit-g"
processor = Blip2Processor.from_pretrained(model_name)
model = Blip2VisionModelWithProjection.from_pretrained(model_name, dtype=torch.float16).to(torch_device)
image = prepare_img()
inputs = processor(images=image, return_tensors="pt").to(torch_device, dtype=torch.float16)
# forward pass
out = model(**inputs)
# verify
expected_image_embeds = [
-0.093994140625,
-0.075927734375,
0.031890869140625,
0.053009033203125,
0.0352783203125,
-0.01190185546875,
]
self.assertTrue(np.allclose(out.image_embeds[0][0][:6].tolist(), expected_image_embeds, atol=1e-3))
@require_torch_accelerator
@require_torch_fp16
def test_inference_text_with_projection_fp16(self):
model_name = "Salesforce/blip2-itm-vit-g"
processor = Blip2Processor.from_pretrained(model_name)
model = Blip2TextModelWithProjection.from_pretrained(model_name, dtype=torch.float16).to(torch_device)
inputs = processor(text="a woman sitting on the beach with a dog", padding=True, return_tensors="pt").to(
torch_device
)
# forward pass
out = model(**inputs)
# verify
expected_text_embeds = [
-0.1082763671875,
0.053192138671875,
-0.02825927734375,
0.0169830322265625,
0.08648681640625,
-0.04656982421875,
]
self.assertTrue(np.allclose(out.text_embeds[0][0][:6].tolist(), expected_text_embeds, atol=1e-3))
| Blip2ModelIntegrationTest |
python | scikit-learn__scikit-learn | sklearn/neighbors/_unsupervised.py | {
"start": 255,
"end": 6283
} | class ____(KNeighborsMixin, RadiusNeighborsMixin, NeighborsBase):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
.. versionadded:: 0.9
Parameters
----------
n_neighbors : int, default=5
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, default=1.0
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`, in which
case only "nonzero" elements may be considered neighbors.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
p : float (positive), default=2
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
effective_metric_ : str
Metric used to compute distances to neighbors.
effective_metric_params_ : dict
Parameters for the metric used to compute distances to neighbors.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_fit_ : int
Number of samples in the fitted data.
See Also
--------
KNeighborsClassifier : Classifier implementing the k-nearest neighbors
vote.
RadiusNeighborsClassifier : Classifier implementing a vote among neighbors
within a given radius.
KNeighborsRegressor : Regression based on k-nearest neighbors.
RadiusNeighborsRegressor : Regression based on neighbors within a fixed
radius.
BallTree : Space partitioning data structure for organizing points in a
multi-dimensional space, used for nearest neighbor search.
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(n_neighbors=2, radius=0.4)
>>> neigh.fit(samples)
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors(
... [[0, 0, 1.3]], 0.4, return_distance=False
... )
>>> np.asarray(nbrs[0][0])
array(2)
"""
def __init__(
self,
*,
n_neighbors=5,
radius=1.0,
algorithm="auto",
leaf_size=30,
metric="minkowski",
p=2,
metric_params=None,
n_jobs=None,
):
super().__init__(
n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
)
@_fit_context(
# NearestNeighbors.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None):
"""Fit the nearest neighbors estimator from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : NearestNeighbors
The fitted nearest neighbors estimator.
"""
return self._fit(X)
| NearestNeighbors |
python | ansible__ansible | test/units/module_utils/facts/test_collector.py | {
"start": 17445,
"end": 20473
} | class ____(unittest.TestCase):
def test(self):
dep_map = {'network': set(['distribution', 'platform']),
'virtual': set(),
'platform': set(['what_platform_wants']),
'what_platform_wants': set(),
'network_stuff': set(['network'])}
res = collector.tsort(dep_map)
self.assertIsInstance(res, list)
names = [x[0] for x in res]
assert names.index('network_stuff') > names.index('network')
assert names.index('platform') > names.index('what_platform_wants')
assert names.index('network') > names.index('platform')
def test_cycles(self):
dep_map = {'leaf1': set(),
'leaf2': set(),
'node1': set(['node2']),
'node2': set(['node3']),
'node3': set(['node1'])}
self.assertRaises(collector.CycleFoundInFactDeps,
collector.tsort,
dep_map)
def test_just_nodes(self):
dep_map = {'leaf1': set(),
'leaf4': set(),
'leaf3': set(),
'leaf2': set()}
res = collector.tsort(dep_map)
self.assertIsInstance(res, list)
names = [x[0] for x in res]
# not a lot to assert here, any order of the
# results is valid
self.assertEqual(set(names), set(dep_map.keys()))
def test_self_deps(self):
dep_map = {'node1': set(['node1']),
'node2': set(['node2'])}
self.assertRaises(collector.CycleFoundInFactDeps,
collector.tsort,
dep_map)
def test_unsolvable(self):
dep_map = {'leaf1': set(),
'node2': set(['leaf2'])}
res = collector.tsort(dep_map)
self.assertIsInstance(res, list)
names = [x[0] for x in res]
self.assertEqual(set(names), set(dep_map.keys()))
def test_chain(self):
dep_map = {'leaf1': set(['leaf2']),
'leaf2': set(['leaf3']),
'leaf3': set(['leaf4']),
'leaf4': set(),
'leaf5': set(['leaf1'])}
res = collector.tsort(dep_map)
self.assertIsInstance(res, list)
names = [x[0] for x in res]
self.assertEqual(set(names), set(dep_map.keys()))
def test_multi_pass(self):
dep_map = {'leaf1': set(),
'leaf2': set(['leaf3', 'leaf1', 'leaf4', 'leaf5']),
'leaf3': set(['leaf4', 'leaf1']),
'leaf4': set(['leaf1']),
'leaf5': set(['leaf1'])}
res = collector.tsort(dep_map)
self.assertIsInstance(res, list)
names = [x[0] for x in res]
self.assertEqual(set(names), set(dep_map.keys()))
assert names.index('leaf1') < names.index('leaf2')
for leaf in ('leaf2', 'leaf3', 'leaf4', 'leaf5'):
assert names.index('leaf1') < names.index(leaf)
| TestTsort |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.