language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | charliermarsh__ruff | scripts/ty_benchmark/src/benchmark/tool.py | {
"start": 4128,
"end": 5918
} | class ____(Tool):
path: Path
def __init__(self, *, path: Path | None = None):
if path:
self.path = path
else:
if sys.platform == "win32":
self.path = Path("./node_modules/.bin/pyright.cmd").resolve()
else:
self.path = Path("./node_modules/.bin/pyright").resolve()
if not self.path.exists():
print(
"Pyright executable not found. Did you ran `npm install` in the `ty_benchmark` directory?"
)
@override
def config(self, project: Project, venv: Venv):
return (
Path("pyrightconfig.json"),
json.dumps(
{
"exclude": [str(path) for path in project.exclude],
# Set the `venv` config for pyright. Pyright only respects the `--venvpath`
# CLI option when `venv` is set in the configuration... 🤷♂️
"venv": venv.name,
# This is not the path to the venv folder, but the folder that contains the venv...
"venvPath": str(venv.path.parent.as_posix()),
"pythonVersion": project.python_version,
}
),
)
def command(self, project: Project, venv: Venv, single_threaded: bool) -> Command:
command = [str(self.path), "--skipunannotated"]
if not single_threaded:
command.append("--threads")
command.extend(
[
"--level=warning",
"--project",
"pyrightconfig.json",
*project.include,
]
)
return Command(
name="Pyright",
command=command,
)
| Pyright |
python | ApeWorX__ape | src/ape/utils/basemodel.py | {
"start": 11618,
"end": 12756
} | class ____(EthpmTypesBaseModel):
"""
An ape-pydantic BaseModel.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
def model_copy(
self: "Model",
*,
update: Optional[Mapping[str, Any]] = None,
deep: bool = False,
cache_clear: Optional[Sequence[str]] = None,
) -> "Model":
result = super().model_copy(update=update, deep=deep)
# Clear @cached_properties
for cached_item in cache_clear or []:
if cached_item in result.__dict__:
del result.__dict__[cached_item]
return result
@raises_not_implemented
def _repr_mimebundle_(self, include=None, exclude=None):
# This works better than AttributeError for Ape.
pass
@raises_not_implemented
def _ipython_display_(self, include=None, exclude=None):
# This works better than AttributeError for Ape.
pass
def _assert_not_ipython_check(key):
# Perf: IPython expects AttributeError here.
if isinstance(key, str) and key == "_ipython_canary_method_should_not_exist_":
raise AttributeError()
| BaseModel |
python | ray-project__ray | python/ray/llm/_internal/serve/serving_patterns/data_parallel/builder.py | {
"start": 2496,
"end": 4979
} | class ____(BaseModelExtended):
"""Schema for DP OpenAI serving args."""
llm_config: Union[str, dict, LLMConfig] = Field(
description="The LLM configuration",
)
ingress_cls_config: Union[dict, IngressClsConfig] = Field(
default_factory=IngressClsConfig,
description="The configuration for the ingress class.",
)
ingress_deployment_config: Optional[dict] = Field(
default_factory=dict,
description="The Ray @server.deployment options for the ingress server.",
)
@field_validator("llm_config")
@classmethod
def _validate_llm_config(cls, value: Any) -> LLMConfig:
if isinstance(value, str):
return LLMConfig.from_file(value)
elif isinstance(value, dict):
return LLMConfig.model_validate(value)
elif isinstance(value, LLMConfig):
return value
else:
raise TypeError(f"Invalid LLMConfig type: {type(value)}")
@field_validator("ingress_cls_config")
@classmethod
def _validate_ingress_cls_config(cls, value: Any) -> IngressClsConfig:
if isinstance(value, dict):
return IngressClsConfig.model_validate(value)
return value
def build_dp_openai_app(builder_config: dict) -> Application:
"""Build an OpenAI compatible app with the DP attention deployment
setup from the given builder configuration.
Args:
builder_config: The configuration for the builder. It has to conform
to the DPOpenAiServingArgs pydantic model.
Returns:
The configured Ray Serve Application.
"""
builder_config = DPOpenAiServingArgs.model_validate(builder_config)
llm_config = builder_config.llm_config
dp_deployment = build_dp_deployment(llm_config)
ingress_cls_config = builder_config.ingress_cls_config
ingress_options = ingress_cls_config.ingress_cls.get_deployment_options(
[llm_config]
)
if builder_config.ingress_deployment_config:
ingress_options = deep_merge_dicts(
ingress_options, builder_config.ingress_deployment_config
)
ingress_cls = make_fastapi_ingress(ingress_cls_config.ingress_cls)
logger.info("============== Ingress Options ==============")
logger.info(pprint.pformat(ingress_options))
return serve.deployment(ingress_cls, **ingress_options).bind(
llm_deployments=[dp_deployment],
**ingress_cls_config.ingress_extra_kwargs,
)
| DPOpenAiServingArgs |
python | getsentry__sentry | src/sentry/api/serializers/models/release.py | {
"start": 14404,
"end": 14678
} | class ____(TypedDict):
id: int
slug: str | None
name: str
new_groups: int | None
platform: str | None
platforms: list[str]
health_data: NotRequired[ReleaseHealthOverview | None]
has_health_data: NotRequired[bool]
@register(Release)
| _ProjectDict |
python | pytest-dev__pytest-django | tests/test_unittest.py | {
"start": 155,
"end": 474
} | class ____(TestCase):
fixtures = ("items",)
def test_fixtures(self) -> None:
assert Item.objects.count() == 1
assert Item.objects.get().name == "Fixture item"
def test_fixtures_again(self) -> None:
"""Ensure fixtures are only loaded once."""
self.test_fixtures()
| TestFixtures |
python | django__django | tests/model_forms/tests.py | {
"start": 3113,
"end": 3224
} | class ____(forms.ModelForm):
class Meta:
model = Category
fields = "__all__"
| BaseCategoryForm |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 227024,
"end": 228339
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
access_token: str,
gocardless_environment: str,
gocardless_version: str,
start_date: str,
):
"""Airbyte Source for Gocardless.
Documentation can be found at https://docs.airbyte.com/integrations/sources/gocardless
Args:
name (str): The name of the destination.
access_token (str): Gocardless API TOKEN
gocardless_environment (str): Environment you are trying to connect to.
gocardless_version (str): GoCardless version. This is a date. You can find the latest here: https://developer.gocardless.com/api-reference/#api-usage-making-requests
start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
"""
self.access_token = check.str_param(access_token, "access_token")
self.gocardless_environment = check.str_param(
gocardless_environment, "gocardless_environment"
)
self.gocardless_version = check.str_param(gocardless_version, "gocardless_version")
self.start_date = check.str_param(start_date, "start_date")
super().__init__("Gocardless", name)
| GocardlessSource |
python | walkccc__LeetCode | solutions/1268. Search Suggestions System/1268.py | {
"start": 0,
"end": 117
} | class ____:
def __init__(self):
self.children: dict[str, TrieNode] = {}
self.word: str | None = None
| TrieNode |
python | pallets__werkzeug | src/werkzeug/middleware/lint.py | {
"start": 3035,
"end": 3567
} | class ____:
def __init__(self, stream: t.IO[str]) -> None:
self._stream = stream
def write(self, s: str) -> None:
check_type("wsgi.error.write()", s, str)
self._stream.write(s)
def flush(self) -> None:
self._stream.flush()
def writelines(self, seq: t.Iterable[str]) -> None:
for line in seq:
self.write(line)
def close(self) -> None:
warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
self._stream.close()
| ErrorStream |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_values_to_be_unique.py | {
"start": 2072,
"end": 12954
} | class ____(ColumnMapExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
This expectation detects duplicates. All duplicated values are counted as exceptions.
For example, [1, 2, 3, 3, 3] will return [3, 3, 3] in result.exceptions_list, with \
unexpected_percent = 60.0.
ExpectColumnValuesToBeUnique is a \
Column Map Expectation
Column Map Expectations are one of the most common types of Expectation.
They are evaluated for a single column and ask a yes/no question for every row in that column.
Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
Other Parameters:
mostly (None or a float between 0 and 1): \
Successful if at least mostly fraction of values match the expectation. \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1 "A"
1 2 "A"
2 3 "B"
Code Examples:
Passing Case:
Input:
ExpectColumnValuesToBeUnique(
column="test"
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 0.0,
"unexpected_percent_nonmissing": 0.0
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnValuesToBeUnique(
column="test2"
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 2,
"unexpected_percent": 66.66666666666666,
"partial_unexpected_list": [
"A",
"A",
],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 66.66666666666666,
"unexpected_percent_nonmissing": 66.66666666666666
}},
"meta": {{}},
"success": true
}}
""" # noqa: E501 # FIXME CoP
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
map_metric = "column_values.unique"
success_keys = ("mostly",)
args_keys = ("column",)
class Config:
title = "Expect column values to be unique"
@staticmethod
def schema_extra(schema: Dict[str, Any], model: Type[ExpectColumnValuesToBeUnique]) -> None:
ColumnMapExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
@override
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("mostly", RendererValueType.NUMBER),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if renderer_configuration.include_column_name:
template_str = "$column values must be unique"
else:
template_str = "values must be unique"
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@override
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer( # type: ignore[override] # TODO: Fix this type ignore
cls,
configuration: ExpectationConfiguration,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
if include_column_name:
template_str = "$column values must be unique"
else:
template_str = "values must be unique"
if params["mostly"] is not None:
if isinstance(params["mostly"], (int, float)) and params["mostly"] < 1.0:
params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") # noqa: E501 # FIXME CoP
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str,
params,
styling,
)
return [
RenderedStringTemplateContent(
content_block_type="string_template",
string_template={
"template": template_str,
"params": params,
"styling": styling,
},
)
]
| ExpectColumnValuesToBeUnique |
python | nedbat__coveragepy | lab/hack_pyc.py | {
"start": 336,
"end": 2860
} | class ____:
def read(self, f):
if isinstance(f, basestring):
f = open(f, "rb")
self.magic = f.read(4)
self.modtime = f.read(4)
self.code = marshal.load(f)
def write(self, f):
if isinstance(f, basestring):
f = open(f, "wb")
f.write(self.magic)
f.write(self.modtime)
marshal.dump(self.code, f)
def hack_line_numbers(self):
self.code = hack_line_numbers(self.code)
def hack_line_numbers(code):
"""Replace a code object's line number information to claim that every
byte of the bytecode is a new source line. Returns a new code
object. Also recurses to hack the line numbers in nested code objects.
"""
# Create a new lnotab table. Each opcode is claimed to be at
# 1000*lineno + (opcode number within line), so for example, the opcodes on
# source line 12 will be given new line numbers 12000, 12001, 12002, etc.
old_num = list(lnotab_numbers(code.co_lnotab, code.co_firstlineno))
n_bytes = len(code.co_code)
new_num = []
line = 0
opnum_in_line = 0
i_byte = 0
while i_byte < n_bytes:
if old_num and i_byte == old_num[0][0]:
line = old_num.pop(0)[1]
opnum_in_line = 0
new_num.append((i_byte, 100000000 + 1000 * line + opnum_in_line))
if ord(code.co_code[i_byte]) >= opcode.HAVE_ARGUMENT:
i_byte += 3
else:
i_byte += 1
opnum_in_line += 1
# new_num is a list of pairs, (byteoff, lineoff). Turn it into an lnotab.
new_firstlineno = new_num[0][1] - 1
new_lnotab = lnotab_string(new_num, new_firstlineno)
# Recurse into code constants in this code object.
new_consts = []
for const in code.co_consts:
if type(const) == types.CodeType:
new_consts.append(hack_line_numbers(const))
else:
new_consts.append(const)
# Create a new code object, just like the old one, except with new
# line numbers.
new_code = new.code(
code.co_argcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
tuple(new_consts),
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
new_firstlineno,
new_lnotab,
)
return new_code
def hack_file(f):
pyc = PycFile()
pyc.read(f)
pyc.hack_line_numbers()
pyc.write(f)
if __name__ == "__main__":
hack_file(sys.argv[1])
| PycFile |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py | {
"start": 24466,
"end": 24776
} | class ____(MetadataValue[Optional[int]]):
"""Container class for int metadata entry data.
Args:
value (Optional[int]): The int value.
"""
value: PublicAttr[Optional[int]] # type: ignore
@whitelist_for_serdes(storage_name="BoolMetadataEntryData")
@record(kw_only=False)
| IntMetadataValue |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI041_1.py | {
"start": 1181,
"end": 1728
} | class ____:
def good(self, arg: int) -> None:
...
def bad(self, arg: int | float | complex) -> None:
...
def bad2(self, arg: int | Union[float, complex]) -> None:
...
def bad3(self, arg: Union[Union[float, complex], int]) -> None:
...
def bad4(self, arg: Union[float | complex, int]) -> None:
...
def bad5(self, arg: int | (float | complex)) -> None:
...
# https://github.com/astral-sh/ruff/issues/18298
# fix must not yield runtime `None | None | ...` (TypeError)
| Foo |
python | sqlalchemy__sqlalchemy | test/sql/test_compare.py | {
"start": 5312,
"end": 5383
} | class ____(TypeDecorator):
cache_ok = True
impl = Integer
| MyType2 |
python | pyca__cryptography | tests/hazmat/primitives/test_aes.py | {
"start": 6991,
"end": 7992
} | class ____:
test_cfb = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "AES", "CFB"),
[
"CFB128GFSbox128.rsp",
"CFB128GFSbox192.rsp",
"CFB128GFSbox256.rsp",
"CFB128KeySbox128.rsp",
"CFB128KeySbox192.rsp",
"CFB128KeySbox256.rsp",
"CFB128VarKey128.rsp",
"CFB128VarKey192.rsp",
"CFB128VarKey256.rsp",
"CFB128VarTxt128.rsp",
"CFB128VarTxt192.rsp",
"CFB128VarTxt256.rsp",
"CFB128MMT128.rsp",
"CFB128MMT192.rsp",
"CFB128MMT256.rsp",
],
lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
lambda iv, **kwargs: CFB(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.AES(b"\x00" * 16), CFB8(b"\x00" * 16)
),
skip_message="Does not support AES CFB8",
)
| TestAESModeCFB |
python | matplotlib__matplotlib | doc/sphinxext/mock_gui_toolkits.py | {
"start": 49,
"end": 256
} | class ____(MagicMock):
__name__ = "cairocffi"
def setup(app):
sys.modules.update(
cairocffi=MyCairoCffi(),
)
return {'parallel_read_safe': True, 'parallel_write_safe': True}
| MyCairoCffi |
python | kamyu104__LeetCode-Solutions | Python/rearrange-k-substrings-to-form-target-string.py | {
"start": 63,
"end": 460
} | class ____(object):
def isPossibleToRearrange(self, s, t, k):
"""
:type s: str
:type t: str
:type k: int
:rtype: bool
"""
cnt = collections.defaultdict(int)
l = len(s)//k
for i in xrange(0, len(s), l):
cnt[s[i:i+l]] += 1
cnt[t[i:i+l]] -= 1
return all(v == 0 for v in cnt.itervalues())
| Solution |
python | pytorch__pytorch | torch/testing/_internal/common_methods_invocations.py | {
"start": 408473,
"end": 420421
} | class ____:
def __init__(
self,
arity: int,
rightmost_supports_scalar: bool,
rightmost_supports_scalarlist: bool,
rightmost_supports_tensor: bool = False,
) -> None:
self.arity = arity
self._set_rightmost_arg_types(
rightmost_supports_scalar, rightmost_supports_scalarlist, rightmost_supports_tensor,
)
self._intersperse_empty = (True, False)
def _set_rightmost_arg_types(
self,
rightmost_supports_scalar: bool,
rightmost_supports_scalarlist: bool,
rightmost_supports_tensor: bool,
) -> None:
self._rightmost_arg_types = [ForeachRightmostArgType.TensorList]
if self.arity > 1:
if rightmost_supports_scalar:
self._rightmost_arg_types.append(ForeachRightmostArgType.Scalar)
if rightmost_supports_scalarlist:
self._rightmost_arg_types.append(ForeachRightmostArgType.ScalarList)
if rightmost_supports_tensor:
self._rightmost_arg_types.append(ForeachRightmostArgType.Tensor)
def _sample_rightmost_arg(
self,
opinfo,
rightmost_arg_type,
device,
dtype,
num_tensors,
allow_higher_dtype_scalars,
**_foreach_inputs_kwargs,
):
if rightmost_arg_type == ForeachRightmostArgType.TensorList:
return [sample_inputs_foreach(None, device, dtype, num_tensors, **_foreach_inputs_kwargs)]
if rightmost_arg_type == ForeachRightmostArgType.Tensor:
return [make_tensor(
(), device=device, dtype=dtype,
noncontiguous=_foreach_inputs_kwargs["noncontiguous"],
requires_grad=_foreach_inputs_kwargs.get("requires_grad", False),
)]
should_use_simpler_scalars = opinfo.name == "_foreach_pow" and dtype in (torch.float16, torch.bfloat16)
def sample_float():
s = random.random()
if should_use_simpler_scalars:
return 1.0 if s > 0.5 else 2.0
else:
return 1.0 - s
high = 2 if should_use_simpler_scalars else 9
if rightmost_arg_type == ForeachRightmostArgType.ScalarList:
scalarlist_list = []
scalarlist_list.append([random.randint(0, high) + 1 for _ in range(num_tensors)])
if allow_higher_dtype_scalars or dtype.is_floating_point:
scalarlist_list.append([sample_float() for _ in range(num_tensors)])
if allow_higher_dtype_scalars or dtype.is_complex:
scalarlist_list.append([complex(sample_float(), sample_float()) for _ in range(num_tensors)])
scalarlist_list.append([1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 3)])
scalarlist_list.append([True, 1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 4)])
return scalarlist_list
if rightmost_arg_type == ForeachRightmostArgType.Scalar:
scalars = []
scalars.append(random.randint(1, high + 1))
if allow_higher_dtype_scalars or dtype.is_floating_point:
scalars.append(sample_float())
if allow_higher_dtype_scalars or dtype.is_complex:
scalars.append(complex(sample_float(), sample_float()))
scalars.append(True)
return scalars
raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}")
def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype):
if self.arity == 1:
if "foreach_abs" in opinfo.name and dtype in complex_types():
return True
# unary
if opinfo.ref in (torch.abs, torch.neg):
return False
if opinfo.ref_inplace == torch.Tensor.zero_:
return False
return dtype in integral_types_and(torch.bool)
if self.arity < 2 or rightmost_arg_type == ForeachRightmostArgType.Tensor:
return None
if "foreach_pow" in opinfo.name and dtype in integral_types_and(torch.bool):
return True
if any(
foreach_name in opinfo.name
for foreach_name in ("foreach_clamp_max", "foreach_clamp_min", "foreach_maximum", "foreach_minimum")
) and dtype in integral_types_and(torch.bool):
return True
if rightmost_arg_type == ForeachRightmostArgType.TensorList:
disable_fastpath = "foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool)
if "foreach_add" in opinfo.name and dtype == torch.bool:
disable_fastpath = True
return disable_fastpath
elif rightmost_arg_type == ForeachRightmostArgType.Scalar:
disable_fastpath = "foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool)
if isinstance(rightmost_arg, bool):
disable_fastpath |= dtype == torch.bool
if opinfo.ref in (torch.add, torch.mul):
disable_fastpath = False
elif isinstance(rightmost_arg, int):
disable_fastpath |= dtype == torch.bool
elif isinstance(rightmost_arg, float):
disable_fastpath |= dtype in integral_types_and(torch.bool)
elif isinstance(rightmost_arg, complex):
disable_fastpath |= dtype not in complex_types()
else:
raise AssertionError(f"Invalid scalar of type {rightmost_arg_type} - {rightmost_arg}")
return disable_fastpath
elif rightmost_arg_type == ForeachRightmostArgType.ScalarList:
disable_fastpath = opinfo.ref == torch.div and dtype in integral_types_and(torch.bool)
elmt_t = type(rightmost_arg[0])
has_same_type = all(isinstance(v, elmt_t) for v in rightmost_arg)
if not has_same_type:
return dtype not in complex_types()
if isinstance(rightmost_arg[0], bool):
if ("foreach_add" in opinfo.name or "foreach_mul" in opinfo.name) and dtype == torch.bool:
disable_fastpath = False
elif isinstance(rightmost_arg[0], int):
disable_fastpath |= dtype == torch.bool
elif isinstance(rightmost_arg[0], float):
disable_fastpath |= dtype in integral_types_and(torch.bool)
elif isinstance(rightmost_arg[0], complex):
disable_fastpath |= dtype not in complex_types()
else:
raise AssertionError(f"Invalid scalarlist of {rightmost_arg}")
return disable_fastpath
else:
raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}")
def _sample_kwargs(self, opinfo, rightmost_arg, rightmost_arg_type, dtype):
kwargs = {}
if rightmost_arg_type == ForeachRightmostArgType.TensorList and opinfo.supports_alpha_param:
if dtype in integral_types_and(torch.bool):
kwargs["alpha"] = 3
elif dtype.is_complex:
kwargs["alpha"] = complex(3, 3)
else:
kwargs["alpha"] = 3.14
if self.arity > 1:
kwargs["disable_fastpath"] = self._should_disable_fastpath(opinfo, rightmost_arg, rightmost_arg_type, dtype)
return kwargs
def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs):
assert "num_input_tensors" not in kwargs
_foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()}
_foreach_inputs_kwargs["requires_grad"] = requires_grad
allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False)
for _rightmost_arg_type in self._rightmost_arg_types:
zero_size_foreach_inputs_kwargs = copy.deepcopy(_foreach_inputs_kwargs)
zero_size_foreach_inputs_kwargs["zero_size"] = True
input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs)
if self.arity > 1:
args = [
sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs)
for _ in range(self.arity - 2)
]
args.append(
self._sample_rightmost_arg(
opinfo,
ForeachRightmostArgType.TensorList,
device,
dtype,
NUM_SIZE0_TENSORS,
allow_higher_dtype_scalars=allow_higher_dtype_scalars,
**zero_size_foreach_inputs_kwargs,
)[0])
kwargs = self._sample_kwargs(
opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype)
else:
args = []
kwargs = {}
if opinfo.ref in (torch.abs, torch.neg):
kwargs["disable_fastpath"] = False
else:
kwargs["disable_fastpath"] = dtype in integral_types_and(torch.bool)
yield ForeachSampleInput(input, *args, **kwargs)
def __call__(self, opinfo, device, dtype, requires_grad, **kwargs):
num_input_tensors_specified = "num_input_tensors" in kwargs
num_input_tensors = kwargs.pop("num_input_tensors") if num_input_tensors_specified else foreach_num_tensors
assert isinstance(num_input_tensors, list)
_foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()}
_foreach_inputs_kwargs["requires_grad"] = requires_grad
_foreach_inputs_kwargs["zero_size"] = False
allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False)
# add empty tensor interspersion to test fully fixing #100701
for num_tensors, rightmost_arg_type, intersperse_empty_tensors in itertools.product(
num_input_tensors, self._rightmost_arg_types, self._intersperse_empty):
if intersperse_empty_tensors and (num_tensors != max(num_input_tensors) or str(device) == 'cpu'):
# generate interspersed empty tensors for only 1 N on non-cpu device to lessen redundancy
continue
_foreach_inputs_kwargs["intersperse_empty_tensors"] = intersperse_empty_tensors
input = sample_inputs_foreach(
None, device, dtype, num_tensors, **_foreach_inputs_kwargs)
args = []
if self.arity > 1:
args = [
sample_inputs_foreach(
None, device, dtype, num_tensors, **_foreach_inputs_kwargs)
for _ in range(self.arity - 2)
]
rightmost_arg_list = self._sample_rightmost_arg(
opinfo, rightmost_arg_type, device, dtype, num_tensors, allow_higher_dtype_scalars,
**_foreach_inputs_kwargs)
for rightmost_arg in rightmost_arg_list:
args.append(rightmost_arg)
kwargs = self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype)
ref_args = args
if rightmost_arg_type in (ForeachRightmostArgType.Scalar, ForeachRightmostArgType.Tensor):
ref_args = args[:-1] + [[args[-1] for _ in range(num_tensors)]]
sample = ForeachSampleInput(input, *args, ref_args=ref_args, **kwargs)
yield sample
args.pop()
else:
yield ForeachSampleInput(
input,
*args,
disable_fastpath=self._should_disable_fastpath(opinfo, None, None, dtype),
)
| foreach_inputs_sample_func |
python | ray-project__ray | python/ray/serve/tests/test_fastapi.py | {
"start": 4120,
"end": 4247
} | class ____(BaseModel):
name: str
price: float = Field(None, gt=1.0, description="High price!")
nests: Nested
| BodyType |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/events.py | {
"start": 676,
"end": 18312
} | class ____(event.Events[SchemaEventTarget]):
"""
Define event listeners for schema objects,
that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
subclasses, including :class:`_schema.MetaData`, :class:`_schema.Table`,
:class:`_schema.Column`, etc.
**Create / Drop Events**
Events emitted when CREATE and DROP commands are emitted to the database.
The event hooks in this category include :meth:`.DDLEvents.before_create`,
:meth:`.DDLEvents.after_create`, :meth:`.DDLEvents.before_drop`, and
:meth:`.DDLEvents.after_drop`.
These events are emitted when using schema-level methods such as
:meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all`. Per-object
create/drop methods such as :meth:`.Table.create`, :meth:`.Table.drop`,
:meth:`.Index.create` are also included, as well as dialect-specific
methods such as :meth:`_postgresql.ENUM.create`.
.. versionadded:: 2.0 :class:`.DDLEvents` event hooks now take place
for non-table objects including constraints, indexes, and
dialect-specific schema types.
Event hooks may be attached directly to a :class:`_schema.Table` object or
to a :class:`_schema.MetaData` collection, as well as to any
:class:`.SchemaItem` class or object that can be individually created and
dropped using a distinct SQL command. Such classes include :class:`.Index`,
:class:`.Sequence`, and dialect-specific classes such as
:class:`_postgresql.ENUM`.
Example using the :meth:`.DDLEvents.after_create` event, where a custom
event hook will emit an ``ALTER TABLE`` command on the current connection,
after ``CREATE TABLE`` is emitted::
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import Table, Column, Metadata, Integer
m = MetaData()
some_table = Table("some_table", m, Column("data", Integer))
@event.listens_for(some_table, "after_create")
def after_create(target, connection, **kw):
connection.execute(
text("ALTER TABLE %s SET name=foo_%s" % (target.name, target.name))
)
some_engine = create_engine("postgresql://scott:tiger@host/test")
# will emit "CREATE TABLE some_table" as well as the above
# "ALTER TABLE" statement afterwards
m.create_all(some_engine)
Constraint objects such as :class:`.ForeignKeyConstraint`,
:class:`.UniqueConstraint`, :class:`.CheckConstraint` may also be
subscribed to these events, however they will **not** normally produce
events as these objects are usually rendered inline within an
enclosing ``CREATE TABLE`` statement and implicitly dropped from a
``DROP TABLE`` statement.
For the :class:`.Index` construct, the event hook will be emitted
for ``CREATE INDEX``, however SQLAlchemy does not normally emit
``DROP INDEX`` when dropping tables as this is again implicit within the
``DROP TABLE`` statement.
.. versionadded:: 2.0 Support for :class:`.SchemaItem` objects
for create/drop events was expanded from its previous support for
:class:`.MetaData` and :class:`.Table` to also include
:class:`.Constraint` and all subclasses, :class:`.Index`,
:class:`.Sequence` and some type-related constructs such as
:class:`_postgresql.ENUM`.
.. note:: These event hooks are only emitted within the scope of
SQLAlchemy's create/drop methods; they are not necessarily supported
by tools such as `alembic <https://alembic.sqlalchemy.org>`_.
**Attachment Events**
Attachment events are provided to customize
behavior whenever a child schema element is associated
with a parent, such as when a :class:`_schema.Column` is associated
with its :class:`_schema.Table`, when a
:class:`_schema.ForeignKeyConstraint`
is associated with a :class:`_schema.Table`, etc. These events include
:meth:`.DDLEvents.before_parent_attach` and
:meth:`.DDLEvents.after_parent_attach`.
**Reflection Events**
The :meth:`.DDLEvents.column_reflect` event is used to intercept
and modify the in-Python definition of database columns when
:term:`reflection` of database tables proceeds.
**Use with Generic DDL**
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.ExecutableDDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s"),
)
**Event Propagation to MetaData Copies**
For all :class:`.DDLEvent` events, the ``propagate=True`` keyword argument
will ensure that a given event handler is propagated to copies of the
object, which are made when using the :meth:`_schema.Table.to_metadata`
method::
from sqlalchemy import DDL
metadata = MetaData()
some_table = Table("some_table", metadata, Column("data", Integer))
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s"),
propagate=True,
)
new_metadata = MetaData()
new_table = some_table.to_metadata(new_metadata)
The above :class:`.DDL` object will be associated with the
:meth:`.DDLEvents.after_create` event for both the ``some_table`` and
the ``new_table`` :class:`.Table` objects.
.. seealso::
:ref:`event_toplevel`
:class:`.ExecutableDDLElement`
:class:`.DDL`
:ref:`schema_ddl_sequences`
""" # noqa: E501
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
def before_create(
self, target: SchemaEventTarget, connection: Connection, **kw: Any
) -> None:
r"""Called before CREATE statements are emitted.
:param target: the :class:`.SchemaObject`, such as a
:class:`_schema.MetaData` or :class:`_schema.Table`
but also including all create/drop objects such as
:class:`.Index`, :class:`.Sequence`, etc.,
object which is the target of the event.
.. versionadded:: 2.0 Support for all :class:`.SchemaItem` objects
was added.
:param connection: the :class:`_engine.Connection` where the
CREATE statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
:func:`.event.listen` accepts the ``insert=True``
modifier for this event; when True, the listener function will
be prepended to the internal list of events upon discovery, and execute
before registered listener functions that do not pass this argument.
"""
def after_create(
self, target: SchemaEventTarget, connection: Connection, **kw: Any
) -> None:
r"""Called after CREATE statements are emitted.
:param target: the :class:`.SchemaObject`, such as a
:class:`_schema.MetaData` or :class:`_schema.Table`
but also including all create/drop objects such as
:class:`.Index`, :class:`.Sequence`, etc.,
object which is the target of the event.
.. versionadded:: 2.0 Support for all :class:`.SchemaItem` objects
was added.
:param connection: the :class:`_engine.Connection` where the
CREATE statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def before_drop(
self, target: SchemaEventTarget, connection: Connection, **kw: Any
) -> None:
r"""Called before DROP statements are emitted.
:param target: the :class:`.SchemaObject`, such as a
:class:`_schema.MetaData` or :class:`_schema.Table`
but also including all create/drop objects such as
:class:`.Index`, :class:`.Sequence`, etc.,
object which is the target of the event.
.. versionadded:: 2.0 Support for all :class:`.SchemaItem` objects
was added.
:param connection: the :class:`_engine.Connection` where the
DROP statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def after_drop(
self, target: SchemaEventTarget, connection: Connection, **kw: Any
) -> None:
r"""Called after DROP statements are emitted.
:param target: the :class:`.SchemaObject`, such as a
:class:`_schema.MetaData` or :class:`_schema.Table`
but also including all create/drop objects such as
:class:`.Index`, :class:`.Sequence`, etc.,
object which is the target of the event.
.. versionadded:: 2.0 Support for all :class:`.SchemaItem` objects
was added.
:param connection: the :class:`_engine.Connection` where the
DROP statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def before_parent_attach(
self, target: SchemaEventTarget, parent: SchemaItem
) -> None:
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def after_parent_attach(
self, target: SchemaEventTarget, parent: SchemaItem
) -> None:
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
"""
def _sa_event_column_added_to_pk_constraint(
self, const: Constraint, col: Column[Any]
) -> None:
"""internal event hook used for primary key naming convention
updates.
"""
def column_reflect(
self, inspector: Inspector, table: Table, column_info: ReflectedColumn
) -> None:
"""Called for each unit of 'column info' retrieved when
a :class:`_schema.Table` is being reflected.
This event is most easily used by applying it to a specific
:class:`_schema.MetaData` instance, where it will take effect for
all :class:`_schema.Table` objects within that
:class:`_schema.MetaData` that undergo reflection::
metadata = MetaData()
@event.listens_for(metadata, "column_reflect")
def receive_column_reflect(inspector, table, column_info):
# receives for all Table objects that are reflected
# under this MetaData
...
# will use the above event hook
my_table = Table("my_table", metadata, autoload_with=some_engine)
.. versionadded:: 1.4.0b2 The :meth:`_events.DDLEvents.column_reflect`
hook may now be applied to a :class:`_schema.MetaData` object as
well as the :class:`_schema.MetaData` class itself where it will
take place for all :class:`_schema.Table` objects associated with
the targeted :class:`_schema.MetaData`.
It may also be applied to the :class:`_schema.Table` class across
the board::
from sqlalchemy import Table
@event.listens_for(Table, "column_reflect")
def receive_column_reflect(inspector, table, column_info):
# receives for all Table objects that are reflected
...
It can also be applied to a specific :class:`_schema.Table` at the
point that one is being reflected using the
:paramref:`_schema.Table.listeners` parameter::
t1 = Table(
"my_table",
autoload_with=some_engine,
listeners=[("column_reflect", receive_column_reflect)],
)
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`:
* ``name`` - the column's name, is applied to the
:paramref:`_schema.Column.name` parameter
* ``type`` - the type of this column, which should be an instance
of :class:`~sqlalchemy.types.TypeEngine`, is applied to the
:paramref:`_schema.Column.type` parameter
* ``nullable`` - boolean flag if the column is NULL or NOT NULL,
is applied to the :paramref:`_schema.Column.nullable` parameter
* ``default`` - the column's server default value. This is
normally specified as a plain string SQL expression, however the
event can pass a :class:`.FetchedValue`, :class:`.DefaultClause`,
or :func:`_expression.text` object as well. Is applied to the
:paramref:`_schema.Column.server_default` parameter
The event is called before any action is taken against
this dictionary, and the contents can be modified; the following
additional keys may be added to the dictionary to further modify
how the :class:`_schema.Column` is constructed:
* ``key`` - the string key that will be used to access this
:class:`_schema.Column` in the ``.c`` collection; will be applied
to the :paramref:`_schema.Column.key` parameter. Is also used
for ORM mapping. See the section
:ref:`mapper_automated_reflection_schemes` for an example.
* ``quote`` - force or un-force quoting on the column name;
is applied to the :paramref:`_schema.Column.quote` parameter.
* ``info`` - a dictionary of arbitrary data to follow along with
the :class:`_schema.Column`, is applied to the
:paramref:`_schema.Column.info` parameter.
:func:`.event.listen` also accepts the ``propagate=True``
modifier for this event; when True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`_schema.Table.to_metadata` is used.
.. seealso::
:ref:`mapper_automated_reflection_schemes` -
in the ORM mapping documentation
:ref:`automap_intercepting_columns` -
in the :ref:`automap_toplevel` documentation
:ref:`metadata_reflection_dbagnostic_types` - in
the :ref:`metadata_reflection_toplevel` documentation
"""
| DDLEvents |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 68460,
"end": 68967
} | class ____(PrefectFilterBaseModel):
"""Filter by `WorkPool.type`."""
any_: Optional[list[str]] = Field(
default=None, description="A list of work pool types to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.WorkPool.type.in_(self.any_))
return filters
| WorkPoolFilterType |
python | django__django | tests/generic_relations/models.py | {
"start": 3254,
"end": 3518
} | class ____(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
obj = GenericForeignKey(for_concrete_model=False)
title = models.CharField(max_length=255, null=True)
| ForProxyModelModel |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_text.py | {
"start": 40892,
"end": 45352
} | class ____(Data2VecTextPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.data2vec_text = Data2VecTextModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, MultipleChoiceModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.data2vec_text(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
inputs_embeds=flat_inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(reshaped_logits.device)
loss = loss_fct(reshaped_logits, labels)
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| Data2VecTextForMultipleChoice |
python | walkccc__LeetCode | solutions/683. K Empty Slots/683.py | {
"start": 0,
"end": 652
} | class ____:
def kEmptySlots(self, bulbs: list[int], k: int) -> int:
n = len(bulbs)
ans = math.inf
# day[i] := the day when bulbs[i] is turned on
day = [0] * n
for i, bulb in enumerate(bulbs):
day[bulb - 1] = i + 1
# Find a subarray of day[l..r], where its length is k + 2.
# For each l < i < r, day[i] > max(day[l], day[r]).
l = 0
r = l + k + 1
i = 1
while r < n:
if i == r:
ans = min(ans, max(day[l], day[r]))
l = i
r = i + k + 1
elif day[i] < max(day[l], day[r]):
l = i
r = i + k + 1
i += 1
return -1 if ans == math.inf else ans
| Solution |
python | Netflix__metaflow | test/core/tests/basic_include.py | {
"start": 67,
"end": 2225
} | class ____(MetaflowTest):
PRIORITY = 1
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
INCLUDE_FILES = {
"myfile_txt": {"default": "'./reg.txt'"},
"myfile_utf8": {"default": "'./utf8.txt'", "encoding": "'utf8'"},
"myfile_binary": {"default": "'./utf8.txt'", "is_text": False},
"myfile_overriden": {"default": "'./reg.txt'"},
"absent_file": {"required": False},
}
HEADER = """
import codecs
import os
os.environ['METAFLOW_RUN_MYFILE_OVERRIDEN'] = './override.txt'
with open('reg.txt', mode='w') as f:
f.write("Regular Text File")
with codecs.open('utf8.txt', mode='w', encoding='utf8') as f:
f.write(u"UTF Text File \u5e74")
with open('override.txt', mode='w') as f:
f.write("Override Text File")
"""
@steps(0, ["all"])
def step_all(self):
assert_equals("Regular Text File", self.myfile_txt)
assert_equals("UTF Text File \u5e74", self.myfile_utf8)
assert_equals(
"UTF Text File \u5e74".encode(encoding="utf8"), self.myfile_binary
)
assert_equals("Override Text File", self.myfile_overriden)
# Check that an absent file does not make things crash
assert_equals(None, self.absent_file)
try:
# Include files should be immutable
self.myfile_txt = 5
raise ExpectationFailed(AttributeError, "nothing")
except AttributeError:
pass
def check_results(self, flow, checker):
for step in flow:
checker.assert_artifact(step.name, "myfile_txt", "Regular Text File")
checker.assert_artifact(step.name, "myfile_utf8", "UTF Text File \u5e74")
checker.assert_artifact(
step.name,
"myfile_binary",
"UTF Text File \u5e74".encode(encoding="utf8"),
)
checker.assert_artifact(step.name, "myfile_overriden", "Override Text File")
| BasicIncludeTest |
python | matplotlib__matplotlib | lib/matplotlib/patheffects.py | {
"start": 7832,
"end": 10050
} | class ____(AbstractPathEffect):
"""A simple shadow via a filled patch."""
def __init__(self, offset=(2, -2),
shadow_rgbFace=None, alpha=None,
rho=0.3, **kwargs):
"""
Parameters
----------
offset : (float, float), default: (2, -2)
The (x, y) offset of the shadow in points.
shadow_rgbFace : :mpltype:`color`
The shadow color.
alpha : float, default: 0.3
The alpha transparency of the created shadow patch.
rho : float, default: 0.3
A scale factor to apply to the rgbFace color if *shadow_rgbFace*
is not specified.
**kwargs
Extra keywords are stored and passed through to
:meth:`!AbstractPathEffect._update_gc`.
"""
super().__init__(offset)
if shadow_rgbFace is None:
self._shadow_rgbFace = shadow_rgbFace
else:
self._shadow_rgbFace = mcolors.to_rgba(shadow_rgbFace)
if alpha is None:
alpha = 0.3
self._alpha = alpha
self._rho = rho
#: The dictionary of keywords to update the graphics collection with.
self._gc = kwargs
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
"""
Overrides the standard draw_path to add the shadow offset and
necessary color changes for the shadow.
"""
gc0 = renderer.new_gc() # Don't modify gc, but a copy!
gc0.copy_properties(gc)
if self._shadow_rgbFace is None:
r, g, b = (rgbFace or (1., 1., 1.))[:3]
# Scale the colors by a factor to improve the shadow effect.
shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)
else:
shadow_rgbFace = self._shadow_rgbFace
gc0.set_foreground("none")
gc0.set_alpha(self._alpha)
gc0.set_linewidth(0)
gc0 = self._update_gc(gc0, self._gc)
renderer.draw_path(
gc0, tpath, affine + self._offset_transform(renderer),
shadow_rgbFace)
gc0.restore()
withSimplePatchShadow = _subclass_with_normal(effect_class=SimplePatchShadow)
| SimplePatchShadow |
python | getsentry__sentry | tests/sentry/workflow_engine/models/test_detector.py | {
"start": 378,
"end": 6807
} | class ____(BaseWorkflowTest):
def setUp(self) -> None:
self.detector = self.create_detector()
def test_queryset(self) -> None:
"""
Test that we filter out objects with statuses other than 'active'
"""
assert Detector.objects.filter(id=self.detector.id).exists()
self.detector.status = ObjectStatus.PENDING_DELETION
self.detector.save()
assert not Detector.objects.filter(id=self.detector.id).exists()
self.detector.status = ObjectStatus.DELETION_IN_PROGRESS
self.detector.save()
assert not Detector.objects.filter(id=self.detector.id).exists()
def test_get_conditions__cached(self) -> None:
self.detector.workflow_condition_group = self.create_data_condition_group()
self.detector.save()
self.create_data_condition(
type="eq",
comparison="HIGH",
condition_group=self.detector.workflow_condition_group,
condition_result=DetectorPriorityLevel.HIGH,
)
fetched_detector = (
Detector.objects.filter(id=self.detector.id)
.select_related("workflow_condition_group")
.prefetch_related("workflow_condition_group__conditions")
.first()
)
assert fetched_detector is not None
with self.assertNumQueries(0):
conditions = fetched_detector.get_conditions()
assert conditions
def test_get_conditions__cached_group_only(self) -> None:
self.detector.workflow_condition_group = self.create_data_condition_group()
self.detector.save()
self.create_data_condition(
type="eq",
comparison="HIGH",
condition_group=self.detector.workflow_condition_group,
condition_result=DetectorPriorityLevel.HIGH,
)
fetched_detector = (
Detector.objects.filter(id=self.detector.id)
.select_related("workflow_condition_group")
.first()
)
assert fetched_detector is not None
with self.assertNumQueries(1):
conditions = fetched_detector.get_conditions()
assert conditions
def test_get_conditions__not_cached(self) -> None:
self.detector.workflow_condition_group = self.create_data_condition_group()
self.detector.save()
self.create_data_condition(
type="eq",
comparison="HIGH",
condition_group=self.detector.workflow_condition_group,
condition_result=DetectorPriorityLevel.HIGH,
)
fetched_detector = Detector.objects.get(id=self.detector.id)
with self.assertNumQueries(1):
conditions = fetched_detector.get_conditions()
assert conditions
def test_get_error_detector_for_project__success(self) -> None:
"""Test successful retrieval of error detector for project, created by default on project creation"""
error_detector = self.create_detector(
project=self.project, type=ErrorGroupType.slug, name="Error Detector"
)
result = Detector.get_error_detector_for_project(self.project.id)
assert result == error_detector
assert result.type == ErrorGroupType.slug
assert result.project_id == self.project.id
def test_get_error_detector_for_project__not_found(self) -> None:
with pytest.raises(Detector.DoesNotExist):
Detector.get_error_detector_for_project(self.project.id)
def test_get_error_detector_for_project__wrong_type(self) -> None:
self.create_detector(
project=self.project,
type=MetricIssue.slug, # Use a different registered type
name="Other Detector",
)
with pytest.raises(Detector.DoesNotExist):
Detector.get_error_detector_for_project(self.project.id)
def test_get_error_detector_for_project__caching(self) -> None:
error_detector = self.create_detector(
project=self.project, type=ErrorGroupType.slug, name="Error Detector"
)
# First call - cache miss
with (
patch("sentry.utils.cache.cache.get") as mock_cache_get,
patch("sentry.utils.cache.cache.set") as mock_cache_set,
):
mock_cache_get.return_value = None
result = Detector.get_error_detector_for_project(self.project.id)
assert result == error_detector
# Verify cache key format using the new method
expected_cache_key = Detector._get_detector_project_type_cache_key(
self.project.id, ErrorGroupType.slug
)
mock_cache_get.assert_called_once_with(expected_cache_key)
mock_cache_set.assert_called_once_with(
expected_cache_key, error_detector, Detector.CACHE_TTL
)
def test_get_error_detector_for_project__cache_hit(self) -> None:
error_detector = self.create_detector(
project=self.project, type=ErrorGroupType.slug, name="Error Detector"
)
# Mock cache hit
with patch("sentry.utils.cache.cache.get") as mock_cache_get:
mock_cache_get.return_value = error_detector
result = Detector.get_error_detector_for_project(self.project.id)
assert result == error_detector
# Verify cache was checked with correct key
expected_cache_key = Detector._get_detector_project_type_cache_key(
self.project.id, ErrorGroupType.slug
)
mock_cache_get.assert_called_once_with(expected_cache_key)
def test_settings(self) -> None:
detector = self.create_detector()
assert detector.settings
def test_settings__no_settings__invaild_settings(self) -> None:
# This is an issue type w/o a detector association
detector = self.create_detector(
type="profile_json_decode_main_thread", name="Invalid Detector"
)
with pytest.raises(ValueError, match="Registered grouptype has no detector settings"):
assert detector.settings
def test_get_detector_project_type_cache_key() -> None:
project_id = 123
detector_type = "error"
cache_key = Detector._get_detector_project_type_cache_key(project_id, detector_type)
assert cache_key == f"detector:by_proj_type:{project_id}:{detector_type}"
| DetectorTest |
python | joblib__joblib | examples/memory_basic_usage.py | {
"start": 3481,
"end": 4698
} | class ____(object):
"""A class which is using the previous function."""
def __init__(self, column=0):
self.column = column
def transform(self, data):
costly_compute = memory.cache(_costly_compute_cached)
return costly_compute(data, self.column)
transformer = Algorithm()
start = time.time()
data_trans = transformer.transform(data)
end = time.time()
print("\nThe function took {:.2f} s to compute.".format(end - start))
print("\nThe transformed data are:\n {}".format(data_trans))
###############################################################################
start = time.time()
data_trans = transformer.transform(data)
end = time.time()
print("\nThe function took {:.2f} s to compute.".format(end - start))
print("\nThe transformed data are:\n {}".format(data_trans))
###############################################################################
# As expected, the second call to the ``transform`` method load the results
# which have been cached.
###############################################################################
# Clean up cache directory
###############################################################################
memory.clear(warn=False)
| Algorithm |
python | getsentry__sentry | src/sentry/analytics/events/ai_autofix_pr_events.py | {
"start": 244,
"end": 349
} | class ____(AiAutofixPrEvent):
pass
@analytics.eventclass("ai.autofix.pr.merged")
| AiAutofixPrClosedEvent |
python | bokeh__bokeh | src/bokeh/plotting/contour.py | {
"start": 3118,
"end": 3431
} | class ____(LineCoords):
''' Complete geometry data for contour lines over a whole sequence of contour levels.
'''
levels: ArrayLike
def asdict(self):
# Convert to dict using shallow copy. dataclasses.asdict uses deep copy.
return dict(entries(self))
@dataclass(frozen=True)
| LineData |
python | pytorch__pytorch | test/inductor/test_cache.py | {
"start": 4962,
"end": 12307
} | class ____(TestMixin, TestCase):
@parametrize("cache_type", TestMixin.cache_types())
@parametrize("key_type", TestMixin.key_types())
@parametrize("value_type", TestMixin.value_types())
def test_get(
self: Self,
cache_type: type[icache.Cache],
key_type: type[icache.Key],
value_type: type[icache.Value],
) -> None:
# Checks that a cache returns None for missing keys, and after insertion,
# returns the correct value for each key.
if not self.cache_type_supports_key_and_value_types(
cache_type, key_type, value_type
):
return
cache: icache.Cache = cache_type()
self.maybe_randomize_base_dir(cache)
key_1, key_2 = self.keys_not_in(cache, lambda: self.key(key_type), 2)
value_1, value_2 = self.values_unalike(lambda: self.value(value_type), 2)
self.assertIsNone(cache.get(key_1))
self.assertIsNone(cache.get(key_2))
self.assertTrue(cache.insert(key_1, value_1))
self.assertTrue(cache.insert(key_2, value_2))
self.assertEqual(cache.get(key_1), value_1)
self.assertEqual(cache.get(key_2), value_2)
@parametrize("cache_type", TestMixin.cache_types())
@parametrize("key_type", TestMixin.key_types())
@parametrize("value_type", TestMixin.value_types())
def test_insert(
self: Self,
cache_type: type[icache.Cache],
key_type: type[icache.Key],
value_type: type[icache.Value],
) -> None:
# Verifies that inserting a new key succeeds, inserting the same key again fails,
# and the value for the key remains the first inserted value.
if not self.cache_type_supports_key_and_value_types(
cache_type, key_type, value_type
):
return
cache: icache.Cache = cache_type()
self.maybe_randomize_base_dir(cache)
key = self.key_not_in(cache, lambda: self.key(key_type))
value_1, value_2 = self.values_unalike(lambda: self.value(value_type), 2)
self.assertIsNone(cache.get(key))
self.assertTrue(cache.insert(key, value_1))
self.assertFalse(cache.insert(key, value_2))
self.assertEqual(cache.get(key), value_1)
@parametrize("cache_type", TestMixin.cache_types())
@parametrize("key_type", TestMixin.key_types())
@parametrize("value_type", TestMixin.value_types())
def test_get_concurrent(
self: Self,
cache_type: type[icache.Cache],
key_type: type[icache.Key],
value_type: type[icache.Value],
) -> None:
# Ensures that concurrent reads (get) from the cache return the correct values
# for all inserted keys, even under parallel access.
if not self.cache_type_supports_key_and_value_types(
cache_type, key_type, value_type
):
return
executor, iters = ThreadPoolExecutor(), 100
cache: icache.Cache = cache_type()
self.maybe_randomize_base_dir(cache)
keys = self.keys_not_in(cache, lambda: self.key(key_type), iters)
values = self.values_unalike(lambda: self.value(value_type), iters)
for key, value in zip(keys, values):
self.assertIsNone(cache.get(key))
self.assertTrue(cache.insert(key, value))
gets = executor.map(cache.get, keys)
for value, get in zip(values, gets):
self.assertEqual(get, value)
executor.shutdown()
@parametrize("cache_type", TestMixin.cache_types())
@parametrize("key_type", TestMixin.key_types())
@parametrize("value_type", TestMixin.value_types())
def test_insert_concurrent(
self: Self,
cache_type: type[icache.Cache],
key_type: type[icache.Key],
value_type: type[icache.Value],
) -> None:
# Ensures that concurrent inserts work as expected: only the first insert for each key
# succeeds, and the cache contains the correct value for each key after all inserts.
if not self.cache_type_supports_key_and_value_types(
cache_type, key_type, value_type
):
return
executor, iters = ThreadPoolExecutor(), 50
cache: icache.Cache = cache_type()
self.maybe_randomize_base_dir(cache)
keys = self.keys_not_in(cache, lambda: self.key(key_type), iters) * 2
values = self.values_unalike(lambda: self.value(value_type), iters * 2)
for key in keys:
self.assertIsNone(cache.get(key))
inserts = executor.map(cache.insert, keys, values)
inserted = {}
for key, value, insert in zip(keys, values, inserts):
if insert:
self.assertEqual(cache.get(key), value)
self.assertTrue(key not in inserted)
inserted[key] = value
self.assertTrue(set(keys) == set(inserted.keys()))
for key, value in inserted.items():
self.assertEqual(cache.get(key), value)
executor.shutdown()
@parametrize("cache_type", TestMixin.cache_types())
@parametrize("key_type", TestMixin.key_types())
@parametrize("value_type", TestMixin.value_types())
@parametrize("get_first", [True, False])
def test_combo_concurrent(
self: Self,
cache_type: type[icache.Cache],
key_type: type[icache.Key],
value_type: type[icache.Value],
get_first: bool,
) -> None:
# Tests a mix of concurrent get and insert operations, with the order of operations
# varied by the get_first parameter, to ensure correctness under interleaved access.
if not self.cache_type_supports_key_and_value_types(
cache_type, key_type, value_type
):
return
executor, iters = ThreadPoolExecutor(), 50
cache: icache.Cache = cache_type()
self.maybe_randomize_base_dir(cache)
keys = self.keys_not_in(cache, lambda: self.key(key_type), iters) * 2
values = self.values_unalike(lambda: self.value(value_type), iters * 2)
for key in keys:
self.assertIsNone(cache.get(key))
get_futures, insert_futures = [], []
for key, value in zip(keys, values):
if get_first:
get_futures.append(executor.submit(cache.get, key))
insert_futures.append(executor.submit(cache.insert, key, value))
else:
insert_futures.append(executor.submit(cache.insert, key, value))
get_futures.append(executor.submit(cache.get, key))
inserted = {}
for key, value, get_future, insert_future in zip(
keys, values, get_futures, insert_futures
):
if (get := get_future.result()) is not None:
if insert_future.result():
self.assertEqual(get, value)
self.assertTrue(key not in inserted)
inserted[key] = value
else:
if insert_future.result():
self.assertTrue(key not in inserted)
inserted[key] = value
self.assertTrue(set(keys) == set(inserted.keys()))
for key, value in inserted.items():
self.assertEqual(cache.get(key), value)
executor.shutdown()
@instantiate_parametrized_tests
| CacheTest |
python | walkccc__LeetCode | solutions/1901. Find a Peak Element II/1901.py | {
"start": 0,
"end": 277
} | class ____:
def findPeakGrid(self, mat: list[list[int]]) -> list[int]:
l = 0
r = len(mat) - 1
while l < r:
m = (l + r) // 2
if max(mat[m]) >= max(mat[m + 1]):
r = m
else:
l = m + 1
return [l, mat[l].index(max(mat[l]))]
| Solution |
python | pyqtgraph__pyqtgraph | pyqtgraph/GraphicsScene/exportDialog.py | {
"start": 467,
"end": 5415
} | class ____(QtWidgets.QWidget):
def __init__(self, scene):
QtWidgets.QWidget.__init__(self)
self.setVisible(False)
self.setWindowTitle("Export")
self.shown = False
self.currentExporter = None
self.scene = scene
self.selectBox = QtWidgets.QGraphicsRectItem()
self.selectBox.setPen(fn.mkPen('y', width=3, style=QtCore.Qt.PenStyle.DashLine))
self.selectBox.hide()
self.scene.addItem(self.selectBox)
self.ui = ui_template.Ui_Form()
self.ui.setupUi(self)
self.ui.closeBtn.clicked.connect(self.close)
self.ui.exportBtn.clicked.connect(self.exportClicked)
self.ui.copyBtn.clicked.connect(self.copyClicked)
self.ui.itemTree.currentItemChanged.connect(self.exportItemChanged)
self.ui.formatList.currentItemChanged.connect(self.exportFormatChanged)
def show(self, item=None):
if item is not None:
## Select next exportable parent of the item originally clicked on
while not isinstance(item, ViewBox) and not isinstance(item, PlotItem) and item is not None:
item = item.parentItem()
## if this is a ViewBox inside a PlotItem, select the parent instead.
if isinstance(item, ViewBox) and isinstance(item.parentItem(), PlotItem):
item = item.parentItem()
self.updateItemList(select=item)
self.setVisible(True)
self.activateWindow()
self.raise_()
self.selectBox.setVisible(True)
if not self.shown:
self.shown = True
center = self.screen().availableGeometry().center()
frame = self.frameGeometry()
frame.moveCenter(center)
self.move(frame.topLeft())
def updateItemList(self, select=None):
self.ui.itemTree.clear()
si = QtWidgets.QTreeWidgetItem(["Entire Scene"])
si.gitem = self.scene
self.ui.itemTree.addTopLevelItem(si)
self.ui.itemTree.setCurrentItem(si)
si.setExpanded(True)
for child in self.scene.items():
if child.parentItem() is None:
self.updateItemTree(child, si, select=select)
def updateItemTree(self, item, treeItem, select=None):
si = None
if isinstance(item, ViewBox):
si = QtWidgets.QTreeWidgetItem(['ViewBox'])
elif isinstance(item, PlotItem):
si = QtWidgets.QTreeWidgetItem(['Plot'])
if si is not None:
si.gitem = item
treeItem.addChild(si)
treeItem = si
if si.gitem is select:
self.ui.itemTree.setCurrentItem(si)
for ch in item.childItems():
self.updateItemTree(ch, treeItem, select=select)
@QtCore.Slot(QtWidgets.QTreeWidgetItem, QtWidgets.QTreeWidgetItem)
def exportItemChanged(self, item, prev):
if item is None:
return
if item.gitem is self.scene:
newBounds = self.scene.views()[0].viewRect()
else:
newBounds = item.gitem.sceneBoundingRect()
self.selectBox.setRect(newBounds)
self.selectBox.show()
self.updateFormatList()
def updateFormatList(self):
current = self.ui.formatList.currentItem()
self.ui.formatList.clear()
gotCurrent = False
for exp in exporters.listExporters():
item = FormatExportListWidgetItem(exp, QtCore.QCoreApplication.translate('Exporter', exp.Name))
self.ui.formatList.addItem(item)
if item is current:
self.ui.formatList.setCurrentRow(self.ui.formatList.count() - 1)
gotCurrent = True
if not gotCurrent:
self.ui.formatList.setCurrentRow(0)
@QtCore.Slot(QtWidgets.QListWidgetItem, QtWidgets.QListWidgetItem)
def exportFormatChanged(self, item, prev):
if item is None:
self.currentExporter = None
self.ui.paramTree.clear()
return
expClass = item.expClass
exp = expClass(item=self.ui.itemTree.currentItem().gitem)
params = exp.parameters()
if params is None:
self.ui.paramTree.clear()
else:
self.ui.paramTree.setParameters(params)
self.currentExporter = exp
self.ui.copyBtn.setEnabled(exp.allowCopy)
@QtCore.Slot()
def exportClicked(self):
self.selectBox.hide()
self.currentExporter.export()
@QtCore.Slot()
def copyClicked(self):
self.selectBox.hide()
self.currentExporter.export(copy=True)
def close(self):
self.selectBox.setVisible(False)
self.setVisible(False)
def closeEvent(self, event):
self.close()
super().closeEvent(event)
| ExportDialog |
python | pandas-dev__pandas | pandas/errors/__init__.py | {
"start": 10949,
"end": 11736
} | class ____(ValueError):
"""
Exception raised in ``pd.read_csv`` when empty data or header is encountered.
This error is typically encountered when attempting to read an empty file or
an invalid file where no data or headers are present.
See Also
--------
read_csv : Read a comma-separated values (CSV) file into DataFrame.
errors.ParserError : Exception that is raised by an error encountered in parsing
file contents.
errors.DtypeWarning : Warning raised when reading different dtypes in a column
from a file.
Examples
--------
>>> from io import StringIO
>>> empty = StringIO()
>>> pd.read_csv(empty)
Traceback (most recent call last):
EmptyDataError: No columns to parse from file
"""
| EmptyDataError |
python | walkccc__LeetCode | solutions/1997. First Day Where You Have Been in All the Rooms/1997.py | {
"start": 0,
"end": 788
} | class ____:
def firstDayBeenInAllRooms(self, nextVisit: list[int]) -> int:
MOD = 1_000_000_007
n = len(nextVisit)
# dp[i] := the number of days to visit room i for the first time
dp = [0] * n
# Whenever we visit i, visit times of room[0..i - 1] are all even.
# Therefore, the rooms before i can be seen as reset and we can safely
# reuse dp[0..i - 1] as first-time visit to get second-time visit.
for i in range(1, n):
# The total days to visit room[i] is the sum of
# * dp[i - 1]: 1st-time visit room[i - 1]
# * 1: visit room[nextVisit[i - 1]]
# * dp[i - 1] - dp[nextVisit[i - 1]]: 2-time visit room[i - 1]
# * 1: visit room[i]
dp[i] = (2 * dp[i - 1] - dp[nextVisit[i - 1]] + 2) % MOD
return dp[-1]
| Solution |
python | h5py__h5py | h5py/tests/test_h5t.py | {
"start": 1703,
"end": 6595
} | class ____(TestCase):
"""Test TypeFloatID."""
def test_custom_float_promotion(self):
"""Custom floats are correctly promoted to standard floats on read."""
# This test uses the low-level API, so we need names as byte strings
test_filename = self.mktemp().encode()
dataset = b'DS1'
dataset2 = b'DS2'
dataset3 = b'DS3'
dataset4 = b'DS4'
dataset5 = b'DS5'
dims = (4, 7)
wdata = np.array([[-1.50066626e-09, 1.40062184e-09, 1.81216819e-10,
4.01087163e-10, 4.27917257e-10, -7.04858394e-11,
5.74800652e-10],
[-1.50066626e-09, 4.86579665e-10, 3.42879503e-10,
5.12045517e-10, 5.10226528e-10, 2.24190444e-10,
3.93356459e-10],
[-1.50066626e-09, 5.24778443e-10, 8.19454726e-10,
1.28966349e-09, 1.68483894e-10, 5.71276360e-11,
-1.08684617e-10],
[-1.50066626e-09, -1.08343556e-10, -1.58934199e-10,
8.52196536e-10, 6.18456397e-10, 6.16637408e-10,
1.31694833e-09]], dtype=np.float32)
wdata2 = np.array([[-1.50066626e-09, 5.63886715e-10, -8.74251782e-11,
1.32558853e-10, 1.59161573e-10, 2.29420039e-10,
-7.24185156e-11],
[-1.50066626e-09, 1.87810656e-10, 7.74889486e-10,
3.95630195e-10, 9.42236511e-10, 8.38554115e-10,
-8.71978045e-11],
[-1.50066626e-09, 6.20275387e-10, 7.34871719e-10,
6.64840627e-10, 2.64662958e-10, 1.05319486e-09,
1.68256520e-10],
[-1.50066626e-09, 1.67347025e-10, 5.12045517e-10,
3.36513040e-10, 1.02545528e-10, 1.28784450e-09,
4.06089384e-10]], dtype=np.float32)
# Create a new file using the default properties.
fid = h5py.h5f.create(test_filename)
# Create the dataspace. No maximum size parameter needed.
space = h5py.h5s.create_simple(dims)
# create a custom type with larger bias
mytype = h5t.IEEE_F16LE.copy()
mytype.set_fields(14, 9, 5, 0, 9)
mytype.set_size(2)
mytype.set_ebias(53)
mytype.lock()
dset = h5py.h5d.create(fid, dataset, mytype, space)
dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata)
del dset
# create a custom type with larger exponent
mytype2 = h5t.IEEE_F16LE.copy()
mytype2.set_fields(15, 9, 6, 0, 9)
mytype2.set_size(2)
mytype2.set_ebias(53)
mytype2.lock()
dset = h5py.h5d.create(fid, dataset2, mytype2, space)
dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata2)
del dset
# create a custom type which reimplements 16-bit floats
mytype3 = h5t.IEEE_F16LE.copy()
mytype3.set_fields(15, 10, 5, 0, 10)
mytype3.set_size(2)
mytype3.set_ebias(15)
mytype3.lock()
dset = h5py.h5d.create(fid, dataset3, mytype3, space)
dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata2)
del dset
# create a custom type with larger bias
mytype4 = h5t.IEEE_F16LE.copy()
mytype4.set_fields(15, 10, 5, 0, 10)
mytype4.set_size(2)
mytype4.set_ebias(258)
mytype4.lock()
dset = h5py.h5d.create(fid, dataset4, mytype4, space)
dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata2)
del dset
# create a dataset with long doubles
dset = h5py.h5d.create(fid, dataset5, h5t.NATIVE_LDOUBLE, space)
dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata2)
# Explicitly close and release resources.
del space
del dset
del fid
f = h5py.File(test_filename, 'r')
# ebias promotion to float32
values = f[dataset][:]
np.testing.assert_array_equal(values, wdata)
self.assertEqual(values.dtype, np.dtype('<f4'))
# esize promotion to float32
values = f[dataset2][:]
np.testing.assert_array_equal(values, wdata2)
self.assertEqual(values.dtype, np.dtype('<f4'))
# regular half floats
dset = f[dataset3]
try:
self.assertEqual(dset.dtype, np.dtype('<f2'))
except AttributeError:
self.assertEqual(dset.dtype, np.dtype('<f4'))
# ebias promotion to float64
dset = f[dataset4]
self.assertEqual(dset.dtype, np.dtype('<f8'))
# long double floats
dset = f[dataset5]
self.assertEqual(dset.dtype, np.longdouble)
| TestTypeFloatID |
python | doocs__leetcode | solution/2100-2199/2160.Minimum Sum of Four Digit Number After Splitting Digits/Solution.py | {
"start": 0,
"end": 233
} | class ____:
def minimumSum(self, num: int) -> int:
nums = []
while num:
nums.append(num % 10)
num //= 10
nums.sort()
return 10 * (nums[0] + nums[1]) + nums[2] + nums[3]
| Solution |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_test.py | {
"start": 60486,
"end": 63070
} | class ____(test_lib.TestCase):
def test1DTensor(self):
x = array_ops.ones([3, 6, 5])
ksize = 2
strides = 2
y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.avg_pool1d(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def test1DNumpy(self):
# explicitly use float32 for ROCm, as MIOpen does not yet support float64
# np.ones defaults to using float64 when dtype is not explicitly specified
dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
x = np.ones([3, 6, 5], dtype=dtype)
ksize = 2
strides = 2
y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.avg_pool1d(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def test1DNumpyWithGolden(self):
dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
x = np.array([[[3], [6], [5]], [[1], [0], [1]]], dtype=dtype)
ksize = 2
strides = 1
y = nn_ops.avg_pool1d(x, ksize, strides, "SAME")
expected_y = np.array([[[4.5], [5.5], [5.0]], [[0.5], [0.5], [1.0]]],
dtype=dtype)
self.assertAllEqual(self.evaluate(y), expected_y)
def test2DTensor(self):
x = array_ops.ones([3, 6, 6, 5])
ksize = 2
strides = 2
y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.avg_pool(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def test2DNumpy(self):
# explicitly use float32 for ROCm, as MIOpen does not yet support float64
# np.ones defaults to using float64 when dtype is not explicitly specified
dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
x = np.ones([3, 6, 6, 5], dtype=dtype)
ksize = 2
strides = 2
y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.avg_pool(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def test3DTensor(self):
x = array_ops.ones([3, 7, 6, 6, 5])
ksize = 2
strides = 2
y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.avg_pool3d(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def test3DNumpy(self):
x = np.ones([3, 7, 6, 6, 5], dtype=np.float32)
ksize = 2
strides = 2
y1 = nn_ops.avg_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.avg_pool3d(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
@test_util.run_all_in_graph_and_eager_modes
| AvgPoolTest |
python | PyCQA__pylint | tests/functional/u/unexpected_special_method_signature.py | {
"start": 3317,
"end": 3477
} | class ____:
def __init_subclass__(cls, default_name, **kwargs):
super().__init_subclass__(**kwargs)
cls.default_name = default_name
| Philosopher |
python | facebook__pyre-check | client/coverage_data.py | {
"start": 13347,
"end": 13515
} | class ____(json_mixins.SnakeCaseAndExcludeJsonMixin):
kind: SuppressionKind
location: Location
error_codes: Optional[Sequence[ErrorCode]]
| TypeErrorSuppression |
python | graphql-python__graphene | graphene/types/tests/test_union.py | {
"start": 203,
"end": 1453
} | class ____(ObjectType):
pass
def test_generate_union():
class MyUnion(Union):
"""Documentation"""
class Meta:
types = (MyObjectType1, MyObjectType2)
assert MyUnion._meta.name == "MyUnion"
assert MyUnion._meta.description == "Documentation"
assert MyUnion._meta.types == (MyObjectType1, MyObjectType2)
def test_generate_union_with_meta():
class MyUnion(Union):
class Meta:
name = "MyOtherUnion"
description = "Documentation"
types = (MyObjectType1, MyObjectType2)
assert MyUnion._meta.name == "MyOtherUnion"
assert MyUnion._meta.description == "Documentation"
def test_generate_union_with_no_types():
with raises(Exception) as exc_info:
class MyUnion(Union):
pass
assert str(exc_info.value) == "Must provide types for Union MyUnion."
def test_union_can_be_mounted():
class MyUnion(Union):
class Meta:
types = (MyObjectType1, MyObjectType2)
my_union_instance = MyUnion()
assert isinstance(my_union_instance, UnmountedType)
my_union_field = my_union_instance.mount_as(Field)
assert isinstance(my_union_field, Field)
assert my_union_field.type == MyUnion
| MyObjectType2 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 286195,
"end": 286792
} | class ____(sgqlc.types.Input):
"""Choose which environments must be successfully deployed to before
branches can be merged into a branch that matches this rule.
"""
__schema__ = github_schema
__field_names__ = ("required_deployment_environments",)
required_deployment_environments = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(String))), graphql_name="requiredDeploymentEnvironments"
)
"""The environments that must be successfully deployed to before
branches can be merged.
"""
| RequiredDeploymentsParametersInput |
python | tiangolo__fastapi | scripts/sponsors.py | {
"start": 1118,
"end": 1220
} | class ____(BaseModel):
cursor: str
node: SponsorshipAsMaintainerNode
| SponsorshipAsMaintainerEdge |
python | walkccc__LeetCode | solutions/1178. Number of Valid Words for Each Puzzle/1178.py | {
"start": 0,
"end": 666
} | class ____:
def findNumOfValidWords(
self,
words: list[str],
puzzles: list[str],
) -> list[int]:
ans = []
binaryCount = collections.Counter()
for word in words:
mask = 0
for c in word:
mask |= 1 << ord(c) - ord('a')
binaryCount[mask] += 1
for puzzle in puzzles:
valid = 0
n = len(puzzle) - 1
for i in range(1 << n):
mask = 1 << ord(puzzle[0]) - ord('a')
for j in range(n):
if i >> j & 1:
mask |= 1 << ord(puzzle[j + 1]) - ord('a')
if mask in binaryCount:
valid += binaryCount[mask]
ans.append(valid)
return ans
| Solution |
python | pypa__pip | src/pip/_vendor/rich/traceback.py | {
"start": 8278,
"end": 8317
} | class ____:
stacks: List[Stack]
| Trace |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py | {
"start": 13741,
"end": 13809
} | class ____(RetargetingMixin, GeoReport):
pass
| RetargetingGeoReport |
python | django__django | tests/model_forms/tests.py | {
"start": 83981,
"end": 89043
} | class ____(TestCase):
def test_modelform_onetoonefield(self):
class ImprovedArticleForm(forms.ModelForm):
class Meta:
model = ImprovedArticle
fields = "__all__"
class ImprovedArticleWithParentLinkForm(forms.ModelForm):
class Meta:
model = ImprovedArticleWithParentLink
fields = "__all__"
self.assertEqual(list(ImprovedArticleForm.base_fields), ["article"])
self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), [])
def test_modelform_subclassed_model(self):
class BetterWriterForm(forms.ModelForm):
class Meta:
# BetterWriter model is a subclass of Writer with an additional
# `score` field.
model = BetterWriter
fields = "__all__"
bw = BetterWriter.objects.create(name="Joe Better", score=10)
self.assertEqual(
sorted(model_to_dict(bw)), ["id", "name", "score", "writer_ptr"]
)
self.assertEqual(sorted(model_to_dict(bw, fields=[])), [])
self.assertEqual(
sorted(model_to_dict(bw, fields=["id", "name"])), ["id", "name"]
)
self.assertEqual(
sorted(model_to_dict(bw, exclude=[])), ["id", "name", "score", "writer_ptr"]
)
self.assertEqual(
sorted(model_to_dict(bw, exclude=["id", "name"])), ["score", "writer_ptr"]
)
form = BetterWriterForm({"name": "Some Name", "score": 12})
self.assertTrue(form.is_valid())
bw2 = form.save()
self.assertEqual(bw2.score, 12)
def test_onetoonefield(self):
class WriterProfileForm(forms.ModelForm):
class Meta:
# WriterProfile has a OneToOneField to Writer
model = WriterProfile
fields = "__all__"
self.w_royko = Writer.objects.create(name="Mike Royko")
self.w_woodward = Writer.objects.create(name="Bob Woodward")
form = WriterProfileForm()
self.assertHTMLEqual(
form.as_p(),
"""
<p><label for="id_writer">Writer:</label>
<select name="writer" id="id_writer" required>
<option value="" selected>---------</option>
<option value="%s">Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label>
<input type="number" name="age" id="id_age" min="0" required></p>
"""
% (
self.w_woodward.pk,
self.w_royko.pk,
),
)
data = {
"writer": str(self.w_woodward.pk),
"age": "65",
}
form = WriterProfileForm(data)
instance = form.save()
self.assertEqual(str(instance), "Bob Woodward is 65")
form = WriterProfileForm(instance=instance)
self.assertHTMLEqual(
form.as_p(),
"""
<p><label for="id_writer">Writer:</label>
<select name="writer" id="id_writer" required>
<option value="">---------</option>
<option value="%s" selected>Bob Woodward</option>
<option value="%s">Mike Royko</option>
</select></p>
<p><label for="id_age">Age:</label>
<input type="number" name="age" value="65" id="id_age" min="0" required>
</p>"""
% (
self.w_woodward.pk,
self.w_royko.pk,
),
)
def test_assignment_of_none(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author
fields = ["publication", "full_name"]
publication = Publication.objects.create(
title="Pravda", date_published=datetime.date(1991, 8, 22)
)
author = Author.objects.create(publication=publication, full_name="John Doe")
form = AuthorForm({"publication": "", "full_name": "John Doe"}, instance=author)
self.assertTrue(form.is_valid())
self.assertIsNone(form.cleaned_data["publication"])
author = form.save()
# author object returned from form still retains original publication
# object that's why we need to retrieve it from database again
new_author = Author.objects.get(pk=author.pk)
self.assertIsNone(new_author.publication)
def test_assignment_of_none_null_false(self):
class AuthorForm(forms.ModelForm):
class Meta:
model = Author1
fields = ["publication", "full_name"]
publication = Publication.objects.create(
title="Pravda", date_published=datetime.date(1991, 8, 22)
)
author = Author1.objects.create(publication=publication, full_name="John Doe")
form = AuthorForm({"publication": "", "full_name": "John Doe"}, instance=author)
self.assertFalse(form.is_valid())
| ModelOneToOneFieldTests |
python | pytorch__pytorch | test/test_ops_gradients.py | {
"start": 679,
"end": 4251
} | class ____(TestGradients):
# Tests that gradients are computed correctly
@_gradcheck_ops(op_db + hop_db + custom_op_db)
def test_fn_grad(self, device, dtype, op):
# This is verified by test_dtypes in test_ops.py
if dtype not in op.supported_backward_dtypes(torch.device(device).type):
self.skipTest("Skipped! Dtype is not in supported backward dtypes!")
else:
self._grad_test_helper(device, dtype, op, op.get_op())
# Method grad (and gradgrad, see below) tests are disabled since they're
# costly and redundant with function grad (and gradgad) tests
# @_gradcheck_ops(op_db)
# def test_method_grad(self, device, dtype, op):
# self._skip_helper(op, device, dtype)
# self._grad_test_helper(device, dtype, op, op.get_method())
@_gradcheck_ops(op_db + custom_op_db)
def test_inplace_grad(self, device, dtype, op):
self._skip_helper(op, device, dtype)
if not op.inplace_variant:
self.skipTest("Op has no inplace variant!")
# Verifies an operation doesn't support inplace autograd if it claims not to
if not op.supports_inplace_autograd:
inplace = self._get_safe_inplace(op.get_inplace())
for sample in op.sample_inputs(device, dtype, requires_grad=True):
if sample.broadcasts_input:
continue
with self.assertRaises(Exception):
result = inplace(sample)
result.sum().backward()
else:
self._grad_test_helper(
device, dtype, op, self._get_safe_inplace(op.get_inplace())
)
# Test that gradients of gradients are computed correctly
@_gradcheck_ops(op_db + hop_db + custom_op_db)
def test_fn_gradgrad(self, device, dtype, op):
self._skip_helper(op, device, dtype)
if not op.supports_gradgrad:
self.skipTest(
"Op claims it doesn't support gradgrad. This is not verified."
)
else:
self._check_helper(device, dtype, op, op.get_op(), "bwgrad_bwgrad")
# Test that gradients of gradients are properly raising
@_gradcheck_ops(op_db + custom_op_db)
def test_fn_fail_gradgrad(self, device, dtype, op):
self._skip_helper(op, device, dtype)
if op.supports_gradgrad:
self.skipTest("Skipped! Operation does support gradgrad")
err_msg = r"derivative for .* is not implemented"
with self.assertRaisesRegex(RuntimeError, err_msg):
self._check_helper(device, dtype, op, op.get_op(), "bwgrad_bwgrad")
# Method gradgrad (and grad, see above) tests are disabled since they're
# costly and redundant with function gradgrad (and grad) tests
# @_gradcheck_ops(op_db)
# def test_method_gradgrad(self, device, dtype, op):
# self._skip_helper(op, device, dtype)
# self._gradgrad_test_helper(device, dtype, op, op.get_method())
@_gradcheck_ops(op_db)
def test_inplace_gradgrad(self, device, dtype, op):
self._skip_helper(op, device, dtype)
if not op.inplace_variant or not op.supports_inplace_autograd:
self.skipTest("Skipped! Operation does not support inplace autograd.")
self._check_helper(
device, dtype, op, self._get_safe_inplace(op.get_inplace()), "bwgrad_bwgrad"
)
instantiate_device_type_tests(TestBwdGradients, globals())
if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests()
| TestBwdGradients |
python | huggingface__transformers | tests/models/superpoint/test_modeling_superpoint.py | {
"start": 1280,
"end": 4095
} | class ____:
def __init__(
self,
parent,
batch_size=3,
image_width=80,
image_height=60,
encoder_hidden_sizes: list[int] = [32, 32, 64, 64],
decoder_hidden_size: int = 128,
keypoint_decoder_dim: int = 65,
descriptor_decoder_dim: int = 128,
keypoint_threshold: float = 0.005,
max_keypoints: int = -1,
nms_radius: int = 4,
border_removal_distance: int = 4,
):
self.parent = parent
self.batch_size = batch_size
self.image_width = image_width
self.image_height = image_height
self.encoder_hidden_sizes = encoder_hidden_sizes
self.decoder_hidden_size = decoder_hidden_size
self.keypoint_decoder_dim = keypoint_decoder_dim
self.descriptor_decoder_dim = descriptor_decoder_dim
self.keypoint_threshold = keypoint_threshold
self.max_keypoints = max_keypoints
self.nms_radius = nms_radius
self.border_removal_distance = border_removal_distance
def prepare_config_and_inputs(self):
# SuperPoint expects a grayscale image as input
pixel_values = floats_tensor([self.batch_size, 3, self.image_height, self.image_width])
config = self.get_config()
return config, pixel_values
def get_config(self):
return SuperPointConfig(
encoder_hidden_sizes=self.encoder_hidden_sizes,
decoder_hidden_size=self.decoder_hidden_size,
keypoint_decoder_dim=self.keypoint_decoder_dim,
descriptor_decoder_dim=self.descriptor_decoder_dim,
keypoint_threshold=self.keypoint_threshold,
max_keypoints=self.max_keypoints,
nms_radius=self.nms_radius,
border_removal_distance=self.border_removal_distance,
)
def create_and_check_keypoint_detection(self, config, pixel_values):
model = SuperPointForKeypointDetection(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.keypoints.shape[0], self.batch_size)
self.parent.assertEqual(result.keypoints.shape[-1], 2)
result = model(pixel_values, output_hidden_states=True)
self.parent.assertEqual(
result.hidden_states[-1].shape,
(
self.batch_size,
self.encoder_hidden_sizes[-1],
self.image_height // 8,
self.image_width // 8,
),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| SuperPointModelTester |
python | huggingface__transformers | tests/models/lfm2_vl/test_image_processing_lfm2_vl.py | {
"start": 3479,
"end": 11566
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
test_slow_image_processor = False
fast_image_processing_class = Lfm2VlImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = Lfm2VlImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "downsample_factor"))
self.assertTrue(hasattr(image_processing, "min_tiles"))
self.assertTrue(hasattr(image_processing, "max_tiles"))
self.assertTrue(hasattr(image_processing, "use_thumbnail"))
self.assertTrue(hasattr(image_processing, "min_image_tokens"))
self.assertTrue(hasattr(image_processing, "max_image_tokens"))
self.assertTrue(hasattr(image_processing, "encoder_patch_size"))
self.assertTrue(hasattr(image_processing, "tile_size"))
self.assertTrue(hasattr(image_processing, "max_pixels_tolerance"))
@require_vision
def test_smart_resize(self):
# verify that smart resize output dims are divisible by encoder_patch_size * downsample_factor
image_processing = self.fast_image_processing_class(**self.image_processor_dict)
width, height = image_processing.smart_resize(
height=500,
width=300,
downsample_factor=image_processing.downsample_factor,
min_image_tokens=image_processing.min_image_tokens,
max_image_tokens=image_processing.max_image_tokens,
encoder_patch_size=image_processing.encoder_patch_size,
)
mod = image_processing.encoder_patch_size * image_processing.downsample_factor
self.assertEqual(width % mod, 0)
self.assertEqual(height % mod, 0)
@require_vision
def test_get_grid_layout(self):
# splitting a 512×512 image into tiles of size processor.image_processor.tile_size
image_processing = self.fast_image_processing_class(**self.image_processor_dict)
rows, cols, _, _, num_patches = image_processing._get_grid_layout(
height=1024,
width=1024,
min_tiles=image_processing.min_tiles,
max_tiles=image_processing.max_tiles,
tile_size=image_processing.tile_size,
)
self.assertEqual(num_patches, 4)
self.assertEqual(num_patches, rows * cols)
rows, cols, _, _, num_patches = image_processing._get_grid_layout(
height=1024,
width=1024,
min_tiles=8,
max_tiles=8,
tile_size=image_processing.tile_size,
)
self.assertEqual(num_patches, 8)
self.assertEqual(num_patches, rows * cols)
def test_find_closest_aspect_ratio(self):
# should pick (1,1) over (2,1) for a square image
result = find_closest_aspect_ratio(1.0, [(1, 1), (2, 1)], width=100, height=100, image_size=100)
self.assertEqual(result, (1, 1))
result = find_closest_aspect_ratio(0.5, [(1, 1), (1, 2)], width=100, height=200, image_size=200)
self.assertEqual(result, (1, 2))
def test_call_numpy(self):
# Initialize image_processing
image_processing = self.fast_image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for sample_images in image_inputs:
for image in sample_images:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(1, image_processing.max_num_patches, 3 * image_processing.encoder_patch_size**2),
)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(
self.image_processor_tester.batch_size,
image_processing.max_num_patches,
3 * image_processing.encoder_patch_size**2,
),
)
def test_call_numpy_4_channels(self):
# Lfm2Vl always processes images as RGB, so it always returns images with 3 channels
# Initialize image_processing
image_processor_dict = self.image_processor_dict
image_processing = self.fast_image_processing_class(**image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for sample_images in image_inputs:
for image in sample_images:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(1, image_processing.max_num_patches, 3 * image_processing.encoder_patch_size**2),
)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(
self.image_processor_tester.batch_size,
image_processing.max_num_patches,
3 * image_processing.encoder_patch_size**2,
),
)
def test_call_pil(self):
# Initialize image_processing
image_processing = self.fast_image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for images in image_inputs:
for image in images:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(1, image_processing.max_num_patches, 3 * image_processing.encoder_patch_size**2),
)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(
self.image_processor_tester.batch_size,
image_processing.max_num_patches,
3 * image_processing.encoder_patch_size**2,
),
)
def test_call_pytorch(self):
# Initialize image_processing
image_processing = self.fast_image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for images in image_inputs:
for image in images:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(1, image_processing.max_num_patches, 3 * image_processing.encoder_patch_size**2),
)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
tuple(encoded_images.shape),
(
self.image_processor_tester.batch_size,
image_processing.max_num_patches,
3 * image_processing.encoder_patch_size**2,
),
)
| Lfm2VlImageProcessingTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/reflection.py | {
"start": 864,
"end": 1317
} | class ____:
"""Stores raw information about a SHOW CREATE TABLE statement."""
charset: Optional[str]
def __init__(self) -> None:
self.columns: list[ReflectedColumn] = []
self.table_options: dict[str, str] = {}
self.table_name: Optional[str] = None
self.keys: list[dict[str, Any]] = []
self.fk_constraints: list[dict[str, Any]] = []
self.ck_constraints: list[dict[str, Any]] = []
| ReflectedState |
python | scipy__scipy | scipy/linalg/tests/test_decomp_update.py | {
"start": 66231,
"end": 66294
} | class ____(BaseQRupdate):
dtype = np.dtype('f')
| TestQRupdate_f |
python | bokeh__bokeh | src/bokeh/models/widgets/inputs.py | {
"start": 17620,
"end": 17968
} | class ____(InputWidget):
''' Color picker widget.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
color = ColorHex(default='#000000', help="""
The initial color of the picked color (named or hexadecimal)
""")
| ColorPicker |
python | huggingface__transformers | src/transformers/testing_utils.py | {
"start": 55532,
"end": 55710
} | class ____(CaptureStd):
"""Same as CaptureStd but captures only stderr"""
def __init__(self, replay=True):
super().__init__(out=False, replay=replay)
| CaptureStderr |
python | conda__conda | conda/gateways/repodata/jlap/fetch.py | {
"start": 1353,
"end": 5502
} | class ____(LookupError):
pass
def process_jlap_response(response: Response, pos=0, iv=b""):
# if response is 304 Not Modified, could return a buffer with only the
# cached footer...
if response.status_code == 304:
raise Jlap304NotModified()
def lines() -> Iterator[bytes]:
yield from response.iter_lines(delimiter=b"\n") # type: ignore
buffer = JLAP.from_lines(lines(), iv, pos)
# new iv == initial iv if nothing changed
pos, footer, _ = buffer[-2]
footer = json.loads(footer)
new_state = {
# we need to save etag, last-modified, cache-control
"headers": {
k.lower(): v
for k, v in response.headers.items()
if k.lower() in STORE_HEADERS
},
"iv": buffer[-3][-1],
"pos": pos,
"footer": footer,
}
return buffer, new_state
def fetch_jlap(url, pos=0, etag=None, iv=b"", ignore_etag=True, session=None):
response = request_jlap(
url, pos=pos, etag=etag, ignore_etag=ignore_etag, session=session
)
return process_jlap_response(response, pos=pos, iv=iv)
def request_jlap(
url, pos=0, etag=None, ignore_etag=True, session: Session | None = None
):
"""Return the part of the remote .jlap file we are interested in."""
headers = {}
if pos:
headers["range"] = f"bytes={pos}-"
if etag and not ignore_etag:
headers["if-none-match"] = etag
log.debug("%s %s", mask_anaconda_token(url), headers)
if session is None:
raise RuntimeError("session cannot be None")
timeout = context.remote_connect_timeout_secs, context.remote_read_timeout_secs
response = session.get(url, stream=True, headers=headers, timeout=timeout)
response.raise_for_status()
if response.request:
log.debug("request headers: %s", pprint.pformat(response.request.headers))
else:
log.debug("response without request.")
log.debug(
"response headers: %s",
pprint.pformat(
{k: v for k, v in response.headers.items() if k.lower() in STORE_HEADERS}
),
)
log.debug("status: %d", response.status_code)
if "range" in headers:
# 200 is also a possibility that we'd rather not deal with; if the
# server can't do range requests, also mark jlap as unavailable. Which
# status codes mean 'try again' instead of 'it will never work'?
if response.status_code not in (206, 304, 404, 416):
raise HTTPError(
f"Unexpected response code for range request {response.status_code}",
response=response,
)
log.info("%s", response)
return response
def format_hash(hash):
"""Abbreviate hash for formatting."""
return hash[:16] + "\N{HORIZONTAL ELLIPSIS}"
def find_patches(patches, have, want):
apply = []
for patch in reversed(patches):
if have == want:
break
if patch["to"] == want:
apply.append(patch)
want = patch["from"]
if have != want:
log.debug(f"No patch from local revision {format_hash(have)}")
raise JlapPatchNotFound(f"No patch from local revision {format_hash(have)}")
return apply
def apply_patches(data, apply):
while apply:
patch = apply.pop()
log.debug(
f"{format_hash(patch['from'])} \N{RIGHTWARDS ARROW} {format_hash(patch['to'])}, "
f"{len(patch['patch'])} steps"
)
data = jsonpatch.JsonPatch(patch["patch"]).apply(data, in_place=True)
def withext(url, ext):
return re.sub(r"(\.\w+)$", ext, url)
@contextmanager
def timeme(message):
begin = time.monotonic()
yield
end = time.monotonic()
log.debug("%sTook %0.02fs", message, end - begin)
def build_headers(json_path: pathlib.Path, state: RepodataState):
"""Caching headers for a path and state."""
headers = {}
# simplify if we require state to be empty when json_path is missing.
if json_path.exists():
etag = state.get("_etag")
if etag:
headers["if-none-match"] = etag
return headers
| JlapPatchNotFound |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 7092,
"end": 7649
} | class ____(Projection):
r"""Base class for all Zenithal projections.
Zenithal (or azimuthal) projections map the sphere directly onto a
plane. All zenithal projections are specified by defining the
radius as a function of native latitude, :math:`R_\theta`.
The pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg(-y, x) \\
R_\theta &= \sqrt{x^2 + y^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin \phi \\
y &= R_\theta \cos \phi
"""
| Zenithal |
python | tensorflow__tensorflow | tensorflow/examples/custom_ops_doc/multiplex_1/multiplex_1_test.py | {
"start": 1133,
"end": 5151
} | class ____(tf.test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_multiplex_int(self):
a = tf.constant([1, 2, 3, 4, 5])
b = tf.constant([10, 20, 30, 40, 50])
cond = tf.constant([True, False, True, False, True], dtype=bool)
expect = np.where(self.evaluate(cond), self.evaluate(a), self.evaluate(b))
# expected result is [1, 20, 3, 40, 5]
result = multiplex_1_op.multiplex(cond, a, b)
self.assertAllEqual(result, expect)
@test_util.run_in_graph_and_eager_modes
def test_multiplex_float(self):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0])
b = tf.constant([10.0, 20.0, 30.0, 40.0, 50.0])
cond = tf.constant([True, False, True, False, True], dtype=bool)
# expected result is [1.0, 20.0, 3.0, 40.0, 5.0]
expect = np.where(self.evaluate(cond), self.evaluate(a), self.evaluate(b))
result = multiplex_1_op.multiplex(cond, a, b)
self.assertAllEqual(result, expect)
@test_util.run_in_graph_and_eager_modes
def test_multiplex_bad_types(self):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0]) # float
b = tf.constant([10, 20, 30, 40, 50]) # int32
cond = tf.constant([True, False, True, False, True], dtype=bool)
with self.assertRaisesRegex(
(errors_impl.InvalidArgumentError, TypeError),
# Eager mode raises InvalidArgumentError with the following message
r'(cannot compute Examples1>MultiplexDense as input #2\(zero-based\) '
r'was expected to be a float tensor but is a int32 tensor '
r'\[Op:Examples1>MultiplexDense\]'
r')|('
# Graph mode raises TypeError with the following message
r"Input 'b_values' of 'Examples1>MultiplexDense' Op has type int32 that "
r"does not match type float32 of argument 'a_values'.)"):
self.evaluate(multiplex_1_op.multiplex(cond, a, b))
@test_util.run_in_graph_and_eager_modes
def test_multiplex_bad_size(self):
a = tf.constant([1, 2, 3, 4, 5]) # longer than b
b = tf.constant([10, 20]) # shorter than a
cond = tf.constant([True, False, True, False, True], dtype=bool)
with self.assertRaisesRegex(
(errors_impl.InvalidArgumentError, ValueError),
# Eager mode raises InvalidArgumentError with the following message
r'(?s)(a_values and b_values must have the same shape. '
r'a_values shape: \[5\] b_values shape: \[2\].* '
r'\[Op:Examples1>MultiplexDense\]'
r')|('
# Graph mode raises ValueError with the following message
r'Dimension 0 in both shapes must be equal, but are 5 and 2\. '
r'Shapes are \[5\] and \[2\]\.)'):
self.evaluate(multiplex_1_op.multiplex(cond, a, b))
@test_util.run_in_graph_and_eager_modes
def test_multiplex_2d(self):
a = tf.constant([[1, 2, 3], [4, 5, 6]])
b = tf.constant([[10, 20, 30], [40, 50, 60]])
cond = tf.constant([[True, False, True], [False, True, False]], dtype=bool)
expect = np.where(self.evaluate(cond), self.evaluate(a), self.evaluate(b))
# expected result is [[1, 20], [3, 40]]
result = multiplex_1_op.multiplex(cond, a, b)
self.assertAllEqual(result, expect)
@test_util.run_in_graph_and_eager_modes
def test_multiplex_bad_shape(self):
a = tf.constant([[1, 2, 3], [4, 5, 6]]) # shape (2,3)
b = tf.constant([[10, 20], [30, 40], [50, 60]]) # shape (3,2)
cond = tf.constant([[True, False, True], [False, True, False]], dtype=bool)
with self.assertRaisesRegex(
(errors_impl.InvalidArgumentError, ValueError),
# Eager mode raises InvalidArgumentError with the following message
r'(a_values and b_values must have the same shape.'
r' a_values shape: \[2,3\] b_values shape: \[3,2\]'
r')|('
# Graph mode raises ValueError with the following message
r'Dimension 0 in both shapes must be equal, '
r'but are 2 and 3\. Shapes are \[2,3\] and \[3,2\])\.'):
self.evaluate(multiplex_1_op.multiplex(cond, a, b))
if __name__ == '__main__':
tf.test.main()
| MultiplexOpRank1Test |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVar10.py | {
"start": 122,
"end": 174
} | class ____:
def method(self, x: "A") -> "A": ...
| A |
python | joke2k__faker | tests/providers/test_bank.py | {
"start": 12430,
"end": 12505
} | class ____(TestEnPh):
"""Test fil_PH bank provider"""
pass
| TestFilPh |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 8601,
"end": 8686
} | class ____(HTTPError):
"""The header provided was somehow invalid."""
| InvalidHeader |
python | huggingface__transformers | src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py | {
"start": 79810,
"end": 80858
} | class ____(nn.Module):
"""
Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
height and width of a bounding box w.r.t. an image.
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
Origin from https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/RTDetrV2_paddle/ppdet/modeling/transformers/utils.py#L453
"""
def __init__(self, config, input_dim, d_model, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [d_model] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`RTDetrV2ForObjectDetection`].
"""
)
| RTDetrV2MLPPredictionHead |
python | plotly__plotly.py | plotly/express/_special_inputs.py | {
"start": 0,
"end": 590
} | class ____(object):
"""
`dict`-like object which acts as if the value for any key is the key itself. Objects
of this class can be passed in to arguments like `color_discrete_map` to
use the provided data values as colors, rather than mapping them to colors cycled
from `color_discrete_sequence`. This works for any `_map` argument to Plotly Express
functions, such as `line_dash_map` and `symbol_map`.
"""
def __getitem__(self, key):
return key
def __contains__(self, key):
return True
def copy(self):
return self
| IdentityMap |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vision.py | {
"start": 17594,
"end": 21981
} | class ____(GoogleCloudBaseOperator):
"""
Create and return a new product resource.
Possible errors regarding the ``Product`` object provided:
- Returns ``INVALID_ARGUMENT`` if ``display_name`` is missing or longer than 4096 characters.
- Returns ``INVALID_ARGUMENT`` if ``description`` is longer than 4096 characters.
- Returns ``INVALID_ARGUMENT`` if ``product_category`` is missing or invalid.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionCreateProductOperator`
:param location: (Required) The region where the Product should be created. Valid regions
(as of 2019-02-05) are: us-east1, us-west1, europe-west1, asia-east1
:param product: (Required) The product to create. If a dict is provided, it must be of the same form as
the protobuf message `Product`.
:param project_id: (Optional) The project in which the Product should be created. If set to None or
missing, the default project_id from the Google Cloud connection is used.
:param product_id: (Optional) A user-supplied resource id for this Product.
If set, the server will attempt to use this value as the resource id. If it is
already in use, an error is returned with code ALREADY_EXISTS. Must be at most
128 characters long. It cannot contain the character /.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_product_create_template_fields]
template_fields: Sequence[str] = (
"location",
"project_id",
"product_id",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_product_create_template_fields]
def __init__(
self,
*,
location: str,
product: str,
project_id: str = PROVIDE_PROJECT_ID,
product_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.product = product
self.project_id = project_id
self.product_id = product_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
return hook.create_product(
location=self.location,
product=self.product,
project_id=self.project_id,
product_id=self.product_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info(
"Product with id %s already exists. Exiting from the create operation.", self.product_id
)
return self.product_id
| CloudVisionCreateProductOperator |
python | pyinstaller__pyinstaller | tests/functional/scripts/pyi_osx_aevent_logger_carbon.py | {
"start": 3780,
"end": 11725
} | class ____:
def __init__(self):
# Get runtime from command-line (first and only positional argument; filter our -psn_*** if it is present).
self.runtime = 15
filtered_args = [arg for arg in sys.argv[1:] if not arg.startswith('-psn')]
if filtered_args:
try:
self.runtime = float(filtered_args[0])
except Exception:
pass
# Track activations
self.activation_count = 0
# Open events log
self.logfile = open(self._get_logfile_path(), 'w')
# Event handlers map
self.ae_handlers = {
'oapp': self.open_app_handler,
'odoc': self.open_document_handler,
'GURL': self.open_url_handler,
'rapp': self.reopen_app_handler,
'actv': self.activate_app_handler,
}
def _get_logfile_path(self):
# Open log file
if getattr(sys, 'frozen', False):
basedir = os.path.dirname(sys.executable)
# Handle .app bundle
if os.path.basename(basedir) == 'MacOS':
basedir = os.path.abspath(os.path.join(basedir, os.pardir, os.pardir, os.pardir))
else:
basedir = os.path.dirname(__file__)
return os.path.join(basedir, 'events.log')
def log_error(self, message):
self.logfile.write(f"ERROR {message}\n")
self.logfile.flush()
def log_event(self, event_id, event_data={}):
self.logfile.write(f"{event_id} {json.dumps(event_data)}\n")
self.logfile.flush()
def main(self):
# Log application start.
self.log_event("started", {'args': sys.argv[1:]})
# Configure AppleEvent handlers.
@ae_callback
def _ae_handler(message, reply, refcon):
event_id = struct.pack(">i", refcon).decode('utf8')
print("Event handler called with event ID: %s" % (event_id,))
try:
handler = self.ae_handlers.get(event_id, None)
assert handler, "No handler available!"
event_data = handler(message, reply, refcon)
self.log_event(f"ae {event_id}", event_data)
except Exception as e:
print("Failed to handle event %s: %s!" % (event_id, e))
self.log_error(f"Failed to handle event '{event_id}': {e}")
return 0
carbon.AEInstallEventHandler(kCoreEventClass, kAEOpenApplication, _ae_handler, kAEOpenApplication, FALSE)
carbon.AEInstallEventHandler(kCoreEventClass, kAEOpenDocuments, _ae_handler, kAEOpenDocuments, FALSE)
carbon.AEInstallEventHandler(kAEInternetSuite, kAEGetURL, _ae_handler, kAEGetURL, FALSE)
carbon.AEInstallEventHandler(kCoreEventClass, kAEReOpenApplication, _ae_handler, kAEReOpenApplication, FALSE)
carbon.AEInstallEventHandler(kCoreEventClass, kAEActivate, _ae_handler, kAEActivate, FALSE)
# Run the main loop and process events.
start = time.time()
eventType = EventTypeSpec()
eventType.eventClass = kEventClassAppleEvent
eventType.eventKind = kEventAppleEvent
while time.time() < start + self.runtime:
event = ctypes.c_void_p()
status = carbon.ReceiveNextEvent(
1,
ctypes.byref(eventType),
max(start + self.runtime - time.time(), 0),
TRUE,
ctypes.byref(event),
)
if status == eventLoopTimedOutErr:
break
elif status != 0:
self.log_error(f"Failed to fetch events: {status}!")
break
status = carbon.AEProcessEvent(event)
if status != 0:
self.log_error(f"Failed to process event: {status}!")
break
# Cleanup
carbon.AERemoveEventHandler(kCoreEventClass, kAEOpenApplication, _ae_handler, FALSE)
carbon.AERemoveEventHandler(kCoreEventClass, kAEOpenDocuments, _ae_handler, FALSE)
carbon.AERemoveEventHandler(kAEInternetSuite, kAEGetURL, _ae_handler, FALSE)
carbon.AERemoveEventHandler(kCoreEventClass, kAEReOpenApplication, _ae_handler, FALSE)
carbon.AERemoveEventHandler(kCoreEventClass, kAEActivate, _ae_handler, FALSE)
# Log application finish.
self.log_event("finished", {'activation_count': self.activation_count})
self.logfile.close()
self.logfile = None
# *** Event handlers ***
def open_app_handler(self, message, reply, refcon):
# Nothing to do here, return empty dict.
self.activation_count += 1
return {}
def reopen_app_handler(self, message, reply, refcon):
# Increment the counter, return empty dict.
self.activation_count += 1
return {}
def activate_app_handler(self, message, reply, refcon):
# Increment the counter, return empty dict.
self.activation_count += 1
return {}
def open_document_handler(self, message, reply, refcon):
# Get descriptor list.
listdesc = AEDesc()
status = carbon.AEGetParamDesc(message, keyDirectObject, typeAEList, ctypes.byref(listdesc))
assert status == 0, f'Could not retrieve descriptor list: {status}!'
# Count items.
item_count = ctypes.c_long()
status = carbon.AECountItems(ctypes.byref(listdesc), ctypes.byref(item_count))
assert status == 0, f'Could not count number of items in descriptor list: {status}!'
# Collect data from all descriptors.
desc = AEDesc()
paths = []
for i in range(item_count.value):
# Retrieve descriptor.
status = carbon.AEGetNthDesc(ctypes.byref(listdesc), i + 1, typeFSRef, 0, ctypes.byref(desc))
assert status == 0, f'Could not retrieve descriptor #{i}: {status}!'
# Get data.
sz = carbon.AEGetDescDataSize(ctypes.byref(desc))
buf = ctypes.create_string_buffer(sz)
status = carbon.AEGetDescData(ctypes.byref(desc), buf, sz)
assert status == 0, f'Could not retrieve data for descriptor #{i}: {status}!'
# Decode path.
fsref = buf
buf = ctypes.create_string_buffer(4096)
status = carbon.FSRefMakePath(ctypes.byref(fsref), buf, 4095)
assert status == 0, f'Could not convert data for descriptor #{i} to path: {status}!'
# Append to output list.
paths.append(buf.value.decode("utf-8"))
return paths
def open_url_handler(self, message, reply, refcon):
# Get descriptor list.
listdesc = AEDesc()
status = carbon.AEGetParamDesc(message, keyDirectObject, typeAEList, ctypes.byref(listdesc))
assert status == 0, f'Could not retrieve descriptor list: {status}!'
# Count items.
item_count = ctypes.c_long()
status = carbon.AECountItems(ctypes.byref(listdesc), ctypes.byref(item_count))
assert status == 0, f'Could not count number of items in descriptor list: {status}!'
# Collect data from all descriptors.
desc = AEDesc()
urls = []
for i in range(item_count.value):
# Retrieve descriptor.
status = carbon.AEGetNthDesc(ctypes.byref(listdesc), i + 1, typeChar, 0, ctypes.byref(desc))
assert status == 0, f'Could not retrieve descriptor #{i}: {status}!'
# Get data.
sz = carbon.AEGetDescDataSize(ctypes.byref(desc))
buf = ctypes.create_string_buffer(sz)
status = carbon.AEGetDescData(ctypes.byref(desc), buf, sz)
assert status == 0, f'Could not retrieve data for descriptor #{i}: {status}!'
# Append to output list.
urls.append(buf.value.decode("utf-8"))
return urls
if __name__ == '__main__':
app = Application()
app.main()
| Application |
python | numpy__numpy | numpy/lib/_index_tricks_impl.py | {
"start": 8755,
"end": 10299
} | class ____(nd_grid):
"""
An instance which returns an open multi-dimensional "meshgrid".
An instance which returns an open (i.e. not fleshed out) mesh-grid
when indexed, so that only one dimension of each returned array is
greater than 1. The dimension and number of the output arrays are
equal to the number of indexing dimensions. If the step length is
not a complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid : ndarray or tuple of ndarrays
If the input is a single slice, returns an array.
If the input is multiple slices, returns a tuple of arrays, with
only one dimension not equal to 1.
See Also
--------
mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
meshgrid: return coordinate matrices from coordinate vectors
r_ : array concatenator
:ref:`how-to-partition`
Examples
--------
>>> from numpy import ogrid
>>> ogrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid[0:5, 0:5]
(array([[0],
[1],
[2],
[3],
[4]]),
array([[0, 1, 2, 3, 4]]))
"""
__slots__ = ()
def __init__(self):
super().__init__(sparse=True)
ogrid = OGridClass()
| OGridClass |
python | falconry__falcon | tests/asgi/test_asgi_servers.py | {
"start": 979,
"end": 6830
} | class ____:
def test_get(self, server_base_url, requests):
resp = requests.get(server_base_url, timeout=_REQUEST_TIMEOUT)
assert resp.status_code == 200
assert resp.text == '127.0.0.1'
def test_put(self, server_base_url, requests):
body = '{}'
resp = requests.put(server_base_url, data=body, timeout=_REQUEST_TIMEOUT)
assert resp.status_code == 200
assert resp.text == '{}'
def test_head_405(self, server_base_url, requests):
body = '{}'
resp = requests.head(server_base_url, data=body, timeout=_REQUEST_TIMEOUT)
assert resp.status_code == 405
def test_post_multipart_form(self, server_base_url, requests):
size = random.randint(16 * _SIZE_1_MB, 32 * _SIZE_1_MB)
data = os.urandom(size)
digest = hashlib.sha1(data).hexdigest()
files = {
'random': ('random.dat', data),
'message': ('hello.txt', b'Hello, World!\n'),
}
resp = requests.post(
server_base_url + 'forms', files=files, timeout=_REQUEST_TIMEOUT
)
assert resp.status_code == 200
assert resp.json() == {
'message': {
'filename': 'hello.txt',
'sha1': '60fde9c2310b0d4cad4dab8d126b04387efba289',
},
'random': {
'filename': 'random.dat',
'sha1': digest,
},
}
def test_post_multiple(self, server_base_url, requests):
body = testing.rand_string(_SIZE_1_KB // 2, _SIZE_1_KB)
resp = requests.post(server_base_url, data=body, timeout=_REQUEST_TIMEOUT)
assert resp.status_code == 200
assert resp.text == body
assert resp.headers['X-Counter'] == '0'
time.sleep(1)
resp = requests.post(server_base_url, data=body, timeout=_REQUEST_TIMEOUT)
assert resp.headers['X-Counter'] == '2002'
def test_post_invalid_content_length(self, server_base_url, requests):
headers = {'Content-Length': 'invalid'}
try:
resp = requests.post(
server_base_url, headers=headers, timeout=_REQUEST_TIMEOUT
)
# Daphne responds with a 400
assert resp.status_code == 400
except requests.ConnectionError:
# NOTE(kgriffs): Uvicorn will kill the request so it does not
# even get to our app; the app logic is tested on the WSGI
# side. We leave this here in case something changes in
# the way uvicorn handles it or something and we want to
# get a heads-up if the request is no longer blocked.
pass
def test_post_read_bounded_stream(self, server_base_url, requests):
body = testing.rand_string(_SIZE_1_KB // 2, _SIZE_1_KB)
resp = requests.post(
server_base_url + 'bucket', data=body, timeout=_REQUEST_TIMEOUT
)
assert resp.status_code == 200
assert resp.text == body
def test_post_read_bounded_stream_large(self, server_base_url, requests):
"""Test that we can correctly read large bodies chunked server-side.
ASGI servers typically employ some type of flow control to stream
large request bodies to the app. This occurs regardless of whether
"chunked" Transfer-Encoding is employed by the client.
"""
# NOTE(kgriffs): One would hope that flow control is effective enough
# to at least prevent bursting over 1 MB.
size_mb = 5
body = os.urandom(_SIZE_1_MB * size_mb)
resp = requests.put(
server_base_url + 'bucket/drops', data=body, timeout=_REQUEST_TIMEOUT
)
assert resp.status_code == 200
assert resp.json().get('drops') > size_mb
assert resp.json().get('sha1') == hashlib.sha1(body).hexdigest()
def test_post_read_bounded_stream_no_body(self, server_base_url, requests):
resp = requests.post(server_base_url + 'bucket', timeout=_REQUEST_TIMEOUT)
assert not resp.text
def test_sse(self, server_base_url, requests):
resp = requests.get(server_base_url + 'events', timeout=_REQUEST_TIMEOUT)
assert resp.status_code == 200
events = resp.text.split('\n\n')
assert len(events) > 2
for e in events[:-1]:
assert e == 'data: hello world'
assert not events[-1]
def test_sse_client_disconnects_early(self, server_base_url, requests):
"""Test that when the client connection is lost, the server task does not hang.
In the case of SSE, Falcon should detect when the client connection is
lost and immediately bail out. Currently this is observable by watching
the output of the uvicorn and daphne server processes. Also, the
_run_server_isolated() method will fail the test if the server process
takes too long to shut down.
"""
with pytest.raises(requests.exceptions.ConnectionError):
requests.get(
server_base_url + 'events',
timeout=(_asgi_test_app.SSE_TEST_MAX_DELAY_SEC / 2),
)
async def test_stream_chunked_request(self, server_base_url, httpx):
"""Regression test for https://github.com/falconry/falcon/issues/2024"""
async def emitter():
for _ in range(64):
yield b'123456789ABCDEF\n'
async with httpx.AsyncClient() as client:
resp = await client.put(
server_base_url + 'bucket/drops',
content=emitter(),
timeout=_REQUEST_TIMEOUT,
)
resp.raise_for_status()
assert resp.json().get('drops') >= 1
@pytest.mark.skipif(
websockets is None, reason='websockets is required for this test class'
)
| TestASGIServer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-adjust/source_adjust/components.py | {
"start": 265,
"end": 980
} | class ____(JsonFileSchemaLoader):
config: Mapping[str, Any]
def get_json_schema(self) -> Mapping[str, Any]:
"""
Prune the schema to only include selected fields to synchronize.
"""
schema = source_adjust.model.Report.schema()
properties = schema["properties"]
required = schema["required"]
selected = self.config["metrics"] + self.config["dimensions"]
retain = required + selected
for attr in list(properties.keys()):
if attr not in retain:
del properties[attr]
for attr in self.config["additional_metrics"]:
properties[attr] = {"type": "number"}
return schema
| AdjustSchemaLoader |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 581707,
"end": 582064
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "labelable")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
labelable = sgqlc.types.Field(Labelable, graphql_name="labelable")
| RemoveLabelsFromLabelablePayload |
python | tensorflow__tensorflow | tensorflow/python/client/session_test.py | {
"start": 3138,
"end": 83472
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
super(SessionTest, self).setUp()
warnings.simplefilter('always')
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = self.evaluate(c)
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = self.evaluate(c)
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = self.evaluate(copy)
self.assertAllEqual(
np.asarray(
[[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32),
copy_val)
def testManyCPUs(self):
with session.Session(
config=config_pb2.ConfigProto(device_count={
'CPU': 2, 'GPU': 0
})) as sess:
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp, 10.0)
num_cpu_devices = 0
num_gpu_devices = 0
for device in sess.list_devices():
device_type = framework_device_lib.DeviceSpec.from_string(
device.name).device_type
if device_type == 'CPU':
num_cpu_devices += 1
elif device_type == 'GPU':
num_gpu_devices += 1
self.assertEqual(2, num_cpu_devices)
self.assertEqual(0, num_gpu_devices)
def testPerSessionThreads(self):
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp, 10.0)
def testSessionInterOpThreadPool(self):
config_pb = config_pb2.ConfigProto()
pool = config_pb.session_inter_op_thread_pool.add()
with session.Session(config=config_pb) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config_pb.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config_pb) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
pool = config_pb.session_inter_op_thread_pool.add()
pool.num_threads = 1
pool.global_name = 't1'
run_options = config_pb2.RunOptions()
run_options.inter_op_thread_pool = (
len(config_pb.session_inter_op_thread_pool) - 1)
with session.Session(config=config_pb) as s:
inp = constant_op.constant(30.0, name='W2')
results = s.run([inp], options=run_options)
self.assertAllEqual([30.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
self.evaluate(a)
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
def exc_predicate(e):
return (e.op == c.op and e.op._original_op == b.op and
e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
self.evaluate(c)
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertIsNone(res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertIsNone(res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertIsNone(res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertIsInstance(res, list)
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertIsInstance(res, list)
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertIsInstance(res, tuple)
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertIsInstance(res, tuple)
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertIsInstance(res, ABC)
self.assertEqual(42.0, res.a)
self.assertIsNone(res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertIsInstance(res, ABC)
self.assertEqual(42.0, res.a)
self.assertIsNone(res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertIsInstance(res, dict)
self.assertEqual(42.0, res['a'])
self.assertIsNone(res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertIsInstance(res, collections.OrderedDict)
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertIsNone(res[2])
self.assertEqual(44.0, res[1])
@test_util.run_v1_only('b/120545219')
def testFetchAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
val1 = np.array([1.2, 3.4, 5.6])
val2 = np.array([[1, 2], [4, 3]])
val3 = np.array([10, 20, 30])
t1 = constant_op.constant(val1)
t2 = constant_op.constant(val2)
sample = SampleAttr(t1, t2)
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val1, result.field1)
self.assertAllEqual(val2, result.field2)
result = sess.run(sample, feed_dict={sample.field1: val3})
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val3, result.field1)
self.assertAllEqual(val2, result.field2)
@test_util.run_v1_only('b/120545219')
def testFetchNestedAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field0 = attr.ib()
field1 = attr.ib()
v1 = 10
v2 = 20
v3 = np.float32(1.2)
v4 = np.float32(3.4)
v5 = np.float64(100.001)
v6 = np.float64(-23.451)
arr1 = np.array([1.2, 6.7, 3.4])
arr2 = np.array([7, 11, 3])
sample = SampleAttr(
SampleAttr(
SampleAttr(constant_op.constant(v1), constant_op.constant(v2)),
SampleAttr(constant_op.constant(arr1), constant_op.constant(arr2))),
{'A': SampleAttr(constant_op.constant(v3), constant_op.constant(v4)),
'B': [SampleAttr(constant_op.constant(v5), constant_op.constant(v6))]})
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertIsInstance(result.field0, SampleAttr)
self.assertIsInstance(result.field0.field0, SampleAttr)
self.assertIsInstance(result.field0.field1, SampleAttr)
self.assertIsInstance(result.field0.field1.field0, np.ndarray)
self.assertAllEqual(arr1, result.field0.field1.field0)
self.assertIsInstance(result.field0.field1.field1, np.ndarray)
self.assertAllEqual(arr2, result.field0.field1.field1)
self.assertIsInstance(result.field1, dict)
self.assertIn('A', result.field1)
self.assertIn('B', result.field1)
self.assertIsInstance(result.field1['A'], SampleAttr)
self.assertAllEqual(
[v3, v4],
[result.field1['A'].field0, result.field1['A'].field1])
self.assertIsInstance(result.field1['B'], list)
self.assertEqual(1, len(result.field1['B']))
self.assertIsInstance(result.field1['B'][0], SampleAttr)
self.assertAllEqual(
[v5, v6],
[result.field1['B'][0].field0, result.field1['B'][0].field1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertIsInstance(res, list)
self.assertEqual(3, len(res))
self.assertIsInstance(res[0], list)
self.assertEqual(0, len(res[0]))
self.assertIsInstance(res[1], tuple)
self.assertEqual(0, len(res[1]))
self.assertIsInstance(res[2], dict)
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertIsInstance(res, list)
self.assertEqual(4, len(res))
self.assertIsInstance(res[0], list)
self.assertEqual(0, len(res[0]))
self.assertIsInstance(res[1], tuple)
self.assertEqual(0, len(res[1]))
self.assertIsInstance(res[2], dict)
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFGHI = collections.namedtuple('DEFGHI', ['d', 'e', 'f', 'g', 'h', 'i'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
test_dct = {'a': a.name, 'c': c, 'b': b}
test_dct_types = [dict, frozendict, defaultdict]
# List of lists, tuples, namedtuple, dict, frozendict, and defaultdict
res = sess.run([
[a, b, c],
(a, b, c),
ABC(a=a, b=b, c=c),
dict(test_dct),
frozendict(test_dct),
defaultdict(str, test_dct),
])
self.assertIsInstance(res, list)
self.assertEqual(6, len(res))
self.assertIsInstance(res[0], list)
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertIsInstance(res[1], tuple)
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertIsInstance(res[2], ABC)
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
for expected_type, r in zip(test_dct_types, res[3:]):
self.assertIsInstance(r, expected_type)
self.assertEqual(3, len(r))
self.assertEqual(a_val, r['a'])
self.assertEqual(b_val, r['b'])
self.assertEqual(c_val, r['c'])
self.assertEqual(res[5].default_factory, str)
# Tuple of lists, tuples, namedtuple, dict, frozendict, and defaultdict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b,
c=c), dict(test_dct),
frozendict(test_dct), defaultdict(str, test_dct)))
self.assertIsInstance(res, tuple)
self.assertEqual(6, len(res))
self.assertIsInstance(res[0], list)
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertIsInstance(res[1], tuple)
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertIsInstance(res[2], ABC)
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
for expected_type, r in zip(test_dct_types, res[3:]):
self.assertIsInstance(r, expected_type)
self.assertEqual(3, len(r))
self.assertEqual(a_val, r['a'])
self.assertEqual(b_val, r['b'])
self.assertEqual(c_val, r['c'])
self.assertEqual(res[5].default_factory, str)
# Namedtuple of lists, tuples, namedtuples, dict, frozendict, defaultdict
res = sess.run(
DEFGHI(
d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g=dict(test_dct),
h=frozendict(test_dct),
i=defaultdict(str, test_dct)))
self.assertIsInstance(res, DEFGHI)
self.assertIsInstance(res.d, list)
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertIsInstance(res.e, tuple)
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertIsInstance(res.f, ABC)
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertIsInstance(res.g, dict)
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
self.assertIsInstance(res.h, frozendict)
self.assertEqual(3, len(res.h))
self.assertEqual(a_val, res.h['a'])
self.assertEqual(b_val, res.h['b'])
self.assertEqual(c_val, res.h['c'])
self.assertIsInstance(res.i, defaultdict)
self.assertEqual(3, len(res.i))
self.assertEqual(a_val, res.i['a'])
self.assertEqual(b_val, res.i['b'])
self.assertEqual(c_val, res.i['c'])
self.assertEqual(res.i.default_factory, str)
# Dict of lists, tuples, namedtuples, dict, frozendict, defaultdict
res = sess.run({
'd': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': dict(test_dct),
'h': frozendict(test_dct),
'i': defaultdict(str, test_dct),
})
self.assertIsInstance(res, dict)
self.assertEqual(6, len(res))
self.assertIsInstance(res['d'], list)
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertIsInstance(res['e'], tuple)
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertIsInstance(res['f'], ABC)
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
for expected_type, r_key in zip(test_dct_types, ('g', 'h', 'i')):
r = res[r_key]
self.assertIsInstance(r, expected_type)
self.assertEqual(3, len(r))
self.assertEqual(a_val, r['a'])
self.assertEqual(b_val, r['b'])
self.assertEqual(c_val, r['c'])
self.assertEqual(res['i'].default_factory, str)
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = self.evaluate(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices), constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(sp, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
dtype=np.float32, shape=shape, name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = indexed_slices.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = indexed_slices.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = indexed_slices.IndexedSlices(ind_values, ind_indices,
ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: (values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run([
ind_values, ind_indices, ind_dense_shape
], {ind: indexed_slices.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: indexed_slices.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = indexed_slices.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlicesWithoutDenseShape(self):
    """Feeding an IndexedSlices with dense_shape=None works as tuple or value."""
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = None
      ind = indexed_slices.IndexedSlices(
          array_ops.placeholder(dtype=np.float32, shape=(2,)),
          array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None)
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind2 = indexed_slices.IndexedSlices(ind_values, ind_indices)
      # Feed with tuple
      values_out, indices_out = s.run([ind_values, ind_indices], {
          ind: (values, indices)
      })
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue
      values_out, indices_out = s.run([ind_values, ind_indices], {
          ind: indexed_slices.IndexedSlicesValue(values, indices, dense_shape)
      })
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {
          ind: indexed_slices.IndexedSlicesValue(values, indices, dense_shape)
      })
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
  def testExtendWithStatelessOperations(self):
    """Stateless ops added after a run() are picked up by the next run()."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      c_val = s.run(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
      d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
      e = math_ops.matmul(c, d)
      # Extend will happen here.
      e_val = s.run(e)
      self.assertAllEqual([[24.0]], e_val)
  def testExtendWithStatefulOperations(self):
    """Graph extension with a Variable: new assign ops work after extension."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
      v.initializer.run()
      v_val = self.evaluate(v)
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      # Extend will happen here.
      e_val = self.evaluate(e)
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      # Evaluating e must not have changed v; only the explicit assign does.
      v_val = self.evaluate(v)
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = self.evaluate(v)
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
  def testExtendWithGroupBy(self):
    """A group() op added after a forced Extend still initializes correctly."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      p = variables.Variable(a, name='testExtendWithGroupBy_p')
      a_val = self.evaluate(a)  # Force an Extend after this op.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      b = constant_op.constant(2.0, shape=[1, 2])
      q = variables.Variable(b, name='testExtendWithGroupBy_q')
      # Extend will happen here.
      init = control_flow_ops.group(p.initializer, q.initializer)
      s.run(init)
      p_val, q_val = s.run([p, q])
      self.assertAllEqual([[1.0, 1.0]], p_val)
      self.assertAllEqual([[2.0, 2.0]], q_val)
  def testTensorGetMethod(self):
    """Tensor.eval() works with and without a feed_dict keyed by tensor name."""
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      c_val = self.evaluate(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
      # Feeding overrides the constant value of `a` for this evaluation.
      fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
      self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
  @test_util.run_v1_only('b/120545219')
  def testOperationRunMethod(self):
    """Operation.run() executes an assign, optionally with a fed input."""
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 2], name='b')
      v = variable_v1.VariableV1(a, a.dtype)
      assign_a_to_v = state_ops.assign(v, a)
      self.evaluate(assign_a_to_v)
      v_val = self.evaluate(v)
      self.assertAllEqual([[1.0, 1.0]], v_val)
      assign_b_to_v = state_ops.assign(v, b)
      self.evaluate(assign_b_to_v)
      v_val = self.evaluate(v)
      self.assertAllEqual([[2.0, 2.0]], v_val)
      # Feed `b` by name while running the assign op directly.
      assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
      v_val = self.evaluate(v)
      self.assertAllEqual([[3.0, 3.0]], v_val)
  def testDefaultGraph(self):
    """Entering a Session makes its graph the default graph for new ops."""
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      self.assertEqual(ops.get_default_graph(), a.graph)
      self.assertEqual(ops.get_default_graph(), b.graph)
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testDefaultGraph_v')
      v.initializer.run()
      v_val = self.evaluate(v)
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = self.evaluate(e)
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      # v is unchanged until the assign op is explicitly run.
      v_val = self.evaluate(v)
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = self.evaluate(v)
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
  def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
    """Worker for testDefaultGraphWithThreads.

    Builds a graph in a thread-local default session; signals
    `constructed_event` once the graph is built and waits on `continue_event`
    so all threads construct before any runs. `i` uniquifies variable names.
    """
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='var_%d' % i)

      # Block here until all threads have constructed their graph.
      constructed_event.set()
      continue_event.wait()

      assign_c_to_v = state_ops.assign(v, c)
      v.initializer.run()
      self.evaluate(assign_c_to_v)
      v_val = self.evaluate(v)
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = self.evaluate(e)
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = self.evaluate(v)
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = self.evaluate(v)
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
  def testDefaultGraphWithThreads(self):
    """Each thread gets its own thread-local default graph."""
    # Fork ten threads that use their thread-local default graph.
    threads = []
    constructed_events = [threading.Event() for _ in range(10)]
    continue_event = threading.Event()
    for i, constructed_event in enumerate(constructed_events):
      t = self.checkedThread(
          target=self._testDefaultGraphInThread,
          args=(constructed_event, continue_event, i))
      threads.append(t)
    for t in threads:
      t.start()
    # Wait for every thread to finish graph construction before releasing
    # them all to run.
    for constructed_event in constructed_events:
      constructed_event.wait()
    continue_event.set()
    for t in threads:
      t.join()
  def testParallelRun(self):
    """100 threads can concurrently eval the same tensor in one session."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      ev = threading.Event()

      def run_step():
        # All threads block here, then run simultaneously when ev is set.
        ev.wait()

        val = c.eval(session=sess)
        self.assertEqual(val, 5.0)

      threads = [self.checkedThread(target=run_step) for _ in range(100)]
      for t in threads:
        t.start()
      ev.set()
      for t in threads:
        t.join()
  @staticmethod
  def _build_graph():
    """Performs non-trivial graph construction for concurrency tests.

    Sleeps a random amount then builds ten rounds of ops exercising
    colocation, device scopes, while loops, attr scopes, gradients, and
    GraphDef import — paths likely to race with concurrent run() calls.
    """
    time.sleep(random.random() * 0.1)
    # Do some graph construction. Try to exercise non-trivial paths.
    graph = ops.get_default_graph()
    gdef = None
    for _ in range(10):
      x = array_ops.placeholder(dtype=dtypes.float32)
      with ops.colocate_with(x):
        y = array_ops.placeholder(dtype=dtypes.float32)
      with ops.device('/cpu:0'):
        z = while_loop.while_loop(
            lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y])
      with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}):
        gradients_impl.gradients(z, [x, y])
      if gdef is None:
        gdef = graph.as_graph_def()
      else:
        importer.import_graph_def(gdef, name='import')
  @test_util.run_v1_only('b/120545219')
  def testParallelRunAndSingleBuild(self):
    """Graph construction in the main thread while 10 threads run()."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      stop = threading.Event()

      def run_loop():
        while not stop.is_set():
          time.sleep(random.random() * 0.1)
          self.assertEqual(sess.run(c), 5.0)

      threads = [self.checkedThread(target=run_loop) for _ in range(10)]
      for t in threads:
        t.start()

      SessionTest._build_graph()

      stop.set()
      for t in threads:
        t.join()
  @test_util.run_v1_only('b/120545219')
  def testParallelRunAndParallelBuild(self):
    """Concurrent graph construction in 10 threads while 10 threads run()."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      stop = threading.Event()

      def run_loop():
        while not stop.is_set():
          time.sleep(random.random() * 0.1)
          self.assertEqual(sess.run(c), 5.0)

      run_threads = [self.checkedThread(target=run_loop) for _ in range(10)]
      for t in run_threads:
        t.start()

      build_threads = [self.checkedThread(target=SessionTest._build_graph)
                       for _ in range(10)]
      for t in build_threads:
        t.start()
      for t in build_threads:
        t.join()

      # Let the run_threads run until the build threads are finished.
      stop.set()
      for t in run_threads:
        t.join()
  def testRunFeedDict(self):
    """feed_dict accepts tensors, tensor names, lists, and nested tuple keys."""
    with session.Session() as s:
      x = array_ops.zeros([2])

      y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))

      y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))

      y = s.run(2 * x, feed_dict={x: [1, 1]})
      assert (y == 2 * np.ones(2)).all()

      # Test nested tuple keys
      z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
           (array_ops.zeros([2]),))
      result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
      values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
      result_value = s.run(result, feed_dict={z: values})
      self.assertAllEqual(result_value[0], 2 * np.ones(2))
      self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
      self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
  def testGraphDef(self):
    """sess.graph_def reflects version info and grows as nodes are added."""
    with session.Session() as sess:
      self.assertProtoEquals('versions { producer: %d min_consumer: %d }' %
                             (versions.GRAPH_DEF_VERSION,
                              versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
                             sess.graph_def)
      c = constant_op.constant(5.0, name='c')
      self.assertEqual(len(sess.graph_def.node), 1)
      d = constant_op.constant(6.0, name='d')
      self.assertEqual(len(sess.graph_def.node), 2)
      self.assertAllEqual(c, 5.0)
      self.assertAllEqual(d, 6.0)
      e = constant_op.constant(7.0, name='e')
      self.assertEqual(len(sess.graph_def.node), 3)
      self.assertAllEqual(e, 7.0)
  def testUseAfterClose(self):
    """Running on a session closed by its context manager raises RuntimeError."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)
    with self.assertRaisesWithPredicateMatch(
        RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
      sess.run(c)
  def testUseAfterCloseConcurrent(self):
    """Closing a session while another thread runs it raises in that thread."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)

      def update_thread():
        with self.assertRaisesWithPredicateMatch(
            RuntimeError,
            lambda e: 'Attempted to use a closed Session.' in str(e)):
          # Loop until the close() in the main thread makes run() raise.
          while True:
            sess.run(c)

      t = threading.Thread(target=update_thread)
      t.start()
      time.sleep(0.1)
      sess.close()
      t.join()
  def testUseEmptyGraph(self):
    """Running an empty fetch (list/tuple/dict) on an empty graph raises."""
    with session.Session() as sess:
      with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
        sess.run([])
      with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
        sess.run(())
      with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
        sess.run({})
  @test_util.run_v1_only('b/120545219')
  def testNotEntered(self):
    """A session that is never entered does not become the default session."""
    # pylint: disable=protected-access
    self.assertIsNone(stack._default_session_stack.get_default())
    # pylint: enable=protected-access
    with ops.device('/cpu:0'):
      sess = session.Session()
      c_1 = constant_op.constant(5.0)
      with sess.graph.as_default():
        c_2 = constant_op.constant(5.0)
      self.assertEqual(c_1.graph, c_2.graph)
      self.assertEqual(sess.run(c_2), 5.0)
      # eval() without an explicit session requires a registered default.
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: 'No default session is registered.' in str(e)):
        c_2.eval()
  @test_util.run_v1_only('b/120545219')
  def testInteractive(self):
    """InteractiveSession installs itself as default on construction."""
    with ops.device('/cpu:0'):
      sess = session.InteractiveSession()
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c)
      d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
      e = math_ops.matmul(c, d)
      self.assertAllEqual([[24.0]], e)
      sess.close()
  @test_util.run_v1_only('b/120545219')
  def testMultipleInteractiveSessionsError(self):
    """Two live InteractiveSessions log an error; serial use does not."""
    # Reinitialize the global state to ensure that the expected warnings will
    # be emitted.
    session.InteractiveSession._active_session_count = 0  # pylint: disable=protected-access

    sess = session.InteractiveSession()
    sess.run(constant_op.constant(4.0))  # Run so that the session is "opened".
    sess.close()
    # Opening and closing interactive sessions serially should not warn.
    with self.assertNoLogs(level='ERROR'):
      sess = session.InteractiveSession()
      sess.close()

    with self.assertNoLogs(level='ERROR'):
      sess = session.InteractiveSession()
    with self.assertLogs(level='ERROR') as log_output:
      sess2 = session.InteractiveSession()
    self.assertLen(log_output.output, 1)
    self.assertIn(
        'An interactive session is already active. This can cause'
        ' out-of-memory errors or some other unexpected errors (due to'
        ' the unpredictable timing of garbage collection) in some cases.'
        ' You must explicitly call `InteractiveSession.close()` to release'
        ' resources held by the other session(s). Please use `tf.Session()`'
        ' if you intend to productionize.',
        log_output.output[0],
    )
    sess2.close()
    sess.close()
  @test_util.run_v1_only('b/120545219')
  def testInteractivePlacePrunedGraph(self):
    """InteractiveSession places only the pruned subgraph, so unused bad ops pass."""
    sess = session.InteractiveSession()

    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid.  If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/device:GPU:0'):
      a = constant_op.constant(1.0, shape=[1, 2])

    b = constant_op.constant(1.0, shape=[1, 2])

    # Only run the valid op, this should work.
    self.evaluate(b)

    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(a)
    sess.close()
  @test_util.run_v1_only('b/120545219')
  def testDefaultSessionPlacePrunedGraph(self):
    """A regular Session places the whole graph, so an unplaceable op fails."""
    sess = session.Session()

    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid.  If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/device:GPU:0'):
      _ = constant_op.constant(1.0, shape=[1, 2])

    b = constant_op.constant(1.0, shape=[1, 2])

    with self.assertRaises(errors.InvalidArgumentError):
      # Even though we don't run the bad op, we place the entire
      # graph, which should fail with a non-interactive session.
      sess.run(b)

    sess.close()
  def testSharedGraph(self):
    """Two sessions over the same Graph compute the same results."""
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)

    with session.Session(graph=g) as sess1:
      with session.Session(graph=g) as sess2:
        self.assertAllEqual(sess1.run(c), sess2.run(c))
  def testDuplicatedInputs(self):
    """Fetching the same tensor twice in one run() returns it twice."""
    with session.Session() as sess:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 3])
      a_val, b_val, a2_val = sess.run([a, b, a])
      self.assertAllEqual(a_val, [[1.0, 1.0]])
      self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
      self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool,
dtypes.complex64, dtypes.complex128
]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={
feed_t: np_array
}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={
feed_t: np_array
}))
# Also check that we can get both back.
out_v, feed_v = sess.run(
[out_t, feed_t], feed_dict={
feed_t: np_array
})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
  def testMakeCallableOnTensorWithRunOptions(self):
    """make_callable(accept_options=True) honors RunOptions when fetching a tensor."""
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      tensor_runner = sess.make_callable(a, accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      res = tensor_runner(options=run_options, run_metadata=run_metadata)
      self.assertEqual(42.0, res)
      # FULL_TRACE must populate step stats in the supplied metadata.
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
  def testMakeCallableOnOperationWithRunOptions(self):
    """make_callable over an Operation runs side effects and fills metadata."""
    with session.Session() as sess:
      a = variables.Variable(42.0)
      b = state_ops.assign_add(a, 1.0)
      sess.run(a.initializer)
      tensor_runner = sess.make_callable(b.op, accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      tensor_runner(options=run_options, run_metadata=run_metadata)
      # The assign_add side effect took place even though we fetched the op.
      self.assertEqual(43.0, sess.run(a))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
  def testMakeCallableWithFeedListAndRunOptions(self):
    """make_callable with a feed_list of names accepts positional feeds + options."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      a = math_ops.add(ph, 1.0)
      tensor_runner = sess.make_callable(
          a, feed_list=[ph.name], accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      self.assertAllClose(42.0,
                          tensor_runner(
                              41.0,
                              options=run_options,
                              run_metadata=run_metadata))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
  def testOptimizedMakeCallable(self):
    """_make_callable_from_options can be rebuilt and invoked repeatedly."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      a = math_ops.add(ph, 1.0)
      callable_opts = config_pb2.CallableOptions()
      callable_opts.feed.append(ph.name)
      callable_opts.fetch.append(a.name)
      for _ in range(3):
        callable_fn = sess._make_callable_from_options(callable_opts)
        for _ in range(5):
          self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32)))
  def testOptimizedMakeCallableWithRunMetadata(self):
    """An optimized callable with FULL_TRACE populates the given RunMetadata."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      a = math_ops.add(ph, 1.0)
      callable_opts = config_pb2.CallableOptions()
      callable_opts.feed.append(ph.name)
      callable_opts.fetch.append(a.name)
      callable_opts.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
      callable_fn = sess._make_callable_from_options(callable_opts)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32),
                                          run_metadata=run_metadata))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
  def testFeedError(self):
    """Feeding a tf.Tensor as a feed *value* raises TypeError on every path."""
    with session.Session() as sess:
      feed_t = array_ops.placeholder(dtype=dtypes.float32)
      out_t = array_ops.identity(feed_t)
      feed_val = constant_op.constant(5.0)
      with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
        sess.run(out_t, feed_dict={feed_t: feed_val})
      with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
        out_t.eval(feed_dict={feed_t: feed_val})
      with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
        out_t.op.run(feed_dict={feed_t: feed_val})
  def testFeedPrecisionLossError(self):
    """Feeding an int64 value into an int32 tensor raises instead of truncating."""
    with session.Session() as sess:
      largest_int64 = np.iinfo(np.int64).max

      feed_int_implicit_int32 = constant_op.constant(1)
      feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)

      out_t = constant_op.constant(1.0)

      with self.assertRaisesRegex(TypeError,
                                  'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
      with self.assertRaisesRegex(TypeError,
                                  'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
  def testStringFetch(self):
    """Fetching string constants round-trips bytes across shapes, incl. empty."""
    with session.Session():
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        size = 1
        for s in shape:
          size *= s
        c_list = np.array([compat.as_bytes(str(i)) for i in range(size)],
                          dtype=np.object_).reshape(shape) if size > 0 else []
        c = constant_op.constant(c_list)
        self.assertAllEqual(c, c_list)
  def testStringFeed(self):
    """Feeding string arrays through a placeholder round-trips bytes."""
    with session.Session() as sess:
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        size = 1
        for s in shape:
          size *= s
        c_list = np.array([compat.as_bytes(str(i)) for i in range(size)],
                          dtype=np.object_).reshape(shape)
        feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
        c = array_ops.identity(feed_t)
        self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
        self.assertAllEqual(
            sess.run(feed_t, feed_dict={
                feed_t: c_list
            }), c_list)
        c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
        self.assertAllEqual(c_v, c_list)
        self.assertAllEqual(feed_v, c_list)
  def testStringFeedWithNullCharacters(self):
    """Embedded NUL bytes in fed strings are preserved exactly."""
    with session.Session():
      c_list = [b'\n\x01\x00', b'\n\x00\x01']
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
      c = array_ops.identity(feed_t)
      out = c.eval(feed_dict={feed_t: c_list})
      self.assertEqual(c_list[0], out[0])
      self.assertEqual(c_list[1], out[1])
  def testStringFeedWithUnicode(self):
    """Unicode feeds (list or object array) round-trip via UTF-8 encoding."""
    with session.Session():
      c_list = [
          u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode',
          u'\U0001f60e deal with it'
      ]
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
      c = array_ops.identity(feed_t)

      out = c.eval(feed_dict={feed_t: c_list})
      for i in range(len(c_list)):
        self.assertEqual(c_list[i], out[i].decode('utf-8'))

      out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object_)})
      for i in range(len(c_list)):
        self.assertEqual(c_list[i], out[i].decode('utf-8'))
  def testInvalidTargetFails(self):
    """Constructing a Session with an unknown target raises NotFoundError."""
    with self.assertRaisesRegex(
        errors.NotFoundError,
        'No session factory registered for the given session options'):
      session.Session('INVALID_TARGET')
  def testFetchByNameDifferentStringTypes(self):
    """Fetching by name accepts str, unicode, bytes, and raw string literals."""
    with session.Session() as sess:
      c = constant_op.constant(42.0, name='c')
      d = constant_op.constant(43.0, name=u'd')
      e = constant_op.constant(44.0, name=b'e')
      f = constant_op.constant(45.0, name=r'f')

      self.assertIsInstance(c.name, six.text_type)
      self.assertIsInstance(d.name, six.text_type)
      self.assertIsInstance(e.name, six.text_type)
      self.assertIsInstance(f.name, six.text_type)

      self.assertEqual(42.0, sess.run('c:0'))
      self.assertEqual(42.0, sess.run(u'c:0'))
      self.assertEqual(42.0, sess.run(b'c:0'))
      self.assertEqual(42.0, sess.run(r'c:0'))

      self.assertEqual(43.0, sess.run('d:0'))
      self.assertEqual(43.0, sess.run(u'd:0'))
      self.assertEqual(43.0, sess.run(b'd:0'))
      self.assertEqual(43.0, sess.run(r'd:0'))

      self.assertEqual(44.0, sess.run('e:0'))
      self.assertEqual(44.0, sess.run(u'e:0'))
      self.assertEqual(44.0, sess.run(b'e:0'))
      self.assertEqual(44.0, sess.run(r'e:0'))

      self.assertEqual(45.0, sess.run('f:0'))
      self.assertEqual(45.0, sess.run(u'f:0'))
      self.assertEqual(45.0, sess.run(b'f:0'))
      self.assertEqual(45.0, sess.run(r'f:0'))
  def testIncorrectGraph(self):
    """Fetching a tensor from a different graph raises ValueError."""
    with ops.Graph().as_default() as g_1:
      c_1 = constant_op.constant(1.0, name='c')

    with ops.Graph().as_default() as g_2:
      c_2 = constant_op.constant(2.0, name='c')

    # Same op name in both graphs; only the owning session may run each.
    self.assertEqual('c', c_1.op.name)
    self.assertEqual('c', c_2.op.name)

    with session.Session(graph=g_1) as sess_1:
      self.assertEqual(1.0, sess_1.run(c_1))
      with self.assertRaises(ValueError):
        sess_1.run(c_2)
      with self.assertRaises(ValueError):
        sess_1.run(c_2.op)

    with session.Session(graph=g_2) as sess_2:
      with self.assertRaises(ValueError):
        sess_2.run(c_1)
      with self.assertRaises(ValueError):
        sess_2.run(c_1.op)
      self.assertEqual(2.0, sess_2.run(c_2))
  def testFeedDictKeyException(self):
    """A feed_dict key that is neither tensor nor valid name raises TypeError."""
    with session.Session() as sess:
      a = constant_op.constant(1.0, dtypes.float32, name='a')
      with self.assertRaisesRegex(TypeError, 'Cannot interpret feed_dict'):
        # 'a' (op name, no output index) is not a valid feed key.
        sess.run(a, feed_dict={'a': [2.0]})
  def testPerStepTrace(self):
    """step_stats are produced only when trace options are passed to run()."""
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.SOFTWARE_TRACE)
    run_metadata = config_pb2.RunMetadata()

    with ops.device('/cpu:0'):
      with session.Session() as sess:
        sess.run(constant_op.constant(1.0))
        self.assertFalse(run_metadata.HasField('step_stats'))

        sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
        self.assertFalse(run_metadata.HasField('step_stats'))

        sess.run(
            constant_op.constant(1.0),
            options=run_options,
            run_metadata=run_metadata)

        self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
  def testRunOptionsRunMetadata(self):
    """All combinations of options/run_metadata args to run() are valid."""
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.SOFTWARE_TRACE)
    run_metadata = config_pb2.RunMetadata()

    with ops.device('/cpu:0'):
      with session.Session() as sess:
        # all combinations are valid
        sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
        sess.run(
            constant_op.constant(1.0), options=None, run_metadata=run_metadata)
        self.assertFalse(run_metadata.HasField('step_stats'))

        sess.run(
            constant_op.constant(1.0), options=run_options, run_metadata=None)
        self.assertFalse(run_metadata.HasField('step_stats'))

        sess.run(
            constant_op.constant(1.0),
            options=run_options,
            run_metadata=run_metadata)

        self.assertTrue(run_metadata.HasField('step_stats'))
        self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
  def testFeedShapeCompatibility(self):
    """Shape-incompatible feeds fail at feed time or inside the reshape kernel."""
    with session.Session() as sess:
      some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
      new_shape = constant_op.constant([2, 2])
      reshaped_tensor = array_ops.reshape(some_tensor, new_shape)

      with self.assertRaisesRegex(ValueError, 'Cannot feed value of shape'):
        sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})

      with self.assertRaisesRegex(
          errors.InvalidArgumentError,
          'Input to reshape is a tensor with 4 values, '
          'but the requested shape has 21'):
        sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
  def testInferShapesFalse(self):
    """By default, GraphDef nodes carry no _output_shapes attribute."""
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant([[1, 2]])
      sess = session.Session()
      self.assertNotIn('_output_shapes', sess.graph_def.node[0].attr)
      # Avoid lint error regarding 'unused' var a.
      self.assertEqual(a, a)
  def testInferShapesTrue(self):
    """GraphOptions(infer_shapes=True) annotates nodes with _output_shapes."""
    config_pb = config_pb2.ConfigProto(
        graph_options=config_pb2.GraphOptions(infer_shapes=True))
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant([[1, 2]])
      sess = session.Session(config=config_pb)
      self.assertIn('_output_shapes', sess.graph_def.node[0].attr)
      # Avoid lint error regarding 'unused' var a.
      self.assertEqual(a, a)
  def testBuildCostModel(self):
    """build_cost_model=100 emits a cost_graph exactly on the 100th step."""
    run_options = config_pb2.RunOptions()
    config_pb = config_pb2.ConfigProto(
        allow_soft_placement=True,
        graph_options=config_pb2.GraphOptions(build_cost_model=100))
    with session.Session(config=config_pb) as sess:
      with ops.device('/device:GPU:0'):
        a = array_ops.placeholder(dtypes.float32, shape=[])
        b = math_ops.add(a, a)
        c = array_ops.identity(b)
        d = math_ops.multiply(c, c)
      for step in range(120):
        run_metadata = config_pb2.RunMetadata()
        sess.run(
            d,
            feed_dict={a: 1.0},
            options=run_options,
            run_metadata=run_metadata)
        # Step index is 0-based, so the model is built at step == 99.
        if step == 99:
          self.assertTrue(run_metadata.HasField('cost_graph'))
        else:
          self.assertFalse(run_metadata.HasField('cost_graph'))
  def runTestOutputPartitionGraphs(self, sess):
    """Helper: partition graphs appear only when requested via RunOptions."""
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    a = constant_op.constant(1)
    run_metadata = config_pb2.RunMetadata()
    sess.run(a, options=run_options, run_metadata=run_metadata)
    self.assertGreater(len(run_metadata.partition_graphs), 0)
    sess.run(a, run_metadata=run_metadata)
    self.assertEqual(len(run_metadata.partition_graphs), 0)
  @test_util.run_v1_only('b/120545219')
  def testOutputPartitionGraphsDirect(self):
    """Partition-graph output works with an in-process session."""
    self.runTestOutputPartitionGraphs(session.Session())
  @test_util.run_v1_only('b/120545219')
  def testOutputPartitionGraphsDistributed(self):
    """Partition-graph output works with a local grpc server session."""
    server = server_lib.Server.create_local_server()
    self.runTestOutputPartitionGraphs(session.Session(server.target))
  def testNonInteractiveSessionNesting(self):
    """Exiting as_default() contexts out of order violates stack nesting."""
    sess1 = session.Session()
    sess1_controller = sess1.as_default()
    sess1_controller.__enter__()

    sess2 = session.Session()
    sess2_controller = sess2.as_default()
    sess2_controller.__enter__()

    with self.assertRaisesRegex(AssertionError, 'Nesting violated'):
      sess1_controller.__exit__(None, None, None)

    # Clean up the corrupted default-session stack for later tests.
    stack._default_session_stack.reset()
  def testInteractiveSessionNesting(self):
    """Creating and dropping two InteractiveSessions does not raise."""
    sess1 = session.InteractiveSession()
    sess2 = session.InteractiveSession()
    del sess1
    del sess2
  @test_util.run_v1_only('b/120545219')
  def testAsDefault(self):
    """as_default() enables evaluate(); works even on an uncaptured session."""
    c = constant_op.constant(37)
    sess = session.Session()
    with sess.as_default():
      self.assertEqual(37, self.evaluate(c))

    # Ensure that the session remains valid even when it is not captured.
    with session.Session().as_default():
      self.assertEqual(37, self.evaluate(c))
  def testReentry(self):
    """Entering the same session context manager twice raises RuntimeError."""
    sess = session.Session()
    with self.assertRaisesRegex(RuntimeError, 'not re-entrant'):
      with sess:
        with sess:
          pass
  def testInvalidArgument(self):
    """Wrongly-typed constructor arguments raise descriptive TypeErrors."""
    with self.assertRaisesRegex(TypeError,
                                'Argument `target` must be a string'):
      session.Session(37)
    with self.assertRaisesRegex(TypeError,
                                'Argument `config` must be a tf.ConfigProto'):
      session.Session(config=37)
    with self.assertRaisesRegex(TypeError,
                                'Argument `graph` must be a tf.Graph'):
      session.Session(graph=37)
  @test_util.run_v1_only('b/120545219')
  def testTimeoutWithShortOperations(self):
    """operation_timeout_in_ms does not abort ops that finish in time."""
    num_epochs = 5
    q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()])
    enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))

    # Use a 10-second timeout, which should be longer than any
    # non-blocking enqueue_many op.
    config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
    with session.Session(config=config_pb) as sess:
      for _ in range(num_epochs):
        sess.run(enqueue_op)
      self.assertEqual(sess.run(q.size()), num_epochs * 2)
  @test_util.run_v1_only('b/120545219')
  def testRegisterFetchAndFeedConversionFunctions(self):
    """Custom fetch/feed conversion: registration, duplicate rejection, use."""

    class SquaredTensor(object):

      def __init__(self, tensor):
        self.sq = math_ops.square(tensor)

    # fetch_fn maps the wrapper to its underlying tensors and back;
    # feed_fn1/feed_fn2 map a fed wrapper (with/without value) to feed keys.
    fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
    feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
    feed_fn2 = lambda feed: [feed.sq]

    session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
                                                      feed_fn1, feed_fn2)
    # Registering the same type twice is rejected.
    with self.assertRaises(ValueError):
      session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
                                                        feed_fn1, feed_fn2)
    with self.cached_session() as sess:
      np1 = np.array([1.0, 1.5, 2.0, 2.5])
      np2 = np.array([3.0, 3.5, 4.0, 4.5])
      squared_tensor = SquaredTensor(np2)
      squared_eval = sess.run(squared_tensor)
      self.assertAllClose(np2 * np2, squared_eval)
      squared_eval = sess.run(
          squared_tensor, feed_dict={
              squared_tensor: np1 * np1
          })
      self.assertAllClose(np1 * np1, squared_eval)
      partial_run = sess.partial_run_setup([squared_tensor], [])
      squared_eval = sess.partial_run(partial_run, squared_tensor)
      self.assertAllClose(np2 * np2, squared_eval)
  def testDefaultLogDevicePlacement(self):
    """log_device_placement emits placement logs in eager, graph, and tf.function.

    Captures C++-side stderr (where placement logs are written) via an OS
    pipe and counts the logged AddV2 executions in each execution mode.
    """

    class CaptureStderr(str):
      """Class to capture stderr from C++ shared library."""

      def __enter__(self):
        # Sentinel byte used to mark the end of captured output.
        self._esc = compat.as_str('\b')
        self._output = compat.as_str('')
        self._stderr = sys.stderr
        self._fd = self._stderr.fileno()
        self._out_pipe, in_pipe = os.pipe()
        # Save the original io stream.
        self._dup_fd = os.dup(self._fd)
        # Replace the original io stream with in pipe.
        os.dup2(in_pipe, self._fd)
        return self

      def __exit__(self, *args):
        self._stderr.write(self._esc)
        self._stderr.flush()
        self.read()
        os.close(self._out_pipe)
        # Restore the original io stream.
        os.dup2(self._dup_fd, self._fd)

      def read(self):
        # Drain the pipe one byte at a time until the sentinel appears.
        while True:
          data = os.read(self._out_pipe, 1)
          if not data or compat.as_str(data) == self._esc:
            break
          self._output += compat.as_str(data)

      def __str__(self):
        return self._output

    context.set_log_device_placement(True)
    if context.executing_eagerly():
      with CaptureStderr() as log:
        a = constant_op.constant(1)
        b = constant_op.constant(2)
        c = a + b
        # Ensure if the same kernel with the same arguments is executed then its
        # execution is logged.
        d = a + b
    else:
      # Passing the config to the server, but not the session should still
      # result in logging device placement.
      config_pb = config_pb2.ConfigProto(log_device_placement=True)
      server = server_lib.Server.create_local_server(config=config_pb)
      a = constant_op.constant(1)
      b = constant_op.constant(2)
      c = a + b
      d = a + b
      with session.Session(server.target) as sess:
        with CaptureStderr() as log:
          c, d = sess.run([c, d])

    self.assertEqual(c, 3)
    self.assertEqual(d, 3)

    # Ensure that we did log device placement.
    # We have three modes of execution at the moment:
    # (1) TF1 Graph (2) TF2 eager (3) TF2 eager with function wrapping.
    # The codepaths taken by each are slightly different in all resulting in
    # slightly different logging messages.
    log_msg = ('Executing op AddV2'
               if ops.executing_eagerly_outside_functions() else 'AddV2')
    add_executions = [l for l in str(log).splitlines() if log_msg in l]
    self.assertEqual(len(add_executions), 2)

    @def_function.function
    def fn(a, b):
      c = a + b
      # These two AddV2 cannot use the same argument in tf.function since an
      # optimization pass will remove duplicate ops and only run it once.
      d = a + c
      return c, d

    with CaptureStderr() as log:
      c, d = self.evaluate(fn(constant_op.constant(1), constant_op.constant(2)))
    self.assertEqual(c, 3)
    self.assertEqual(d, 4)
    # Ensure that we did log device placement.
    add_executions = [l for l in str(log).splitlines() if 'AddV2' in l]
    self.assertEqual(len(add_executions), 2)
@test_util.run_v1_only('b/120545219')
def testLocalMasterSessionTimeout(self):
# Test that the timeout passed in a config to the session works correctly.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server()
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target, config=config_pb) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
@test_util.run_v1_only('b/120545219')
def testDefaultServerTimeout(self):
# Test that the default server config timeout gets used when no Session
# config is provided.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server(config=config_pb)
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def runTestBuildGraphError(self, sess):
# Ensure that errors from building the graph get propagated.
data = array_ops.placeholder(dtypes.float32, shape=[])
# pylint: disable=protected-access
enter_1 = gen_control_flow_ops.enter(data, 'foo_1', False)
enter_2 = gen_control_flow_ops.enter(data, 'foo_2', False)
# pylint: enable=protected-access
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError('has inputs from different frames'):
sess.run(res, feed_dict={data: 1.0})
@test_util.run_v1_only('b/120545219')
def testBuildGraphErrorDirect(self):
self.runTestBuildGraphError(session.Session())
@test_util.run_v1_only('b/120545219')
def testBuildGraphErrorDist(self):
server = server_lib.Server.create_local_server()
self.runTestBuildGraphError(session.Session(server.target))
def testDeviceAttributes(self):
attrs = session._DeviceAttributes(
'/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def testDeviceAttributesCanonicalization(self):
attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def runTestAddFunctionToSession(self, target=''):
"""Add a function to a session after the graph has already been run."""
@function.Defun(dtypes.float32)
def foo(x):
return x + 1
x = constant_op.constant(1.0)
with session.Session(target=target) as sess:
sess.run(x)
f = foo(x)
result = sess.run(f)
self.assertEqual(result, 2.0)
@test_util.run_v1_only('b/120545219')
def testAddFunctionToSession(self):
self.runTestAddFunctionToSession()
@test_util.run_v1_only('b/120545219')
def testAddFunctionToGrpcSession(self):
server = server_lib.Server.create_local_server()
self.runTestAddFunctionToSession(server.target)
def testOpenAndCloseGrpcSession(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target):
pass
def testOpenAndCloseSession(self):
with session.Session():
pass
@test_util.run_v1_only('b/120545219')
def testAutoConvertAndCheckData(self):
with self.cached_session() as sess:
a = array_ops.placeholder(dtype=dtypes.string)
with self.assertRaisesRegex(
TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
sess.run(a, feed_dict={a: 1})
@test_util.run_v1_only('b/120545219')
def testOptimizerOptions(self):
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
with ops.Graph().as_default():
sess = session.Session()
self.assertEqual(
sess._config.graph_options.rewrite_options.min_graph_nodes, -1)
if __name__ == '__main__':
googletest.main()
| SessionTest |
python | ansible__ansible | lib/ansible/plugins/strategy/__init__.py | {
"start": 52263,
"end": 55157
} | class ____(cmd.Cmd):
prompt_continuous = '> ' # multiple lines
def __init__(self, task, host, task_vars, play_context, result, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.prompt = '[%s] %s (debug)> ' % (host, task)
self.intro = None
self.scope = {}
self.scope['task'] = task
self.scope['task_vars'] = task_vars
self.scope['host'] = host
self.scope['play_context'] = play_context
self.scope['result'] = result
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
do_h = cmd.Cmd.do_help
def do_EOF(self, args):
"""Quit"""
return self.do_quit(args)
def do_quit(self, args):
"""Quit"""
display.display('User interrupted execution')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
"""Continue to next result"""
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
"""Schedule task for re-execution. The re-execution may not be the next result"""
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def do_update_task(self, args):
"""Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
templar = TemplateEngine(variables=self.scope['task_vars'])
task = self.scope['task']
task = task.load_data(task._ds)
task.post_validate(templar)
self.scope['task'] = task
do_u = do_update_task
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_pprint(self, args):
"""Pretty Print"""
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except Exception:
pass
do_p = do_pprint
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def default(self, line):
try:
self.execute(line)
except Exception:
pass
| Debugger |
python | walkccc__LeetCode | solutions/2157. Groups of Strings/2157.py | {
"start": 530,
"end": 1554
} | class ____:
def groupStrings(self, words: list[str]) -> list[int]:
uf = UnionFind(len(words))
def getMask(s: str) -> int:
mask = 0
for c in s:
mask |= 1 << ord(c) - ord('a')
return mask
def getAddedMasks(mask: int):
for i in range(26):
if not (mask >> i & 1):
yield mask | 1 << i
def getDeletedMasks(mask: int):
for i in range(26):
if mask >> i & 1:
yield mask ^ 1 << i
maskToIndex = {getMask(word): i for i, word in enumerate(words)}
deletedMaskToIndex = {}
for i, word in enumerate(words):
mask = getMask(word)
for m in getAddedMasks(mask):
if m in maskToIndex:
uf.unionBySize(i, maskToIndex[m])
for m in getDeletedMasks(mask):
if m in maskToIndex:
uf.unionBySize(i, maskToIndex[m])
if m in deletedMaskToIndex:
uf.unionBySize(i, deletedMaskToIndex[m])
else:
deletedMaskToIndex[m] = i
return [uf.count, max(uf.sz)]
| Solution |
python | plotly__plotly.py | plotly/graph_objs/ohlc/legendgrouptitle/_font.py | {
"start": 233,
"end": 9911
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "ohlc.legendgrouptitle"
_path_str = "ohlc.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.ohlc.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.ohlc.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.ohlc.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 2632,
"end": 2755
} | class ____:
def m1(self):
return self.m0() # Interval: [2,5] /\ [1,8] = [2,5]
def m0(self):
pass
| A6 |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 3119,
"end": 3313
} | class ____(PrefectException):
"""
Raised when a result is missing from a state; often when result persistence is
disabled and the state is retrieved from the API.
"""
| MissingResult |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 268265,
"end": 268656
} | class ____(StatNode):
# Nonlocal variable declaration via the 'nonlocal' keyword.
#
# names [string]
child_attrs = []
def analyse_declarations(self, env):
for name in self.names:
env.declare_nonlocal(name, self.pos)
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
pass
| NonlocalNode |
python | wandb__wandb | wandb/automations/_filters/run_metrics.py | {
"start": 6969,
"end": 11821
} | class ____(GQLBase, ABC, extra="forbid"):
def gt(self, value: int | float, /) -> MetricThresholdFilter:
"""Returns a filter that watches for `metric_expr > threshold`."""
return self > value
def lt(self, value: int | float, /) -> MetricThresholdFilter:
"""Returns a filter that watches for `metric_expr < threshold`."""
return self < value
def gte(self, value: int | float, /) -> MetricThresholdFilter:
"""Returns a filter that watches for `metric_expr >= threshold`."""
return self >= value
def lte(self, value: int | float, /) -> MetricThresholdFilter:
"""Returns a filter that watches for `metric_expr <= threshold`."""
return self <= value
# Overloads to implement:
# - `(metric_operand > threshold) -> MetricThresholdFilter`
# - `(metric_operand < threshold) -> MetricThresholdFilter`
# - `(metric_operand >= threshold) -> MetricThresholdFilter`
# - `(metric_operand <= threshold) -> MetricThresholdFilter`
def __gt__(self, other: Any) -> MetricThresholdFilter:
if isinstance(other, (int, float)):
return MetricThresholdFilter(**dict(self), cmp="$gt", threshold=other)
return NotImplemented
def __lt__(self, other: Any) -> MetricThresholdFilter:
if isinstance(other, (int, float)):
return MetricThresholdFilter(**dict(self), cmp="$lt", threshold=other)
return NotImplemented
def __ge__(self, other: Any) -> MetricThresholdFilter:
if isinstance(other, (int, float)):
return MetricThresholdFilter(**dict(self), cmp="$gte", threshold=other)
return NotImplemented
def __le__(self, other: Any) -> MetricThresholdFilter:
if isinstance(other, (int, float)):
return MetricThresholdFilter(**dict(self), cmp="$lte", threshold=other)
return NotImplemented
@overload
def changes_by(self, *, diff: PosNum, frac: None) -> MetricChangeFilter: ...
@overload
def changes_by(self, *, diff: None, frac: PosNum) -> MetricChangeFilter: ...
@overload # NOTE: This overload is for internal use only.
def changes_by(
self, *, diff: PosNum | None, frac: PosNum | None, _dir: ChangeDir
) -> MetricChangeFilter: ...
def changes_by(
self,
*,
diff: PosNum | None = None,
frac: PosNum | None = None,
_dir: ChangeDir = ChangeDir.ANY,
) -> MetricChangeFilter:
"""Returns a filter that watches for a numerical increase OR decrease in a metric.
Exactly one of `frac` or `diff` must be provided.
Args:
diff: If given, arithmetic difference that must be observed in the metric.
Must be positive.
frac: If given, fractional (relative) change that must be observed in the
metric. Must be positive. For example, `frac=0.1` denotes a 10% relative
increase or decrease.
"""
if (
# Enforce mutually exclusive keyword args
((frac is None) and (diff is None))
or ((frac is not None) and (diff is not None))
):
raise ValueError("Must provide exactly one of `frac` or `diff`")
# Enforce positive values
if (frac is not None) and (frac <= 0):
raise ValueError(f"Expected positive threshold, got: {frac=}")
if (diff is not None) and (diff <= 0):
raise ValueError(f"Expected positive threshold, got: {diff=}")
if diff is None:
kws = dict(change_dir=_dir, change_type=ChangeType.REL, threshold=frac)
else:
kws = dict(change_dir=_dir, change_type=ChangeType.ABS, threshold=diff)
return MetricChangeFilter(**dict(self), **kws)
@overload
def increases_by(self, *, diff: PosNum, frac: None) -> MetricChangeFilter: ...
@overload
def increases_by(self, *, diff: None, frac: PosNum) -> MetricChangeFilter: ...
def increases_by(
self, *, diff: PosNum | None = None, frac: PosNum | None = None
) -> MetricChangeFilter:
"""Returns a filter that watches for a numerical increase in a metric.
Arguments mirror those of `.changes_by()`.
"""
return self.changes_by(diff=diff, frac=frac, _dir=ChangeDir.INC)
@overload
def decreases_by(self, *, diff: PosNum, frac: None) -> MetricChangeFilter: ...
@overload
def decreases_by(self, *, diff: None, frac: PosNum) -> MetricChangeFilter: ...
def decreases_by(
self, *, diff: PosNum | None = None, frac: PosNum | None = None
) -> MetricChangeFilter:
"""Returns a filter that watches for a numerical decrease in a metric.
Arguments mirror those of `.changes_by()`.
"""
return self.changes_by(diff=diff, frac=frac, _dir=ChangeDir.DEC)
| BaseMetricOperand |
python | python__mypy | mypy/errors.py | {
"start": 12233,
"end": 48473
} | class ____:
"""Container for compile errors.
This class generates and keeps tracks of compile errors and the
current error context (nested imports).
"""
# Map from files to generated error messages. Is an OrderedDict so
# that it can be used to order messages based on the order the
# files were processed.
error_info_map: dict[str, list[ErrorInfo]]
# optimization for legacy codebases with many files with errors
has_blockers: set[str]
# Files that we have reported the errors for
flushed_files: set[str]
# Current error context: nested import context/stack, as a list of (path, line) pairs.
import_ctx: list[tuple[str, int]]
# Path name prefix that is removed from all paths, if set.
ignore_prefix: str | None = None
# Path to current file.
file: str = ""
# Ignore some errors on these lines of each file
# (path -> line -> error-codes)
ignored_lines: dict[str, dict[int, list[str]]]
# Lines that were skipped during semantic analysis e.g. due to ALWAYS_FALSE, MYPY_FALSE,
# or platform/version checks. Those lines would not be type-checked.
skipped_lines: dict[str, set[int]]
# Lines on which an error was actually ignored.
used_ignored_lines: dict[str, dict[int, list[str]]]
# Files where all errors should be ignored.
ignored_files: set[str]
# Collection of reported only_once messages.
only_once_messages: set[str]
# Set to True to show "In function "foo":" messages.
show_error_context: bool = False
# Set to True to show column numbers in error messages.
show_column_numbers: bool = False
# Set to True to show end line and end column in error messages.
# This implies `show_column_numbers`.
show_error_end: bool = False
# Set to True to show absolute file paths in error messages.
show_absolute_path: bool = False
# State for keeping track of the current fine-grained incremental mode target.
# (See mypy.server.update for more about targets.)
# Current module id.
target_module: str | None = None
scope: Scope | None = None
# Have we seen an import-related error so far? If yes, we filter out other messages
# in some cases to avoid reporting huge numbers of errors.
seen_import_error = False
_watchers: list[ErrorWatcher]
def __init__(
self,
options: Options,
*,
read_source: Callable[[str], list[str] | None] | None = None,
hide_error_codes: bool | None = None,
) -> None:
self.options = options
self.hide_error_codes = (
hide_error_codes if hide_error_codes is not None else options.hide_error_codes
)
# We use fscache to read source code when showing snippets.
self.read_source = read_source
self.initialize()
def initialize(self) -> None:
self.error_info_map = {}
self.flushed_files = set()
self.import_ctx = []
self.function_or_member = [None]
self.ignored_lines = {}
self.skipped_lines = {}
self.used_ignored_lines = defaultdict(lambda: defaultdict(list))
self.ignored_files = set()
self.only_once_messages = set()
self.has_blockers = set()
self.scope = None
self.target_module = None
self.seen_import_error = False
self._watchers = []
def reset(self) -> None:
self.initialize()
def set_ignore_prefix(self, prefix: str) -> None:
"""Set path prefix that will be removed from all paths."""
prefix = os.path.normpath(prefix)
# Add separator to the end, if not given.
if os.path.basename(prefix) != "":
prefix += os.sep
self.ignore_prefix = prefix
def simplify_path(self, file: str) -> str:
if self.options.show_absolute_path:
return os.path.abspath(file)
else:
file = os.path.normpath(file)
return remove_path_prefix(file, self.ignore_prefix)
def set_file(
self, file: str, module: str | None, options: Options, scope: Scope | None = None
) -> None:
"""Set the path and module id of the current file."""
# The path will be simplified later, in render_messages. That way
# * 'file' is always a key that uniquely identifies a source file
# that mypy read (simplified paths might not be unique); and
# * we only have to simplify in one place, while still supporting
# reporting errors for files other than the one currently being
# processed.
self.file = file
self.target_module = module
self.scope = scope
self.options = options
def set_file_ignored_lines(
self, file: str, ignored_lines: dict[int, list[str]], ignore_all: bool = False
) -> None:
self.ignored_lines[file] = ignored_lines
if ignore_all:
self.ignored_files.add(file)
def set_skipped_lines(self, file: str, skipped_lines: set[int]) -> None:
self.skipped_lines[file] = skipped_lines
def current_target(self) -> str | None:
"""Retrieves the current target from the associated scope.
If there is no associated scope, use the target module."""
if self.scope is not None:
return self.scope.current_target()
return self.target_module
def current_module(self) -> str | None:
return self.target_module
def import_context(self) -> list[tuple[str, int]]:
"""Return a copy of the import context."""
return self.import_ctx.copy()
def set_import_context(self, ctx: list[tuple[str, int]]) -> None:
"""Replace the entire import context with a new value."""
self.import_ctx = ctx.copy()
def report(
self,
line: int,
column: int | None,
message: str,
code: ErrorCode | None = None,
*,
blocker: bool = False,
severity: str = "error",
file: str | None = None,
only_once: bool = False,
origin_span: Iterable[int] | None = None,
offset: int = 0,
end_line: int | None = None,
end_column: int | None = None,
parent_error: ErrorInfo | None = None,
) -> ErrorInfo:
"""Report message at the given line using the current error context.
Args:
line: line number of error
column: column number of error
message: message to report
code: error code (defaults to 'misc'; not shown for notes)
blocker: if True, don't continue analysis after this error
severity: 'error' or 'note'
file: if non-None, override current file as context
only_once: if True, only report this exact message once per build
origin_span: if non-None, override current context as origin
(type: ignores have effect here)
end_line: if non-None, override current context as end
parent_error: an error this note is attached to (for notes only).
"""
if self.scope:
type = self.scope.current_type_name()
if self.scope.ignored > 0:
type = None # Omit type context if nested function
function = self.scope.current_function_name()
else:
type = None
function = None
if column is None:
column = -1
if end_column is None:
if column == -1:
end_column = -1
else:
end_column = column + 1
if file is None:
file = self.file
if offset:
message = " " * offset + message
if origin_span is None:
origin_span = [line]
if end_line is None:
end_line = line
code = code or (parent_error.code if parent_error else None)
code = code or (codes.MISC if not blocker else None)
info = ErrorInfo(
import_ctx=self.import_context(),
file=file,
module=self.current_module(),
typ=type,
function_or_member=function,
line=line,
column=column,
end_line=end_line,
end_column=end_column,
severity=severity,
message=message,
code=code,
blocker=blocker,
only_once=only_once,
origin=(self.file, origin_span),
target=self.current_target(),
parent_error=parent_error,
)
self.add_error_info(info)
return info
def _add_error_info(self, file: str, info: ErrorInfo) -> None:
assert file not in self.flushed_files
# process the stack of ErrorWatchers before modifying any internal state
# in case we need to filter out the error entirely
if self._filter_error(file, info):
return
if file not in self.error_info_map:
self.error_info_map[file] = []
self.error_info_map[file].append(info)
if info.blocker:
self.has_blockers.add(file)
if info.code in (IMPORT, IMPORT_UNTYPED, IMPORT_NOT_FOUND):
self.seen_import_error = True
def get_watchers(self) -> Iterator[ErrorWatcher]:
"""Yield the `ErrorWatcher` stack from top to bottom."""
i = len(self._watchers)
while i > 0:
i -= 1
yield self._watchers[i]
def _filter_error(self, file: str, info: ErrorInfo) -> bool:
"""
process ErrorWatcher stack from top to bottom,
stopping early if error needs to be filtered out
"""
return any(w.on_error(file, info) for w in self.get_watchers())
def add_error_info(self, info: ErrorInfo) -> None:
file, lines = info.origin
# process the stack of ErrorWatchers before modifying any internal state
# in case we need to filter out the error entirely
# NB: we need to do this both here and in _add_error_info, otherwise we
# might incorrectly update the sets of ignored or only_once messages
if self._filter_error(file, info):
return
if not info.blocker: # Blockers cannot be ignored
if file in self.ignored_lines:
# Check each line in this context for "type: ignore" comments.
# line == end_line for most nodes, so we only loop once.
for scope_line in lines:
if self.is_ignored_error(scope_line, info, self.ignored_lines[file]):
err_code = info.code or codes.MISC
if not self.is_error_code_enabled(err_code):
# Error code is disabled - don't mark the current
# "type: ignore" comment as used.
return
# Annotation requests us to ignore all errors on this line.
self.used_ignored_lines[file][scope_line].append(err_code.code)
return
if file in self.ignored_files:
return
if info.only_once:
if info.message in self.only_once_messages:
return
self.only_once_messages.add(info.message)
if (
self.seen_import_error
and info.code not in (IMPORT, IMPORT_UNTYPED, IMPORT_NOT_FOUND)
and self.has_many_errors()
):
# Missing stubs can easily cause thousands of errors about
# Any types, especially when upgrading to mypy 0.900,
# which no longer bundles third-party library stubs. Avoid
# showing too many errors to make it easier to see
# import-related errors.
info.hidden = True
self.report_hidden_errors(info)
self._add_error_info(file, info)
ignored_codes = self.ignored_lines.get(file, {}).get(info.line, [])
if ignored_codes and info.code:
# Something is ignored on the line, but not this error, so maybe the error
# code is incorrect.
msg = f'Error code "{info.code.code}" not covered by "type: ignore" comment'
if info.code in original_error_codes:
# If there seems to be a "type: ignore" with a stale error
# code, report a more specific note.
old_code = original_error_codes[info.code].code
if old_code in ignored_codes:
msg = (
f'Error code changed to {info.code.code}; "type: ignore" comment '
+ "may be out of date"
)
note = ErrorInfo(
import_ctx=info.import_ctx,
file=info.file,
module=info.module,
typ=info.type,
function_or_member=info.function_or_member,
line=info.line,
column=info.column,
end_line=info.end_line,
end_column=info.end_column,
severity="note",
message=msg,
code=None,
blocker=False,
only_once=False,
)
self._add_error_info(file, note)
if (
self.options.show_error_code_links
and not self.options.hide_error_codes
and info.code is not None
and info.code not in HIDE_LINK_CODES
and info.code.code in mypy_error_codes
):
message = f"See {BASE_RTD_URL}-{info.code.code} for more info"
if message in self.only_once_messages:
return
self.only_once_messages.add(message)
info = ErrorInfo(
import_ctx=info.import_ctx,
file=info.file,
module=info.module,
typ=info.type,
function_or_member=info.function_or_member,
line=info.line,
column=info.column,
end_line=info.end_line,
end_column=info.end_column,
severity="note",
message=message,
code=info.code,
blocker=False,
only_once=True,
priority=20,
)
self._add_error_info(file, info)
def has_many_errors(self) -> bool:
if self.options.many_errors_threshold < 0:
return False
if len(self.error_info_map) >= self.options.many_errors_threshold:
return True
if (
sum(len(errors) for errors in self.error_info_map.values())
>= self.options.many_errors_threshold
):
return True
return False
def report_hidden_errors(self, info: ErrorInfo) -> None:
message = (
"(Skipping most remaining errors due to unresolved imports or missing stubs; "
+ "fix these first)"
)
if message in self.only_once_messages:
return
self.only_once_messages.add(message)
new_info = ErrorInfo(
import_ctx=info.import_ctx,
file=info.file,
module=info.module,
typ=None,
function_or_member=None,
line=info.line,
column=info.column,
end_line=info.end_line,
end_column=info.end_column,
severity="note",
message=message,
code=None,
blocker=False,
only_once=True,
origin=info.origin,
target=info.target,
)
self._add_error_info(info.origin[0], new_info)
def is_ignored_error(self, line: int, info: ErrorInfo, ignores: dict[int, list[str]]) -> bool:
if info.blocker:
# Blocking errors can never be ignored
return False
if info.code and not self.is_error_code_enabled(info.code):
return True
if line not in ignores:
return False
if not ignores[line]:
# Empty list means that we ignore all errors
return True
if info.code and self.is_error_code_enabled(info.code):
return (
info.code.code in ignores[line]
or info.code.sub_code_of is not None
and info.code.sub_code_of.code in ignores[line]
)
return False
def is_error_code_enabled(self, error_code: ErrorCode) -> bool:
if self.options:
current_mod_disabled = self.options.disabled_error_codes
current_mod_enabled = self.options.enabled_error_codes
else:
current_mod_disabled = set()
current_mod_enabled = set()
if error_code in current_mod_disabled:
return False
elif error_code in current_mod_enabled:
return True
elif error_code.sub_code_of is not None and error_code.sub_code_of in current_mod_disabled:
return False
else:
return error_code.default_enabled
def clear_errors_in_targets(self, path: str, targets: set[str]) -> None:
"""Remove errors in specific fine-grained targets within a file."""
if path in self.error_info_map:
new_errors = []
has_blocker = False
for info in self.error_info_map[path]:
if info.target not in targets:
new_errors.append(info)
has_blocker |= info.blocker
elif info.only_once:
self.only_once_messages.remove(info.message)
self.error_info_map[path] = new_errors
if not has_blocker and path in self.has_blockers:
self.has_blockers.remove(path)
def generate_unused_ignore_errors(self, file: str) -> None:
if (
is_typeshed_file(self.options.abs_custom_typeshed_dir if self.options else None, file)
or file in self.ignored_files
):
return
ignored_lines = self.ignored_lines[file]
used_ignored_lines = self.used_ignored_lines[file]
for line, ignored_codes in ignored_lines.items():
if line in self.skipped_lines[file]:
continue
if codes.UNUSED_IGNORE.code in ignored_codes:
continue
used_ignored_codes = set(used_ignored_lines[line])
unused_ignored_codes = [c for c in ignored_codes if c not in used_ignored_codes]
# `ignore` is used
if not ignored_codes and used_ignored_codes:
continue
# All codes appearing in `ignore[...]` are used
if ignored_codes and not unused_ignored_codes:
continue
# Display detail only when `ignore[...]` specifies more than one error code
unused_codes_message = ""
if len(ignored_codes) > 1 and unused_ignored_codes:
unused_codes_message = f"[{', '.join(unused_ignored_codes)}]"
message = f'Unused "type: ignore{unused_codes_message}" comment'
for unused in unused_ignored_codes:
narrower = set(used_ignored_codes) & codes.sub_code_map[unused]
if narrower:
message += f", use narrower [{', '.join(narrower)}] instead of [{unused}] code"
# Don't use report since add_error_info will ignore the error!
info = ErrorInfo(
import_ctx=self.import_context(),
file=file,
module=self.current_module(),
typ=None,
function_or_member=None,
line=line,
column=-1,
end_line=line,
end_column=-1,
severity="error",
message=message,
code=codes.UNUSED_IGNORE,
blocker=False,
only_once=False,
origin=(self.file, [line]),
target=self.target_module,
)
self._add_error_info(file, info)
def generate_ignore_without_code_errors(
self, file: str, is_warning_unused_ignores: bool
) -> None:
if (
is_typeshed_file(self.options.abs_custom_typeshed_dir if self.options else None, file)
or file in self.ignored_files
):
return
used_ignored_lines = self.used_ignored_lines[file]
# If the whole file is ignored, ignore it.
if used_ignored_lines:
_, used_codes = min(used_ignored_lines.items())
if codes.FILE.code in used_codes:
return
for line, ignored_codes in self.ignored_lines[file].items():
if ignored_codes:
continue
# If the ignore is itself unused and that would be warned about, let
# that error stand alone
if is_warning_unused_ignores and not used_ignored_lines[line]:
continue
codes_hint = ""
ignored_codes = sorted(set(used_ignored_lines[line]))
if ignored_codes:
codes_hint = f' (consider "type: ignore[{", ".join(ignored_codes)}]" instead)'
message = f'"type: ignore" comment without error code{codes_hint}'
# Don't use report since add_error_info will ignore the error!
info = ErrorInfo(
import_ctx=self.import_context(),
file=file,
module=self.current_module(),
typ=None,
function_or_member=None,
line=line,
column=-1,
end_line=line,
end_column=-1,
severity="error",
message=message,
code=codes.IGNORE_WITHOUT_CODE,
blocker=False,
only_once=False,
origin=(self.file, [line]),
target=self.target_module,
)
self._add_error_info(file, info)
def num_messages(self) -> int:
"""Return the number of generated messages."""
return sum(len(x) for x in self.error_info_map.values())
def is_errors(self) -> bool:
"""Are there any generated messages?"""
return bool(self.error_info_map)
def is_blockers(self) -> bool:
"""Are the any errors that are blockers?"""
return bool(self.has_blockers)
def blocker_module(self) -> str | None:
"""Return the module with a blocking error, or None if not possible."""
for path in self.has_blockers:
for err in self.error_info_map[path]:
if err.blocker:
return err.module
return None
def is_errors_for_file(self, file: str) -> bool:
"""Are there any errors for the given file?"""
return file in self.error_info_map and file not in self.ignored_files
def prefer_simple_messages(self) -> bool:
"""Should we generate simple/fast error messages?
Return True if errors are not shown to user, i.e. errors are ignored
or they are collected for internal use only.
If True, we should prefer to generate a simple message quickly.
All normal errors should still be reported.
"""
if self.file in self.ignored_files:
# Errors ignored, so no point generating fancy messages
return True
if self._watchers:
_watcher = self._watchers[-1]
if _watcher._filter is True and _watcher._filtered is None:
# Errors are filtered
return True
return False
def raise_error(self, use_stdout: bool = True) -> NoReturn:
"""Raise a CompileError with the generated messages.
Render the messages suitable for displaying.
"""
# self.new_messages() will format all messages that haven't already
# been returned from a file_messages() call.
raise CompileError(
self.new_messages(), use_stdout=use_stdout, module_with_blocker=self.blocker_module()
)
def format_messages(
self, error_tuples: list[ErrorTuple], source_lines: list[str] | None
) -> list[str]:
"""Return a string list that represents the error messages.
Use a form suitable for displaying to the user. If self.pretty
is True also append a relevant trimmed source code line (only for
severity 'error').
"""
a: list[str] = []
for file, line, column, end_line, end_column, severity, message, code in error_tuples:
s = ""
if file is not None:
if self.options.show_column_numbers and line >= 0 and column >= 0:
srcloc = f"{file}:{line}:{1 + column}"
if self.options.show_error_end and end_line >= 0 and end_column >= 0:
srcloc += f":{end_line}:{end_column}"
elif line >= 0:
srcloc = f"{file}:{line}"
else:
srcloc = file
s = f"{srcloc}: {severity}: {message}"
else:
s = message
if (
not self.hide_error_codes
and code
and (severity != "note" or code in SHOW_NOTE_CODES)
):
# If note has an error code, it is related to a previous error. Avoid
# displaying duplicate error codes.
s = f"{s} [{code.code}]"
a.append(s)
if self.options.pretty:
# Add source code fragment and a location marker.
if severity == "error" and source_lines and line > 0:
source_line = source_lines[line - 1]
source_line_expanded = source_line.expandtabs()
if column < 0:
# Something went wrong, take first non-empty column.
column = len(source_line) - len(source_line.lstrip())
# Shifts column after tab expansion
column = len(source_line[:column].expandtabs())
end_column = len(source_line[:end_column].expandtabs())
# Note, currently coloring uses the offset to detect source snippets,
# so these offsets should not be arbitrary.
a.append(" " * DEFAULT_SOURCE_OFFSET + source_line_expanded)
marker = "^"
if end_line == line and end_column > column:
marker = f'^{"~" * (end_column - column - 1)}'
elif end_line != line:
# just highlight the first line instead
marker = f'^{"~" * (len(source_line_expanded) - column - 1)}'
a.append(" " * (DEFAULT_SOURCE_OFFSET + column) + marker)
return a
def file_messages(self, path: str, formatter: ErrorFormatter | None = None) -> list[str]:
"""Return a string list of new error messages from a given file.
Use a form suitable for displaying to the user.
"""
if path not in self.error_info_map:
return []
error_info = self.error_info_map[path]
error_info = [info for info in error_info if not info.hidden]
error_info = self.remove_duplicates(self.sort_messages(error_info))
error_tuples = self.render_messages(error_info)
if formatter is not None:
errors = create_errors(error_tuples)
return [formatter.report_error(err) for err in errors]
self.flushed_files.add(path)
source_lines = None
if self.options.pretty and self.read_source:
# Find shadow file mapping and read source lines if a shadow file exists for the given path.
# If shadow file mapping is not found, read source lines
mapped_path = self.find_shadow_file_mapping(path)
if mapped_path:
source_lines = self.read_source(mapped_path)
else:
source_lines = self.read_source(path)
return self.format_messages(error_tuples, source_lines)
def find_shadow_file_mapping(self, path: str) -> str | None:
"""Return the shadow file path for a given source file path or None."""
if self.options.shadow_file is None:
return None
for i in self.options.shadow_file:
if i[0] == path:
return i[1]
return None
def new_messages(self) -> list[str]:
"""Return a string list of new error messages.
Use a form suitable for displaying to the user.
Errors from different files are ordered based on the order in which
they first generated an error.
"""
msgs = []
for path in self.error_info_map.keys():
if path not in self.flushed_files:
msgs.extend(self.file_messages(path))
return msgs
def targets(self) -> set[str]:
"""Return a set of all targets that contain errors."""
# TODO: Make sure that either target is always defined or that not being defined
# is okay for fine-grained incremental checking.
return {
info.target for errs in self.error_info_map.values() for info in errs if info.target
}
def render_messages(self, errors: list[ErrorInfo]) -> list[ErrorTuple]:
"""Translate the messages into a sequence of tuples.
Each tuple is of form (path, line, col, severity, message, code).
The rendered sequence includes information about error contexts.
The path item may be None. If the line item is negative, the
line number is not defined for the tuple.
"""
result: list[ErrorTuple] = []
prev_import_context: list[tuple[str, int]] = []
prev_function_or_member: str | None = None
prev_type: str | None = None
for e in errors:
# Report module import context, if different from previous message.
if not self.options.show_error_context:
pass
elif e.import_ctx != prev_import_context:
last = len(e.import_ctx) - 1
i = last
while i >= 0:
path, line = e.import_ctx[i]
fmt = "{}:{}: note: In module imported here"
if i < last:
fmt = "{}:{}: note: ... from here"
if i > 0:
fmt += ","
else:
fmt += ":"
# Remove prefix to ignore from path (if present) to
# simplify path.
path = remove_path_prefix(path, self.ignore_prefix)
result.append((None, -1, -1, -1, -1, "note", fmt.format(path, line), None))
i -= 1
file = self.simplify_path(e.file)
# Report context within a source file.
if not self.options.show_error_context:
pass
elif e.function_or_member != prev_function_or_member or e.type != prev_type:
if e.function_or_member is None:
if e.type is None:
result.append((file, -1, -1, -1, -1, "note", "At top level:", None))
else:
result.append(
(file, -1, -1, -1, -1, "note", f'In class "{e.type}":', None)
)
else:
if e.type is None:
result.append(
(
file,
-1,
-1,
-1,
-1,
"note",
f'In function "{e.function_or_member}":',
None,
)
)
else:
result.append(
(
file,
-1,
-1,
-1,
-1,
"note",
'In member "{}" of class "{}":'.format(
e.function_or_member, e.type
),
None,
)
)
elif e.type != prev_type:
if e.type is None:
result.append((file, -1, -1, -1, -1, "note", "At top level:", None))
else:
result.append((file, -1, -1, -1, -1, "note", f'In class "{e.type}":', None))
result.append(
(file, e.line, e.column, e.end_line, e.end_column, e.severity, e.message, e.code)
)
prev_import_context = e.import_ctx
prev_function_or_member = e.function_or_member
prev_type = e.type
return result
def sort_messages(self, errors: list[ErrorInfo]) -> list[ErrorInfo]:
"""Sort an array of error messages locally by line number.
I.e., sort a run of consecutive messages with the same
context by line number, but otherwise retain the general
ordering of the messages.
"""
result: list[ErrorInfo] = []
i = 0
while i < len(errors):
i0 = i
# Find neighbouring errors with the same context and file.
while (
i + 1 < len(errors)
and errors[i + 1].import_ctx == errors[i].import_ctx
and errors[i + 1].file == errors[i].file
):
i += 1
i += 1
# Sort the errors specific to a file according to line number and column.
a = sorted(errors[i0:i], key=lambda x: (x.line, x.column))
a = self.sort_within_context(a)
result.extend(a)
return result
def sort_within_context(self, errors: list[ErrorInfo]) -> list[ErrorInfo]:
"""For the same location decide which messages to show first/last.
Currently, we only compare within the same error code, to decide the
order of various additional notes.
"""
result = []
i = 0
while i < len(errors):
i0 = i
# Find neighbouring errors with the same position and error code.
while (
i + 1 < len(errors)
and errors[i + 1].line == errors[i].line
and errors[i + 1].column == errors[i].column
and errors[i + 1].end_line == errors[i].end_line
and errors[i + 1].end_column == errors[i].end_column
and errors[i + 1].code == errors[i].code
):
i += 1
i += 1
# Sort the messages specific to a given error by priority.
a = sorted(errors[i0:i], key=lambda x: x.priority)
result.extend(a)
return result
def remove_duplicates(self, errors: list[ErrorInfo]) -> list[ErrorInfo]:
filtered_errors = []
seen_by_line: defaultdict[int, set[tuple[str, str]]] = defaultdict(set)
removed = set()
for err in errors:
if err.parent_error is not None:
# Notes with specified parent are removed together with error below.
filtered_errors.append(err)
elif (err.severity, err.message) not in seen_by_line[err.line]:
filtered_errors.append(err)
seen_by_line[err.line].add((err.severity, err.message))
else:
removed.add(err)
return [
err
for err in filtered_errors
if err.parent_error is None or err.parent_error not in removed
]
| Errors |
python | encode__django-rest-framework | rest_framework/validators.py | {
"start": 3775,
"end": 8529
} | class ____:
"""
Validator that corresponds to `unique_together = (...)` on a model class.
Should be applied to the serializer class, not to an individual field.
"""
message = _('The fields {field_names} must make a unique set.')
missing_message = _('This field is required.')
requires_context = True
code = 'unique'
def __init__(self, queryset, fields, message=None, condition_fields=None, condition=None, code=None):
self.queryset = queryset
self.fields = fields
self.message = message or self.message
self.condition_fields = [] if condition_fields is None else condition_fields
self.condition = condition
self.code = code or self.code
def enforce_required_fields(self, attrs, serializer):
"""
The `UniqueTogetherValidator` always forces an implied 'required'
state on the fields it applies to.
"""
if serializer.instance is not None:
return
missing_items = {
field_name: self.missing_message
for field_name in (*self.fields, *self.condition_fields)
if serializer.fields[field_name].source not in attrs
}
if missing_items:
raise ValidationError(missing_items, code='required')
def filter_queryset(self, attrs, queryset, serializer):
"""
Filter the queryset to all instances matching the given attributes.
"""
# field names => field sources
sources = [
serializer.fields[field_name].source
for field_name in self.fields
]
# If this is an update, then any unprovided field should
# have it's value set based on the existing instance attribute.
if serializer.instance is not None:
for source in sources:
if source not in attrs:
attrs[source] = getattr(serializer.instance, source)
# Determine the filter keyword arguments and filter the queryset.
filter_kwargs = {
source: attrs[source]
for source in sources
}
return qs_filter(queryset, **filter_kwargs)
def exclude_current_instance(self, attrs, queryset, instance):
"""
If an instance is being updated, then do not include
that instance itself as a uniqueness conflict.
"""
if instance is not None:
return queryset.exclude(pk=instance.pk)
return queryset
def __call__(self, attrs, serializer):
self.enforce_required_fields(attrs, serializer)
queryset = self.queryset
queryset = self.filter_queryset(attrs, queryset, serializer)
queryset = self.exclude_current_instance(attrs, queryset, serializer.instance)
checked_names = [
serializer.fields[field_name].source for field_name in self.fields
]
# Ignore validation if any field is None
if serializer.instance is None:
checked_values = [attrs[field_name] for field_name in checked_names]
else:
# Ignore validation if all field values are unchanged
checked_values = [
attrs[field_name]
for field_name in checked_names
if attrs[field_name] != getattr(serializer.instance, field_name)
]
condition_sources = (serializer.fields[field_name].source for field_name in self.condition_fields)
condition_kwargs = {
source: attrs[source]
if source in attrs
else getattr(serializer.instance, source)
for source in condition_sources
}
if checked_values and None not in checked_values and qs_exists_with_condition(queryset, self.condition, condition_kwargs):
field_names = ', '.join(self.fields)
message = self.message.format(field_names=field_names)
raise ValidationError(message, code=self.code)
def __repr__(self):
return '<{}({})>'.format(
self.__class__.__name__,
', '.join(
f'{attr}={smart_repr(getattr(self, attr))}'
for attr in ('queryset', 'fields', 'condition')
if getattr(self, attr) is not None)
)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (self.message == other.message
and self.requires_context == other.requires_context
and self.missing_message == other.missing_message
and self.queryset == other.queryset
and self.fields == other.fields
and self.code == other.code
)
| UniqueTogetherValidator |
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 10736,
"end": 10783
} | class ____(XsdString):
pass
| ST_RelationshipId |
python | allegroai__clearml | clearml/backend_api/services/v2_9/projects.py | {
"start": 54213,
"end": 57404
} | class ____(Response):
"""
Response of projects.get_by_id endpoint.
:param project: Project info
:type project: Project
"""
_service = "projects"
_action = "get_by_id"
_version = "2.9"
_schema = {
"definitions": {
"project": {
"properties": {
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Creation time",
"format": "date-time",
"type": ["string", "null"],
},
"default_output_destination": {
"description": "The default output destination URL for new tasks under this project",
"type": ["string", "null"],
},
"description": {
"description": "Project description",
"type": ["string", "null"],
},
"id": {"description": "Project id", "type": ["string", "null"]},
"last_update": {
"description": "Last project update time. Reflects the last time the project metadata was changed or a task in this project has changed status",
"format": "date-time",
"type": ["string", "null"],
},
"name": {"description": "Project name", "type": ["string", "null"]},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"project": {
"description": "Project info",
"oneOf": [{"$ref": "#/definitions/project"}, {"type": "null"}],
}
},
"type": "object",
}
def __init__(self, project: Any = None, **kwargs: Any) -> None:
super(GetByIdResponse, self).__init__(**kwargs)
self.project = project
@schema_property("project")
def project(self) -> Any:
return self._property_project
@project.setter
def project(self, value: Any) -> None:
if value is None:
self._property_project = None
return
if isinstance(value, dict):
value = Project.from_dict(value)
else:
self.assert_isinstance(value, "project", Project)
self._property_project = value
| GetByIdResponse |
python | kamyu104__LeetCode-Solutions | Python/longest-consecutive-sequence.py | {
"start": 29,
"end": 531
} | class ____(object):
# @param num, a list of integer
# @return an integer
def longestConsecutive(self, num):
result, lengths = 1, {key: 0 for key in num}
for i in num:
if lengths[i] == 0:
lengths[i] = 1
left, right = lengths.get(i - 1, 0), lengths.get(i + 1, 0)
length = 1 + left + right
result, lengths[i - left], lengths[i + right] = max(result, length), length, length
return result
| Solution |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 2519,
"end": 7094
} | class ____(DefinedFunction):
r"""
Carmichael Numbers:
Certain cryptographic algorithms make use of big prime numbers.
However, checking whether a big number is prime is not so easy.
Randomized prime number checking tests exist that offer a high degree of
confidence of accurate determination at low cost, such as the Fermat test.
Let 'a' be a random number between $2$ and $n - 1$, where $n$ is the
number whose primality we are testing. Then, $n$ is probably prime if it
satisfies the modular arithmetic congruence relation:
.. math :: a^{n-1} = 1 \pmod{n}
(where mod refers to the modulo operation)
If a number passes the Fermat test several times, then it is prime with a
high probability.
Unfortunately, certain composite numbers (non-primes) still pass the Fermat
test with every number smaller than themselves.
These numbers are called Carmichael numbers.
A Carmichael number will pass a Fermat primality test to every base $b$
relatively prime to the number, even though it is not actually prime.
This makes tests based on Fermat's Little Theorem less effective than
strong probable prime tests such as the Baillie-PSW primality test and
the Miller-Rabin primality test.
Examples
========
>>> from sympy.ntheory.factor_ import find_first_n_carmichaels, find_carmichael_numbers_in_range
>>> find_first_n_carmichaels(5)
[561, 1105, 1729, 2465, 2821]
>>> find_carmichael_numbers_in_range(0, 562)
[561]
>>> find_carmichael_numbers_in_range(0,1000)
[561]
>>> find_carmichael_numbers_in_range(0,2000)
[561, 1105, 1729]
References
==========
.. [1] https://en.wikipedia.org/wiki/Carmichael_number
.. [2] https://en.wikipedia.org/wiki/Fermat_primality_test
.. [3] https://www.jstor.org/stable/23248683?seq=1#metadata_info_tab_contents
"""
@staticmethod
def is_perfect_square(n):
sympy_deprecation_warning(
"""
is_perfect_square is just a wrapper around sympy.ntheory.primetest.is_square
so use that directly instead.
""",
deprecated_since_version="1.11",
active_deprecations_target='deprecated-carmichael-static-methods',
)
return is_square(n)
@staticmethod
def divides(p, n):
sympy_deprecation_warning(
"""
divides can be replaced by directly testing n % p == 0.
""",
deprecated_since_version="1.11",
active_deprecations_target='deprecated-carmichael-static-methods',
)
return n % p == 0
@staticmethod
def is_prime(n):
sympy_deprecation_warning(
"""
is_prime is just a wrapper around sympy.ntheory.primetest.isprime so use that
directly instead.
""",
deprecated_since_version="1.11",
active_deprecations_target='deprecated-carmichael-static-methods',
)
return isprime(n)
@staticmethod
def is_carmichael(n):
sympy_deprecation_warning(
"""
is_carmichael is just a wrapper around sympy.ntheory.factor_.is_carmichael so use that
directly instead.
""",
deprecated_since_version="1.13",
active_deprecations_target='deprecated-ntheory-symbolic-functions',
)
return is_carmichael(n)
@staticmethod
def find_carmichael_numbers_in_range(x, y):
sympy_deprecation_warning(
"""
find_carmichael_numbers_in_range is just a wrapper around sympy.ntheory.factor_.find_carmichael_numbers_in_range so use that
directly instead.
""",
deprecated_since_version="1.13",
active_deprecations_target='deprecated-ntheory-symbolic-functions',
)
return find_carmichael_numbers_in_range(x, y)
@staticmethod
def find_first_n_carmichaels(n):
sympy_deprecation_warning(
"""
find_first_n_carmichaels is just a wrapper around sympy.ntheory.factor_.find_first_n_carmichaels so use that
directly instead.
""",
deprecated_since_version="1.13",
active_deprecations_target='deprecated-ntheory-symbolic-functions',
)
return find_first_n_carmichaels(n)
#----------------------------------------------------------------------------#
# #
# Fibonacci numbers #
# #
#----------------------------------------------------------------------------#
| carmichael |
python | getsentry__sentry | src/sentry/deletions/manager.py | {
"start": 272,
"end": 1571
} | class ____:
def __init__(self, default_task: type[BaseDeletionTask[Any]] | None = None) -> None:
self.tasks: MutableMapping[type[Model], type[BaseDeletionTask[Any]]] = {}
self.default_task = default_task
def exec_sync(self, instance: Model) -> None:
task = self.get(
model=type(instance),
query={"id": instance.id},
)
while task.chunk():
pass
def exec_sync_many(self, instances: list[Model]) -> None:
if not instances:
return
task = self.get(
model=type(instances[0]),
query={"id__in": [i.id for i in instances]},
)
while task.chunk():
pass
def get(
self,
task: type[BaseDeletionTask[Any]] | None = None,
**kwargs: Any,
) -> BaseDeletionTask[Any]:
if task is None:
model = kwargs.get("model")
assert model, "The model parameter is required if `task` is not provided"
task = self.tasks.get(model, self.default_task)
assert task is not None, "Task cannot be None"
return task(manager=self, **kwargs)
def register(self, model: type[Model], task: type[BaseDeletionTask[Any]]) -> None:
self.tasks[model] = task
| DeletionTaskManager |
python | pytest-dev__pytest | doc/en/example/fixtures/test_fixtures_request_different_scope.py | {
"start": 166,
"end": 341
} | class ____:
@pytest.fixture
def inner(self, order):
order.append("one")
def test_order(self, order, outer):
assert order == ["one", "outer"]
| TestOne |
python | PyCQA__pylint | tests/functional/c/consider/consider_using_enumerate.py | {
"start": 449,
"end": 1807
} | class ____:
def __iter__(self):
iterable = [1, 2, 3]
for i in range(len(iterable)): # [consider-using-enumerate]
yield iterable[i]
def test(self):
for i in range(len(self)): # [consider-using-enumerate]
yield self[i]
def good():
iterable = other_obj = [1, 2, 3]
total = 0
for obj in range(len(iterable)):
total += obj
yield total
yield iterable[obj + 1: 2]
yield iterable[len(obj)]
for obj in iterable:
yield iterable[obj - 1]
for index, obj in enumerate(iterable):
yield iterable[index]
for index in range(0, 10):
yield iterable[index + 1]
for index in range(10):
yield iterable[index]
for index in range(len([1, 2, 3, 4])):
yield index
for index in range(1, len(iterable)):
yield index
for index in range(len(iterable)):
yield [1, 2, 3][index]
yield len([1, 2, 3])
for index in range(len(iterable)):
yield other_obj[index]
# pylint: disable=import-outside-toplevel
from unknown import unknown
for index in range(unknown(iterable)):
yield iterable[index]
for index in range(len(iterable)):
def test(iterable):
return iterable[index] # pylint: disable=cell-var-from-loop
yield test([1, 2, 3])
| Bad |
python | django__django | tests/model_package/tests.py | {
"start": 382,
"end": 2624
} | class ____(TestCase):
def test_m2m_tables_in_subpackage_models(self):
"""
Regression for #12168: models split into subpackages still get M2M
tables.
"""
p = Publication.objects.create(title="FooBar")
site = Site.objects.create(name="example.com")
a = Article.objects.create(headline="a foo headline")
a.publications.add(p)
a.sites.add(site)
a = Article.objects.get(id=a.pk)
self.assertEqual(a.id, a.pk)
self.assertEqual(a.sites.count(), 1)
def test_models_in_the_test_package(self):
"""
Regression for #12245 - Models can exist in the test package, too.
"""
p = Publication.objects.create(title="FooBar")
ad = Advertisement.objects.create(customer="Lawrence Journal-World")
ad.publications.add(p)
ad = Advertisement.objects.get(id=ad.pk)
self.assertEqual(ad.publications.count(), 1)
def test_automatic_m2m_column_names(self):
"""
Regression for #12386 - field names on the autogenerated intermediate
class that are specified as dotted strings don't retain any path
component for the field or column name.
"""
self.assertEqual(Article.publications.through._meta.fields[1].name, "article")
self.assertEqual(
Article.publications.through._meta.fields[1].get_attname_column(),
("article_id", "article_id"),
)
self.assertEqual(
Article.publications.through._meta.fields[2].name, "publication"
)
self.assertEqual(
Article.publications.through._meta.fields[2].get_attname_column(),
("publication_id", "publication_id"),
)
self.assertEqual(
Article._meta.get_field("publications").m2m_db_table(),
truncate_name(
"model_package_article_publications", connection.ops.max_name_length()
),
)
self.assertEqual(
Article._meta.get_field("publications").m2m_column_name(), "article_id"
)
self.assertEqual(
Article._meta.get_field("publications").m2m_reverse_name(), "publication_id"
)
| ModelPackageTests |
python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {
"start": 100874,
"end": 100965
} | class ____(CheckSignatureCompatibilityBase):
pass
| TestFunctionHelpersSignatureCompatibility |
python | prompt-toolkit__python-prompt-toolkit | examples/prompts/custom-lexer.py | {
"start": 251,
"end": 727
} | class ____(Lexer):
def lex_document(self, document):
colors = sorted(NAMED_COLORS, key=NAMED_COLORS.get)
def get_line(lineno):
return [
(colors[i % len(colors)], c)
for i, c in enumerate(document.lines[lineno])
]
return get_line
def main():
answer = prompt("Give me some input: ", lexer=RainbowLexer())
print(f"You said: {answer}")
if __name__ == "__main__":
main()
| RainbowLexer |
python | pypa__setuptools | setuptools/config/_validate_pyproject/extra_validations.py | {
"start": 691,
"end": 2858
} | class ____(ValidationError):
_DESC = """An included dependency group must exist and must not be cyclic.
"""
__doc__ = _DESC
_URL = "https://peps.python.org/pep-0735/"
def validate_project_dynamic(pyproject: T) -> T:
project_table = pyproject.get("project", {})
dynamic = project_table.get("dynamic", [])
for field in dynamic:
if field in project_table:
raise RedefiningStaticFieldAsDynamic(
message=f"You cannot provide a value for `project.{field}` and "
"list it under `project.dynamic` at the same time",
value={
field: project_table[field],
"...": " # ...",
"dynamic": dynamic,
},
name=f"data.project.{field}",
definition={
"description": cleandoc(RedefiningStaticFieldAsDynamic._DESC),
"see": RedefiningStaticFieldAsDynamic._URL,
},
rule="PEP 621",
)
return pyproject
def validate_include_depenency(pyproject: T) -> T:
dependency_groups = pyproject.get("dependency-groups", {})
for key, value in dependency_groups.items():
for each in value:
if (
isinstance(each, dict)
and (include_group := each.get("include-group"))
and include_group not in dependency_groups
):
raise IncludedDependencyGroupMustExist(
message=f"The included dependency group {include_group} doesn't exist",
value=each,
name=f"data.dependency_groups.{key}",
definition={
"description": cleandoc(IncludedDependencyGroupMustExist._DESC),
"see": IncludedDependencyGroupMustExist._URL,
},
rule="PEP 735",
)
# TODO: check for `include-group` cycles (can be conditional to graphlib)
return pyproject
EXTRA_VALIDATIONS = (validate_project_dynamic, validate_include_depenency)
| IncludedDependencyGroupMustExist |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/inspection.py | {
"start": 2495,
"end": 5051
} | class ____(Protocol[_TCov]):
"""a protocol defining a method that's used when an instance is
passed to inspect().
"""
def _sa_inspect_instance(self) -> _TCov: ...
@overload
def inspect(
subject: Type[_InspectableTypeProtocol[_IN]], raiseerr: bool = True
) -> _IN: ...
@overload
def inspect(
subject: _InspectableProtocol[_IN], raiseerr: bool = True
) -> _IN: ...
@overload
def inspect(subject: Inspectable[_IN], raiseerr: bool = True) -> _IN: ...
@overload
def inspect(subject: Any, raiseerr: Literal[False] = ...) -> Optional[Any]: ...
@overload
def inspect(subject: Any, raiseerr: bool = True) -> Any: ...
def inspect(subject: Any, raiseerr: bool = True) -> Any:
"""Produce an inspection object for the given target.
The returned value in some cases may be the
same object as the one given, such as if a
:class:`_orm.Mapper` object is passed. In other
cases, it will be an instance of the registered
inspection type for the given object, such as
if an :class:`_engine.Engine` is passed, an
:class:`_reflection.Inspector` object is returned.
:param subject: the subject to be inspected.
:param raiseerr: When ``True``, if the given subject
does not
correspond to a known SQLAlchemy inspected type,
:class:`sqlalchemy.exc.NoInspectionAvailable`
is raised. If ``False``, ``None`` is returned.
"""
type_ = type(subject)
for cls in type_.__mro__:
if cls in _registrars:
reg = _registrars.get(cls, None)
if reg is None:
continue
elif reg is True:
return subject
ret = reg(subject)
if ret is not None:
return ret
else:
reg = ret = None
if raiseerr and (reg is None or ret is None):
raise exc.NoInspectionAvailable(
"No inspection system is "
"available for object of type %s" % type_
)
return ret
def _inspects(
*types: Type[Any],
) -> Callable[[_F], _F]:
def decorate(fn_or_cls: _F) -> _F:
for type_ in types:
if type_ in _registrars:
raise AssertionError("Type %s is already registered" % type_)
_registrars[type_] = fn_or_cls
return fn_or_cls
return decorate
_TT = TypeVar("_TT", bound="Type[Any]")
def _self_inspects(cls: _TT) -> _TT:
if cls in _registrars:
raise AssertionError("Type %s is already registered" % cls)
_registrars[cls] = True
return cls
| _InspectableProtocol |
python | tensorflow__tensorflow | tensorflow/python/autograph/utils/tensors_test.py | {
"start": 1058,
"end": 2605
} | class ____(test.TestCase):
def _simple_tensor_array(self):
return tensor_array_ops.TensorArray(dtypes.int32, size=3)
def _simple_tensor_list(self):
return list_ops.empty_tensor_list(
element_shape=constant_op.constant([1]), element_dtype=dtypes.int32)
def _simple_list_of_tensors(self):
return [constant_op.constant(1), constant_op.constant(2)]
def test_is_tensor_array(self):
self.assertTrue(tensors.is_tensor_array(self._simple_tensor_array()))
self.assertFalse(tensors.is_tensor_array(self._simple_tensor_list()))
self.assertFalse(tensors.is_tensor_array(constant_op.constant(1)))
self.assertFalse(tensors.is_tensor_array(self._simple_list_of_tensors()))
self.assertFalse(tensors.is_tensor_array(None))
def test_is_tensor_list(self):
self.assertFalse(tensors.is_tensor_list(self._simple_tensor_array()))
self.assertTrue(tensors.is_tensor_list(self._simple_tensor_list()))
self.assertFalse(tensors.is_tensor_list(constant_op.constant(1)))
self.assertFalse(tensors.is_tensor_list(self._simple_list_of_tensors()))
self.assertFalse(tensors.is_tensor_list(None))
def is_range_tensor(self):
self.assertTrue(tensors.is_range_tensor(math_ops.range(1)))
self.assertTrue(tensors.is_range_tensor(math_ops.range(1, 2)))
self.assertTrue(tensors.is_range_tensor(math_ops.range(1, 2, 3)))
self.assertFalse(tensors.is_range_tensor(None))
self.assertFalse(tensors.is_range_tensor(constant_op.constant(range(1))))
if __name__ == '__main__':
test.main()
| TensorsTest |
python | patrick-kidger__equinox | equinox/internal/_noinline.py | {
"start": 11163,
"end": 15289
} | class ____(Module):
dynamic_index: Int[Array | np.ndarray, ""]
abstract_fn: Callable = field(static=True)
dynamic_fn: Any
@property
def __wrapped__(self):
return self.abstract_fn
def __call__(self, *args, **kwargs):
return filter_primitive_bind(
noinline_p,
self.dynamic_index,
self.abstract_fn,
[_impl_transform],
(self.dynamic_fn, args, kwargs),
)
def noinline(fn: Callable, abstract_fn: Callable | None = None) -> Callable: # pyright: ignore
"""Marks a function as not being inlined into a larger computation.
This can help to reduce compile time at the expense of increased runtime.
`fn` can be any callable `PyTree[Any] -> PyTree[Any]`. In addition `fn`
itself may be any (callable) PyTree; any JAX arrays in `fn`'s PyTree are
also treated as inputs to the noinline'd computation.
`abstract_fn` determines the shapes/dtypes/pytrees of the output. (And must
return results consistent with `fn`.) If `fn` is called as
`fn(*args, **kwargs)`, then `abstract_fn` is called as
```python
eqx.filter_eval_shape(
abstract_fn, eqx.filter(fn, eqx.is_array), *args, **kwargs)
)
```
If not passed then `abstract_fn` is automatically constructed from `fn`.
Specifying it is useful as noinline'd functions sharing the same
`abstract_fn` may be substituted for each other without recompiling the
enclosing computation graph; see the second example below.
!!! Example
```python
@noinline
def f(x, y):
print("Tracing!")
return x + y
@jax.jit
def g(x, y):
a = f(x, jnp.array(1))
b = f(x, jnp.array(2))
c = f(y, jnp.array(1))
d = f(y, jnp.array(2))
return a + b + c + d
g(1, 1)
```
In this example, `Tracing!` is printed twice. Once just for abstract
evaluation (only) to figure out out its return shape in `f`'s
computation graph. (Which is computationally cheap.) And then once
when it is actually compiled as part of its own computation graph.
(Which is computationally expensive.)
Without the `noinline`, we would see `Tracing!` printed four times:
once for each call, and with it being compiled each time. (Every time
being computationally expensive.)
Note how the `1` and the `2` are wrapped in arrays. If they were still
Python scalars then they would be treated as static arguments, which
will lead to recompilations of `f`. (E.g. what if there was an
`if y == 1` command in there?)
!!! Example
```python
def abstract(_, x, y):
return jnp.broadcast_arrays(x, y)[0]
def f(x, y):
print("Compiling f!")
return x + y
def g(x, y):
print("Compiling g!")
return x * y
f = noinline(f, abstract)
g = noinline(g, abstract)
@jax.jit
def call(fn, x, y):
print("Compiling call!")
return fn(x, y)
call(f, 1, 1) # Compiling call! Compiling f!
call(g, 1, 1) # Compiling g!
```
In this example, we see how noinline'd functions with the same
`abstract_fn` may be passed as inputs to the main call graph,
and swapped without needing to recompile the main call graph.
"""
dynamic_fn, static_fn = hashable_partition(fn, is_array)
if abstract_fn is None:
def abstract_fn(__dynamic_fn, *args, **kwargs):
_fn = hashable_combine(__dynamic_fn, static_fn)
return _fn(*args, **kwargs)
try:
dynamic_index = _fn_to_index[static_fn]
except KeyError:
dynamic_index = len(_index_to_fn)
_fn_to_index[static_fn] = dynamic_index
_index_to_fn.append(static_fn)
dynamic_index = np.array(dynamic_index)
noinline_fn = _NoInlineWrapper(dynamic_index, abstract_fn, dynamic_fn)
return module_update_wrapper(noinline_fn)
| _NoInlineWrapper |
python | spack__spack | lib/spack/spack/test/repo.py | {
"start": 12593,
"end": 18053
} | class ____:
def test_creation_from_string(self, mock_test_cache):
repo = spack.repo.RepoPath.from_descriptors(
spack.repo.RepoDescriptors(
{
"builtin_mock": spack.repo.LocalRepoDescriptor(
"builtin_mock", spack.paths.mock_packages_path
)
}
),
cache=mock_test_cache,
)
assert len(repo.repos) == 1
assert repo.by_namespace["builtin_mock"] is repo.repos[0]
def test_get_repo(self, mock_test_cache):
repo = spack.repo.RepoPath.from_descriptors(
spack.repo.RepoDescriptors(
{
"builtin_mock": spack.repo.LocalRepoDescriptor(
"builtin_mock", spack.paths.mock_packages_path
)
}
),
cache=mock_test_cache,
)
# builtin_mock is there
assert repo.get_repo("builtin_mock") is repo.repos[0]
# foo is not there, raise
with pytest.raises(spack.repo.UnknownNamespaceError):
repo.get_repo("foo")
def test_parse_package_api_version():
"""Test that we raise an error if a repository has a version that is not supported."""
# valid version
assert spack.repo._parse_package_api_version(
{"api": "v1.2"}, min_api=(1, 0), max_api=(2, 3)
) == (1, 2)
# too new and too old
with pytest.raises(
spack.repo.BadRepoError,
match=r"Package API v2.4 is not supported .* \(must be between v1.0 and v2.3\)",
):
spack.repo._parse_package_api_version({"api": "v2.4"}, min_api=(1, 0), max_api=(2, 3))
with pytest.raises(
spack.repo.BadRepoError,
match=r"Package API v0.9 is not supported .* \(must be between v1.0 and v2.3\)",
):
spack.repo._parse_package_api_version({"api": "v0.9"}, min_api=(1, 0), max_api=(2, 3))
# default to v1.0 if not specified
assert spack.repo._parse_package_api_version({}, min_api=(1, 0), max_api=(2, 3)) == (1, 0)
# if v1.0 support is dropped we should also raise
with pytest.raises(
spack.repo.BadRepoError,
match=r"Package API v1.0 is not supported .* \(must be between v2.0 and v2.3\)",
):
spack.repo._parse_package_api_version({}, min_api=(2, 0), max_api=(2, 3))
# finally test invalid input
with pytest.raises(spack.repo.BadRepoError, match="Invalid Package API version"):
spack.repo._parse_package_api_version({"api": "v2"}, min_api=(1, 0), max_api=(3, 3))
with pytest.raises(spack.repo.BadRepoError, match="Invalid Package API version"):
spack.repo._parse_package_api_version({"api": 2.0}, min_api=(1, 0), max_api=(3, 3))
def test_repo_package_api_version(tmp_path: pathlib.Path):
"""Test that we can specify the API version of a repository."""
(tmp_path / "example" / "packages").mkdir(parents=True)
(tmp_path / "example" / "repo.yaml").write_text(
"""\
repo:
namespace: example
"""
)
cache = spack.util.file_cache.FileCache(tmp_path / "cache")
assert spack.repo.Repo(str(tmp_path / "example"), cache=cache).package_api == (1, 0)
def test_mod_to_pkg_name_and_reverse():
# In repo v1 the dirname/module name is the package name
assert spack.util.naming.pkg_dir_to_pkg_name("zlib_ng", package_api=(1, 0)) == "zlib_ng"
assert (
spack.util.naming.pkg_dir_to_pkg_name("_3example_4", package_api=(1, 0)) == "_3example_4"
)
assert spack.util.naming.pkg_name_to_pkg_dir("zlib_ng", package_api=(1, 0)) == "zlib_ng"
assert (
spack.util.naming.pkg_name_to_pkg_dir("_3example_4", package_api=(1, 0)) == "_3example_4"
)
# In repo v2 there is a 1-1 mapping between module and package names
assert spack.util.naming.pkg_dir_to_pkg_name("_3example_4", package_api=(2, 0)) == "3example-4"
assert spack.util.naming.pkg_dir_to_pkg_name("zlib_ng", package_api=(2, 0)) == "zlib-ng"
assert spack.util.naming.pkg_name_to_pkg_dir("zlib-ng", package_api=(2, 0)) == "zlib_ng"
assert spack.util.naming.pkg_name_to_pkg_dir("3example-4", package_api=(2, 0)) == "_3example_4"
# reserved names need an underscore
assert spack.util.naming.pkg_dir_to_pkg_name("_finally", package_api=(2, 0)) == "finally"
assert spack.util.naming.pkg_dir_to_pkg_name("_assert", package_api=(2, 0)) == "assert"
assert spack.util.naming.pkg_name_to_pkg_dir("finally", package_api=(2, 0)) == "_finally"
assert spack.util.naming.pkg_name_to_pkg_dir("assert", package_api=(2, 0)) == "_assert"
# reserved names are case sensitive, so true/false/none are ok
assert spack.util.naming.pkg_dir_to_pkg_name("true", package_api=(2, 0)) == "true"
assert spack.util.naming.pkg_dir_to_pkg_name("none", package_api=(2, 0)) == "none"
assert spack.util.naming.pkg_name_to_pkg_dir("true", package_api=(2, 0)) == "true"
assert spack.util.naming.pkg_name_to_pkg_dir("none", package_api=(2, 0)) == "none"
def test_repo_v2_invalid_module_name(tmp_path: pathlib.Path, capsys):
# Create a repo with a v2 structure
root, _ = spack.repo.create_repo(str(tmp_path), namespace="repo_1", package_api=(2, 0))
repo_dir = pathlib.Path(root)
# Create two invalid module names
(repo_dir / "packages" / "zlib-ng").mkdir()
(repo_dir / "packages" / "zlib-ng" / "package.py").write_text(
"""
from spack.package import PackageBase
| TestRepoPath |
python | plotly__plotly.py | plotly/graph_objs/_ohlc.py | {
"start": 215,
"end": 64394
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "ohlc"
_valid_props = {
"close",
"closesrc",
"customdata",
"customdatasrc",
"decreasing",
"high",
"highsrc",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"increasing",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"line",
"low",
"lowsrc",
"meta",
"metasrc",
"name",
"opacity",
"open",
"opensrc",
"selectedpoints",
"showlegend",
"stream",
"text",
"textsrc",
"tickwidth",
"type",
"uid",
"uirevision",
"visible",
"x",
"xaxis",
"xcalendar",
"xhoverformat",
"xperiod",
"xperiod0",
"xperiodalignment",
"xsrc",
"yaxis",
"yhoverformat",
"zorder",
}
@property
def close(self):
"""
Sets the close values.
The 'close' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["close"]
@close.setter
def close(self, val):
self["close"] = val
@property
def closesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `close`.
The 'closesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["closesrc"]
@closesrc.setter
def closesrc(self, val):
self["closesrc"] = val
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def decreasing(self):
"""
The 'decreasing' property is an instance of Decreasing
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Decreasing`
- A dict of string/value properties that will be passed
to the Decreasing constructor
Returns
-------
plotly.graph_objs.ohlc.Decreasing
"""
return self["decreasing"]
@decreasing.setter
def decreasing(self, val):
self["decreasing"] = val
@property
def high(self):
"""
Sets the high values.
The 'high' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["high"]
@high.setter
def high(self, val):
self["high"] = val
@property
def highsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `high`.
The 'highsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["highsrc"]
@highsrc.setter
def highsrc(self, val):
self["highsrc"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.ohlc.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Finally, the template string has access to variables `open`,
`high`, `low` and `close`. Anything contained in tag `<extra>`
is displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def increasing(self):
"""
The 'increasing' property is an instance of Increasing
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Increasing`
- A dict of string/value properties that will be passed
to the Increasing constructor
Returns
-------
plotly.graph_objs.ohlc.Increasing
"""
return self["increasing"]
@increasing.setter
def increasing(self, val):
self["increasing"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.ohlc.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.ohlc.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def low(self):
"""
Sets the low values.
The 'low' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["low"]
@low.setter
def low(self, val):
self["low"] = val
@property
def lowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `low`.
The 'lowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["lowsrc"]
@lowsrc.setter
def lowsrc(self, val):
self["lowsrc"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def open(self):
"""
Sets the open values.
The 'open' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["open"]
@open.setter
def open(self, val):
self["open"] = val
@property
def opensrc(self):
"""
Sets the source reference on Chart Studio Cloud for `open`.
The 'opensrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opensrc"]
@opensrc.setter
def opensrc(self, val):
self["opensrc"] = val
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.ohlc.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def text(self):
"""
Sets hover text elements associated with each sample point. If
a single string, the same string appears over all the data
points. If an array of string, the items are mapped in order to
this trace's sample points.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def tickwidth(self):
"""
Sets the width of the open/close tick marks relative to the "x"
minimal interval.
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, 0.5]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def x(self):
"""
Sets the x coordinates. If absent, linear coordinate will be
generated.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
@property
def xcalendar(self):
"""
Sets the calendar system to use with `x` date data.
The 'xcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["xcalendar"]
@xcalendar.setter
def xcalendar(self, val):
self["xcalendar"] = val
@property
def xhoverformat(self):
"""
Sets the hover text formatting rulefor `x` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `xaxis.hoverformat`.
The 'xhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["xhoverformat"]
@xhoverformat.setter
def xhoverformat(self, val):
self["xhoverformat"] = val
@property
def xperiod(self):
"""
Only relevant when the axis `type` is "date". Sets the period
positioning in milliseconds or "M<n>" on the x axis. Special
values in the form of "M<n>" could be used to declare the
number of months. In this case `n` must be a positive integer.
The 'xperiod' property accepts values of any type
Returns
-------
Any
"""
return self["xperiod"]
@xperiod.setter
def xperiod(self, val):
self["xperiod"] = val
@property
def xperiod0(self):
"""
Only relevant when the axis `type` is "date". Sets the base for
period positioning in milliseconds or date string on the x0
axis. When `x0period` is round number of weeks, the `x0period0`
by default would be on a Sunday i.e. 2000-01-02, otherwise it
would be at 2000-01-01.
The 'xperiod0' property accepts values of any type
Returns
-------
Any
"""
return self["xperiod0"]
@xperiod0.setter
def xperiod0(self, val):
self["xperiod0"] = val
@property
def xperiodalignment(self):
"""
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
The 'xperiodalignment' property is an enumeration that may be specified as:
- One of the following enumeration values:
['start', 'middle', 'end']
Returns
-------
Any
"""
return self["xperiodalignment"]
@xperiodalignment.setter
def xperiodalignment(self, val):
self["xperiodalignment"] = val
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `x`.
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
@property
def yhoverformat(self):
"""
Sets the hover text formatting rulefor `y` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `yaxis.hoverformat`.
The 'yhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["yhoverformat"]
@yhoverformat.setter
def yhoverformat(self, val):
self["yhoverformat"] = val
@property
def zorder(self):
"""
Sets the layer on which this trace is displayed, relative to
other SVG traces on the same subplot. SVG traces with higher
`zorder` appear in front of those with lower `zorder`.
The 'zorder' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
Returns
-------
int
"""
return self["zorder"]
@zorder.setter
def zorder(self, val):
self["zorder"] = val
@property
def type(self):
return self._props["type"]
@property
def _prop_descriptions(self):
return """\
close
Sets the close values.
closesrc
Sets the source reference on Chart Studio Cloud for
`close`.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
decreasing
:class:`plotly.graph_objects.ohlc.Decreasing` instance
or dict with compatible properties
high
Sets the high values.
highsrc
Sets the source reference on Chart Studio Cloud for
`high`.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.ohlc.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `open`, `high`, `low` and `close`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
increasing
:class:`plotly.graph_objects.ohlc.Increasing` instance
or dict with compatible properties
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.ohlc.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.ohlc.Line` instance or
dict with compatible properties
low
Sets the low values.
lowsrc
Sets the source reference on Chart Studio Cloud for
`low`.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
opensrc
Sets the source reference on Chart Studio Cloud for
`open`.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.ohlc.Stream` instance or
dict with compatible properties
text
Sets hover text elements associated with each sample
point. If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to this trace's sample points.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
tickwidth
Sets the width of the open/close tick marks relative to
the "x" minimal interval.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the x coordinates. If absent, linear coordinate
will be generated.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
"""
def __init__(
self,
arg=None,
close=None,
closesrc=None,
customdata=None,
customdatasrc=None,
decreasing=None,
high=None,
highsrc=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
increasing=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
line=None,
low=None,
lowsrc=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
open=None,
opensrc=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textsrc=None,
tickwidth=None,
uid=None,
uirevision=None,
visible=None,
x=None,
xaxis=None,
xcalendar=None,
xhoverformat=None,
xperiod=None,
xperiod0=None,
xperiodalignment=None,
xsrc=None,
yaxis=None,
yhoverformat=None,
zorder=None,
**kwargs,
):
"""
Construct a new Ohlc object
The ohlc (short for Open-High-Low-Close) is a style of
financial chart describing open, high, low and close for a
given `x` coordinate (most likely time). The tip of the lines
represent the `low` and `high` values and the horizontal
segments represent the `open` and `close` values. Sample points
where the close value is higher (lower) then the open value are
called increasing (decreasing). By default, increasing items
are drawn in green whereas decreasing are drawn in red.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Ohlc`
close
Sets the close values.
closesrc
Sets the source reference on Chart Studio Cloud for
`close`.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
decreasing
:class:`plotly.graph_objects.ohlc.Decreasing` instance
or dict with compatible properties
high
Sets the high values.
highsrc
Sets the source reference on Chart Studio Cloud for
`high`.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.ohlc.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `open`, `high`, `low` and `close`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
increasing
:class:`plotly.graph_objects.ohlc.Increasing` instance
or dict with compatible properties
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.ohlc.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.ohlc.Line` instance or
dict with compatible properties
low
Sets the low values.
lowsrc
Sets the source reference on Chart Studio Cloud for
`low`.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
opensrc
Sets the source reference on Chart Studio Cloud for
`open`.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.ohlc.Stream` instance or
dict with compatible properties
text
Sets hover text elements associated with each sample
point. If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to this trace's sample points.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
tickwidth
Sets the width of the open/close tick marks relative to
the "x" minimal interval.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the x coordinates. If absent, linear coordinate
will be generated.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
Returns
-------
Ohlc
"""
super().__init__("ohlc")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Ohlc
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Ohlc`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("close", arg, close)
self._set_property("closesrc", arg, closesrc)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("decreasing", arg, decreasing)
self._set_property("high", arg, high)
self._set_property("highsrc", arg, highsrc)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("increasing", arg, increasing)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("line", arg, line)
self._set_property("low", arg, low)
self._set_property("lowsrc", arg, lowsrc)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("open", arg, open)
self._set_property("opensrc", arg, opensrc)
self._set_property("selectedpoints", arg, selectedpoints)
self._set_property("showlegend", arg, showlegend)
self._set_property("stream", arg, stream)
self._set_property("text", arg, text)
self._set_property("textsrc", arg, textsrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("visible", arg, visible)
self._set_property("x", arg, x)
self._set_property("xaxis", arg, xaxis)
self._set_property("xcalendar", arg, xcalendar)
self._set_property("xhoverformat", arg, xhoverformat)
self._set_property("xperiod", arg, xperiod)
self._set_property("xperiod0", arg, xperiod0)
self._set_property("xperiodalignment", arg, xperiodalignment)
self._set_property("xsrc", arg, xsrc)
self._set_property("yaxis", arg, yaxis)
self._set_property("yhoverformat", arg, yhoverformat)
self._set_property("zorder", arg, zorder)
self._props["type"] = "ohlc"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Ohlc |
python | python__mypy | mypyc/rt_subtype.py | {
"start": 1037,
"end": 2448
class ____(RTypeVisitor[bool]):
    """Decide whether `left` is a runtime subtype of `right`.

    A few special cases, such as `right` being 'object', are handled in
    is_runtime_subtype and therefore do not need to be covered here.
    """

    def __init__(self, right: RType) -> None:
        self.right = right

    def visit_rinstance(self, left: RInstance) -> bool:
        return is_subtype(left, self.right)

    def visit_runion(self, left: RUnion) -> bool:
        # A union can only be a runtime subtype of a boxed target.
        return not self.right.is_unboxed and is_subtype(left, self.right)

    def visit_rprimitive(self, left: RPrimitive) -> bool:
        # short int narrows to int, bit narrows to bool; otherwise the
        # primitives must be the very same singleton.
        return (
            (is_short_int_rprimitive(left) and is_int_rprimitive(self.right))
            or (is_bit_rprimitive(left) and is_bool_rprimitive(self.right))
            or left is self.right
        )

    def visit_rtuple(self, left: RTuple) -> bool:
        right = self.right
        if not isinstance(right, RTuple):
            return False
        if len(left.types) != len(right.types):
            return False
        # Element-wise runtime subtyping over same-length tuples.
        return all(
            is_runtime_subtype(lt, rt) for lt, rt in zip(left.types, right.types)
        )

    def visit_rstruct(self, left: RStruct) -> bool:
        # Structs are compared nominally, by name only.
        return isinstance(self.right, RStruct) and self.right.name == left.name

    def visit_rarray(self, left: RArray) -> bool:
        return left == self.right

    def visit_rvoid(self, left: RVoid) -> bool:
        return isinstance(self.right, RVoid)
| RTSubtypeVisitor |
python | sqlalchemy__sqlalchemy | test/ext/test_hybrid.py | {
"start": 22879,
"end": 24023
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def _fixture(self, assignable):
Base = declarative_base()
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
_value = Column("value", String)
@hybrid.hybrid_property
def value(self):
return self._value + "18"
if assignable:
@value.setter
def value(self, v):
self._value = v + "5"
return A
def test_nonassignable(self):
A = self._fixture(False)
a1 = A(_value=5)
assert_raises_message(
AttributeError, "can't set attribute", setattr, a1, "value", 10
)
def test_nondeletable(self):
A = self._fixture(False)
a1 = A(_value=5)
assert_raises_message(
AttributeError, "can't delete attribute", delattr, a1, "value"
)
def test_set_get(self):
A = self._fixture(True)
a1 = A(value="5")
eq_(a1.value, "5518")
eq_(a1._value, "55")
| PropertyValueTest |
python | pypa__pipenv | pipenv/patched/pip/_internal/index/sources.py | {
"start": 829,
"end": 1409
} | class ____:
@property
def link(self) -> Optional[Link]:
"""Returns the underlying link, if there's one."""
raise NotImplementedError()
def page_candidates(self) -> FoundCandidates:
"""Candidates found by parsing an archive listing HTML file."""
raise NotImplementedError()
def file_links(self) -> FoundLinks:
"""Links found by specifying archives directly."""
raise NotImplementedError()
def _is_html_file(file_url: str) -> bool:
return mimetypes.guess_type(file_url, strict=False)[0] == "text/html"
| LinkSource |
python | huggingface__transformers | tests/models/dia/test_processing_dia.py | {
"start": 1291,
"end": 11393
} | class ____(unittest.TestCase):
def setUp(self):
self.checkpoint = "AntonV/Dia-1.6B"
self.audio_tokenizer_checkpoint = "descript/dac_44khz"
self.tmpdirname = tempfile.mkdtemp()
# Audio tokenizer is a bigger model so we will reuse this if possible
self.processor = DiaProcessor(
tokenizer=self.get_tokenizer(),
feature_extractor=self.get_feature_extractor(),
audio_tokenizer=self.get_audio_tokenizer(),
)
# Default audio values based on Dia and Dac
self.pad_id = 1025
self.bos_id = 1026
self.dac_chunk_len = 512
self.delay_pattern = [0, 8, 9, 10, 11, 12, 13, 14, 15]
def get_tokenizer(self, **kwargs):
return DiaTokenizer.from_pretrained(self.checkpoint, **kwargs)
def get_feature_extractor(self, **kwargs):
return DiaFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
def get_audio_tokenizer(self, **kwargs):
return DacModel.from_pretrained(self.audio_tokenizer_checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
del self.processor
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
audio_tokenizer = self.get_audio_tokenizer()
processor = DiaProcessor(
tokenizer=tokenizer, feature_extractor=feature_extractor, audio_tokenizer=audio_tokenizer
)
processor.save_pretrained(self.tmpdirname)
processor = DiaProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, DiaTokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, DiaFeatureExtractor)
self.assertEqual(processor.audio_tokenizer.__class__.__name__, audio_tokenizer.__class__.__name__)
self.assertEqual(processor.audio_tokenizer.name_or_path, audio_tokenizer.name_or_path)
self.assertTrue(check_models_equal(processor.audio_tokenizer, audio_tokenizer))
self.assertIsInstance(processor.audio_tokenizer, DacModel)
def test_save_load_pretrained_additional_features(self):
processor = DiaProcessor(
tokenizer=self.get_tokenizer(),
feature_extractor=self.get_feature_extractor(),
audio_tokenizer=self.get_audio_tokenizer(),
)
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer()
feature_extractor_add_kwargs = self.get_feature_extractor()
audio_tokenizer_add_kwargs = self.get_audio_tokenizer()
processor = DiaProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, DiaTokenizer)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, DiaFeatureExtractor)
self.assertEqual(processor.audio_tokenizer.__class__.__name__, audio_tokenizer_add_kwargs.__class__.__name__)
self.assertEqual(processor.audio_tokenizer.name_or_path, audio_tokenizer_add_kwargs.name_or_path)
self.assertTrue(check_models_equal(processor.audio_tokenizer, audio_tokenizer_add_kwargs))
self.assertIsInstance(processor.audio_tokenizer, DacModel)
def test_tokenize(self):
tokenizer = self.get_tokenizer()
random_text = ["This is a processing test for tokenization", "[S1] Dia template style [S2] Nice"]
input_tokenizer = tokenizer(random_text, padding=True, return_tensors="pt")
input_processor = self.processor(random_text)
for key in input_tokenizer:
self.assertTrue((input_tokenizer[key] == input_processor[key]).all())
def test_no_audio(self):
random_text = ["Dummy Input"] * 2
input_processor = self.processor(random_text)
audio_tokens, audio_mask = input_processor["decoder_input_ids"], input_processor["decoder_attention_mask"]
# full mask with +1 for bos
self.assertTrue(audio_mask.sum() == (max(self.delay_pattern) + 1) * len(random_text))
self.assertTrue(
audio_tokens.shape
== (
len(random_text),
max(self.delay_pattern) + 1,
len(self.delay_pattern),
)
)
for channel_idx, delay in enumerate(self.delay_pattern):
expected_sequence = torch.ones(size=(audio_tokens.shape[:-1])) * self.pad_id
expected_sequence[:, : delay + 1] = self.bos_id
self.assertTrue((audio_tokens[..., channel_idx] == expected_sequence).all())
def test_audio(self):
audio_tokenizer = self.get_audio_tokenizer()
feature_extractor = self.get_feature_extractor()
random_text = ["Dummy Input"] * 2
# Dac only starts accepting audio from a certain length (ensured via >=1024)
raw_speeches = [np.random.rand(2048).astype(np.float32), np.random.rand(1024).astype(np.float32)]
input_processor = self.processor(random_text, raw_speeches)
audio_tokens, audio_mask = input_processor["decoder_input_ids"], input_processor["decoder_attention_mask"]
sequence_len = audio_mask.shape[1]
for batch_idx, speech in enumerate(raw_speeches):
raw_audio = feature_extractor(speech, return_tensors="pt")["input_values"]
codebooks = audio_tokenizer(raw_audio).audio_codes.transpose(1, 2)
pad_len = sequence_len - audio_mask.sum(dim=-1)[batch_idx]
for channel_idx, delay in enumerate(self.delay_pattern):
# Left padding filled bos, right padding (delay) are pad
start_idx = pad_len + delay + 1
end_idx = start_idx + codebooks.shape[1]
encoded_sequence = audio_tokens[batch_idx, :, channel_idx]
expected_sequence = torch.ones(size=(sequence_len,)) * self.pad_id
expected_sequence[:start_idx] = self.bos_id
expected_sequence[start_idx:end_idx] = codebooks[0, :, channel_idx]
self.assertTrue((encoded_sequence == expected_sequence).all())
# Just to make sure the masking correctly only ignores bos tokens
self.assertTrue((audio_tokens[~audio_mask.bool()] == self.bos_id).all())
@parameterized.expand([([1, 1],), ([1, 5],), ([2, 4, 6],)])
def test_decode_audio(self, audio_lens):
feature_extractor = self.get_feature_extractor()
audio_tokenizer = self.get_audio_tokenizer()
random_text = ["Dummy Input"] * len(audio_lens)
raw_speeches = [np.random.rand(self.dac_chunk_len * l).astype(np.float32) for l in audio_lens]
# we need eos (given if training) to decode properly, also enforced via custom logits processor
input_processor = self.processor(random_text, raw_speeches, generation=False)
audio_tokens = input_processor["decoder_input_ids"]
decoded_speeches = self.processor.batch_decode(audio_tokens)
for batch_idx, speech in enumerate(raw_speeches):
raw_audio = feature_extractor(speech, return_tensors="pt")["input_values"]
codebooks = audio_tokenizer(raw_audio).audio_codes
decoded_audio = decoded_speeches[batch_idx]
expected_audio = audio_tokenizer.decode(audio_codes=codebooks).audio_values
self.assertTrue((expected_audio == decoded_audio).all())
self.assertTrue(decoded_speeches[batch_idx].shape[-1] == audio_lens[batch_idx] * self.dac_chunk_len)
@parameterized.expand([(1, 2, [0, 1, 4]), (2, 4, [1, 3, 2]), (4, 8, [0, 5, 7])])
def test_delay_in_audio(self, bsz, seq_len, delay_pattern):
# static functions which are crucial, hence we also test them here
build_indices_fn = DiaProcessor.build_indices
delay_fn = DiaProcessor.apply_audio_delay
bos, pad = -2, -1
num_channels = len(delay_pattern)
audio_input = torch.arange(bsz * seq_len * num_channels).view(bsz, seq_len, num_channels)
# imitate a delay mask with zeroes
audio_input = torch.cat([audio_input, torch.zeros(size=(bsz, max(delay_pattern), num_channels))], dim=1)
precomputed_idx = build_indices_fn(
bsz=bsz,
seq_len=seq_len + max(delay_pattern),
num_channels=num_channels,
delay_pattern=delay_pattern,
revert=False,
)
delayed_audio_out = delay_fn(
audio=audio_input,
pad_token_id=pad,
bos_token_id=bos,
precomputed_idx=precomputed_idx,
)
# every channel idx is shifted by delay_pattern[idx]
delayed_audio_res = audio_input.clone()
for idx, delay in enumerate(delay_pattern):
delayed_audio_res[:, :delay, idx] = bos
remaining_input = seq_len + max(delay_pattern) - delay
delayed_audio_res[:, delay:, idx] = audio_input[:, :remaining_input, idx]
self.assertTrue((delayed_audio_out == delayed_audio_res).all())
# we should get back to the original audio we had (when removing the delay pad)
bsz, new_seq_len, num_channels = delayed_audio_out.shape
precomputed_idx = build_indices_fn(
bsz=bsz,
seq_len=new_seq_len,
num_channels=num_channels,
delay_pattern=delay_pattern,
revert=True,
)
reverted_audio_out = delay_fn(
audio=delayed_audio_out,
pad_token_id=pad,
bos_token_id=bos,
precomputed_idx=precomputed_idx,
)
reverted_audio_res = audio_input.clone()[:, :seq_len]
self.assertTrue((reverted_audio_out[:, :seq_len] == reverted_audio_res).all())
| DiaProcessorTest |
python | scikit-learn__scikit-learn | sklearn/feature_selection/_variance_threshold.py | {
"start": 431,
"end": 4688
} | class ____(SelectorMixin, BaseEstimator):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, default=0
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
SelectFromModel: Meta-transformer for selecting features based on
importance weights.
SelectPercentile : Select features according to a percentile of the highest
scores.
SequentialFeatureSelector : Transformer that performs Sequential Feature
Selection.
Notes
-----
Allows NaN in the input.
Raises ValueError if no feature in X meets the variance threshold.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> from sklearn.feature_selection import VarianceThreshold
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
_parameter_constraints: dict = {
"threshold": [Interval(Real, 0, None, closed="left")]
}
def __init__(self, threshold=0.0):
self.threshold = threshold
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data from which to compute variances, where `n_samples` is
the number of samples and `n_features` is the number of features.
y : any, default=None
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(
self,
X,
accept_sparse=("csr", "csc"),
dtype=np.float64,
ensure_all_finite="allow-nan",
)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
if self.threshold == 0:
mins, maxes = min_max_axis(X, axis=0)
peak_to_peaks = maxes - mins
else:
self.variances_ = np.nanvar(X, axis=0)
if self.threshold == 0:
peak_to_peaks = np.ptp(X, axis=0)
if self.threshold == 0:
# Use peak-to-peak to avoid numeric precision issues
# for constant features
compare_arr = np.array([self.variances_, peak_to_peaks])
self.variances_ = np.nanmin(compare_arr, axis=0)
if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self)
return self.variances_ > self.threshold
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.allow_nan = True
tags.input_tags.sparse = True
return tags
| VarianceThreshold |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 24448,
"end": 24598
} | class ____(Interface):
"""*internal only* interface used as in a utility lookup to find
route-specific interfaces. Not an API."""
| IRouteRequest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.