language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__warehouse | tests/unit/admin/views/test_prohibited_email_domains.py | {
"start": 2106,
"end": 5278
} | class ____:
def test_no_email_domain(self, db_request):
db_request.method = "POST"
db_request.route_path = lambda a: "/admin/prohibited_email_domains/add/"
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST = {}
with pytest.raises(HTTPSeeOther):
views.add_prohibited_email_domain(db_request)
assert db_request.session.flash.calls == [
pretend.call("Email domain is required.", queue="error")
]
def test_invalid_domain(self, db_request):
db_request.method = "POST"
db_request.route_path = lambda a: "/admin/prohibited_email_domains/add/"
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST = {"email_domain": "invalid"}
with pytest.raises(HTTPSeeOther):
views.add_prohibited_email_domain(db_request)
assert db_request.session.flash.calls == [
pretend.call("Invalid domain name 'invalid'", queue="error")
]
def test_duplicate_domain(self, db_request):
existing_domain = ProhibitedEmailDomainFactory.create()
db_request.method = "POST"
db_request.route_path = lambda a: "/admin/prohibited_email_domains/add/"
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST = {"email_domain": existing_domain.domain}
with pytest.raises(HTTPSeeOther):
views.add_prohibited_email_domain(db_request)
assert db_request.session.flash.calls == [
pretend.call(
f"Email domain '{existing_domain.domain}' already exists.",
queue="error",
)
]
@pytest.mark.parametrize(
("input_domain", "expected_domain"),
[
("example.com", "example.com"),
("mail.example.co.uk", "example.co.uk"),
("https://example.com/", "example.com"),
],
)
def test_success(self, db_request, input_domain, expected_domain):
db_request.method = "POST"
db_request.route_path = lambda a: "/admin/prohibited_email_domains/list/"
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
db_request.POST = {
"email_domain": input_domain,
"is_mx_record": "on",
"comment": "testing",
}
response = views.add_prohibited_email_domain(db_request)
assert response.status_code == 303
assert response.headers["Location"] == "/admin/prohibited_email_domains/list/"
assert db_request.session.flash.calls == [
pretend.call("Prohibited email domain added.", queue="success")
]
query = db_request.db.query(ProhibitedEmailDomain).filter(
ProhibitedEmailDomain.domain == expected_domain
)
assert query.count() == 1
assert query.one().is_mx_record
assert query.one().comment == "testing"
| TestProhibitedEmailDomainsAdd |
python | PrefectHQ__prefect | src/prefect/events/schemas/events.py | {
"start": 6997,
"end": 9370
} | class ____(RootModel[Dict[str, Union[str, List[str]]]]):
def matches_every_resource(self) -> bool:
return len(self.root) == 0
def matches_every_resource_of_kind(self, prefix: str) -> bool:
if self.matches_every_resource():
return True
if len(self.root) == 1:
resource_id = self.root.get("prefect.resource.id")
if resource_id:
values = [resource_id] if isinstance(resource_id, str) else resource_id
return any(value == f"{prefix}.*" for value in values)
return False
def includes(self, candidates: Iterable[Resource]) -> bool:
if self.matches_every_resource():
return True
for candidate in candidates:
if self.matches(candidate):
return True
return False
def matches(self, resource: Resource) -> bool:
for label, expected in self.items():
value = resource.get(label)
if not any(matches(candidate, value) for candidate in expected):
return False
return True
def items(self) -> Iterable[Tuple[str, List[str]]]:
return [
(label, [value] if isinstance(value, str) else value)
for label, value in self.root.items()
]
def __contains__(self, key: str) -> bool:
return key in self.root
def __getitem__(self, key: str) -> List[str]:
value = self.root[key]
if not value:
return []
if not isinstance(value, list):
value = [value]
return value
def pop(
self, key: str, default: Optional[Union[str, List[str]]] = None
) -> Optional[List[str]]:
value = self.root.pop(key, default)
if not value:
return []
if not isinstance(value, list):
value = [value]
return value
def get(
self, key: str, default: Optional[Union[str, List[str]]] = None
) -> Optional[List[str]]:
value = self.root.get(key, default)
if not value:
return []
if not isinstance(value, list):
value = [value]
return value
def __len__(self) -> int:
return len(self.root)
def deepcopy(self) -> "ResourceSpecification":
return ResourceSpecification(root=copy.deepcopy(self.root))
| ResourceSpecification |
python | kamyu104__LeetCode-Solutions | Python/construct-string-from-binary-tree.py | {
"start": 29,
"end": 372
} | class ____(object):
def tree2str(self, t):
"""
:type t: TreeNode
:rtype: str
"""
if not t: return ""
s = str(t.val)
if t.left or t.right:
s += "(" + self.tree2str(t.left) + ")"
if t.right:
s += "(" + self.tree2str(t.right) + ")"
return s
| Solution |
python | scipy__scipy | scipy/linalg/tests/test_sketches.py | {
"start": 276,
"end": 4010
} | class ____:
"""
Testing the Clarkson Woodruff Transform
"""
# set seed for generating test matrices
rng = np.random.default_rng(1179103485)
# Test matrix parameters
n_rows = 2000
n_cols = 100
density = 0.1
# Sketch matrix dimensions
n_sketch_rows = 200
# Seeds to test with
seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431,
1302443994, 1521083269, 1501189312, 1126232505, 1533465685]
A_dense = rng.random((n_rows, n_cols))
A_csc = rand(
n_rows, n_cols, density=density, format='csc', random_state=rng,
)
A_csr = rand(
n_rows, n_cols, density=density, format='csr', random_state=rng,
)
A_coo = rand(
n_rows, n_cols, density=density, format='coo', random_state=rng,
)
# Collect the test matrices
test_matrices = [
A_dense, A_csc, A_csr, A_coo,
]
# Test vector with norm ~1
x = rng.random((n_rows, 1)) / np.sqrt(n_rows)
del rng # Not deterministic in pytest-run-parallel
def test_sketch_dimensions(self):
for A in self.test_matrices:
for seed in self.seeds:
# seed to ensure backwards compatibility post SPEC7
sketch = clarkson_woodruff_transform(
A, self.n_sketch_rows, seed=seed
)
assert_(sketch.shape == (self.n_sketch_rows, self.n_cols))
def test_seed_returns_identical_transform_matrix(self):
for seed in self.seeds:
S1 = cwt_matrix(
self.n_sketch_rows, self.n_rows, rng=seed
).toarray()
S2 = cwt_matrix(
self.n_sketch_rows, self.n_rows, rng=seed
).toarray()
assert_equal(S1, S2)
def test_seed_returns_identically(self):
for A in self.test_matrices:
for seed in self.seeds:
sketch1 = clarkson_woodruff_transform(
A, self.n_sketch_rows, rng=seed
)
sketch2 = clarkson_woodruff_transform(
A, self.n_sketch_rows, rng=seed
)
if issparse(sketch1):
sketch1 = sketch1.toarray()
if issparse(sketch2):
sketch2 = sketch2.toarray()
assert_equal(sketch1, sketch2)
def test_sketch_preserves_frobenius_norm(self):
# Given the probabilistic nature of the sketches
# we run the test multiple times and check that
# we pass all/almost all the tries.
n_errors = 0
for A in self.test_matrices:
if issparse(A):
true_norm = norm(A)
else:
true_norm = np.linalg.norm(A)
for seed in self.seeds:
sketch = clarkson_woodruff_transform(
A, self.n_sketch_rows, rng=seed,
)
if issparse(sketch):
sketch_norm = norm(sketch)
else:
sketch_norm = np.linalg.norm(sketch)
if np.abs(true_norm - sketch_norm) > 0.1 * true_norm:
n_errors += 1
assert_(n_errors == 0)
def test_sketch_preserves_vector_norm(self):
n_errors = 0
n_sketch_rows = int(np.ceil(2. / (0.01 * 0.5**2)))
true_norm = np.linalg.norm(self.x)
for seed in self.seeds:
sketch = clarkson_woodruff_transform(
self.x, n_sketch_rows, rng=seed,
)
sketch_norm = np.linalg.norm(sketch)
if np.abs(true_norm - sketch_norm) > 0.5 * true_norm:
n_errors += 1
assert_(n_errors == 0)
| TestClarksonWoodruffTransform |
python | pallets__werkzeug | examples/i18nurls/application.py | {
"start": 1066,
"end": 1107
} | class ____(BaseResponse):
pass
| Response |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0104_alter_httpheader_value.py | {
"start": 148,
"end": 505
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0103_alter_emailhook_project_alter_webhook_project"),
]
operations = [
migrations.AlterField(
model_name="httpheader",
name="value",
field=models.CharField(max_length=4096),
),
]
| Migration |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 278130,
"end": 281352
} | class ____(Request):
"""
Mark a task status as in_progress. Optionally allows to set the task's execution progress.
:param force: If not true, call fails if the task status is not 'not_started'
:type force: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "started"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is not 'not_started'",
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
force: Optional[bool] = False,
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
**kwargs: Any
) -> None:
super(StartedRequest, self).__init__(**kwargs)
self.force = force
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| StartedRequest |
python | chroma-core__chroma | chromadb/api/collection_configuration.py | {
"start": 7934,
"end": 16237
} | class ____(TypedDict, total=False):
hnsw: Optional[CreateHNSWConfiguration]
spann: Optional[CreateSpannConfiguration]
embedding_function: Optional[EmbeddingFunction] # type: ignore
def create_collection_configuration_from_legacy_collection_metadata(
metadata: CollectionMetadata,
) -> CreateCollectionConfiguration:
"""Create a CreateCollectionConfiguration from legacy collection metadata"""
return create_collection_configuration_from_legacy_metadata_dict(metadata)
def create_collection_configuration_from_legacy_metadata_dict(
metadata: Dict[str, Any],
) -> CreateCollectionConfiguration:
"""Create a CreateCollectionConfiguration from legacy collection metadata"""
old_to_new = {
"hnsw:space": "space",
"hnsw:construction_ef": "ef_construction",
"hnsw:M": "max_neighbors",
"hnsw:search_ef": "ef_search",
"hnsw:num_threads": "num_threads",
"hnsw:batch_size": "batch_size",
"hnsw:sync_threshold": "sync_threshold",
"hnsw:resize_factor": "resize_factor",
}
json_map = {}
for name, value in metadata.items():
if name in old_to_new:
json_map[old_to_new[name]] = value
hnsw_config = json_to_create_hnsw_configuration(json_map)
hnsw_config = populate_create_hnsw_defaults(hnsw_config)
return CreateCollectionConfiguration(hnsw=hnsw_config)
# TODO: make warnings prettier and add link to migration docs
def load_create_collection_configuration_from_json(
json_map: Dict[str, Any]
) -> CreateCollectionConfiguration:
if json_map.get("hnsw") is not None and json_map.get("spann") is not None:
raise ValueError("hnsw and spann cannot both be provided")
result = CreateCollectionConfiguration()
# Handle vector index configuration
if json_map.get("hnsw") is not None:
result["hnsw"] = json_to_create_hnsw_configuration(json_map["hnsw"])
if json_map.get("spann") is not None:
result["spann"] = json_to_create_spann_configuration(json_map["spann"])
# Handle embedding function configuration
if json_map.get("embedding_function") is not None:
ef_config = json_map["embedding_function"]
if ef_config["type"] == "legacy":
warnings.warn(
"legacy embedding function config",
DeprecationWarning,
stacklevel=2,
)
else:
ef = known_embedding_functions[ef_config["name"]]
result["embedding_function"] = ef.build_from_config(ef_config["config"])
return result
def create_collection_configuration_to_json_str(
config: CreateCollectionConfiguration,
metadata: Optional[CollectionMetadata] = None,
) -> str:
"""Convert a CreateCollection configuration to a JSON-serializable string"""
return json.dumps(create_collection_configuration_to_json(config, metadata))
# TODO: make warnings prettier and add link to migration docs
def create_collection_configuration_to_json(
config: CreateCollectionConfiguration,
metadata: Optional[CollectionMetadata] = None,
) -> Dict[str, Any]:
"""Convert a CreateCollection configuration to a JSON-serializable dict"""
ef_config: Dict[str, Any] | None = None
hnsw_config = config.get("hnsw")
spann_config = config.get("spann")
if hnsw_config is not None:
try:
hnsw_config = cast(CreateHNSWConfiguration, hnsw_config)
except Exception as e:
raise ValueError(f"not a valid hnsw config: {e}")
if spann_config is not None:
try:
spann_config = cast(CreateSpannConfiguration, spann_config)
except Exception as e:
raise ValueError(f"not a valid spann config: {e}")
if hnsw_config is not None and spann_config is not None:
raise ValueError("hnsw and spann cannot both be provided")
if config.get("embedding_function") is None:
ef = None
ef_config = {"type": "legacy"}
return {
"hnsw": hnsw_config,
"spann": spann_config,
"embedding_function": ef_config,
}
try:
ef = cast(EmbeddingFunction, config.get("embedding_function")) # type: ignore
if ef.is_legacy():
ef_config = {"type": "legacy"}
else:
# default space logic: if neither hnsw nor spann config is provided and metadata doesn't have space,
# then populate space from ef
# otherwise dont use default space from ef
# then validate the space afterwards based on the supported spaces of the embedding function,
# warn if space is not supported
if hnsw_config is None and spann_config is None:
if metadata is None or metadata.get("hnsw:space") is None:
# this populates space from ef if not provided in either config
hnsw_config = CreateHNSWConfiguration(space=ef.default_space())
# if hnsw config or spann config exists but space is not provided, populate it from ef
if hnsw_config is not None and hnsw_config.get("space") is None:
hnsw_config["space"] = ef.default_space()
if spann_config is not None and spann_config.get("space") is None:
spann_config["space"] = ef.default_space()
# Validate space compatibility with embedding function
if hnsw_config is not None:
if hnsw_config.get("space") not in ef.supported_spaces():
warnings.warn(
f"space {hnsw_config.get('space')} is not supported by {ef.name()}. Supported spaces: {ef.supported_spaces()}",
UserWarning,
stacklevel=2,
)
if spann_config is not None:
if spann_config.get("space") not in ef.supported_spaces():
warnings.warn(
f"space {spann_config.get('space')} is not supported by {ef.name()}. Supported spaces: {ef.supported_spaces()}",
UserWarning,
stacklevel=2,
)
# only validate space from metadata if config is not provided
if (
hnsw_config is None
and spann_config is None
and metadata is not None
and metadata.get("hnsw:space") is not None
):
if metadata.get("hnsw:space") not in ef.supported_spaces():
warnings.warn(
f"space {metadata.get('hnsw:space')} is not supported by {ef.name()}. Supported spaces: {ef.supported_spaces()}",
UserWarning,
stacklevel=2,
)
ef_config = {
"name": ef.name(),
"type": "known",
"config": ef.get_config(),
}
register_embedding_function(type(ef)) # type: ignore
except Exception as e:
warnings.warn(
f"legacy embedding function config: {e}",
DeprecationWarning,
stacklevel=2,
)
ef = None
ef_config = {"type": "legacy"}
return {
"hnsw": hnsw_config,
"spann": spann_config,
"embedding_function": ef_config,
}
def populate_create_hnsw_defaults(
config: CreateHNSWConfiguration, ef: Optional[EmbeddingFunction] = None # type: ignore
) -> CreateHNSWConfiguration:
"""Populate a CreateHNSW configuration with default values"""
if config.get("space") is None:
config["space"] = ef.default_space() if ef else "l2"
if config.get("ef_construction") is None:
config["ef_construction"] = 100
if config.get("max_neighbors") is None:
config["max_neighbors"] = 16
if config.get("ef_search") is None:
config["ef_search"] = 100
if config.get("num_threads") is None:
config["num_threads"] = cpu_count()
if config.get("batch_size") is None:
config["batch_size"] = 100
if config.get("sync_threshold") is None:
config["sync_threshold"] = 1000
if config.get("resize_factor") is None:
config["resize_factor"] = 1.2
return config
| CreateCollectionConfiguration |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_audio.py | {
"start": 16308,
"end": 16823
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.output_hidden_size,
2 * config.output_hidden_size,
config.adapter_kernel_size,
stride=config.adapter_stride,
padding=1,
)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = nn.functional.glu(hidden_states, dim=1)
return hidden_states
| Data2VecAudioAdapterLayer |
python | PyCQA__bandit | tests/unit/cli/test_main.py | {
"start": 903,
"end": 1838
} | class ____(testtools.TestCase):
def setUp(self):
super().setUp()
self.logger = logging.getLogger()
self.original_logger_handlers = self.logger.handlers
self.original_logger_level = self.logger.level
self.logger.handlers = []
def tearDown(self):
super().tearDown()
self.logger.handlers = self.original_logger_handlers
self.logger.level = self.original_logger_level
def test_init_logger(self):
# Test that a logger was properly initialized
bandit._init_logger()
self.assertIsNotNone(self.logger)
self.assertNotEqual(self.logger.handlers, [])
self.assertEqual(logging.INFO, self.logger.level)
def test_init_logger_debug_mode(self):
# Test that the logger's level was set at 'DEBUG'
bandit._init_logger(logging.DEBUG)
self.assertEqual(logging.DEBUG, self.logger.level)
| BanditCLIMainLoggerTests |
python | jina-ai__jina | tests/unit/jaml/parsers/executors/test_legacy.py | {
"start": 333,
"end": 390
} | class ____:
def __init__(self, c):
self.c = c
| C |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass7.py | {
"start": 184,
"end": 307
} | class ____(Protocol):
rgb: Tuple[int, int, int]
@abstractmethod
def intensity(self) -> int:
return 0
| RGB |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1368668,
"end": 1370450
} | class ____(sgqlc.types.Type, RepositoryNode, Node):
"""Represents a commit comment thread part of a pull request."""
__schema__ = github_schema
__field_names__ = ("comments", "commit", "path", "position", "pull_request")
comments = sgqlc.types.Field(
sgqlc.types.non_null(CommitCommentConnection),
graphql_name="comments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""The comments that exist in this thread.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
commit = sgqlc.types.Field(sgqlc.types.non_null(Commit), graphql_name="commit")
"""The commit the comments were made on."""
path = sgqlc.types.Field(String, graphql_name="path")
"""The file the comments were made on."""
position = sgqlc.types.Field(Int, graphql_name="position")
"""The position in the diff for the commit that the comment was made
on.
"""
pull_request = sgqlc.types.Field(sgqlc.types.non_null(PullRequest), graphql_name="pullRequest")
"""The pull request this commit comment thread belongs to"""
| PullRequestCommitCommentThread |
python | plotly__plotly.py | plotly/graph_objs/layout/scene/zaxis/_autorangeoptions.py | {
"start": 235,
"end": 5894
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene.zaxis"
_path_str = "layout.scene.zaxis.autorangeoptions"
_valid_props = {
"clipmax",
"clipmin",
"include",
"includesrc",
"maxallowed",
"minallowed",
}
@property
def clipmax(self):
"""
Clip autorange maximum if it goes beyond this value. Has no
effect when `autorangeoptions.maxallowed` is provided.
The 'clipmax' property accepts values of any type
Returns
-------
Any
"""
return self["clipmax"]
@clipmax.setter
def clipmax(self, val):
self["clipmax"] = val
@property
def clipmin(self):
"""
Clip autorange minimum if it goes beyond this value. Has no
effect when `autorangeoptions.minallowed` is provided.
The 'clipmin' property accepts values of any type
Returns
-------
Any
"""
return self["clipmin"]
@clipmin.setter
def clipmin(self, val):
self["clipmin"] = val
@property
def include(self):
"""
Ensure this value is included in autorange.
The 'include' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["include"]
@include.setter
def include(self, val):
self["include"] = val
@property
def includesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `include`.
The 'includesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["includesrc"]
@includesrc.setter
def includesrc(self, val):
self["includesrc"] = val
@property
def maxallowed(self):
"""
Use this value exactly as autorange maximum.
The 'maxallowed' property accepts values of any type
Returns
-------
Any
"""
return self["maxallowed"]
@maxallowed.setter
def maxallowed(self, val):
self["maxallowed"] = val
@property
def minallowed(self):
"""
Use this value exactly as autorange minimum.
The 'minallowed' property accepts values of any type
Returns
-------
Any
"""
return self["minallowed"]
@minallowed.setter
def minallowed(self, val):
self["minallowed"] = val
@property
def _prop_descriptions(self):
return """\
clipmax
Clip autorange maximum if it goes beyond this value.
Has no effect when `autorangeoptions.maxallowed` is
provided.
clipmin
Clip autorange minimum if it goes beyond this value.
Has no effect when `autorangeoptions.minallowed` is
provided.
include
Ensure this value is included in autorange.
includesrc
Sets the source reference on Chart Studio Cloud for
`include`.
maxallowed
Use this value exactly as autorange maximum.
minallowed
Use this value exactly as autorange minimum.
"""
def __init__(
self,
arg=None,
clipmax=None,
clipmin=None,
include=None,
includesrc=None,
maxallowed=None,
minallowed=None,
**kwargs,
):
"""
Construct a new Autorangeoptions object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.scene.z
axis.Autorangeoptions`
clipmax
Clip autorange maximum if it goes beyond this value.
Has no effect when `autorangeoptions.maxallowed` is
provided.
clipmin
Clip autorange minimum if it goes beyond this value.
Has no effect when `autorangeoptions.minallowed` is
provided.
include
Ensure this value is included in autorange.
includesrc
Sets the source reference on Chart Studio Cloud for
`include`.
maxallowed
Use this value exactly as autorange maximum.
minallowed
Use this value exactly as autorange minimum.
Returns
-------
Autorangeoptions
"""
super().__init__("autorangeoptions")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.zaxis.Autorangeoptions
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.zaxis.Autorangeoptions`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("clipmax", arg, clipmax)
self._set_property("clipmin", arg, clipmin)
self._set_property("include", arg, include)
self._set_property("includesrc", arg, includesrc)
self._set_property("maxallowed", arg, maxallowed)
self._set_property("minallowed", arg, minallowed)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Autorangeoptions |
python | getsentry__sentry | tests/sentry/api/bases/test_project.py | {
"start": 21913,
"end": 24242
} | class ____(ProjectPermissionBase):
def setUp(self) -> None:
super().setUp()
self.permission_cls = ProjectAndStaffPermission
def test_member_without_team_membership(self) -> None:
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="member")
# if `allow_joinleave` is True, members should be able to GET a project even if it has no teams
assert self.has_object_perm("GET", self.project, user=user)
assert not self.has_object_perm("POST", self.project, user=user)
assert not self.has_object_perm("PUT", self.project, user=user)
assert not self.has_object_perm("DELETE", self.project, user=user)
def test_superuser(self) -> None:
superuser = self.create_user(is_superuser=True)
self.login_as(user=superuser, superuser=True)
assert self.has_object_perm("GET", self.project, user=superuser, is_superuser=True)
assert self.has_object_perm("POST", self.project, user=superuser, is_superuser=True)
assert self.has_object_perm("PUT", self.project, user=superuser, is_superuser=True)
assert self.has_object_perm("DELETE", self.project, user=superuser, is_superuser=True)
def test_staff(self) -> None:
staff_user = self.create_user(is_staff=True)
self.login_as(user=staff_user, staff=True)
assert self.has_object_perm("GET", self.project, user=staff_user, is_staff=True)
assert self.has_object_perm("POST", self.project, user=staff_user, is_staff=True)
assert self.has_object_perm("PUT", self.project, user=staff_user, is_staff=True)
assert self.has_object_perm("DELETE", self.project, user=staff_user, is_staff=True)
def test_staff_passes_2FA(self) -> None:
staff_user = self.create_user(is_staff=True)
self.login_as(user=staff_user, staff=True)
request = self.make_request(user=serialize_rpc_user(staff_user), is_staff=True)
drf_request = drf_request_from_request(request)
permission = self.permission_cls()
self.organization.flags.require_2fa = True
self.organization.save()
assert not permission.is_not_2fa_compliant(
request=drf_request, organization=self.organization
)
| ProjectAndStaffPermissionTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pep8_naming/N805.py | {
"start": 862,
"end": 1032
} | class ____:
def good_method_pos_only(self, blah, /, something: str):
pass
def bad_method_pos_only(this, blah, /, something: str):
pass
| PosOnlyClass |
python | eventlet__eventlet | eventlet/queue.py | {
"start": 17622,
"end": 18082
} | class ____(Queue):
'''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first).
Entries are typically tuples of the form: ``(priority number, data)``.
'''
def _init(self, maxsize):
self.queue = []
def _put(self, item, heappush=heapq.heappush):
heappush(self.queue, item)
self._put_bookkeeping()
def _get(self, heappop=heapq.heappop):
return heappop(self.queue)
| PriorityQueue |
python | huggingface__transformers | src/transformers/trainer_utils.py | {
"start": 6310,
"end": 6512
} | class ____(NamedTuple):
predictions: np.ndarray | tuple[np.ndarray]
label_ids: np.ndarray | tuple[np.ndarray] | None
metrics: dict[str, float] | None
num_samples: int | None
| EvalLoopOutput |
python | getsentry__sentry | src/sentry/models/releaseprojectenvironment.py | {
"start": 3580,
"end": 4099
} | class ____(TypedDict):
stage: ReleaseStages
adopted: datetime | None
unadopted: datetime | None
def adoption_stage(adopted: datetime | None, unadopted: datetime | None) -> AdoptionStage:
if adopted is not None and unadopted is None:
stage = ReleaseStages.ADOPTED
elif adopted is not None and unadopted is not None:
stage = ReleaseStages.REPLACED
else:
stage = ReleaseStages.LOW_ADOPTION
return {"stage": stage, "adopted": adopted, "unadopted": unadopted}
| AdoptionStage |
python | pytransitions__transitions | transitions/extensions/locking.py | {
"start": 1668,
"end": 1977
} | class ____:
"""Manages the identity of threads to detect whether the current thread already has a lock."""
def __init__(self):
self.current = 0
def __enter__(self):
self.current = get_ident()
def __exit__(self, exc_type, exc_val, exc_tb):
self.current = 0
| IdentManager |
python | getsentry__sentry | tests/sentry/explore/endpoints/test_explore_saved_queries.py | {
"start": 344,
"end": 48581
} | class ____(APITestCase):
features = {
"organizations:visibility-explore-view": True,
}
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user)
self.projects = [
self.create_project(organization=self.org),
self.create_project(organization=self.org),
]
self.project_ids = [project.id for project in self.projects]
self.project_ids_without_access = [self.create_project().id]
query = {"range": "24h", "query": [{"fields": ["span.op"], "mode": "samples"}]}
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Test query",
query=query,
)
ExploreSavedQueryLastVisited.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model,
last_visited=before_now(),
)
model.set_projects(self.project_ids)
self.url = reverse("sentry-api-0-explore-saved-queries", args=[self.org.slug])
def test_get(self) -> None:
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200, response.content
assert len(response.data) == 5
# Prebuilt query
assert response.data[0]["name"] == "All Transactions"
assert response.data[0]["projects"] == []
assert "range" not in response.data[0]
assert response.data[0]["query"] == [
{
"fields": [
"id",
"span.op",
"span.description",
"span.duration",
"transaction",
"timestamp",
],
"query": "is_transaction:true",
"mode": "samples",
"visualize": [
{
"chartType": 0,
"yAxes": ["count()"],
},
{
"chartType": 1,
"yAxes": ["p75(span.duration)", "p90(span.duration)"],
},
],
"orderby": "-timestamp",
}
]
assert "createdBy" in response.data[0]
assert response.data[0]["createdBy"] is None
assert not response.data[0]["expired"]
# User saved query
assert response.data[3]["name"] == "Test query"
assert response.data[3]["projects"] == self.project_ids
assert response.data[3]["range"] == "24h"
assert response.data[3]["query"] == [{"fields": ["span.op"], "mode": "samples"}]
assert "createdBy" in response.data[3]
assert response.data[3]["createdBy"]["username"] == self.user.username
assert not response.data[3]["expired"]
def test_get_name_filter(self) -> None:
with self.feature(self.features):
response = self.client.get(self.url, format="json", data={"query": "Test"})
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["name"] == "Test query"
with self.feature(self.features):
# Also available as the name: filter.
response = self.client.get(self.url, format="json", data={"query": "name:Test"})
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["name"] == "Test query"
with self.feature(self.features):
response = self.client.get(self.url, format="json", data={"query": "name:Nope"})
assert response.status_code == 200, response.content
assert len(response.data) == 0
def test_get_all_paginated(self) -> None:
for i in range(0, 10):
query = {
"range": "24h",
"query": [{"fields": ["span.op"], "mode": "samples"}],
}
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name=f"My query {i}",
query=query,
)
model.set_projects(self.project_ids)
with self.feature(self.features):
response = self.client.get(self.url, data={"per_page": 1})
assert response.status_code == 200, response.content
assert len(response.data) == 1
def test_get_sortby(self) -> None:
query = {"range": "24h", "query": [{"fields": ["span.op"], "mode": "samples"}]}
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="My query",
query=query,
date_added=before_now(minutes=10),
date_updated=before_now(minutes=10),
)
model.set_projects(self.project_ids)
sort_options = {
"dateAdded": True,
"-dateAdded": False,
"dateUpdated": True,
"-dateUpdated": False,
"name": True,
"-name": False,
}
for sorting, forward_sort in sort_options.items():
with self.feature(self.features):
response = self.client.get(self.url, data={"sortBy": sorting})
assert response.status_code == 200
values = [row[sorting.strip("-")] for row in response.data]
if not forward_sort:
values = list(reversed(values))
assert list(sorted(values)) == values
def test_get_sortby_most_popular(self) -> None:
query = {"range": "24h", "query": [{"fields": ["span.op"], "mode": "samples"}]}
ExploreSavedQuery.objects.filter(name="Test query").update(visits=2)
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="My query",
query=query,
visits=3,
date_added=before_now(minutes=10),
date_updated=before_now(minutes=10),
last_visited=before_now(minutes=5),
)
model.set_projects(self.project_ids)
for forward_sort in [True, False]:
sorting = "mostPopular" if forward_sort else "-mostPopular"
with self.feature(self.features):
response = self.client.get(self.url, data={"sortBy": sorting})
assert response.status_code == 200
values = [row["name"] for row in response.data]
expected = ["My query", "Test query"]
if forward_sort:
assert values[0] == expected[0]
assert values[1] == expected[1]
else:
assert values[-1] == expected[0]
assert values[-2] == expected[1]
def test_get_sortby_recently_viewed(self) -> None:
query = {"range": "24h", "query": [{"fields": ["span.op"], "mode": "samples"}]}
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="My query",
query=query,
visits=3,
date_added=before_now(minutes=10),
date_updated=before_now(minutes=10),
last_visited=before_now(minutes=5),
)
ExploreSavedQueryLastVisited.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model,
last_visited=before_now(minutes=5),
)
model.set_projects(self.project_ids)
for forward_sort in [True, False]:
sorting = "recentlyViewed" if forward_sort else "-recentlyViewed"
with self.feature(self.features):
response = self.client.get(self.url, data={"sortBy": sorting})
assert response.status_code == 200
values = [row["name"] for row in response.data]
expected = ["Test query", "My query"]
if not forward_sort:
assert values[0] == expected[1]
assert values[1] == expected[0]
else:
assert values[0] == expected[0]
assert values[1] == expected[1]
def test_get_sortby_myqueries(self) -> None:
uhoh_user = self.create_user(username="uhoh")
self.create_member(organization=self.org, user=uhoh_user)
whoops_user = self.create_user(username="whoops")
self.create_member(organization=self.org, user=whoops_user)
query = {"range": "24h", "query": [{"fields": ["span.op"], "mode": "samples"}]}
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=uhoh_user.id,
name="a query for uhoh",
query=query,
date_added=before_now(minutes=10),
date_updated=before_now(minutes=10),
)
model.set_projects(self.project_ids)
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=whoops_user.id,
name="a query for whoops",
query=query,
date_added=before_now(minutes=10),
date_updated=before_now(minutes=10),
)
model.set_projects(self.project_ids)
with self.feature(self.features):
response = self.client.get(self.url, data={"sortBy": "myqueries"})
assert response.status_code == 200, response.content
assert response.data[0]["createdBy"]["id"] == str(self.user.id)
assert response.data[1]["createdBy"]["id"] == str(uhoh_user.id)
assert response.data[2]["createdBy"]["id"] == str(whoops_user.id)
def test_get_expired_query(self) -> None:
query = {
"start": str(before_now(days=90)),
"end": str(before_now(days=61)),
}
ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="My expired query",
query=query,
date_added=before_now(days=90),
date_updated=before_now(minutes=10),
)
with (
self.options({"system.event-retention-days": 60}),
self.feature(self.features),
):
response = self.client.get(self.url, {"query": "name:My expired query"})
assert response.status_code == 200, response.content
assert response.data[0]["expired"]
def test_get_my_queries(self) -> None:
with self.feature(self.features):
response = self.client.get(self.url, data={"exclude": "shared"})
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["name"] == "Test query"
def test_get_shared_queries(self) -> None:
query = {"range": "24h", "query": [{"fields": ["span.op"], "mode": "samples"}]}
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id + 1,
name="Shared query",
query=query,
)
model.set_projects(self.project_ids)
with self.feature(self.features):
response = self.client.get(self.url, data={"exclude": "owned", "sortBy": "dateAdded"})
assert response.status_code == 200, response.content
assert len(response.data) == 5
assert response.data[0]["name"] == "Shared query"
def test_get_query_last_visited(self) -> None:
last_visited = before_now(minutes=10)
query = {"fields": ["span.op"], "mode": "samples"}
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Query with last visited",
query=query,
last_visited=last_visited,
)
ExploreSavedQueryLastVisited.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model,
last_visited=last_visited,
)
model.set_projects(self.project_ids)
with self.feature(self.features):
response = self.client.get(self.url, data={"query": "name:Query with last visited"})
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["lastVisited"] == last_visited
def test_get_no_starred_queries(self) -> None:
with self.feature(self.features):
response = self.client.get(self.url, data={"starred": "1"})
assert response.status_code == 200, response.content
assert len(response.data) == 4
# Unstars prebuilt queries
ExploreSavedQueryStarred.objects.filter(
organization=self.org,
user_id=self.user.id,
starred=True,
).update(starred=False)
with self.feature(self.features):
response = self.client.get(self.url, data={"starred": "1"})
assert response.status_code == 200, response.content
assert len(response.data) == 0
def test_get_starred_queries(self) -> None:
query = {"range": "24h", "query": [{"fields": ["span.op"], "mode": "samples"}]}
model_a = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Starred query A",
query=query,
)
model_a.set_projects(self.project_ids)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model_a,
position=1,
)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id + 1,
explore_saved_query=model_a,
position=1,
)
model_b = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Starred query B",
query=query,
)
model_b.set_projects(self.project_ids)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id + 1,
explore_saved_query=model_b,
position=2,
)
with self.feature(self.features):
response = self.client.get(self.url, data={"starred": "1"})
assert response.status_code == 200, response.content
assert (
len(response.data) == 5
) # Only one query should be returned because the other query is starred by a different user
assert response.data[0]["name"] == "Starred query A"
assert response.data[0]["starred"] is True
assert response.data[0]["position"] == 1
def test_get_most_starred_queries(self) -> None:
query = {"range": "24h", "query": [{"fields": ["span.op"], "mode": "samples"}]}
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Most starred query",
query=query,
)
second_model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Second most starred query",
query=query,
)
model.set_projects(self.project_ids)
second_model.set_projects(self.project_ids)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model,
position=1,
)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id + 1,
explore_saved_query=model,
position=1,
)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id + 2,
explore_saved_query=model,
position=1,
)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=second_model,
position=2,
)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id + 1,
explore_saved_query=second_model,
position=2,
)
with self.feature(self.features):
response = self.client.get(self.url, data={"sortBy": "mostStarred"})
assert response.status_code == 200, response.content
assert len(response.data) == 7
assert response.data[0]["name"] == "Most starred query"
assert response.data[0]["starred"] is True
assert response.data[0]["position"] == 1
assert response.data[1]["name"] == "Second most starred query"
assert response.data[1]["starred"] is True
assert response.data[1]["position"] == 2
assert response.data[-1]["name"] == "Test query"
assert response.data[-1]["starred"] is False
assert response.data[-1]["position"] is None
def test_get_sortby_multiple(self) -> None:
# Trigger prebuilt queries creation and unstar prebuilt queries to simplify test
with self.feature(self.features):
response = self.client.get(self.url)
ExploreSavedQueryStarred.objects.filter(
organization=self.org,
user_id=self.user.id,
starred=True,
).update(starred=False, position=None)
query = {"range": "24h", "query": [{"fields": ["span.op"], "mode": "samples"}]}
model_a = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Query A",
query=query,
last_visited=before_now(minutes=30),
)
ExploreSavedQueryLastVisited.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model_a,
last_visited=before_now(minutes=30),
)
model_a.set_projects(self.project_ids)
model_b = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Query B",
query=query,
last_visited=before_now(minutes=20),
)
ExploreSavedQueryLastVisited.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model_b,
last_visited=before_now(minutes=20),
)
model_b.set_projects(self.project_ids)
model_c = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Query C",
query=query,
last_visited=before_now(minutes=10),
)
ExploreSavedQueryLastVisited.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model_c,
last_visited=before_now(minutes=10),
)
model_c.set_projects(self.project_ids)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model_a,
position=1,
)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model_b,
position=2,
)
model_d = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Query D",
query=query,
last_visited=before_now(minutes=15),
)
ExploreSavedQueryLastVisited.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model_d,
last_visited=before_now(minutes=15),
)
model_d.set_projects(self.project_ids)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id + 1,
explore_saved_query=model_d,
position=1,
)
ExploreSavedQueryStarred.objects.create(
organization=self.org,
user_id=self.user.id + 2,
explore_saved_query=model_d,
position=1,
)
with self.feature(self.features):
response = self.client.get(self.url, data={"sortBy": ["starred", "recentlyViewed"]})
assert response.status_code == 200, response.content
assert len(response.data) == 9
assert response.data[0]["name"] == "Query B"
assert response.data[0]["starred"] is True
assert response.data[0]["position"] == 2
assert response.data[1]["name"] == "Query A"
assert response.data[1]["starred"] is True
assert response.data[1]["position"] == 1
assert response.data[2]["name"] == "Test query"
assert response.data[2]["starred"] is False
assert response.data[2]["position"] is None
assert response.data[3]["name"] == "Query C"
assert response.data[3]["starred"] is False
assert response.data[3]["position"] is None
assert response.data[4]["name"] == "Query D"
assert (
response.data[4]["starred"] is False
) # This should be false because this query is starred by a different user
assert response.data[4]["position"] is None
def test_post_require_mode(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "New query",
"projects": self.project_ids,
"query": [{"fields": []}],
"range": "24h",
},
)
assert response.status_code == 400, response.content
assert "This field is required." == response.data["query"]["mode"][0]
def test_post_success(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "new query",
"projects": self.project_ids,
"environment": ["dev"],
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": "span.op:pageload",
}
],
"range": "24h",
},
)
assert response.status_code == 201, response.content
data = response.data
assert data["range"] == "24h"
assert data["environment"] == ["dev"]
assert data["query"] == [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": "span.op:pageload",
}
]
assert data["projects"] == self.project_ids
assert data["dataset"] == "spans"
def test_post_all_projects(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "New query",
"projects": [-1],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
}
],
},
)
assert response.status_code == 201, response.content
assert response.data["projects"] == [-1]
def test_save_with_project(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "project query",
"projects": self.project_ids,
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": f"project:{self.projects[0].slug}",
}
],
},
)
assert response.status_code == 201, response.content
assert ExploreSavedQuery.objects.filter(name="project query").exists()
def test_save_with_project_and_my_projects(self) -> None:
team = self.create_team(organization=self.org, members=[self.user])
project = self.create_project(organization=self.org, teams=[team])
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "project query",
"projects": [],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": f"project:{project.slug}",
}
],
},
)
assert response.status_code == 201, response.content
assert ExploreSavedQuery.objects.filter(name="project query").exists()
def test_save_with_org_projects(self) -> None:
project = self.create_project(organization=self.org)
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "project query",
"projects": [project.id],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": f"project:{project.slug}",
}
],
},
)
assert response.status_code == 201, response.content
assert ExploreSavedQuery.objects.filter(name="project query").exists()
def test_save_with_team_project(self) -> None:
team = self.create_team(organization=self.org, members=[self.user])
project = self.create_project(organization=self.org, teams=[team])
self.create_project(organization=self.org, teams=[team])
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "project query",
"projects": [project.id],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": f"project:{project.slug}",
}
],
},
)
assert response.status_code == 201, response.content
assert ExploreSavedQuery.objects.filter(name="project query").exists()
def test_save_without_team(self) -> None:
team = self.create_team(organization=self.org, members=[])
self.create_project(organization=self.org, teams=[team])
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "without team query",
"projects": [],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
}
],
},
)
assert response.status_code == 400
assert "No Projects found, join a Team" == response.data["detail"]
def test_save_with_team_and_without_project(self) -> None:
team = self.create_team(organization=self.org, members=[self.user])
self.create_project(organization=self.org, teams=[team])
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "with team query",
"projects": [],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
}
],
},
)
assert response.status_code == 201, response.content
assert ExploreSavedQuery.objects.filter(name="with team query").exists()
def test_save_with_wrong_projects(self) -> None:
other_org = self.create_organization(owner=self.user)
project = self.create_project(organization=other_org)
project2 = self.create_project(organization=self.org)
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "project query",
"projects": [project.id],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": f"project:{project.slug}",
}
],
},
)
assert response.status_code == 403, response.content
assert not ExploreSavedQuery.objects.filter(name="project query").exists()
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "project query",
"projects": [project.id, project2.id],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": f"project:{project.slug} project:{project2.slug}",
}
],
},
)
assert response.status_code == 403, response.content
assert not ExploreSavedQuery.objects.filter(name="project query").exists()
def test_save_interval(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Interval query",
"projects": [-1],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": "spaceAfterColon:1",
}
],
"interval": "1m",
},
)
assert response.status_code == 201, response.content
assert response.data["name"] == "Interval query"
assert response.data["interval"] == "1m"
def test_save_invalid_interval(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Interval query",
"projects": [-1],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": "spaceAfterColon:1",
}
],
"interval": "1s",
},
)
assert response.status_code == 400, response.content
def test_save_without_chart_type(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Query",
"projects": [-1],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"query": "spaceAfterColon:1",
"visualize": [
{
"yAxes": ["count(span.duration)"],
},
],
}
],
"interval": "1m",
},
)
assert response.status_code == 201, response.content
assert len(response.data["query"]) == 1
assert response.data["query"][0]["visualize"] == [
{"yAxes": ["count(span.duration)"]},
]
def test_save_aggregate_field_and_orderby(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Query",
"projects": [-1],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"aggregateField": [
{
"groupBy": "span.op",
},
{
"yAxes": ["count(span.duration)"],
},
{
"yAxes": ["avg(span.duration)"],
"chartType": 0,
},
],
"aggregateOrderby": "-avg(span.duration)",
}
],
"interval": "1m",
},
)
assert response.status_code == 201, response.content
assert len(response.data["query"]) == 1
assert "visualize" not in response.data["query"][0]
assert "groupby" not in response.data["query"][0]
assert response.data["query"][0]["aggregateField"] == [
{
"groupBy": "span.op",
},
{
"yAxes": ["count(span.duration)"],
},
{
"yAxes": ["avg(span.duration)"],
"chartType": 0,
},
]
assert response.data["query"][0]["aggregateOrderby"] == "-avg(span.duration)"
def test_save_invalid_ambiguous_aggregate_field(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Query",
"projects": [-1],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"aggregateField": [
{
"groupBy": "span.op",
"yAxes": ["count(span.duration)"],
"chartType": 0,
},
],
}
],
"interval": "1m",
},
)
assert response.status_code == 400, response.content
assert response.data == {
"detail": ErrorDetail(
"Ambiguous aggregate field. Must specify groupBy or yAxes, not both.",
code="parse_error",
),
}
def test_save_invalid_aggregate_field(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Query",
"projects": [-1],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"aggregateField": [{}],
}
],
"interval": "1m",
},
)
assert response.status_code == 400, response.content
assert response.data == {
"query": {
"aggregateField": {
"yAxes": [
ErrorDetail(
"This field is required.",
code="required",
),
],
"groupBy": [
ErrorDetail(
"This field is required.",
code="required",
),
],
},
},
}
def test_save_invalid_aggregate_field_bad_y_axes(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Query",
"projects": [-1],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"aggregateField": [
{
"yAxes": "foobar",
},
],
}
],
"interval": "1m",
},
)
assert response.status_code == 400, response.content
assert response.data == {
"query": {
"aggregateField": {
"yAxes": [
ErrorDetail(
'Expected a list of items but got type "str".',
code="not_a_list",
),
],
},
},
}
def test_save_invalid_aggregate_field_bad_group_by(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Query",
"projects": [-1],
"range": "24h",
"query": [
{
"fields": ["span.op", "count(span.duration)"],
"mode": "samples",
"aggregateField": [
{
"groupBy": [123],
},
],
}
],
"interval": "1m",
},
)
assert response.status_code == 400, response.content
assert response.data == {
"query": {
"aggregateField": {
"groupBy": [
ErrorDetail(
"Not a valid string.",
code="invalid",
),
],
},
},
}
def test_get_with_migration_feature_flag(self) -> None:
self.features_with_migration = {"organizations:expose-migrated-discover-queries": True}
self.features_with_migration.update(self.features)
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
date_added=before_now(),
# sort by name so it shows up last
name="Z - Segment span query",
query={"range": "24h", "query": [{"fields": ["span.op"], "mode": "samples"}]},
dataset=ExploreSavedQueryDataset.SEGMENT_SPANS,
)
ExploreSavedQueryLastVisited.objects.create(
organization=self.org,
user_id=self.user.id,
explore_saved_query=model,
last_visited=before_now(),
)
with self.feature(self.features_with_migration):
response_with_flag = self.client.get(self.url, data={"sortBy": ["name"]})
assert response_with_flag.status_code == 200, response_with_flag.content
assert len(response_with_flag.data) == 6
assert response_with_flag.data[5]["name"] == "Z - Segment span query"
assert response_with_flag.data[5]["dataset"] == "segment_spans"
with self.feature(self.features):
response_without_flag = self.client.get(self.url)
assert response_without_flag.status_code == 200, response_without_flag.content
assert len(response_without_flag.data) == 5
def test_post_metrics_dataset_with_metric_field(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Metrics query with metric field",
"projects": self.project_ids,
"dataset": "metrics",
"query": [
{
"fields": ["count()"],
"mode": "aggregate",
"metric": {
"name": "sentry.alert_endpoint.executed",
"type": "counter",
},
}
],
"range": "24h",
},
)
assert response.status_code == 201, response.content
data = response.data
assert data["dataset"] == "metrics"
assert data["query"] == [
{
"fields": ["count()"],
"mode": "aggregate",
"metric": {
"name": "sentry.alert_endpoint.executed",
"type": "counter",
},
}
]
def test_post_metrics_dataset_with_metric_field_and_unit(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Metrics query with unit",
"projects": self.project_ids,
"dataset": "metrics",
"query": [
{
"fields": ["avg()"],
"mode": "aggregate",
"metric": {
"name": "sentry.response_time",
"type": "gauge",
"unit": "millisecond",
},
}
],
"range": "1h",
},
)
assert response.status_code == 201, response.content
data = response.data
assert data["dataset"] == "metrics"
assert data["query"] == [
{
"fields": ["avg()"],
"mode": "aggregate",
"metric": {
"name": "sentry.response_time",
"type": "gauge",
"unit": "millisecond",
},
}
]
def test_get_metrics_dataset_with_metric_field(self) -> None:
query = {
"range": "24h",
"query": [
{
"fields": ["count()"],
"mode": "aggregate",
"metric": {
"name": "sentry.alert_endpoint.executed",
"type": "counter",
},
}
],
}
model = ExploreSavedQuery.objects.create(
organization=self.org,
created_by_id=self.user.id,
name="Test metrics query",
query=query,
dataset=ExploreSavedQueryDataset.METRICS,
)
model.set_projects(self.project_ids)
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200, response.content
test_query = None
for item in response.data:
if item["name"] == "Test metrics query":
test_query = item
break
assert test_query is not None
assert test_query["dataset"] == "metrics"
assert test_query["query"][0]["metric"] == {
"name": "sentry.alert_endpoint.executed",
"type": "counter",
}
def test_post_non_metrics_dataset_rejects_metric_field(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Spans query with invalid metric",
"projects": self.project_ids,
"dataset": "spans",
"query": [
{
"fields": ["span.op"],
"mode": "samples",
"metric": {
"name": "sentry.alert_endpoint.executed",
"type": "counter",
},
}
],
"range": "24h",
},
)
assert response.status_code == 400, response.content
assert "Metric field is only allowed for metrics dataset" in str(response.data)
def test_post_metrics_dataset_requires_metric_field(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Metrics query without metric field",
"projects": self.project_ids,
"dataset": "metrics",
"query": [
{
"fields": ["span.op"],
"mode": "samples",
}
],
"range": "24h",
},
)
assert response.status_code == 400, response.content
assert "Metric field is required for metrics dataset" in str(response.data)
def test_save_with_start_and_end_time(self) -> None:
with self.feature(self.features):
response = self.client.post(
self.url,
{
"name": "Start and end time query",
"projects": self.project_ids,
"dataset": "spans",
"start": "2025-11-12T23:00:00.000Z",
"end": "2025-11-20T22:59:59.000Z",
},
)
assert response.status_code == 201, response.content
data = response.data
assert data["start"] is not None
assert data["end"] is not None
| ExploreSavedQueriesTest |
python | apache__airflow | providers/alibaba/src/airflow/providers/alibaba/cloud/links/maxcompute.py | {
"start": 1121,
"end": 1971
} | class ____(BaseOperatorLink):
"""Helper class for constructing MaxCompute Log View Link."""
name = "MaxCompute Log View"
key = "maxcompute_log_view"
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
url = XCom.get_value(key=self.key, ti_key=ti_key)
if not url:
return ""
return url
@staticmethod
def persist(
context: Context,
log_view_url: str,
):
"""
Persist the log view URL to XCom for later retrieval.
:param context: The context of the task instance.
:param log_view_url: The log view URL to persist.
"""
context["task_instance"].xcom_push(
key=MaxComputeLogViewLink.key,
value=log_view_url,
)
| MaxComputeLogViewLink |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-visiting-all-points.py | {
"start": 29,
"end": 351
} | class ____(object):
def minTimeToVisitAllPoints(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
return sum(max(abs(points[i+1][0] - points[i][0]),
abs(points[i+1][1] - points[i][1]))
for i in xrange(len(points)-1))
| Solution |
python | optuna__optuna | optuna/visualization/_parallel_coordinate.py | {
"start": 1203,
"end": 10285
} | class ____(NamedTuple):
dim_objective: _DimensionInfo
dims_params: list[_DimensionInfo]
reverse_scale: bool
target_name: str
def plot_parallel_coordinate(
study: Study,
params: list[str] | None = None,
*,
target: Callable[[FrozenTrial], float] | None = None,
target_name: str = "Objective Value",
) -> "go.Figure":
"""Plot the high-dimensional parameter relationships in a study.
Note that, if a parameter contains missing values, a trial with missing values is not plotted.
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their target values.
params:
Parameter list to visualize. The default is all parameters.
target:
A function to specify the value to display. If it is :obj:`None` and ``study`` is being
used for single-objective optimization, the objective values are plotted.
.. note::
Specify this argument if ``study`` is being used for multi-objective optimization.
target_name:
Target's name to display on the axis label and the legend.
Returns:
A :class:`plotly.graph_objects.Figure` object.
.. note::
The colormap is reversed when the ``target`` argument isn't :obj:`None` or ``direction``
of :class:`~optuna.study.Study` is ``minimize``.
"""
_imports.check()
info = _get_parallel_coordinate_info(study, params, target, target_name)
return _get_parallel_coordinate_plot(info)
def _get_parallel_coordinate_plot(info: _ParallelCoordinateInfo) -> "go.Figure":
layout = go.Layout(title="Parallel Coordinate Plot")
if len(info.dims_params) == 0 or len(info.dim_objective.values) == 0:
return go.Figure(data=[], layout=layout)
dims = _get_dims_from_info(info)
reverse_scale = info.reverse_scale
target_name = info.target_name
traces = [
go.Parcoords(
dimensions=dims,
labelangle=30,
labelside="bottom",
line={
"color": dims[0]["values"],
"colorscale": COLOR_SCALE,
"colorbar": {"title": target_name},
"showscale": True,
"reversescale": reverse_scale,
},
)
]
figure = go.Figure(data=traces, layout=layout)
return figure
def _get_parallel_coordinate_info(
study: Study,
params: list[str] | None = None,
target: Callable[[FrozenTrial], float] | None = None,
target_name: str = "Objective Value",
) -> _ParallelCoordinateInfo:
_check_plot_args(study, target, target_name)
reverse_scale = _is_reverse_scale(study, target)
trials = _filter_nonfinite(
study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target
)
all_params = {p_name for t in trials for p_name in t.params.keys()}
if params is not None:
for input_p_name in params:
if input_p_name not in all_params:
raise ValueError(f"Parameter {input_p_name} does not exist in your study.")
all_params = set(params)
sorted_params = sorted(all_params)
if target is None:
def _target(t: FrozenTrial) -> float:
return cast(float, t.value)
target = _target
skipped_trial_numbers = _get_skipped_trial_numbers(trials, sorted_params)
objectives = tuple([target(t) for t in trials if t.number not in skipped_trial_numbers])
# The value of (0, 0) is a dummy range. It is ignored when we plot.
objective_range = (min(objectives), max(objectives)) if len(objectives) > 0 else (0, 0)
dim_objective = _DimensionInfo(
label=target_name,
values=objectives,
range=objective_range,
is_log=False,
is_cat=False,
tickvals=[],
ticktext=[],
)
if len(trials) == 0:
_logger.warning("Your study does not have any completed trials.")
return _ParallelCoordinateInfo(
dim_objective=dim_objective,
dims_params=[],
reverse_scale=reverse_scale,
target_name=target_name,
)
if len(objectives) == 0:
_logger.warning("Your study has only completed trials with missing parameters.")
return _ParallelCoordinateInfo(
dim_objective=dim_objective,
dims_params=[],
reverse_scale=reverse_scale,
target_name=target_name,
)
numeric_cat_params_indices: list[int] = []
dims = []
for dim_index, p_name in enumerate(sorted_params, start=1):
values = []
is_categorical = False
for t in trials:
if t.number in skipped_trial_numbers:
continue
if p_name in t.params:
values.append(t.params[p_name])
is_categorical |= isinstance(t.distributions[p_name], CategoricalDistribution)
if _is_log_scale(trials, p_name):
values = [math.log10(v) for v in values]
min_value = min(values)
max_value = max(values)
tickvals: list[int | float] = list(
range(math.ceil(min_value), math.floor(max_value) + 1)
)
if min_value not in tickvals:
tickvals = [min_value] + tickvals
if max_value not in tickvals:
tickvals = tickvals + [max_value]
dim = _DimensionInfo(
label=_truncate_label(p_name),
values=tuple(values),
range=(min_value, max_value),
is_log=True,
is_cat=False,
tickvals=tickvals,
ticktext=["{:.3g}".format(math.pow(10, x)) for x in tickvals],
)
elif is_categorical:
vocab: defaultdict[int | str, int] = defaultdict(lambda: len(vocab))
ticktext: list[str]
if _is_numerical(trials, p_name):
_ = [vocab[v] for v in sorted(values)]
values = [vocab[v] for v in values]
ticktext = [str(v) for v in list(sorted(vocab.keys()))]
numeric_cat_params_indices.append(dim_index)
else:
values = [vocab[v] for v in values]
ticktext = [str(v) for v in list(sorted(vocab.keys(), key=lambda x: vocab[x]))]
dim = _DimensionInfo(
label=_truncate_label(p_name),
values=tuple(values),
range=(min(values), max(values)),
is_log=False,
is_cat=True,
tickvals=list(range(len(vocab))),
ticktext=ticktext,
)
else:
dim = _DimensionInfo(
label=_truncate_label(p_name),
values=tuple(values),
range=(min(values), max(values)),
is_log=False,
is_cat=False,
tickvals=[],
ticktext=[],
)
dims.append(dim)
if numeric_cat_params_indices:
dims.insert(0, dim_objective)
# np.lexsort consumes the sort keys the order from back to front.
# So the values of parameters have to be reversed the order.
idx = np.lexsort([dims[index].values for index in numeric_cat_params_indices][::-1])
updated_dims = []
for dim in dims:
# Since the values are mapped to other categories by the index,
# the index will be swapped according to the sorted index of numeric params.
updated_dims.append(
_DimensionInfo(
label=dim.label,
values=tuple(np.array(dim.values)[idx]),
range=dim.range,
is_log=dim.is_log,
is_cat=dim.is_cat,
tickvals=dim.tickvals,
ticktext=dim.ticktext,
)
)
dim_objective = updated_dims[0]
dims = updated_dims[1:]
return _ParallelCoordinateInfo(
dim_objective=dim_objective,
dims_params=dims,
reverse_scale=reverse_scale,
target_name=target_name,
)
def _get_dims_from_info(info: _ParallelCoordinateInfo) -> list[dict[str, Any]]:
dims = [
{
"label": info.dim_objective.label,
"values": info.dim_objective.values,
"range": info.dim_objective.range,
}
]
for dim in info.dims_params:
if dim.is_log or dim.is_cat:
dims.append(
{
"label": dim.label,
"values": dim.values,
"range": dim.range,
"tickvals": dim.tickvals,
"ticktext": dim.ticktext,
}
)
else:
dims.append({"label": dim.label, "values": dim.values, "range": dim.range})
return dims
def _truncate_label(label: str) -> str:
return label if len(label) < 20 else "{}...".format(label[:17])
| _ParallelCoordinateInfo |
python | tensorflow__tensorflow | tensorflow/python/ops/special_math_ops_test.py | {
"start": 10501,
"end": 12356
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_fresnel_cos_boundary(self):
self.assertAllClose(0., special_math_ops.fresnel_cos(0.))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.fresnel_cos(np.nan))))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_cos_odd(self, dtype):
x = np.random.uniform(-100., 100., size=int(1e4)).astype(dtype)
self.assertAllClose(
self.evaluate(special_math_ops.fresnel_cos(x)),
self.evaluate(-special_math_ops.fresnel_cos(-x)))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_cos_small(self, dtype):
x = np.random.uniform(0., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.fresnel(x)[1], self.evaluate(special_math_ops.fresnel_cos(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_fresnel_cos_larger(self, dtype):
x = np.random.uniform(1., 100., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.fresnel(x)[1],
self.evaluate(special_math_ops.fresnel_cos(x)),
rtol=1e-5)
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_fresnel_cos_gradient(self):
inputs = [np.random.uniform(1., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.fresnel_cos, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 5e-3)
@test_util.run_all_in_graph_and_eager_modes
| FresnelCosTest |
python | agronholm__apscheduler | src/apscheduler/_structures.py | {
"start": 4247,
"end": 9516
} | class ____:
"""
Represents a schedule on which a task will be run.
:var str id: the unique identifier of this schedule
:var str task_id: unique identifier of the task to be run on this schedule
:var Trigger trigger: the trigger that determines when the task will be run
:var tuple args: positional arguments to pass to the task callable
:var dict[str, Any] kwargs: keyword arguments to pass to the task callable
:var bool paused: whether the schedule is paused
:var CoalescePolicy coalesce: determines what to do when processing the schedule if
multiple fire times have become due for this schedule since the last processing
:var ~datetime.timedelta | None misfire_grace_time: maximum number of seconds the
scheduled job's actual run time is allowed to be late, compared to the scheduled
run time
:var ~datetime.timedelta | None max_jitter: maximum number of seconds to randomly
add to the scheduled time for each job created from this schedule
:var ~datetime.timedelta job_result_expiration_time: minimum time to keep the job
results in storage from the jobs created by this schedule
:var metadata: key-value pairs for storing JSON compatible custom information
:var ~datetime.datetime next_fire_time: the next time the task will be run
:var ~datetime.datetime | None last_fire_time: the last time the task was scheduled
to run
:var str | None acquired_by: ID of the scheduler that has acquired this schedule for
processing
:var str | None acquired_until: the time after which other schedulers are free to
acquire the schedule for processing even if it is still marked as acquired
"""
id: str = attrs.field(validator=[instance_of(str), min_len(1)], on_setattr=frozen)
task_id: str = attrs.field(
validator=[instance_of(str), min_len(1)], on_setattr=frozen
)
trigger: Trigger = attrs.field(
validator=instance_of(Trigger), # type: ignore[type-abstract]
on_setattr=frozen,
)
args: tuple = attrs.field(converter=tuple, default=())
kwargs: dict[str, Any] = attrs.field(converter=dict, default=())
paused: bool = attrs.field(default=False)
coalesce: CoalescePolicy = attrs.field(
default=CoalescePolicy.latest,
converter=as_enum(CoalescePolicy),
validator=instance_of(CoalescePolicy),
on_setattr=frozen,
)
misfire_grace_time: timedelta | None = attrs.field(
default=None,
converter=as_timedelta,
validator=optional(instance_of(timedelta)),
on_setattr=frozen,
)
max_jitter: timedelta | None = attrs.field(
converter=as_timedelta,
default=None,
validator=optional(instance_of(timedelta)),
on_setattr=frozen,
)
job_executor: str = attrs.field(validator=instance_of(str), on_setattr=frozen)
job_result_expiration_time: timedelta = attrs.field(
default=0,
converter=as_timedelta,
validator=optional(instance_of(timedelta)),
on_setattr=frozen,
)
metadata: MetadataType = attrs.field(validator=valid_metadata, factory=dict)
next_fire_time: datetime | None = attrs.field(
converter=as_aware_datetime,
default=None,
)
last_fire_time: datetime | None = attrs.field(
converter=as_aware_datetime,
default=None,
)
acquired_by: str | None = attrs.field(default=None)
acquired_until: datetime | None = attrs.field(
converter=as_aware_datetime, default=None
)
def marshal(self, serializer: Serializer) -> dict[str, Any]:
marshalled = attrs.asdict(self, recurse=False, value_serializer=serialize)
marshalled["trigger"] = serializer.serialize(self.trigger)
marshalled["args"] = serializer.serialize(self.args)
marshalled["kwargs"] = serializer.serialize(self.kwargs)
if not self.acquired_by:
del marshalled["acquired_by"]
del marshalled["acquired_until"]
return marshalled
@classmethod
def unmarshal(cls, serializer: Serializer, marshalled: dict[str, Any]) -> Schedule:
marshalled["trigger"] = serializer.deserialize(marshalled["trigger"])
marshalled["args"] = serializer.deserialize(marshalled["args"])
marshalled["kwargs"] = serializer.deserialize(marshalled["kwargs"])
return cls(**marshalled)
def __hash__(self) -> int:
return hash(self.id)
def __eq__(self, other: object) -> bool:
if isinstance(other, Schedule):
return self.id == other.id
return NotImplemented
def __lt__(self, other: object) -> bool:
if isinstance(other, Schedule):
# Sort by next_fire_time first, exhausted schedules last
if self.next_fire_time is not None and other.next_fire_time is not None:
return self.next_fire_time < other.next_fire_time
elif self.next_fire_time is None:
return False
elif other.next_fire_time is None:
return True
# In all other cases, sort by schedule ID
return self.id < other.id
return NotImplemented
@attrs.define(kw_only=True, frozen=True)
| Schedule |
python | getsentry__sentry-python | sentry_sdk/integrations/sqlalchemy.py | {
"start": 789,
"end": 4344
} | class ____(Integration):
identifier = "sqlalchemy"
origin = f"auto.db.{identifier}"
@staticmethod
def setup_once():
# type: () -> None
version = parse_version(SQLALCHEMY_VERSION)
_check_minimum_version(SqlalchemyIntegration, version)
listen(Engine, "before_cursor_execute", _before_cursor_execute)
listen(Engine, "after_cursor_execute", _after_cursor_execute)
listen(Engine, "handle_error", _handle_error)
@ensure_integration_enabled(SqlalchemyIntegration)
def _before_cursor_execute(
conn, cursor, statement, parameters, context, executemany, *args
):
# type: (Any, Any, Any, Any, Any, bool, *Any) -> None
ctx_mgr = record_sql_queries(
cursor,
statement,
parameters,
paramstyle=context and context.dialect and context.dialect.paramstyle or None,
executemany=executemany,
span_origin=SqlalchemyIntegration.origin,
)
context._sentry_sql_span_manager = ctx_mgr
span = ctx_mgr.__enter__()
if span is not None:
_set_db_data(span, conn)
context._sentry_sql_span = span
@ensure_integration_enabled(SqlalchemyIntegration)
def _after_cursor_execute(conn, cursor, statement, parameters, context, *args):
# type: (Any, Any, Any, Any, Any, *Any) -> None
ctx_mgr = getattr(context, "_sentry_sql_span_manager", None) # type: Optional[ContextManager[Any]]
if ctx_mgr is not None:
context._sentry_sql_span_manager = None
ctx_mgr.__exit__(None, None, None)
span = getattr(context, "_sentry_sql_span", None) # type: Optional[Span]
if span is not None:
with capture_internal_exceptions():
add_query_source(span)
def _handle_error(context, *args):
# type: (Any, *Any) -> None
execution_context = context.execution_context
if execution_context is None:
return
span = getattr(execution_context, "_sentry_sql_span", None) # type: Optional[Span]
if span is not None:
span.set_status(SPANSTATUS.INTERNAL_ERROR)
# _after_cursor_execute does not get called for crashing SQL stmts. Judging
# from SQLAlchemy codebase it does seem like any error coming into this
# handler is going to be fatal.
ctx_mgr = getattr(execution_context, "_sentry_sql_span_manager", None) # type: Optional[ContextManager[Any]]
if ctx_mgr is not None:
execution_context._sentry_sql_span_manager = None
ctx_mgr.__exit__(None, None, None)
# See: https://docs.sqlalchemy.org/en/20/dialects/index.html
def _get_db_system(name):
# type: (str) -> Optional[str]
name = str(name)
if "sqlite" in name:
return "sqlite"
if "postgres" in name:
return "postgresql"
if "mariadb" in name:
return "mariadb"
if "mysql" in name:
return "mysql"
if "oracle" in name:
return "oracle"
return None
def _set_db_data(span, conn):
# type: (Span, Any) -> None
db_system = _get_db_system(conn.engine.name)
if db_system is not None:
span.set_data(SPANDATA.DB_SYSTEM, db_system)
if conn.engine.url is None:
return
db_name = conn.engine.url.database
if db_name is not None:
span.set_data(SPANDATA.DB_NAME, db_name)
server_address = conn.engine.url.host
if server_address is not None:
span.set_data(SPANDATA.SERVER_ADDRESS, server_address)
server_port = conn.engine.url.port
if server_port is not None:
span.set_data(SPANDATA.SERVER_PORT, server_port)
| SqlalchemyIntegration |
python | doocs__leetcode | solution/2500-2599/2527.Find Xor-Beauty of Array/Solution.py | {
"start": 0,
"end": 98
} | class ____:
def xorBeauty(self, nums: List[int]) -> int:
return reduce(xor, nums)
| Solution |
python | catalyst-team__catalyst | catalyst/metrics/_r2_squared.py | {
"start": 104,
"end": 1816
} | class ____(ICallbackLoaderMetric):
"""This metric accumulates r2 score along loader
Args:
compute_on_call: if True, allows compute metric's value on call
prefix: metric prefix
suffix: metric suffix
"""
def __init__(
self,
compute_on_call: bool = True,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
) -> None:
"""Init R2Squared"""
super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
self.metric_name = f"{self.prefix}r2squared{self.suffix}"
self.num_examples = 0
self.delta_sum = 0
self.y_sum = 0
self.y_sq_sum = 0
def reset(self, num_batches: int, num_samples: int) -> None:
"""
Reset metrics fields
"""
self.num_examples = 0
self.delta_sum = 0
self.y_sum = 0
self.y_sq_sum = 0
def update(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> None:
"""
Update accumulated data with new batch
"""
self.num_examples += len(y_true)
self.delta_sum += torch.sum(torch.pow(y_pred - y_true, 2))
self.y_sum += torch.sum(y_true)
self.y_sq_sum += torch.sum(torch.pow(y_true, 2))
def compute(self) -> torch.Tensor:
"""
Return accumulated metric
"""
return 1 - self.delta_sum / (
self.y_sq_sum - (self.y_sum ** 2) / self.num_examples
)
def compute_key_value(self) -> torch.Tensor:
"""
Return key-value
"""
r2squared = self.compute()
output = {self.metric_name: r2squared}
return output
__all__ = ["R2Squared"]
| R2Squared |
python | Textualize__textual | docs/examples/guide/input/key01.py | {
"start": 108,
"end": 390
} | class ____(App):
"""App to display key events."""
def compose(self) -> ComposeResult:
yield RichLog()
def on_key(self, event: events.Key) -> None:
self.query_one(RichLog).write(event)
if __name__ == "__main__":
app = InputApp()
app.run()
| InputApp |
python | dask__distributed | distributed/shuffle/_buffer.py | {
"start": 618,
"end": 9412
} | class ____(Generic[ShardType]):
"""A buffer for P2P shuffle
The objects to buffer are typically bytes belonging to certain shards.
Typically the buffer is implemented on sending and receiving end.
The buffer allows for concurrent writing and buffers shards to reduce overhead of writing.
The shards are typically provided in a format like::
{
"bucket-0": [b"shard1", b"shard2"],
"bucket-1": [b"shard1", b"shard2"],
}
Buckets typically correspond to output partitions.
If exceptions occur during writing, the buffer is automatically closed. Subsequent attempts to write will raise the same exception.
Flushing will not raise an exception. To ensure that the buffer finished successfully, please call `ShardsBuffer.raise_on_exception`
"""
shards: defaultdict[str, list[ShardType]]
sizes: defaultdict[str, int]
sizes_detail: defaultdict[str, list[int]]
concurrency_limit: int
memory_limiter: ResourceLimiter
max_message_size: int
bytes_total: int
bytes_memory: int
bytes_written: int
bytes_read: int
avg_size: float
avg_duration: float
_accepts_input: bool
_inputs_done: bool
_exception: None | Exception
_tasks: list[asyncio.Task]
_shards_available: asyncio.Condition
_flush_lock: asyncio.Lock
def __init__(
self,
memory_limiter: ResourceLimiter,
concurrency_limit: int = 2,
max_message_size: int = -1,
) -> None:
self._accepts_input = True
self.shards = defaultdict(list)
self.sizes = defaultdict(int)
self.sizes_detail = defaultdict(list)
self._exception = None
self.concurrency_limit = concurrency_limit
self._inputs_done = False
self.memory_limiter = memory_limiter
self._tasks = [
asyncio.create_task(self._background_task())
for _ in range(concurrency_limit)
]
self._shards_available = asyncio.Condition()
self._flush_lock = asyncio.Lock()
self.max_message_size = max_message_size
self.bytes_total = 0
self.bytes_memory = 0
self.bytes_written = 0
self.bytes_read = 0
self.avg_size = 0.0
self.avg_duration = 0.0
def heartbeat(self) -> dict[str, Any]:
return {
"memory": self.bytes_memory,
"total": self.bytes_total,
"buckets": len(self.shards),
"written": self.bytes_written,
"read": self.bytes_read,
"avg_size": self.avg_size,
"avg_duration": self.avg_duration,
"memory_limit": self.memory_limiter.limit,
}
async def process(self, id: str, shards: list[ShardType], size: int) -> None:
try:
start = time()
with context_meter.meter("process"):
await self._process(id, shards)
context_meter.digest_metric("process", size, "bytes")
context_meter.digest_metric("process", 1, "count")
self.bytes_written += size
stop = time()
self.avg_size = 0.98 * self.avg_size + 0.02 * size
self.avg_duration = 0.98 * self.avg_duration + 0.02 * (stop - start)
except Exception as e:
self._exception = e
self._inputs_done = True
finally:
await self.memory_limiter.decrease(size)
self.bytes_memory -= size
@abc.abstractmethod
async def _process(self, id: str, shards: list[ShardType]) -> None: ...
def read(self, id: str) -> ShardType:
raise NotImplementedError() # pragma: nocover
@property
def empty(self) -> bool:
return not self.shards
async def _background_task(self) -> None:
def _continue() -> bool:
return bool(self.shards or self._inputs_done)
while True:
with context_meter.meter("idle"):
async with self._shards_available:
await self._shards_available.wait_for(_continue)
if self._inputs_done and not self.shards:
break
part_id = max(self.sizes, key=self.sizes.__getitem__)
if self.max_message_size > 0:
size = 0
shards = []
# FIXME: We always exceed the limit, not just on the first shard.
while size < self.max_message_size:
try:
shard = self.shards[part_id].pop()
shards.append(shard)
s = self.sizes_detail[part_id].pop()
size += s
self.sizes[part_id] -= s
except IndexError:
break
finally:
if not self.shards[part_id]:
del self.shards[part_id]
assert not self.sizes[part_id]
del self.sizes[part_id]
assert not self.sizes_detail[part_id]
del self.sizes_detail[part_id]
else:
shards = self.shards.pop(part_id)
size = self.sizes.pop(part_id)
self._shards_available.notify_all()
await self.process(part_id, shards, size)
async def write(self, data: dict[str, ShardType]) -> None:
"""
Writes objects into the local buffers, blocks until ready for more
Parameters
----------
data: dict
A dictionary mapping destinations to the object that should
be written to that destination
Notes
-----
If this buffer has a memory limiter configured, then it will
apply back-pressure to the sender (blocking further receives)
if local resource usage hits the limit, until such time as the
resource usage drops.
"""
if self._exception:
raise self._exception
if not self._accepts_input or self._inputs_done:
raise RuntimeError(f"Trying to put data in closed {self}.")
if not data:
return
sizes = {worker: sizeof(shard) for worker, shard in data.items()}
total_batch_size = sum(sizes.values())
self.bytes_memory += total_batch_size
self.bytes_total += total_batch_size
self.memory_limiter.increase(total_batch_size)
async with self._shards_available:
for worker, shard in data.items():
self.shards[worker].append(shard)
self.sizes_detail[worker].append(sizes[worker])
self.sizes[worker] += sizes[worker]
self._shards_available.notify()
await self.memory_limiter.wait_for_available()
del data
assert total_batch_size
def raise_on_exception(self) -> None:
"""Raises an exception if something went wrong during writing"""
if self._exception:
raise self._exception
async def flush(self) -> None:
"""Wait until all writes are finished.
This closes the buffer such that no new writes are allowed
"""
async with self._flush_lock:
self._accepts_input = False
async with self._shards_available:
self._shards_available.notify_all()
await self._shards_available.wait_for(
lambda: not self.shards or self._exception or self._inputs_done
)
self._inputs_done = True
self._shards_available.notify_all()
await asyncio.gather(*self._tasks)
if not self._exception:
assert not self.bytes_memory, (type(self), self.bytes_memory)
async def close(self) -> None:
"""Flush and close the buffer.
This cleans up all allocated resources.
"""
await self.flush()
if not self._exception:
assert not self.bytes_memory, (type(self), self.bytes_memory)
for t in self._tasks:
t.cancel()
self._accepts_input = False
self._inputs_done = True
self.shards.clear()
self.bytes_memory = 0
async with self._shards_available:
self._shards_available.notify_all()
await asyncio.gather(*self._tasks)
async def __aenter__(self) -> ShardsBuffer:
return self
async def __aexit__(self, exc: Any, typ: Any, traceback: Any) -> None:
await self.close()
| ShardsBuffer |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_base64_pdf_source.py | {
"start": 196,
"end": 321
} | class ____(BaseModel):
data: str
media_type: Literal["application/pdf"]
type: Literal["base64"]
| BetaBase64PDFSource |
python | numba__numba | numba/tests/test_listobject.py | {
"start": 17699,
"end": 21503
} | class ____(MemoryLeakMixin, TestCase):
"""Test list pop. """
def test_list_pop_singleton(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
return l.pop(), len(l)
self.assertEqual(foo(), (0, 0))
def test_list_pop_singleton_index(self):
@njit
def foo(i):
l = listobject.new_list(int32)
l.append(0)
return l.pop(i), len(l)
self.assertEqual(foo(0), (0, 0))
self.assertEqual(foo(-1), (0, 0))
def test_list_pop_multiple(self):
@njit
def foo():
l = listobject.new_list(int32)
for j in (10, 11, 12):
l.append(j)
return l.pop(), len(l)
self.assertEqual(foo(), (12, 2))
def test_list_pop_multiple_index(self):
@njit
def foo(i):
l = listobject.new_list(int32)
for j in (10, 11, 12):
l.append(j)
return l.pop(i), len(l)
for i, n in ((0, 10), (1, 11), (2, 12)):
self.assertEqual(foo(i), (n, 2))
for i, n in ((-3, 10), (-2, 11), (-1, 12)):
self.assertEqual(foo(i), (n, 2))
def test_list_pop_integer_types_as_index(self):
@njit
def foo(i):
l = listobject.new_list(int32)
l.append(0)
return l.pop(i)
# try all signed integers and make sure they are cast
for t in (types.signed_domain
):
self.assertEqual(foo((t(0))), 0)
def test_list_pop_empty_index_error_no_index(self):
self.disable_leak_check()
@njit
def foo():
l = listobject.new_list(int32)
l.pop()
with self.assertRaises(IndexError) as raises:
foo()
self.assertIn(
"pop from empty list",
str(raises.exception),
)
def test_list_pop_empty_index_error_with_index(self):
self.disable_leak_check()
@njit
def foo(i):
l = listobject.new_list(int32)
l.pop(i)
with self.assertRaises(IndexError) as raises:
foo(-1)
self.assertIn(
"pop from empty list",
str(raises.exception),
)
with self.assertRaises(IndexError) as raises:
foo(0)
self.assertIn(
"pop from empty list",
str(raises.exception),
)
with self.assertRaises(IndexError) as raises:
foo(1)
self.assertIn(
"pop from empty list",
str(raises.exception),
)
def test_list_pop_mutiple_index_error_with_index(self):
self.disable_leak_check()
@njit
def foo(i):
l = listobject.new_list(int32)
for j in (10, 11, 12):
l.append(j)
l.pop(i)
with self.assertRaises(IndexError) as raises:
foo(-4)
self.assertIn(
"list index out of range",
str(raises.exception),
)
with self.assertRaises(IndexError) as raises:
foo(3)
self.assertIn(
"list index out of range",
str(raises.exception),
)
def test_list_pop_singleton_typing_error_on_index(self):
self.disable_leak_check()
@njit
def foo(i):
l = listobject.new_list(int32)
l.append(0)
# slice with a non-{integer,slice}
return l.pop(i)
for i in "xyz", 1.0, 1j:
with self.assertRaises(TypingError) as raises:
foo(i)
self.assertIn(
"argument for pop must be an integer",
str(raises.exception),
)
| TestPop |
python | huggingface__transformers | src/transformers/models/sam/processing_sam.py | {
"start": 1381,
"end": 1572
} | class ____(ProcessingKwargs, total=False):
images_kwargs: SamImagesKwargs
_defaults = {
"images_kwargs": {
"point_pad_value": -10,
}
}
| SamProcessorKwargs |
python | ipython__ipython | IPython/extensions/tests/test_deduperreload.py | {
"start": 14209,
"end": 18252
} | class ____(unittest.TestCase):
"""
Unit tests for autoreload patching logic
"""
def setUp(self) -> None:
self.deduperreloader = DeduperTestReloader()
def test_patching(self):
code1 = squish_text(
"""
def foo():
return 1
"""
)
code2 = squish_text(
"""
def foo():
return 2
"""
)
self.deduperreloader._to_autoreload.defs_to_reload = [
(("foo",), ast.parse(code2))
]
mod = ModuleType("mod")
exec(code1, mod.__dict__)
self.deduperreloader._patch_namespace(mod)
assert mod.foo() == 2
def test_patching_parameters(self):
code1 = squish_text(
"""
def foo(n,s):
return n+s
"""
)
code2 = squish_text(
"""
def foo(n):
return n
"""
)
self.deduperreloader._to_autoreload.defs_to_reload = [
(("foo",), ast.parse(code2))
]
mod = ModuleType("mod")
exec(code1, mod.__dict__)
self.deduperreloader._patch_namespace(mod)
assert mod.foo(2) == 2
def test_add_function(self):
code1 = squish_text(
"""
def foo2(n):
return n
"""
)
code2 = squish_text(
"""
def foo(n):
return 55
"""
)
self.deduperreloader._to_autoreload.defs_to_reload = [
(("foo",), ast.parse(code2))
]
mod = ModuleType("mod")
exec(code1, mod.__dict__)
self.deduperreloader._patch_namespace(mod)
assert mod.foo(2) == 55
assert mod.foo2(2) == 2
def test_two_operations(self):
code1 = squish_text(
"""
def foo(n):
return 1
"""
)
code2 = squish_text(
"""
def foo(n):
return 55
"""
)
code3 = squish_text(
"""
def goo():
return -1
def foo(n):
x = 2
return x+n
def bar():
return 200
"""
)
self.deduperreloader._to_autoreload.defs_to_reload = [
(("foo",), ast.parse(code2))
]
mod = ModuleType("mod")
exec(code1, mod.__dict__)
assert mod.foo(2) == 1
self.deduperreloader._patch_namespace(mod)
assert mod.foo(2) == 55
self.deduperreloader._to_autoreload.defs_to_reload = [
(("foo",), ast.parse(code3))
]
self.deduperreloader._patch_namespace(mod)
assert mod.foo(2) == 4
def test_using_outside_param(self):
code1 = squish_text(
"""
x=1
def foo(n):
return 1
"""
)
code2 = squish_text(
"""
x=1
def foo(n):
return 55+x
"""
)
self.deduperreloader._to_autoreload.defs_to_reload = [
(("foo",), ast.parse(code2))
]
mod = ModuleType("mod")
exec(code1, mod.__dict__)
assert mod.foo(2) == 1
self.deduperreloader._patch_namespace(mod)
assert mod.foo(2) == 56
def test_importing_func(self):
code1 = squish_text(
"""
from os import environ
def foo(n):
pass
"""
)
code2 = squish_text(
"""
from os import environ
def foo():
environ._data
return 1
"""
)
self.deduperreloader._to_autoreload.defs_to_reload = [
(("foo",), ast.parse(code2))
]
mod = ModuleType("mod")
exec(code1, mod.__dict__)
self.deduperreloader._patch_namespace(mod)
assert mod.foo() == 1
| AutoreloadPatchingSuite |
python | numba__numba | numba/tests/test_unicode.py | {
"start": 7317,
"end": 90232
} | class ____(BaseTest):
def test_literal(self):
pyfunc = literal_usecase
cfunc = njit(literal_usecase)
self.assertPreciseEqual(pyfunc(), cfunc())
def test_passthrough(self, flags=no_pyobj_flags):
pyfunc = passthrough_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_eq(self, flags=no_pyobj_flags):
pyfunc = eq_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in reversed(UNICODE_EXAMPLES):
self.assertEqual(pyfunc(a, b),
cfunc(a, b), '%s, %s' % (a, b))
# comparing against something that's not unicode
self.assertEqual(pyfunc(a, 1),
cfunc(a, 1), '%s, %s' % (a, 1))
self.assertEqual(pyfunc(1, b),
cfunc(1, b), '%s, %s' % (1, b))
def test_eq_optional(self):
# See issue #7474
@njit
def foo(pred1, pred2):
if pred1 > 0:
resolved1 = 'concrete'
else:
resolved1 = None
if pred2 < 0:
resolved2 = 'concrete'
else:
resolved2 = None
# resolved* are Optionals
if resolved1 == resolved2:
return 10
else:
return 20
for (p1, p2) in product(*((-1, 1),) * 2):
self.assertEqual(foo(p1, p2), foo.py_func(p1, p2))
def _check_ordering_op(self, usecase):
pyfunc = usecase
cfunc = njit(pyfunc)
# Check comparison to self
for a in UNICODE_ORDERING_EXAMPLES:
self.assertEqual(
pyfunc(a, a),
cfunc(a, a),
'%s: "%s", "%s"' % (usecase.__name__, a, a),
)
# Check comparison to adjacent
for a, b in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
self.assertEqual(
pyfunc(a, b),
cfunc(a, b),
'%s: "%s", "%s"' % (usecase.__name__, a, b),
)
# and reversed
self.assertEqual(
pyfunc(b, a),
cfunc(b, a),
'%s: "%s", "%s"' % (usecase.__name__, b, a),
)
def test_lt(self, flags=no_pyobj_flags):
self._check_ordering_op(lt_usecase)
def test_le(self, flags=no_pyobj_flags):
self._check_ordering_op(le_usecase)
def test_gt(self, flags=no_pyobj_flags):
self._check_ordering_op(gt_usecase)
def test_ge(self, flags=no_pyobj_flags):
self._check_ordering_op(ge_usecase)
def test_len(self, flags=no_pyobj_flags):
pyfunc = len_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_bool(self, flags=no_pyobj_flags):
pyfunc = bool_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
    def test_expandtabs(self):
        """``str.expandtabs()`` (default tabsize) matches CPython, including
        non-ASCII text and embedded \\r / \\n."""
        pyfunc = expandtabs_usecase
        cfunc = njit(pyfunc)
        cases = ['', '\t', 't\tt\t', 'a\t', '\t⚡', 'a\tbc\nab\tc',
                 '🐍\t⚡', '🐍⚡\n\t\t🐍\t', 'ab\rab\t\t\tab\r\n\ta']
        msg = 'Results of "{}".expandtabs() must be equal'
        for s in cases:
            self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
    def test_expandtabs_with_tabsize(self):
        """``str.expandtabs(tabsize)`` matches CPython for positional and
        keyword forms, over tab sizes -1..9."""
        fns = [njit(expandtabs_with_tabsize_usecase),
               njit(expandtabs_with_tabsize_kwarg_usecase)]
        messages = ['Results of "{}".expandtabs({}) must be equal',
                    'Results of "{}".expandtabs(tabsize={}) must be equal']
        cases = ['', '\t', 't\tt\t', 'a\t', '\t⚡', 'a\tbc\nab\tc',
                 '🐍\t⚡', '🐍⚡\n\t\t🐍\t', 'ab\rab\t\t\tab\r\n\ta']
        for s in cases:
            for tabsize in range(-1, 10):
                for fn, msg in zip(fns, messages):
                    self.assertEqual(fn.py_func(s, tabsize), fn(s, tabsize),
                                     msg=msg.format(s, tabsize))
    def test_expandtabs_exception_noninteger_tabsize(self):
        """A float tabsize must raise a TypingError naming the accepted types."""
        pyfunc = expandtabs_with_tabsize_usecase
        cfunc = njit(pyfunc)
        accepted_types = (types.Integer, int)
        with self.assertRaises(TypingError) as raises:
            cfunc('\t', 2.4)
        msg = '"tabsize" must be {}, not float'.format(accepted_types)
        self.assertIn(msg, str(raises.exception))
    def test_startswith_default(self):
        """``str.startswith(prefix)`` matches CPython over CPython's own
        sample prefixes plus per-string derived substrings."""
        pyfunc = startswith_usecase
        cfunc = njit(pyfunc)
        cpython_str = ['hello', 'helloworld', '']
        cpython_subs = [
            'he', 'hello', 'helloworld', 'ello',
            '', 'lowo', 'lo', 'he', 'lo', 'o',
        ]
        extra_subs = ['hellohellohello', ' ']
        for s in cpython_str + UNICODE_EXAMPLES:
            default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
            for prefix in cpython_subs + default_subs + extra_subs:
                self.assertEqual(pyfunc(s, prefix), cfunc(s, prefix))
    def test_startswith_with_start(self):
        """``str.startswith(prefix, start)`` matches CPython for start values
        in -20..19 and None."""
        pyfunc = startswith_with_start_only_usecase
        cfunc = njit(pyfunc)
        cpython_str = ['hello', 'helloworld', '']
        cpython_subs = [
            'he', 'hello', 'helloworld', 'ello',
            '', 'lowo', 'lo', 'he', 'lo', 'o',
        ]
        extra_subs = ['hellohellohello', ' ']
        for s in cpython_str + UNICODE_EXAMPLES:
            default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
            for prefix in cpython_subs + default_subs + extra_subs:
                for start in list(range(-20, 20)) + [None]:
                    self.assertEqual(pyfunc(s, prefix, start),
                                     cfunc(s, prefix, start))
    def test_startswith_with_start_end(self):
        """``str.startswith(prefix, start, end)`` matches CPython over the
        cross product of start/end values in -20..19 and None."""
        pyfunc = startswith_with_start_end_usecase
        cfunc = njit(pyfunc)
        cpython_str = ['hello', 'helloworld', '']
        cpython_subs = [
            'he', 'hello', 'helloworld', 'ello',
            '', 'lowo', 'lo', 'he', 'lo', 'o',
        ]
        extra_subs = ['hellohellohello', ' ']
        for s in cpython_str + UNICODE_EXAMPLES:
            default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
            for prefix in cpython_subs + default_subs + extra_subs:
                for start in list(range(-20, 20)) + [None]:
                    for end in list(range(-20, 20)) + [None]:
                        self.assertEqual(pyfunc(s, prefix, start, end),
                                         cfunc(s, prefix, start, end))
    def test_startswith_exception_invalid_args(self):
        """Invalid prefix/start/end argument types raise TypingError with
        the documented messages."""
        # prefix must be a string or a tuple of strings (mixed tuple rejected)
        msg_invalid_prefix = \
            "The arg 'prefix' should be a string or a tuple of strings"
        with self.assertRaisesRegex(TypingError, msg_invalid_prefix):
            cfunc = njit(startswith_usecase)
            cfunc("hello", (1, "he"))

        # start must be an Integer or None
        msg_invalid_start = \
            "When specified, the arg 'start' must be an Integer or None"
        with self.assertRaisesRegex(TypingError, msg_invalid_start):
            cfunc = njit(startswith_with_start_only_usecase)
            cfunc("hello", "he", "invalid start")

        # end must be an Integer or None
        msg_invalid_end = \
            "When specified, the arg 'end' must be an Integer or None"
        with self.assertRaisesRegex(TypingError, msg_invalid_end):
            cfunc = njit(startswith_with_start_end_usecase)
            cfunc("hello", "he", 0, "invalid end")
    def test_startswith_tuple(self):
        """``str.startswith`` with a tuple of prefixes matches CPython."""
        pyfunc = startswith_usecase
        cfunc = njit(pyfunc)
        cpython_str = ['hello', 'helloworld', '']
        cpython_subs = [
            'he', 'hello', 'helloworld', 'ello',
            '', 'lowo', 'lo', 'he', 'lo', 'o',
        ]
        extra_subs = ['hellohellohello', ' ']
        for s in cpython_str + UNICODE_EXAMPLES:
            default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
            for sub_str in cpython_subs + default_subs + extra_subs:
                prefix = (sub_str, 'lo')
                self.assertEqual(pyfunc(s, prefix),
                                 cfunc(s, prefix))
    def test_startswith_tuple_args(self):
        """``str.startswith`` with a tuple of prefixes plus start/end bounds
        matches CPython."""
        pyfunc = startswith_with_start_end_usecase
        cfunc = njit(pyfunc)
        cpython_str = ['hello', 'helloworld', '']
        cpython_subs = [
            'he', 'hello', 'helloworld', 'ello',
            '', 'lowo', 'lo', 'he', 'lo', 'o',
        ]
        extra_subs = ['hellohellohello', ' ']
        for s in cpython_str + UNICODE_EXAMPLES:
            default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
            for sub_str in cpython_subs + default_subs + extra_subs:
                for start in list(range(-20, 20)) + [None]:
                    for end in list(range(-20, 20)) + [None]:
                        prefix = (sub_str, 'lo')
                        self.assertEqual(pyfunc(s, prefix, start, end),
                                         cfunc(s, prefix, start, end))
    def test_endswith_default(self):
        """``str.endswith(suffix)`` matches CPython over CPython's sample
        suffixes plus per-string derived substrings."""
        pyfunc = endswith_usecase
        cfunc = njit(pyfunc)
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
        cpython_str = ['hello', 'helloworld', '']
        cpython_subs = [
            'he', 'hello', 'helloworld', 'ello',
            '', 'lowo', 'lo', 'he', 'lo', 'o',
        ]
        extra_subs = ['hellohellohello', ' ']
        for s in cpython_str + UNICODE_EXAMPLES:
            default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
            for sub_str in cpython_subs + default_subs + extra_subs:
                msg = 'Results "{}".endswith("{}") must be equal'
                self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
                                 msg=msg.format(s, sub_str))
    def test_endswith_with_start(self):
        """``str.endswith(suffix, start)`` matches CPython for start values
        in -20..19 and None."""
        pyfunc = endswith_with_start_only_usecase
        cfunc = njit(pyfunc)
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
        cpython_str = ['hello', 'helloworld', '']
        cpython_subs = [
            'he', 'hello', 'helloworld', 'ello',
            '', 'lowo', 'lo', 'he', 'lo', 'o',
        ]
        extra_subs = ['hellohellohello', ' ']
        for s in cpython_str + UNICODE_EXAMPLES:
            default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
            for sub_str in cpython_subs + default_subs + extra_subs:
                for start in list(range(-20, 20)) + [None]:
                    msg = 'Results "{}".endswith("{}", {}) must be equal'
                    self.assertEqual(pyfunc(s, sub_str, start),
                                     cfunc(s, sub_str, start),
                                     msg=msg.format(s, sub_str, start))
def test_endswith_with_start_end(self):
pyfunc = endswith_with_start_end_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#LL1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
for start in list(range(-20, 20)) + [None]:
for end in list(range(-20, 20)) + [None]:
msg = 'Results "{}".endswith("{}", {}, {})\
must be equal'
self.assertEqual(pyfunc(s, sub_str, start, end),
cfunc(s, sub_str, start, end),
msg=msg.format(s, sub_str, start, end))
    def test_endswith_tuple(self):
        """``str.endswith`` with a tuple of suffixes matches CPython."""
        pyfunc = endswith_usecase
        cfunc = njit(pyfunc)
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
        cpython_str = ['hello', 'helloworld', '']
        cpython_subs = [
            'he', 'hello', 'helloworld', 'ello',
            '', 'lowo', 'lo', 'he', 'lo', 'o',
        ]
        extra_subs = ['hellohellohello', ' ']
        for s in cpython_str + UNICODE_EXAMPLES:
            default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
            for sub_str in cpython_subs + default_subs + extra_subs:
                msg = 'Results "{}".endswith({}) must be equal'
                tuple_subs = (sub_str, 'lo')
                self.assertEqual(pyfunc(s, tuple_subs),
                                 cfunc(s, tuple_subs),
                                 msg=msg.format(s, tuple_subs))
def test_endswith_tuple_args(self):
pyfunc = endswith_with_start_end_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
for start in list(range(-20, 20)) + [None]:
for end in list(range(-20, 20)) + [None]:
msg = 'Results "{}".endswith("{}", {}, {})\
must be equal'
tuple_subs = (sub_str, 'lo')
self.assertEqual(pyfunc(s, tuple_subs, start, end),
cfunc(s, tuple_subs, start, end),
msg=msg.format(s, tuple_subs,
start, end))
def test_in(self, flags=no_pyobj_flags):
pyfunc = in_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
extras = ['', 'xx', a[::-1], a[:-2], a[3:], a, a + a]
for substr in extras:
self.assertEqual(pyfunc(substr, a),
cfunc(substr, a),
"'%s' in '%s'?" % (substr, a))
    def test_partition_exception_invalid_sep(self):
        """``str.partition`` raises ValueError for an empty separator and
        TypingError for a non-string separator."""
        self.disable_leak_check()
        pyfunc = partition_usecase
        cfunc = njit(pyfunc)
        # Handle empty separator exception
        for func in [pyfunc, cfunc]:
            with self.assertRaises(ValueError) as raises:
                func('a', '')
            self.assertIn('empty separator', str(raises.exception))

        accepted_types = (types.UnicodeType, types.UnicodeCharSeq)
        with self.assertRaises(TypingError) as raises:
            cfunc('a', None)
        msg = '"sep" must be {}, not none'.format(accepted_types)
        self.assertIn(msg, str(raises.exception))
    def test_partition(self):
        """``str.partition`` matches CPython for found, missing, and
        multi-character separators, including non-ASCII."""
        pyfunc = partition_usecase
        cfunc = njit(pyfunc)
        CASES = [
            ('', '⚡'),
            ('abcabc', '⚡'),
            ('🐍⚡', '⚡'),
            ('🐍⚡🐍', '⚡'),
            ('abababa', 'a'),
            ('abababa', 'b'),
            ('abababa', 'c'),
            ('abababa', 'ab'),
            ('abababa', 'aba'),
        ]
        msg = 'Results of "{}".partition("{}") must be equal'
        for s, sep in CASES:
            self.assertEqual(pyfunc(s, sep), cfunc(s, sep),
                             msg=msg.format(s, sep))
    def test_find(self, flags=no_pyobj_flags):
        """``str.find(sub)`` matches CPython, including the mixed-kind
        (1/2/4-byte codepoint) cases from CPython's own test suite."""
        pyfunc = find_usecase
        cfunc = njit(pyfunc)

        default_subs = [
            (s, ['', 'xx', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
        ]
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L202-L231 # noqa: E501
        cpython_subs = [
            ('a' * 100 + '\u0102', ['\u0102', '\u0201', '\u0120', '\u0220']),
            ('a' * 100 + '\U00100304', ['\U00100304', '\U00100204',
                                        '\U00102004']),
            ('\u0102' * 100 + 'a', ['a']),
            ('\U00100304' * 100 + 'a', ['a']),
            ('\U00100304' * 100 + '\u0102', ['\u0102']),
            ('a' * 100, ['\u0102', '\U00100304', 'a\u0102', 'a\U00100304']),
            ('\u0102' * 100, ['\U00100304', '\u0102\U00100304']),
            ('\u0102' * 100 + 'a_', ['a_']),
            ('\U00100304' * 100 + 'a_', ['a_']),
            ('\U00100304' * 100 + '\u0102_', ['\u0102_']),
        ]
        for s, subs in default_subs + cpython_subs:
            for sub_str in subs:
                msg = 'Results "{}".find("{}") must be equal'
                self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
                                 msg=msg.format(s, sub_str))
    def test_find_with_start_only(self):
        """``str.find(sub, start)`` matches CPython for start values in
        -20..19 and None."""
        pyfunc = find_with_start_only_usecase
        cfunc = njit(pyfunc)
        for s in UNICODE_EXAMPLES:
            for sub_str in ['', 'xx', s[:-2], s[3:], s]:
                for start in list(range(-20, 20)) + [None]:
                    msg = 'Results "{}".find("{}", {}) must be equal'
                    self.assertEqual(pyfunc(s, sub_str, start),
                                     cfunc(s, sub_str, start),
                                     msg=msg.format(s, sub_str, start))
    def test_find_with_start_end(self):
        """``str.find(sub, start, end)`` matches CPython over the cross
        product of start/end values in -20..19 and None."""
        pyfunc = find_with_start_end_usecase
        cfunc = njit(pyfunc)
        starts = ends = list(range(-20, 20)) + [None]
        for s in UNICODE_EXAMPLES:
            for sub_str in ['', 'xx', s[:-2], s[3:], s]:
                for start, end in product(starts, ends):
                    msg = 'Results of "{}".find("{}", {}, {}) must be equal'
                    self.assertEqual(pyfunc(s, sub_str, start, end),
                                     cfunc(s, sub_str, start, end),
                                     msg=msg.format(s, sub_str, start, end))
    def test_find_exception_noninteger_start_end(self):
        """Float start/end arguments to ``str.find`` raise TypingError."""
        pyfunc = find_with_start_end_usecase
        cfunc = njit(pyfunc)
        accepted = (types.Integer, types.NoneType)
        for start, end, name in [(0.1, 5, 'start'), (0, 0.5, 'end')]:
            with self.assertRaises(TypingError) as raises:
                cfunc('ascii', 'sci', start, end)
            msg = '"{}" must be {}, not float'.format(name, accepted)
            self.assertIn(msg, str(raises.exception))
    def test_rpartition_exception_invalid_sep(self):
        """``str.rpartition`` raises ValueError for an empty separator and
        TypingError for a non-string separator."""
        self.disable_leak_check()
        pyfunc = rpartition_usecase
        cfunc = njit(pyfunc)
        # Handle empty separator exception
        for func in [pyfunc, cfunc]:
            with self.assertRaises(ValueError) as raises:
                func('a', '')
            self.assertIn('empty separator', str(raises.exception))

        accepted_types = (types.UnicodeType, types.UnicodeCharSeq)
        with self.assertRaises(TypingError) as raises:
            cfunc('a', None)
        msg = '"sep" must be {}, not none'.format(accepted_types)
        self.assertIn(msg, str(raises.exception))
    def test_rpartition(self):
        """``str.rpartition`` matches CPython for found, missing, and
        multi-character separators, including non-ASCII."""
        pyfunc = rpartition_usecase
        cfunc = njit(pyfunc)
        CASES = [
            ('', '⚡'),
            ('abcabc', '⚡'),
            ('🐍⚡', '⚡'),
            ('🐍⚡🐍', '⚡'),
            ('abababa', 'a'),
            ('abababa', 'b'),
            ('abababa', 'c'),
            ('abababa', 'ab'),
            ('abababa', 'aba'),
        ]
        msg = 'Results of "{}".rpartition("{}") must be equal'
        for s, sep in CASES:
            self.assertEqual(pyfunc(s, sep), cfunc(s, sep),
                             msg=msg.format(s, sep))
    def test_count(self):
        """``str.count(sub)`` matches CPython for the shared count examples."""
        pyfunc = count_usecase
        cfunc = njit(pyfunc)
        error_msg = "'{0}'.py_count('{1}') = {2}\n'{0}'.c_count('{1}') = {3}"

        for s, sub in UNICODE_COUNT_EXAMPLES:
            py_result = pyfunc(s, sub)
            c_result = cfunc(s, sub)
            self.assertEqual(py_result, c_result,
                             error_msg.format(s, sub, py_result, c_result))
    def test_count_with_start(self):
        """``str.count(sub, start)`` matches CPython for start values in
        -18..17 and for start=None."""
        pyfunc = count_with_start_usecase
        cfunc = njit(pyfunc)
        error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}) = {3}",
                                "'{0}'.c_count('{1}', {2}) = {4}")

        for s, sub in UNICODE_COUNT_EXAMPLES:
            for i in range(-18, 18):
                py_result = pyfunc(s, sub, i)
                c_result = cfunc(s, sub, i)
                self.assertEqual(py_result, c_result,
                                 error_msg.format(s, sub, i, py_result,
                                                  c_result))

            # explicit None start (defaults to 0)
            py_result = pyfunc(s, sub, None)
            c_result = cfunc(s, sub, None)
            self.assertEqual(py_result, c_result,
                             error_msg.format(s, sub, None, py_result,
                                              c_result))
def test_count_with_start_end(self):
pyfunc = count_with_start_end_usecase
cfunc = njit(pyfunc)
error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}, {3}) = {4}",
"'{0}'.c_count('{1}', {2}, {3}) = {5}")
for s, sub in UNICODE_COUNT_EXAMPLES:
for i, j in product(range(-18, 18), (-18, 18)):
py_result = pyfunc(s, sub, i, j)
c_result = cfunc(s, sub, i, j)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, i, j, py_result,
c_result))
for j in range(-18, 18):
py_result = pyfunc(s, sub, None, j)
c_result = cfunc(s, sub, None, j)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, None, j, py_result,
c_result))
py_result = pyfunc(s, sub, None, None)
c_result = cfunc(s, sub, None, None)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, None, None, py_result,
c_result))
    def test_count_arg_type_check(self):
        """Non-integer slice bounds and a non-string substring passed to
        ``str.count`` raise TypingError with the documented messages."""
        cfunc = njit(count_with_start_end_usecase)

        with self.assertRaises(TypingError) as raises:
            cfunc('ascii', 'c', 1, 0.5)
        self.assertIn('The slice indices must be an Integer or None',
                      str(raises.exception))

        with self.assertRaises(TypingError) as raises:
            cfunc('ascii', 'c', 1.2, 7)
        self.assertIn('The slice indices must be an Integer or None',
                      str(raises.exception))

        with self.assertRaises(TypingError) as raises:
            cfunc('ascii', 12, 1, 7)
        self.assertIn('The substring must be a UnicodeType, not',
                      str(raises.exception))
    def test_count_optional_arg_type_check(self):
        """Optional(float64) slice bounds are rejected at compile time,
        while Optional(int64) bounds compile and give CPython results."""
        pyfunc = count_with_start_end_usecase

        def try_compile_bad_optional(*args):
            # Compiling with Optional(float64) bounds must raise TypingError.
            bad_sig = types.int64(types.unicode_type,
                                  types.unicode_type,
                                  types.Optional(types.float64),
                                  types.Optional(types.float64))
            njit([bad_sig])(pyfunc)

        with self.assertRaises(TypingError) as raises:
            try_compile_bad_optional('tú quis?', 'tú', 1.1, 1.1)
        self.assertIn('The slice indices must be an Integer or None',
                      str(raises.exception))

        error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}, {3}) = {4}",
                                "'{0}'.c_count_op('{1}', {2}, {3}) = {5}")
        sig_optional = types.int64(types.unicode_type,
                                   types.unicode_type,
                                   types.Optional(types.int64),
                                   types.Optional(types.int64))
        cfunc_optional = njit([sig_optional])(pyfunc)

        py_result = pyfunc('tú quis?', 'tú', 0, 8)
        c_result = cfunc_optional('tú quis?', 'tú', 0, 8)
        self.assertEqual(py_result, c_result,
                         error_msg.format('tú quis?', 'tú', 0, 8, py_result,
                                          c_result))
    def test_rfind(self):
        """``str.rfind(sub)`` matches CPython, including the mixed-kind
        (1/2/4-byte codepoint) cases from CPython's own test suite."""
        pyfunc = rfind_usecase
        cfunc = njit(pyfunc)

        default_subs = [
            (s, ['', 'xx', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
        ]
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L233-L259 # noqa: E501
        cpython_subs = [
            ('\u0102' + 'a' * 100, ['\u0102', '\u0201', '\u0120', '\u0220']),
            ('\U00100304' + 'a' * 100, ['\U00100304', '\U00100204',
                                        '\U00102004']),
            ('abcdefghiabc', ['abc', '']),
            ('a' + '\u0102' * 100, ['a']),
            ('a' + '\U00100304' * 100, ['a']),
            ('\u0102' + '\U00100304' * 100, ['\u0102']),
            ('a' * 100, ['\u0102', '\U00100304', '\u0102a', '\U00100304a']),
            ('\u0102' * 100, ['\U00100304', '\U00100304\u0102']),
            ('_a' + '\u0102' * 100, ['_a']),
            ('_a' + '\U00100304' * 100, ['_a']),
            ('_\u0102' + '\U00100304' * 100, ['_\u0102']),
        ]
        for s, subs in default_subs + cpython_subs:
            for sub_str in subs:
                msg = 'Results "{}".rfind("{}") must be equal'
                self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
                                 msg=msg.format(s, sub_str))
    def test_rfind_with_start_only(self):
        """``str.rfind(sub, start)`` matches CPython for start values in
        -20..19 and None."""
        pyfunc = rfind_with_start_only_usecase
        cfunc = njit(pyfunc)
        for s in UNICODE_EXAMPLES:
            for sub_str in ['', 'xx', s[:-2], s[3:], s]:
                for start in list(range(-20, 20)) + [None]:
                    msg = 'Results "{}".rfind("{}", {}) must be equal'
                    self.assertEqual(pyfunc(s, sub_str, start),
                                     cfunc(s, sub_str, start),
                                     msg=msg.format(s, sub_str, start))
    def test_rfind_with_start_end(self):
        """``str.rfind(sub, start, end)`` matches CPython over the cross
        product of start/end values in -20..19 and None."""
        pyfunc = rfind_with_start_end_usecase
        cfunc = njit(pyfunc)
        starts = list(range(-20, 20)) + [None]
        ends = list(range(-20, 20)) + [None]
        for s in UNICODE_EXAMPLES:
            for sub_str in ['', 'xx', s[:-2], s[3:], s]:
                for start, end in product(starts, ends):
                    msg = 'Results of "{}".rfind("{}", {}, {}) must be equal'
                    self.assertEqual(pyfunc(s, sub_str, start, end),
                                     cfunc(s, sub_str, start, end),
                                     msg=msg.format(s, sub_str, start, end))
    def test_rfind_wrong_substr(self):
        """Non-string substrings passed to ``str.rfind`` raise TypingError."""
        cfunc = njit(rfind_usecase)
        for s in UNICODE_EXAMPLES:
            for sub_str in [None, 1, False]:
                with self.assertRaises(TypingError) as raises:
                    cfunc(s, sub_str)
                msg = 'must be {}'.format(types.UnicodeType)
                self.assertIn(msg, str(raises.exception))
    def test_rfind_wrong_start_end(self):
        """Float/bool start or end arguments to ``str.rfind`` raise
        TypingError naming the offending parameter."""
        cfunc = njit(rfind_with_start_end_usecase)
        accepted_types = (types.Integer, types.NoneType)
        for s in UNICODE_EXAMPLES:
            for sub_str in ['', 'xx', s[:-2], s[3:], s]:
                # test wrong start
                for start, end in product([0.1, False], [-1, 1]):
                    with self.assertRaises(TypingError) as raises:
                        cfunc(s, sub_str, start, end)
                    msg = '"start" must be {}'.format(accepted_types)
                    self.assertIn(msg, str(raises.exception))

                # test wrong end
                for start, end in product([-1, 1], [-0.1, True]):
                    with self.assertRaises(TypingError) as raises:
                        cfunc(s, sub_str, start, end)
                    msg = '"end" must be {}'.format(accepted_types)
                    self.assertIn(msg, str(raises.exception))
    def test_rfind_wrong_start_end_optional(self):
        """Optional(float64) start/end in an explicit ``str.rfind`` signature
        is rejected at compile time."""
        s = UNICODE_EXAMPLES[0]
        sub_str = s[1:-1]
        accepted_types = (types.Integer, types.NoneType)
        msg = 'must be {}'.format(accepted_types)

        def try_compile_wrong_start_optional(*args):
            # Optional(float64) start must be rejected during typing.
            wrong_sig_optional = types.int64(types.unicode_type,
                                             types.unicode_type,
                                             types.Optional(types.float64),
                                             types.Optional(types.intp))
            njit([wrong_sig_optional])(rfind_with_start_end_usecase)

        with self.assertRaises(TypingError) as raises:
            try_compile_wrong_start_optional(s, sub_str, 0.1, 1)
        self.assertIn(msg, str(raises.exception))

        def try_compile_wrong_end_optional(*args):
            # Optional(float64) end must be rejected during typing.
            wrong_sig_optional = types.int64(types.unicode_type,
                                             types.unicode_type,
                                             types.Optional(types.intp),
                                             types.Optional(types.float64))
            njit([wrong_sig_optional])(rfind_with_start_end_usecase)

        with self.assertRaises(TypingError) as raises:
            try_compile_wrong_end_optional(s, sub_str, 1, 0.1)
        self.assertIn(msg, str(raises.exception))
    def test_rindex(self):
        """``str.rindex(sub)`` matches CPython for substrings that are
        present, including CPython's mixed-kind codepoint samples."""
        pyfunc = rindex_usecase
        cfunc = njit(pyfunc)

        default_subs = [
            (s, ['', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
        ]
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L284-L308 # noqa: E501
        cpython_subs = [
            ('abcdefghiabc', ['', 'def', 'abc']),
            ('a' + '\u0102' * 100, ['a']),
            ('a' + '\U00100304' * 100, ['a']),
            ('\u0102' + '\U00100304' * 100, ['\u0102']),
            ('_a' + '\u0102' * 100, ['_a']),
            ('_a' + '\U00100304' * 100, ['_a']),
            ('_\u0102' + '\U00100304' * 100, ['_\u0102'])
        ]
        for s, subs in default_subs + cpython_subs:
            for sub_str in subs:
                msg = 'Results "{}".rindex("{}") must be equal'
                self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
                                 msg=msg.format(s, sub_str))
    def test_index(self):
        """``str.index(sub)`` matches CPython for substrings that are
        present, including CPython's mixed-kind codepoint samples."""
        pyfunc = index_usecase
        cfunc = njit(pyfunc)

        default_subs = [
            (s, ['', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
        ]
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L260-L282 # noqa: E501
        cpython_subs = [
            ('abcdefghiabc', ['', 'def', 'abc']),
            ('\u0102' * 100 + 'a', ['a']),
            ('\U00100304' * 100 + 'a', ['a']),
            ('\U00100304' * 100 + '\u0102', ['\u0102']),
            ('\u0102' * 100 + 'a_', ['a_']),
            ('\U00100304' * 100 + 'a_', ['a_']),
            ('\U00100304' * 100 + '\u0102_', ['\u0102_'])
        ]
        for s, subs in default_subs + cpython_subs:
            for sub_str in subs:
                msg = 'Results "{}".index("{}") must be equal'
                self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
                                 msg=msg.format(s, sub_str))
    def test_index_rindex_with_start_only(self):
        """``str.index``/``str.rindex`` with a start bound match CPython for
        start values chosen so the substring is always found."""
        pyfuncs = [index_with_start_only_usecase,
                   rindex_with_start_only_usecase]
        messages = ['Results "{}".index("{}", {}) must be equal',
                    'Results "{}".rindex("{}", {}) must be equal']
        unicode_examples = [
            'ascii',
            '12345',
            '1234567890',
            '¡Y tú quién te crees?',
            '大处着眼,小处着手。',
        ]
        for pyfunc, msg in zip(pyfuncs, messages):
            cfunc = njit(pyfunc)
            for s in unicode_examples:
                l = len(s)
                # Per-substring start values that keep the search successful
                # (index/rindex raise when the substring is not found).
                cases = [
                    ('', list(range(-10, l + 1))),
                    (s[:-2], [0] + list(range(-10, 1 - l))),
                    (s[3:], list(range(4)) + list(range(-10, 4 - l))),
                    (s, [0] + list(range(-10, 1 - l))),
                ]
                for sub_str, starts in cases:
                    for start in starts + [None]:
                        self.assertEqual(pyfunc(s, sub_str, start),
                                         cfunc(s, sub_str, start),
                                         msg=msg.format(s, sub_str, start))
    def test_index_rindex_with_start_end(self):
        """``str.index``/``str.rindex`` with start and end bounds match
        CPython for bounds chosen so the substring is always found."""
        pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase]
        messages = ['Results of "{}".index("{}", {}, {}) must be equal',
                    'Results of "{}".rindex("{}", {}, {}) must be equal']
        unicode_examples = [
            'ascii',
            '12345',
            '1234567890',
            '¡Y tú quién te crees?',
            '大处着眼,小处着手。',
        ]
        for pyfunc, msg in zip(pyfuncs, messages):
            cfunc = njit(pyfunc)
            for s in unicode_examples:
                l = len(s)
                # Per-substring (start, end) values that keep the search
                # successful (index/rindex raise when not found).
                cases = [
                    ('', list(range(-10, l + 1)), list(range(l, 10))),
                    (s[:-2], [0] + list(range(-10, 1 - l)),
                     [-2, -1] + list(range(l - 2, 10))),
                    (s[3:], list(range(4)) + list(range(-10, -1)),
                     list(range(l, 10))),
                    (s, [0] + list(range(-10, 1 - l)), list(range(l, 10))),
                ]
                for sub_str, starts, ends in cases:
                    for start, end in product(starts + [None], ends):
                        self.assertEqual(pyfunc(s, sub_str, start, end),
                                         cfunc(s, sub_str, start, end),
                                         msg=msg.format(s, sub_str, start, end))
    def test_index_rindex_exception_substring_not_found(self):
        """``str.index``/``str.rindex`` raise ValueError('substring not
        found') when the bounds exclude the substring — in both the
        interpreter and the compiled function."""
        self.disable_leak_check()

        unicode_examples = [
            'ascii',
            '12345',
            '1234567890',
            '¡Y tú quién te crees?',
            '大处着眼,小处着手。',
        ]
        pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase]
        for pyfunc in pyfuncs:
            cfunc = njit(pyfunc)
            for s in unicode_examples:
                l = len(s)
                # (start, end) values chosen so the search must fail.
                cases = [
                    ('', list(range(l + 1, 10)), [l]),
                    (s[:-2], [0], list(range(l - 2))),
                    (s[3:], list(range(4, 10)), [l]),
                    (s, [None], list(range(l))),
                ]
                for sub_str, starts, ends in cases:
                    for start, end in product(starts, ends):
                        for func in [pyfunc, cfunc]:
                            with self.assertRaises(ValueError) as raises:
                                func(s, sub_str, start, end)
                            msg = 'substring not found'
                            self.assertIn(msg, str(raises.exception))
    def test_index_rindex_exception_noninteger_start_end(self):
        """Float start/end arguments to ``str.index``/``str.rindex`` raise
        TypingError naming the offending parameter."""
        accepted = (types.Integer, types.NoneType)
        pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase]
        for pyfunc in pyfuncs:
            cfunc = njit(pyfunc)
            for start, end, name in [(0.1, 5, 'start'), (0, 0.5, 'end')]:
                with self.assertRaises(TypingError) as raises:
                    cfunc('ascii', 'sci', start, end)
                msg = '"{}" must be {}, not float'.format(name, accepted)
                self.assertIn(msg, str(raises.exception))
def test_getitem(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in range(-len(s), len(s)):
self.assertEqual(pyfunc(s, i),
cfunc(s, i),
"'%s'[%d]?" % (s, i))
    def test_getitem_scalar_kind(self):
        """Scalar indexing returns a char of minimal storage kind (issue
        #6135); ``hash`` in the usecase is kind-sensitive, so a mismatch
        would change the result."""
        # See issue #6135, make sure that getitem returns a char of the minimal
        # kind required to represent the "got" item, this is done via the use
        # of `hash` in the test function as it is sensitive to kind.
        pyfunc = getitem_check_kind_usecase
        cfunc = njit(pyfunc)
        samples = ['a\u1234', '¡着']
        for s in samples:
            for i in range(-len(s), len(s)):
                self.assertEqual(pyfunc(s, i),
                                 cfunc(s, i),
                                 "'%s'[%d]?" % (s, i))
    def test_getitem_error(self):
        """Out-of-range scalar indexing raises IndexError in both the
        interpreter and the compiled function."""
        self.disable_leak_check()

        pyfunc = getitem_usecase
        cfunc = njit(pyfunc)

        for s in UNICODE_EXAMPLES:
            with self.assertRaises(IndexError) as raises:
                pyfunc(s, len(s))
            self.assertIn('string index out of range', str(raises.exception))

            with self.assertRaises(IndexError) as raises:
                cfunc(s, len(s))
            self.assertIn('string index out of range', str(raises.exception))
    def test_slice2(self):
        """Two-argument slicing ``s[i:j]`` matches CPython for all in-range
        index pairs, positive and negative."""
        pyfunc = getitem_usecase
        cfunc = njit(pyfunc)

        for s in UNICODE_EXAMPLES:
            for i in list(range(-len(s), len(s))):
                for j in list(range(-len(s), len(s))):
                    sl = slice(i, j)
                    self.assertEqual(pyfunc(s, sl),
                                     cfunc(s, sl),
                                     "'%s'[%d:%d]?" % (s, i, j))
    def test_slice2_error(self):
        """Slicing with out-of-range bounds clamps (never raises) and
        matches CPython."""
        pyfunc = getitem_usecase
        cfunc = njit(pyfunc)

        for s in UNICODE_EXAMPLES:
            for i in [-2, -1, len(s), len(s) + 1]:
                for j in [-2, -1, len(s), len(s) + 1]:
                    sl = slice(i, j)
                    self.assertEqual(pyfunc(s, sl),
                                     cfunc(s, sl),
                                     "'%s'[%d:%d]?" % (s, i, j))
    def test_getitem_slice2_kind(self):
        """Two-argument slicing yields strings of minimal storage kind
        (issue #6135); see note in test_getitem_scalar_kind."""
        # See issue #6135. Also see note in test_getitem_scalar_kind regarding
        # testing.
        pyfunc = getitem_check_kind_usecase
        cfunc = njit(pyfunc)
        samples = ['abc\u1234\u1234', '¡¡¡着着着']
        for s in samples:
            for i in [-2, -1, 0, 1, 2, len(s), len(s) + 1]:
                for j in [-2, -1, 0, 1, 2, len(s), len(s) + 1]:
                    sl = slice(i, j)
                    self.assertEqual(pyfunc(s, sl),
                                     cfunc(s, sl),
                                     "'%s'[%d:%d]?" % (s, i, j))
    def test_slice3(self):
        """Three-argument slicing ``s[i:j:k]`` matches CPython for in-range
        bounds and non-zero steps."""
        pyfunc = getitem_usecase
        cfunc = njit(pyfunc)

        for s in UNICODE_EXAMPLES:
            for i in range(-len(s), len(s)):
                for j in range(-len(s), len(s)):
                    for k in [-2, -1, 1, 2]:
                        sl = slice(i, j, k)
                        self.assertEqual(pyfunc(s, sl),
                                         cfunc(s, sl),
                                         "'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_getitem_slice3_kind(self):
# See issue #6135. Also see note in test_getitem_scalar_kind regarding
# testing.
pyfunc = getitem_check_kind_usecase
cfunc = njit(pyfunc)
samples = ['abc\u1234\u1234',
'a\u1234b\u1234c'
'¡¡¡着着着',
'¡着¡着¡着',
'着a着b着c',
'¡着a¡着b¡着c',
'¡着a着¡c',]
for s in samples:
for i in range(-len(s), len(s)):
for j in range(-len(s), len(s)):
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
    def test_slice3_error(self):
        """Stepped slicing with out-of-range bounds clamps (never raises)
        and matches CPython."""
        pyfunc = getitem_usecase
        cfunc = njit(pyfunc)

        for s in UNICODE_EXAMPLES:
            for i in [-2, -1, len(s), len(s) + 1]:
                for j in [-2, -1, len(s), len(s) + 1]:
                    for k in [-2, -1, 1, 2]:
                        sl = slice(i, j, k)
                        self.assertEqual(pyfunc(s, sl),
                                         cfunc(s, sl),
                                         "'%s'[%d:%d:%d]?" % (s, i, j, k))
    def test_slice_ascii_flag(self):
        """
        Make sure ascii flag is False when ascii and non-ascii characters are
        mixed in output of Unicode slicing.
        """
        @njit
        def f(s):
            # s[::2] keeps the non-ascii chars, s[1::2] is pure ascii here.
            return s[::2]._is_ascii, s[1::2]._is_ascii

        s = "¿abc¡Y tú, quién te cre\t\tes?"
        self.assertEqual(f(s), (0, 1))
    def test_zfill(self):
        """``str.zfill(width)`` matches CPython for signed, unsigned, and
        non-ASCII inputs; a float width raises TypingError."""
        pyfunc = zfill_usecase
        cfunc = njit(pyfunc)

        ZFILL_INPUTS = [
            'ascii',
            '+ascii',
            '-ascii',
            '-asc ii-',
            '12345',
            '-12345',
            '+12345',
            '',
            '¡Y tú crs?',
            '🐍⚡',
            '+🐍⚡',
            '-🐍⚡',
            '大眼,小手。',
            '+大眼,小手。',
            '-大眼,小手。',
        ]

        with self.assertRaises(TypingError) as raises:
            cfunc(ZFILL_INPUTS[0], 1.1)
        self.assertIn('<width> must be an Integer', str(raises.exception))

        for s in ZFILL_INPUTS:
            for width in range(-3, 20):
                self.assertEqual(pyfunc(s, width),
                                 cfunc(s, width))
def test_concat(self, flags=no_pyobj_flags):
pyfunc = concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_repeat(self, flags=no_pyobj_flags):
pyfunc = repeat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in (-1, 0, 1, 2, 3, 4, 5, 7, 8, 15, 70):
self.assertEqual(pyfunc(a, b),
cfunc(a, b))
self.assertEqual(pyfunc(b, a),
cfunc(b, a))
    def test_repeat_exception_float(self):
        """Repeating a string by a float raises TypingError (no mul
        overload matches)."""
        self.disable_leak_check()
        cfunc = njit(repeat_usecase)
        with self.assertRaises(TypingError) as raises:
            cfunc('hi', 2.5)
        self.assertIn(_header_lead + ' Function(<built-in function mul>)',
                      str(raises.exception))
    def test_split_exception_empty_sep(self):
        """``str.split('')`` raises ValueError in both the interpreter and
        the compiled function."""
        self.disable_leak_check()

        pyfunc = split_usecase
        cfunc = njit(pyfunc)

        # Handle empty separator exception
        for func in [pyfunc, cfunc]:
            with self.assertRaises(ValueError) as raises:
                func('a', '')
            self.assertIn('empty separator', str(raises.exception))
    def test_split_exception_noninteger_maxsplit(self):
        """A float maxsplit raises TypingError for both explicit and None
        separators."""
        pyfunc = split_with_maxsplit_usecase
        cfunc = njit(pyfunc)

        # Handle non-integer maxsplit exception
        for sep in [' ', None]:
            with self.assertRaises(TypingError) as raises:
                cfunc('a', sep, 2.4)
            self.assertIn('float64', str(raises.exception),
                          'non-integer maxsplit with sep = %s' % sep)
    def test_split(self):
        """``str.split(sep)`` matches CPython, including sep=None
        (whitespace splitting) and multi-character separators."""
        pyfunc = split_usecase
        cfunc = njit(pyfunc)

        CASES = [
            (' a ', None),
            ('', '⚡'),
            ('abcabc', '⚡'),
            ('🐍⚡', '⚡'),
            ('🐍⚡🐍', '⚡'),
            ('abababa', 'a'),
            ('abababa', 'b'),
            ('abababa', 'c'),
            ('abababa', 'ab'),
            ('abababa', 'aba'),
        ]

        for test_str, splitter in CASES:
            self.assertEqual(pyfunc(test_str, splitter),
                             cfunc(test_str, splitter),
                             "'%s'.split('%s')?" % (test_str, splitter))
    def test_split_with_maxsplit(self):
        """``str.split(sep, maxsplit)`` matches CPython for positional and
        keyword maxsplit, including sep=None."""
        CASES = [
            (' a ', None, 1),
            ('', '⚡', 1),
            ('abcabc', '⚡', 1),
            ('🐍⚡', '⚡', 1),
            ('🐍⚡🐍', '⚡', 1),
            ('abababa', 'a', 2),
            ('abababa', 'b', 1),
            ('abababa', 'c', 2),
            ('abababa', 'ab', 1),
            ('abababa', 'aba', 5),
        ]

        for pyfunc, fmt_str in [(split_with_maxsplit_usecase,
                                 "'%s'.split('%s', %d)?"),
                                (split_with_maxsplit_kwarg_usecase,
                                 "'%s'.split('%s', maxsplit=%d)?")]:
            cfunc = njit(pyfunc)
            for test_str, splitter, maxsplit in CASES:
                self.assertEqual(pyfunc(test_str, splitter, maxsplit),
                                 cfunc(test_str, splitter, maxsplit),
                                 fmt_str % (test_str, splitter, maxsplit))
    def test_split_whitespace(self):
        """``str.split()`` with no separator matches CPython over the full
        set of Unicode whitespace codepoints."""
        # explicit sep=None cases covered in test_split and
        # test_split_with_maxsplit
        pyfunc = split_whitespace_usecase
        cfunc = njit(pyfunc)

        # list copied from
        # https://github.com/python/cpython/blob/master/Objects/unicodetype_db.h
        all_whitespace = ''.join(map(chr, [
            0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E,
            0x001F, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002,
            0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A,
            0x2028, 0x2029, 0x202F, 0x205F, 0x3000
        ]))

        CASES = [
            '',
            'abcabc',
            '🐍 ⚡',
            '🐍 ⚡ 🐍',
            '🐍 ⚡ 🐍 ',
            ' 🐍 ⚡ 🐍',
            ' 🐍' + all_whitespace + '⚡ 🐍 ',
        ]
        for test_str in CASES:
            self.assertEqual(pyfunc(test_str),
                             cfunc(test_str),
                             "'%s'.split()?" % (test_str,))
    def test_split_exception_invalid_keepends(self):
        """Non-integer, non-boolean keepends values for ``str.splitlines``
        raise TypingError naming the accepted types."""
        pyfunc = splitlines_with_keepends_usecase
        cfunc = njit(pyfunc)

        accepted_types = (types.Integer, int, types.Boolean, bool)
        for ty, keepends in (('none', None), ('unicode_type', 'None')):
            with self.assertRaises(TypingError) as raises:
                cfunc('\n', keepends)
            msg = '"keepends" must be {}, not {}'.format(accepted_types, ty)
            self.assertIn(msg, str(raises.exception))
    def test_splitlines(self):
        """``str.splitlines()`` matches CPython across the full range of
        Unicode line-boundary characters."""
        pyfunc = splitlines_usecase
        cfunc = njit(pyfunc)

        cases = ['', '\n', 'abc\r\rabc\r\n', '🐍⚡\v', '\f🐍⚡\f\v\v🐍\x85',
                 '\u2028aba\u2029baba', '\n\r\na\v\fb\x0b\x0cc\x1c\x1d\x1e']

        msg = 'Results of "{}".splitlines() must be equal'
        for s in cases:
            self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
    def test_splitlines_with_keepends(self):
        """``str.splitlines(keepends)`` matches CPython for positional and
        keyword forms, with bool and int keepends values."""
        pyfuncs = [
            splitlines_with_keepends_usecase,
            splitlines_with_keepends_kwarg_usecase
        ]
        messages = [
            'Results of "{}".splitlines({}) must be equal',
            'Results of "{}".splitlines(keepends={}) must be equal'
        ]

        cases = ['', '\n', 'abc\r\rabc\r\n', '🐍⚡\v', '\f🐍⚡\f\v\v🐍\x85',
                 '\u2028aba\u2029baba', '\n\r\na\v\fb\x0b\x0cc\x1c\x1d\x1e']
        all_keepends = [True, False, 0, 1, -1, 100]

        for pyfunc, msg in zip(pyfuncs, messages):
            cfunc = njit(pyfunc)
            for s, keepends in product(cases, all_keepends):
                self.assertEqual(pyfunc(s, keepends), cfunc(s, keepends),
                                 msg=msg.format(s, keepends))
def test_rsplit_exception_empty_sep(self):
self.disable_leak_check()
pyfunc = rsplit_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
def test_rsplit_exception_noninteger_maxsplit(self):
pyfunc = rsplit_with_maxsplit_usecase
cfunc = njit(pyfunc)
accepted_types = (types.Integer, int)
for sep in [' ', None]:
with self.assertRaises(TypingError) as raises:
cfunc('a', sep, 2.4)
msg = '"maxsplit" must be {}, not float'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
def test_rsplit(self):
pyfunc = rsplit_usecase
cfunc = njit(pyfunc)
CASES = [
(' a ', None),
('', '⚡'),
('abcabc', '⚡'),
('🐍⚡', '⚡'),
('🐍⚡🐍', '⚡'),
('abababa', 'a'),
('abababa', 'b'),
('abababa', 'c'),
('abababa', 'ab'),
('abababa', 'aba'),
]
msg = 'Results of "{}".rsplit("{}") must be equal'
for s, sep in CASES:
self.assertEqual(pyfunc(s, sep), cfunc(s, sep),
msg=msg.format(s, sep))
def test_rsplit_with_maxsplit(self):
pyfuncs = [rsplit_with_maxsplit_usecase,
rsplit_with_maxsplit_kwarg_usecase]
CASES = [
(' a ', None, 1),
('', '⚡', 1),
('abcabc', '⚡', 1),
('🐍⚡', '⚡', 1),
('🐍⚡🐍', '⚡', 1),
('abababa', 'a', 2),
('abababa', 'b', 1),
('abababa', 'c', 2),
('abababa', 'ab', 1),
('abababa', 'aba', 5),
]
messages = [
'Results of "{}".rsplit("{}", {}) must be equal',
'Results of "{}".rsplit("{}", maxsplit={}) must be equal'
]
for pyfunc, msg in zip(pyfuncs, messages):
cfunc = njit(pyfunc)
for test_str, sep, maxsplit in CASES:
self.assertEqual(pyfunc(test_str, sep, maxsplit),
cfunc(test_str, sep, maxsplit),
msg=msg.format(test_str, sep, maxsplit))
def test_rsplit_whitespace(self):
pyfunc = rsplit_whitespace_usecase
cfunc = njit(pyfunc)
# list copied from
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodetype_db.h#L5996-L6031 # noqa: E501
all_whitespace = ''.join(map(chr, [
0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E,
0x001F, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002,
0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A,
0x2028, 0x2029, 0x202F, 0x205F, 0x3000
]))
CASES = [
'',
'abcabc',
'🐍 ⚡',
'🐍 ⚡ 🐍',
'🐍 ⚡ 🐍 ',
' 🐍 ⚡ 🐍',
' 🐍' + all_whitespace + '⚡ 🐍 ',
]
msg = 'Results of "{}".rsplit() must be equal'
for s in CASES:
self.assertEqual(pyfunc(s), cfunc(s), msg.format(s))
def test_join_empty(self):
# Can't pass empty list to nopython mode, so we have to make a
# separate test case
pyfunc = join_empty_usecase
cfunc = njit(pyfunc)
CASES = [
'',
'🐍🐍🐍',
]
for sep in CASES:
self.assertEqual(pyfunc(sep),
cfunc(sep),
"'%s'.join([])?" % (sep,))
def test_join_non_string_exception(self):
# Verify that join of list of integers raises typing exception
pyfunc = join_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
with self.assertRaises(TypingError) as raises:
cfunc('', [1, 2, 3])
# This error message is obscure, but indicates the error was trapped
# in the typing of str.join()
# Feel free to change this as we update error messages.
exc_message = str(raises.exception)
self.assertIn(
"During: resolving callee type: BoundFunction",
exc_message,
)
# could be int32 or int64
self.assertIn("reflected list(int", exc_message)
def test_join(self):
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('', ['', '', '']),
('a', ['', '', '']),
('', ['a', 'bbbb', 'c']),
('🐍🐍🐍', ['⚡⚡'] * 5),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
def test_join_interleave_str(self):
# can pass a string as the parts iterable
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '123'),
('🐍🐍🐍', '⚡⚡'),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
def test_justification(self):
for pyfunc, case_name in [(center_usecase, 'center'),
(ljust_usecase, 'ljust'),
(rjust_usecase, 'rjust')]:
cfunc = njit(pyfunc)
with self.assertRaises(TypingError) as raises:
cfunc(UNICODE_EXAMPLES[0], 1.1)
self.assertIn('The width must be an Integer', str(raises.exception))
for s in UNICODE_EXAMPLES:
for width in range(-3, 20):
self.assertEqual(pyfunc(s, width),
cfunc(s, width),
"'%s'.%s(%d)?" % (s, case_name, width))
def test_justification_fillchar(self):
for pyfunc, case_name in [(center_usecase_fillchar, 'center'),
(ljust_usecase_fillchar, 'ljust'),
(rjust_usecase_fillchar, 'rjust')]:
cfunc = njit(pyfunc)
# allowed fillchar cases
for fillchar in [' ', '+', 'ú', '处']:
with self.assertRaises(TypingError) as raises:
cfunc(UNICODE_EXAMPLES[0], 1.1, fillchar)
self.assertIn('The width must be an Integer',
str(raises.exception))
for s in UNICODE_EXAMPLES:
for width in range(-3, 20):
self.assertEqual(pyfunc(s, width, fillchar),
cfunc(s, width, fillchar),
"'%s'.%s(%d, '%s')?" % (s, case_name,
width,
fillchar))
def test_justification_fillchar_exception(self):
self.disable_leak_check()
for pyfunc in [center_usecase_fillchar,
ljust_usecase_fillchar,
rjust_usecase_fillchar]:
cfunc = njit(pyfunc)
# disallowed fillchar cases
for fillchar in ['', '+0', 'quién', '处着']:
with self.assertRaises(ValueError) as raises:
cfunc(UNICODE_EXAMPLES[0], 20, fillchar)
self.assertIn('The fill character must be exactly one',
str(raises.exception))
# forbid fillchar cases with different types
for fillchar in [1, 1.1]:
with self.assertRaises(TypingError) as raises:
cfunc(UNICODE_EXAMPLES[0], 20, fillchar)
self.assertIn('The fillchar must be a UnicodeType',
str(raises.exception))
def test_inplace_concat(self, flags=no_pyobj_flags):
pyfunc = inplace_concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_isidentifier(self):
def pyfunc(s):
return s.isidentifier()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L695-L708 # noqa: E501
cpython = ['a', 'Z', '_', 'b0', 'bc', 'b_', 'µ',
'𝔘𝔫𝔦𝔠𝔬𝔡𝔢', ' ', '[', '©', '0']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isidentifier() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_strip(self):
STRIP_CASES = [
('ass cii', 'ai'),
('ass cii', None),
('asscii', 'ai '),
('asscii ', 'ai '),
(' asscii ', 'ai '),
(' asscii ', 'asci '),
(' asscii ', 's'),
(' ', ' '),
('', ' '),
('', ''),
('', None),
(' ', None),
(' asscii ', 'ai '),
(' asscii ', ''),
(' asscii ', None),
('tú quién te crees?', 'étú? '),
(' tú quién te crees? ', 'étú? '),
(' tú qrees? ', ''),
(' tú quién te crees? ', None),
('大处 着眼,小处着手。大大大处', '大处'),
(' 大处大处 ', ''),
('\t\nabcd\t', '\ta'),
(' 大处大处 ', None),
('\t abcd \t', None),
('\n abcd \n', None),
('\r abcd \r', None),
('\x0b abcd \x0b', None),
('\x0c abcd \x0c', None),
('\u2029abcd\u205F', None),
('\u0085abcd\u2009', None)
]
# form with no parameter
for pyfunc, case_name in [(strip_usecase, 'strip'),
(lstrip_usecase, 'lstrip'),
(rstrip_usecase, 'rstrip')]:
cfunc = njit(pyfunc)
for string, chars in STRIP_CASES:
self.assertEqual(pyfunc(string),
cfunc(string),
"'%s'.%s()?" % (string, case_name))
# parametrized form
for pyfunc, case_name in [(strip_usecase_chars, 'strip'),
(lstrip_usecase_chars, 'lstrip'),
(rstrip_usecase_chars, 'rstrip')]:
cfunc = njit(pyfunc)
sig1 = types.unicode_type(types.unicode_type,
types.Optional(types.unicode_type))
cfunc_optional = njit([sig1])(pyfunc)
def try_compile_bad_optional(*args):
bad = types.unicode_type(types.unicode_type,
types.Optional(types.float64))
njit([bad])(pyfunc)
for fn in cfunc, try_compile_bad_optional:
with self.assertRaises(TypingError) as raises:
fn('tú quis?', 1.1)
self.assertIn('The arg must be a UnicodeType or None',
str(raises.exception))
for fn in cfunc, cfunc_optional:
for string, chars in STRIP_CASES:
self.assertEqual(pyfunc(string, chars),
fn(string, chars),
"'%s'.%s('%s')?" % (string, case_name,
chars))
def test_isspace(self):
def pyfunc(s):
return s.isspace()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L613-L621 # noqa: E501
cpython = ['\u2000', '\u200a', '\u2014', '\U00010401', '\U00010427',
'\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isspace() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_istitle(self):
pyfunc = istitle_usecase
cfunc = njit(pyfunc)
error_msg = "'{0}'.py_istitle() = {1}\n'{0}'.c_istitle() = {2}"
unicode_title = [x.title() for x in UNICODE_EXAMPLES]
special = [
'',
' ',
' AA ',
' Ab ',
'1',
'A123',
'A12Bcd',
'+abA',
'12Abc',
'A12abc',
'%^Abc 5 $% Def'
'𐐁𐐩',
'𐐧𐑎',
'𐐩',
'𐑎',
'🐍 Is',
'🐍 NOT',
'👯Is',
'ῼ',
'Greek ῼitlecases ...'
]
ISTITLE_EXAMPLES = UNICODE_EXAMPLES + unicode_title + special
for s in ISTITLE_EXAMPLES:
py_result = pyfunc(s)
c_result = cfunc(s)
self.assertEqual(py_result, c_result,
error_msg.format(s, py_result, c_result))
def test_isprintable(self):
def pyfunc(s):
return s.isprintable()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L710-L723 # noqa: E501
cpython = ['', ' ', 'abcdefg', 'abcdefg\n', '\u0374', '\u0378',
'\ud800', '\U0001F46F', '\U000E0020']
msg = 'Results of "{}".isprintable() must be equal'
for s in UNICODE_EXAMPLES + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_pointless_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[:]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_walk_backwards(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::-1]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_stride_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::2]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_lt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a < b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_gt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a > b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_comparison(self):
def pyfunc(option, x, y):
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for x, y in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop, x, y]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_concat(self):
def pyfunc(x):
abc = 'abc'
if len(x):
return abc + 'b123' + x + 'IO'
else:
return x + abc + '123' + x
cfunc = njit(pyfunc)
args = ['x']
self.assertEqual(pyfunc(*args), cfunc(*args))
args = ['']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_literal_comparison(self):
def pyfunc(option):
x = 'a123'
y = 'aa12'
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_len(self):
def pyfunc():
return len('abc')
cfunc = njit(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def test_literal_getitem(self):
def pyfunc(which):
return 'abc'[which]
cfunc = njit(pyfunc)
for a in [-1, 0, 1, slice(1, None), slice(None, -1)]:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_in(self):
def pyfunc(x):
return x in '9876zabiuh'
cfunc = njit(pyfunc)
for a in ['a', '9', '1', '', '8uha', '987']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_xyzwith(self):
def pyfunc(x, y):
return 'abc'.startswith(x), 'cde'.endswith(y)
cfunc = njit(pyfunc)
for args in permutations('abcdefg', r=2):
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_find(self):
def pyfunc(x):
return 'abc'.find(x), x.find('a')
cfunc = njit(pyfunc)
for a in ['ab']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_not(self):
def pyfunc(x):
return not x
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_capitalize(self):
def pyfunc(x):
return x.capitalize()
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L800-L815 # noqa: E501
cpython = ['\U0001044F', '\U0001044F\U0001044F', '\U00010427\U0001044F',
'\U0001044F\U00010427', 'X\U00010427x\U0001044F', 'h\u0130',
'\u1fd2\u0130', 'finnish', 'A\u0345\u03a3']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L926 # noqa: E501
cpython_extras = ['\U00010000\U00100000']
msg = 'Results of "{}".capitalize() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isupper(self):
def pyfunc(x):
return x.isupper()
cfunc = njit(pyfunc)
uppers = [x.upper() for x in UNICODE_EXAMPLES]
extras = ["AA12A", "aa12a", "大AA12A", "大aa12a", "AAADŽA", "A 1 1 大"]
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L585-L599 # noqa: E501
cpython = ['\u2167', '\u2177', '\U00010401', '\U00010427', '\U00010429',
'\U0001044E', '\U0001F40D', '\U0001F46F']
fourxcpy = [x * 4 for x in cpython]
for a in UNICODE_EXAMPLES + uppers + extras + cpython + fourxcpy:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_upper(self):
def pyfunc(x):
return x.upper()
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_casefold(self):
def pyfunc(x):
return x.casefold()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L774-L781 # noqa: E501
cpython = ['hello', 'hELlo', 'ß', 'fi', '\u03a3',
'A\u0345\u03a3', '\u00b5']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L924 # noqa: E501
cpython_extras = ['\U00010000\U00100000']
msg = 'Results of "{}".casefold() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isalpha(self):
def pyfunc(x):
return x.isalpha()
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L630-L640 # noqa: E501
cpython = ['\u1FFc', '\U00010401', '\U00010427', '\U00010429',
'\U0001044E', '\U0001F40D', '\U0001F46F']
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L738-L745 # noqa: E501
extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isalpha() must be equal'
for s in UNICODE_EXAMPLES + [''] + extras + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isascii(self):
def pyfunc(x):
return x.isascii()
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L913-L926 # noqa: E501
cpython = ['', '\x00', '\x7f', '\x00\x7f', '\x80', '\xe9', ' ']
msg = 'Results of "{}".isascii() must be equal'
for s in UNICODE_EXAMPLES + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_title(self):
pyfunc = title
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L813-L828 # noqa: E501
cpython = ['\U0001044F', '\U0001044F\U0001044F',
'\U0001044F\U0001044F \U0001044F\U0001044F',
'\U00010427\U0001044F \U00010427\U0001044F',
'\U0001044F\U00010427 \U0001044F\U00010427',
'X\U00010427x\U0001044F X\U00010427x\U0001044F',
'fiNNISH', 'A\u03a3 \u1fa1xy', 'A\u03a3A']
msg = 'Results of "{}".title() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_swapcase(self):
def pyfunc(x):
return x.swapcase()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L834-L858 # noqa: E501
cpython = ['\U0001044F', '\U00010427', '\U0001044F\U0001044F',
'\U00010427\U0001044F', '\U0001044F\U00010427',
'X\U00010427x\U0001044F', 'fi', '\u0130', '\u03a3',
'\u0345\u03a3', 'A\u0345\u03a3', 'A\u0345\u03a3a',
'A\u0345\u03a3', 'A\u03a3\u0345', '\u03a3\u0345 ',
'\u03a3', 'ß', '\u1fd2']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L928 # noqa: E501
cpython_extras = ['\U00010000\U00100000']
msg = 'Results of "{}".swapcase() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_islower(self):
pyfunc = islower_usecase
cfunc = njit(pyfunc)
lowers = [x.lower() for x in UNICODE_EXAMPLES]
extras = ['AA12A', 'aa12a', '大AA12A', '大aa12a', 'AAADŽA', 'A 1 1 大']
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L586-L600 # noqa: E501
cpython = ['\u2167', '\u2177', '\U00010401', '\U00010427',
'\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F']
cpython += [x * 4 for x in cpython]
msg = 'Results of "{}".islower() must be equal'
for s in UNICODE_EXAMPLES + lowers + [''] + extras + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isalnum(self):
def pyfunc(x):
return x.isalnum()
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L624-L628 # noqa: E501
cpython = ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L738-L745 # noqa: E501
extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isalnum() must be equal'
for s in UNICODE_EXAMPLES + [''] + extras + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_lower(self):
pyfunc = lower_usecase
cfunc = njit(pyfunc)
extras = ['AA12A', 'aa12a', '大AA12A', '大aa12a', 'AAADŽA', 'A 1 1 大']
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L748-L758 # noqa: E501
cpython = ['\U00010401', '\U00010427', '\U0001044E', '\U0001F46F',
'\U00010427\U00010427', '\U00010427\U0001044F',
'X\U00010427x\U0001044F', '\u0130']
# special cases for sigma from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L759-L768 # noqa: E501
sigma = ['\u03a3', '\u0345\u03a3', 'A\u0345\u03a3', 'A\u0345\u03a3a',
'\u03a3\u0345 ', '\U0008fffe', '\u2177']
extra_sigma = 'A\u03a3\u03a2'
sigma.append(extra_sigma)
msg = 'Results of "{}".lower() must be equal'
for s in UNICODE_EXAMPLES + [''] + extras + cpython + sigma:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isnumeric(self):
def pyfunc(x):
return x.isnumeric()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L676-L693 # noqa: E501
cpython = ['', 'a', '0', '\u2460', '\xbc', '\u0660', '0123456789',
'0123456789a', '\U00010401', '\U00010427', '\U00010429',
'\U0001044E', '\U0001F40D', '\U0001F46F', '\U00011065',
'\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800', 'a\uD800b\uDFFFa',
'a\uDFFFb\uD800a']
msg = 'Results of "{}".isnumeric() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isdigit(self):
def pyfunc(x):
return x.isdigit()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L664-L674 # noqa: E501
cpython = ['\u2460', '\xbc', '\u0660', '\U00010401', '\U00010427',
'\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F',
'\U00011065', '\U0001D7F6', '\U00011066', '\U000104A0',
'\U0001F107']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isdigit() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isdecimal(self):
def pyfunc(x):
return x.isdecimal()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L646-L662 # noqa: E501
cpython = ['', 'a', '0', '\u2460', '\xbc', '\u0660', '0123456789',
'0123456789a', '\U00010401', '\U00010427', '\U00010429',
'\U0001044E', '\U0001F40D', '\U0001F46F', '\U00011065',
'\U0001F107', '\U0001D7F6', '\U00011066', '\U000104A0']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800', 'a\uD800b\uDFFFa',
'a\uDFFFb\uD800a']
msg = 'Results of "{}".isdecimal() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_replace(self):
pyfunc = replace_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '', 'A'),
('', '⚡', 'A'),
('abcabc', '⚡', 'A'),
('🐍⚡', '⚡', 'A'),
('🐍⚡🐍', '⚡', 'A'),
('abababa', 'a', 'A'),
('abababa', 'b', 'A'),
('abababa', 'c', 'A'),
('abababa', 'ab', 'A'),
('abababa', 'aba', 'A'),
]
for test_str, old_str, new_str in CASES:
self.assertEqual(pyfunc(test_str, old_str, new_str),
cfunc(test_str, old_str, new_str),
"'%s'.replace('%s', '%s')?" %
(test_str, old_str, new_str))
def test_replace_with_count(self):
pyfunc = replace_with_count_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '', 'A'),
('', '⚡', 'A'),
('abcabc', '⚡', 'A'),
('🐍⚡', '⚡', 'A'),
('🐍⚡🐍', '⚡', 'A'),
('abababa', 'a', 'A'),
('abababa', 'b', 'A'),
('abababa', 'c', 'A'),
('abababa', 'ab', 'A'),
('abababa', 'aba', 'A'),
]
count_test = [-1, 1, 0, 5]
for test_str, old_str, new_str in CASES:
for count in count_test:
self.assertEqual(pyfunc(test_str, old_str, new_str, count),
cfunc(test_str, old_str, new_str, count),
"'%s'.replace('%s', '%s', '%s')?" %
(test_str, old_str, new_str, count))
def test_replace_unsupported(self):
def pyfunc(s, x, y, count):
return s.replace(x, y, count)
cfunc = njit(pyfunc)
with self.assertRaises(TypingError) as raises:
cfunc('ababababab', 'ba', 'qqq', 3.5)
msg = 'Unsupported parameters. The parameters must be Integer.'
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc('ababababab', 0, 'qqq', 3)
msg = 'The object must be a UnicodeType.'
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc('ababababab', 'ba', 0, 3)
msg = 'The object must be a UnicodeType.'
self.assertIn(msg, str(raises.exception))
| TestUnicode |
python | PrefectHQ__prefect | src/prefect/settings/models/worker.py | {
"start": 185,
"end": 608
} | class ____(PrefectBaseSettings):
model_config: ClassVar[SettingsConfigDict] = build_settings_config(
("worker", "webserver")
)
host: str = Field(
default="0.0.0.0",
description="The host address the worker's webserver should bind to.",
)
port: int = Field(
default=8080,
description="The port the worker's webserver should bind to.",
)
| WorkerWebserverSettings |
python | ansible__ansible | lib/ansible/module_utils/facts/virtual/sysctl.py | {
"start": 190,
"end": 5068
} | class ____:
def detect_sysctl(self):
self.sysctl_path = self.module.get_bin_path('sysctl')
def detect_virt_product(self, key):
virtual_product_facts = {}
host_tech = set()
guest_tech = set()
# We do similar to what we do in linux.py -- We want to allow multiple
# virt techs to show up, but maintain compatibility, so we have to track
# when we would have stopped, even though now we go through everything.
found_virt = False
self.detect_sysctl()
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if re.match('(KVM|kvm|Bochs|SmartDC).*', out):
guest_tech.add('kvm')
if not found_virt:
virtual_product_facts['virtualization_type'] = 'kvm'
virtual_product_facts['virtualization_role'] = 'guest'
found_virt = True
if re.match('.*VMware.*', out):
guest_tech.add('VMware')
if not found_virt:
virtual_product_facts['virtualization_type'] = 'VMware'
virtual_product_facts['virtualization_role'] = 'guest'
found_virt = True
if out.rstrip() == 'VirtualBox':
guest_tech.add('virtualbox')
if not found_virt:
virtual_product_facts['virtualization_type'] = 'virtualbox'
virtual_product_facts['virtualization_role'] = 'guest'
found_virt = True
if re.match('(HVM domU|XenPVH|XenPV|XenPVHVM).*', out):
guest_tech.add('xen')
if not found_virt:
virtual_product_facts['virtualization_type'] = 'xen'
virtual_product_facts['virtualization_role'] = 'guest'
found_virt = True
if out.rstrip() == 'Hyper-V':
guest_tech.add('Hyper-V')
if not found_virt:
virtual_product_facts['virtualization_type'] = 'Hyper-V'
virtual_product_facts['virtualization_role'] = 'guest'
found_virt = True
if out.rstrip() == 'Parallels':
guest_tech.add('parallels')
if not found_virt:
virtual_product_facts['virtualization_type'] = 'parallels'
virtual_product_facts['virtualization_role'] = 'guest'
found_virt = True
if out.rstrip() == 'RHEV Hypervisor':
guest_tech.add('RHEV')
if not found_virt:
virtual_product_facts['virtualization_type'] = 'RHEV'
virtual_product_facts['virtualization_role'] = 'guest'
found_virt = True
if out.rstrip() == 'bhyve':
guest_tech.add('bhyve')
if not found_virt:
virtual_product_facts['virtualization_type'] = 'bhyve'
virtual_product_facts['virtualization_role'] = 'guest'
found_virt = True
if (key == 'security.jail.jailed') and (out.rstrip() == '1'):
guest_tech.add('jails')
if not found_virt:
virtual_product_facts['virtualization_type'] = 'jails'
virtual_product_facts['virtualization_role'] = 'guest'
found_virt = True
virtual_product_facts['virtualization_tech_guest'] = guest_tech
virtual_product_facts['virtualization_tech_host'] = host_tech
return virtual_product_facts
def detect_virt_vendor(self, key):
virtual_vendor_facts = {}
host_tech = set()
guest_tech = set()
self.detect_sysctl()
if self.sysctl_path:
rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
if rc == 0:
if out.rstrip() == 'QEMU':
guest_tech.add('kvm')
virtual_vendor_facts['virtualization_type'] = 'kvm'
virtual_vendor_facts['virtualization_role'] = 'guest'
if out.rstrip() == 'OpenBSD':
guest_tech.add('vmm')
virtual_vendor_facts['virtualization_type'] = 'vmm'
virtual_vendor_facts['virtualization_role'] = 'guest'
virtual_vendor_facts['virtualization_tech_guest'] = guest_tech
virtual_vendor_facts['virtualization_tech_host'] = host_tech
return virtual_vendor_facts
| VirtualSysctlDetectionMixin |
python | aio-libs__aiohttp | aiohttp/http_parser.py | {
"start": 22902,
"end": 25712
} | class ____(HttpParser[RawResponseMessage]):
"""Read response status line and headers.
BadStatusLine could be raised in case of any errors in status line.
Returns RawResponseMessage.
"""
# Lax mode should only be enabled on response parser.
lax = not DEBUG
def feed_data(
self,
data: bytes,
SEP: _SEP | None = None,
*args: Any,
**kwargs: Any,
) -> tuple[list[tuple[RawResponseMessage, StreamReader]], bool, bytes]:
if SEP is None:
SEP = b"\r\n" if DEBUG else b"\n"
return super().feed_data(data, SEP, *args, **kwargs)
def parse_message(self, lines: list[bytes]) -> RawResponseMessage:
line = lines[0].decode("utf-8", "surrogateescape")
try:
version, status = line.split(maxsplit=1)
except ValueError:
raise BadStatusLine(line) from None
try:
status, reason = status.split(maxsplit=1)
except ValueError:
status = status.strip()
reason = ""
if len(reason) > self.max_line_size:
raise LineTooLong(
"Status line is too long", str(self.max_line_size), str(len(reason))
)
# version
match = VERSRE.fullmatch(version)
if match is None:
raise BadStatusLine(line)
version_o = HttpVersion(int(match.group(1)), int(match.group(2)))
# The status code is a three-digit ASCII number, no padding
if len(status) != 3 or not DIGITS.fullmatch(status):
raise BadStatusLine(line)
status_i = int(status)
# read headers
(
headers,
raw_headers,
close,
compression,
upgrade,
chunked,
) = self.parse_headers(lines[1:])
if close is None:
if version_o <= HttpVersion10:
close = True
# https://www.rfc-editor.org/rfc/rfc9112.html#name-message-body-length
elif 100 <= status_i < 200 or status_i in {204, 304}:
close = False
elif hdrs.CONTENT_LENGTH in headers or hdrs.TRANSFER_ENCODING in headers:
close = False
else:
# https://www.rfc-editor.org/rfc/rfc9112.html#section-6.3-2.8
close = True
return RawResponseMessage(
version_o,
status_i,
reason.strip(),
headers,
raw_headers,
close,
compression,
upgrade,
chunked,
)
def _is_chunked_te(self, te: str) -> bool:
# https://www.rfc-editor.org/rfc/rfc9112#section-6.3-2.4.2
return te.rsplit(",", maxsplit=1)[-1].strip(" \t").lower() == "chunked"
| HttpResponseParser |
python | getsentry__sentry | tests/sentry/issues/test_ingest.py | {
"start": 15471,
"end": 15757
} | class ____(OccurrenceTestMixin, TestCase):
def test(self) -> None:
data = self.build_occurrence_data(fingerprint=["hi", "bye"])
assert data["fingerprint"] == [
md5(b"hi").hexdigest(),
md5(b"bye").hexdigest(),
]
| ProcessOccurrenceDataTest |
python | PrefectHQ__prefect | tests/server/schemas/test_core.py | {
"start": 1457,
"end": 1948
} | class ____:
async def test_block_document_requires_name(self):
with pytest.raises(
ValueError, match="(Names must be provided for block documents.)"
):
schemas.core.BlockDocument(block_schema_id=uuid4(), block_type_id=uuid4())
async def test_anonymous_block_document_does_not_require_name(self):
assert schemas.core.BlockDocument(
block_schema_id=uuid4(), block_type_id=uuid4(), is_anonymous=True
)
| TestBlockDocument |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/scrollbar_thumb_height.py | {
"start": 231,
"end": 653
} | class ____(ScrollView, can_focus=True):
def __init__(self, height: int, border_title: str) -> None:
super().__init__()
self.virtual_size = Size(0, height)
self.border_title = border_title
def render_line(self, y: int) -> Strip:
return Strip(
[
Segment(f"Welcome to line {self.scroll_offset.y + y}", self.rich_style),
]
)
| TestScrollView |
python | pydantic__pydantic | pydantic/types.py | {
"start": 8424,
"end": 9142
} | class ____(BaseModel):
non_negative_int: NonNegativeInt
m = Model(non_negative_int=0)
print(repr(m))
#> Model(non_negative_int=0)
try:
Model(non_negative_int=-1)
except ValidationError as e:
print(e.errors())
'''
[
{
'type': 'greater_than_equal',
'loc': ('non_negative_int',),
'msg': 'Input should be greater than or equal to 0',
'input': -1,
'ctx': {'ge': 0},
'url': 'https://errors.pydantic.dev/2/v/greater_than_equal',
}
]
'''
```
"""
StrictInt = Annotated[int, Strict()]
"""An integer that must be validated in strict mode.
```python
from pydantic import BaseModel, StrictInt, ValidationError
| Model |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 846233,
"end": 847230
} | class ____(sgqlc.types.Type, HovercardContext):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("relevant_organizations", "total_organization_count")
relevant_organizations = sgqlc.types.Field(
sgqlc.types.non_null(OrganizationConnection),
graphql_name="relevantOrganizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
total_organization_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalOrganizationCount"
)
| OrganizationsHovercardContext |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess4.py | {
"start": 326,
"end": 359
} | class ____(Mixin1):
item = 1
| A1 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mixpanel/source_mixpanel/components.py | {
"start": 17096,
"end": 18021
} | class ____(RecordTransformation):
properties_field: str = None
def __init__(self, properties_field: str = None) -> None:
self.properties_field = properties_field
def transform(
self,
record: Record,
config: Optional[Config] = None,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
) -> None:
updated_record = {}
to_transform = record[self.properties_field] if self.properties_field else record
for result in transform_property_names(to_transform.keys()):
updated_record[result.transformed_name] = to_transform[result.source_name]
if self.properties_field:
record[self.properties_field].clear()
record[self.properties_field].update(updated_record)
else:
record.clear()
record.update(updated_record)
| PropertiesTransformation |
python | huggingface__transformers | tests/models/edgetam_video/test_modeling_edgetam_video.py | {
"start": 2030,
"end": 22152
} | class ____(unittest.TestCase):
def setUp(self):
super().setUp()
self.video_model = EdgeTamVideoModel.from_pretrained("yonigozlan/EdgeTAM-hf").to(torch.float32)
self.processor = Sam2VideoProcessor.from_pretrained("yonigozlan/EdgeTAM-hf")
self.video_model.to(torch_device)
self.video_model.eval()
def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
backend_empty_cache(torch_device)
def test_inference_mask_generation_video_one_point(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350]]]],
input_labels=[[[1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
video_res_masks = self.processor.post_process_masks([low_res_masks], [raw_video.shape[-3:-1]], binarize=False)[
0
]
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-28.3880, -28.3880, -27.9277], [-27.5260, -27.5260, -27.2455], [-25.5902, -25.5902, -25.7136]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-28.3880, -28.3880], [-27.5260, -27.5260]]]],
[[[[-15.3350, -15.3350], [-15.0002, -15.0002]]]],
[[[[-14.8729, -14.8729], [-14.6724, -14.6724]]]],
],
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_mask_generation_video_one_point_propagate_in_video_directly(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350]]]],
input_labels=[[[1]]],
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
print(f"VIDEO_TEST2 - ACTUAL frames[:3, :, :, :2, :2]: {frames[:3, :, :, :2, :2]}")
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-28.3880, -28.3880], [-27.5260, -27.5260]]]],
[[[[-15.3350, -15.3350], [-15.0002, -15.0002]]]],
[[[[-14.8729, -14.8729], [-14.6724, -14.6724]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_mask_generation_video_multi_points(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-17.3081, -17.3081, -16.9805], [-16.8430, -16.8430, -16.6766], [-15.7986, -15.7986, -15.9941]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-17.3081, -17.3081], [-16.8430, -16.8430]]]],
[[[[-14.9302, -14.9302], [-14.8802, -14.8802]]]],
[[[[-14.4372, -14.4372], [-14.3697, -14.3697]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_one_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_boxes=[[[300, 0, 500, 400]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-17.3245, -17.3245, -16.9231], [-16.8773, -16.8773, -16.6082], [-15.8731, -15.8731, -15.9011]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-17.3245, -17.3245], [-16.8773, -16.8773]]]],
[[[[-16.2826, -16.2826], [-15.9087, -15.9087]]]],
[[[[-15.8716, -15.8716], [-15.3992, -15.3992]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_one_point_one_bb(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_boxes=[[[300, 0, 500, 400]]],
input_points=[[[[460, 60]]]],
input_labels=[[[1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-13.9780, -13.9780, -13.7824], [-13.7642, -13.7642, -13.6000], [-13.2842, -13.2842, -13.1904]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
# higher tolerance due to errors propagating from frame to frame
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-13.9780, -13.9780], [-13.7642, -13.7642]]]],
[[[[-16.0142, -16.0142], [-15.5600, -15.5600]]]],
[[[[-16.7568, -16.7568], [-16.2460, -16.2460]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
def test_inference_mask_generation_video_multi_objects_multi_points(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_ids = [2, 3] # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_ids,
input_points=[[[[200, 300], [230, 250], [275, 175]], [[400, 150]]]],
input_labels=[[[1, 1, 0], [1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (2, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[:, 0, :2, :2], # first object
torch.tensor(
[[[-12.6233, -12.6233], [-12.1809, -12.1809]], [[-13.4556, -13.4556], [-12.9549, -12.9549]]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-12.6233, -12.6233], [-12.1809, -12.1809]]], [[[-13.4556, -13.4556], [-12.9549, -12.9549]]]],
[[[[-12.5589, -12.5589], [-12.4450, -12.4450]]], [[[-12.2181, -12.2181], [-12.0188, -12.0188]]]],
[[[[-15.3170, -15.3170], [-15.0254, -15.0254]]], [[[-11.4912, -11.4912], [-11.3171, -11.3171]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_propagate_video_from_mask_input(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
# get input_mask
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
# set mask as input
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_id,
input_masks=self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = sam2_video_output.pred_masks
self.assertEqual(low_res_masks.shape, (1, 1, 256, 256))
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[0, 0, :3, :3],
torch.tensor(
[[-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000], [-10.0000, -10.0000, -10.0000]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-10.0000, -10.0000], [-10.0000, -10.0000]]]],
[[[[-17.4083, -17.4083], [-17.2256, -17.2256]]]],
[[[[-13.8533, -13.8533], [-13.7759, -13.7759]]]],
],
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
def test_inference_propagate_on_streamed_video(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(inference_device=torch_device)
video_res_masks = []
max_frame_num_to_track = 3
for frame_idx, frame in enumerate(raw_video):
if frame_idx >= max_frame_num_to_track:
break
inputs = self.processor(images=frame, device=torch_device, return_tensors="pt")
if frame_idx == 0:
self.processor.add_inputs_to_inference_session(
inference_session,
frame_idx=0,
obj_ids=1,
input_points=[[[[210, 350], [250, 220]]]],
input_labels=[[[1, 1]]],
original_size=inputs.original_sizes[0],
)
sam2_video_output = self.video_model(inference_session=inference_session, frame=inputs.pixel_values[0])
video_res_masks.append(
self.processor.post_process_masks(
[sam2_video_output.pred_masks], inputs.original_sizes, binarize=False
)[0]
)
video_res_masks = torch.stack(video_res_masks, dim=0)
self.assertEqual(
video_res_masks.shape, (max_frame_num_to_track, 1, 1, raw_video.shape[-3], raw_video.shape[-2])
)
# higher tolerance due to errors propagating from frame to frame
print(f"VIDEO_TEST8 - ACTUAL video_res_masks[:3, :, :, :2, :2]: {video_res_masks[:3, :, :, :2, :2]}")
torch.testing.assert_close(
video_res_masks[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-17.3081, -17.3081], [-16.8430, -16.8430]]]],
[[[[-14.9302, -14.9302], [-14.8802, -14.8802]]]],
[[[[-14.4372, -14.4372], [-14.3697, -14.3697]]]],
]
).to(torch_device),
atol=1e-2,
rtol=1e-2,
)
| EdgeTamVideoModelIntegrationTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1018572,
"end": 1019344
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of
UpdateEnterpriseDefaultRepositoryPermissionSetting
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "enterprise", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
enterprise = sgqlc.types.Field("Enterprise", graphql_name="enterprise")
"""The enterprise with the updated base repository permission
setting.
"""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of updating the base repository
permission setting.
"""
| UpdateEnterpriseDefaultRepositoryPermissionSettingPayload |
python | numpy__numpy | numpy/lib/_index_tricks_impl.py | {
"start": 14875,
"end": 18871
} | class ____(AxisConcatenator):
"""
Translates slice objects to concatenation along the first axis.
This is a simple way to build up arrays quickly. There are two use cases.
1. If the index expression contains comma separated arrays, then stack
them along their first axis.
2. If the index expression contains slice notation or scalars then create
a 1-D array with a range indicated by the slice notation.
If slice notation is used, the syntax ``start:stop:step`` is equivalent
to ``np.arange(start, stop, step)`` inside of the brackets. However, if
``step`` is an imaginary number (i.e. 100j) then its integer portion is
interpreted as a number-of-points desired and the start and stop are
inclusive. In other words ``start:stop:stepj`` is interpreted as
``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
After expansion of slice notation, all comma separated sequences are
concatenated together.
Optional character strings placed as the first element of the index
expression can be used to change the output. The strings 'r' or 'c' result
in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
matrix is produced. If the result is 1-D and 'c' is specified, then
an N x 1 (column) matrix is produced.
If the result is 2-D then both provide the same matrix result.
A string integer specifies which axis to stack multiple comma separated
arrays along. A string of two comma-separated integers allows indication
of the minimum number of dimensions to force each entry into as the
second integer (the axis to concatenate along is still the first integer).
A string with three comma-separated integers allows specification of the
axis to concatenate along, the minimum number of dimensions to force the
entries to, and which axis should contain the start of the arrays which
are less than the specified number of dimensions. In other words the third
integer allows you to specify where the 1's should be placed in the shape
of the arrays that have their shapes upgraded. By default, they are placed
in the front of the shape tuple. The third argument allows you to specify
where the start of the array should be instead. Thus, a third argument of
'0' would place the 1's at the end of the array shape. Negative integers
specify where in the new shape tuple the last dimension of upgraded arrays
should be placed, so the default is '-1'.
Parameters
----------
Not a function, so takes no parameters
Returns
-------
A concatenated ndarray or matrix.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
c_ : Translates slice objects to concatenation along the second axis.
Examples
--------
>>> import numpy as np
>>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
array([1, 2, 3, ..., 4, 5, 6])
>>> np.r_[-1:1:6j, [0]*3, 5, 6]
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
String integers specify the axis to concatenate along or the minimum
number of dimensions to force entries into.
>>> a = np.array([[0, 1, 2], [3, 4, 5]])
>>> np.r_['-1', a, a] # concatenate along last axis
array([[0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5]])
>>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
array([[1, 2, 3],
[4, 5, 6]])
>>> np.r_['0,2,0', [1,2,3], [4,5,6]]
array([[1],
[2],
[3],
[4],
[5],
[6]])
>>> np.r_['1,2,0', [1,2,3], [4,5,6]]
array([[1, 4],
[2, 5],
[3, 6]])
Using 'r' or 'c' as a first string argument creates a matrix.
>>> np.r_['r',[1,2,3], [4,5,6]]
matrix([[1, 2, 3, 4, 5, 6]])
"""
__slots__ = ()
def __init__(self):
AxisConcatenator.__init__(self, 0)
r_ = RClass()
| RClass |
python | django__django | django/contrib/admin/filters.py | {
"start": 15036,
"end": 17497
} | class ____(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = "%s__exact" % field_path
self.lookup_kwarg_isnull = "%s__isnull" % field_path
self.lookup_val = params.get(self.lookup_kwarg)
self.lookup_val_isnull = get_last_value_from_parameters(
params, self.lookup_kwarg_isnull
)
super().__init__(field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def get_facet_counts(self, pk_attname, filtered_qs):
return {
f"{i}__c": models.Count(
pk_attname,
filter=models.Q(
(self.lookup_kwarg, value)
if value is not None
else (self.lookup_kwarg_isnull, True)
),
)
for i, (value, _) in enumerate(self.field.flatchoices)
}
def choices(self, changelist):
add_facets = changelist.add_facets
facet_counts = self.get_facet_queryset(changelist) if add_facets else None
yield {
"selected": self.lookup_val is None,
"query_string": changelist.get_query_string(
remove=[self.lookup_kwarg, self.lookup_kwarg_isnull]
),
"display": _("All"),
}
none_title = ""
for i, (lookup, title) in enumerate(self.field.flatchoices):
if add_facets:
count = facet_counts[f"{i}__c"]
title = f"{title} ({count})"
if lookup is None:
none_title = title
continue
yield {
"selected": self.lookup_val is not None
and str(lookup) in self.lookup_val,
"query_string": changelist.get_query_string(
{self.lookup_kwarg: lookup}, [self.lookup_kwarg_isnull]
),
"display": title,
}
if none_title:
yield {
"selected": bool(self.lookup_val_isnull),
"query_string": changelist.get_query_string(
{self.lookup_kwarg_isnull: "True"}, [self.lookup_kwarg]
),
"display": none_title,
}
FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
| ChoicesFieldListFilter |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/combinatory_ports.py | {
"start": 807,
"end": 943
} | class ____(Base):
def __init__(self, base: Base) -> None:
self.b: Base = base
def method(self):
self.b.method()
| B |
python | google__pytype | pytype/tests/test_final.py | {
"start": 11354,
"end": 13794
} | class ____(test_base.BaseTest):
"""Test Final in pyi files."""
_FINAL_ATTR = """
from typing import Final
class A:
x: Final[int] = ...
"""
def test_attribute(self):
with self.DepTree([("foo.pyi", self._FINAL_ATTR)]):
err = self.CheckWithErrors("""
from foo import A
a = A()
a.x = 10 # final-error[e]
""")
self.assertErrorSequences(
err, {"e": ["attribute", "x", "annotated with Final"]}
)
def test_override_attr_in_base(self):
with self.DepTree([("foo.pyi", self._FINAL_ATTR)]):
err = self.CheckWithErrors("""
from foo import A
class B(A): # final-error[e]
x = 20
""")
self.assertErrorSequences(
err,
{
"e": [
"Class B",
"overrides",
"final class attribute",
"x",
"base class A",
]
},
)
def test_override_attr_in_mro(self):
foo = """
from typing import Final
class A:
x: Final[int] = ...
class B(A):
pass
"""
with self.DepTree([("foo.pyi", foo)]):
err = self.CheckWithErrors("""
from foo import B
class C(B): # final-error[e]
x = 20
""")
self.assertErrorSequences(
err,
{
"e": [
"Class C",
"overrides",
"final class attribute",
"x",
"base class A",
]
},
)
def test_match_assignment_against_annotation(self):
foo = """
from typing import Final
k: Final[float] = ...
"""
with self.DepTree([("foo.pyi", foo)]):
err = self.CheckWithErrors("""
from foo import k
x: float = k
y: str = k # annotation-type-mismatch[e]
""")
self.assertErrorSequences(
err, {"e": ["annotation for y", "str", "Final[float]"]}
)
def test_attribute_access(self):
foo = """
from typing import Final, List
k: Final[List[str]] = ...
"""
with self.DepTree([("foo.pyi", foo)]):
err = self.CheckWithErrors("""
from foo import k
a = k.count('a')
b = k.random() # attribute-error[e]
""")
self.assertErrorSequences(
err, {"e": ["No attribute", "random", "Final[list[str]]"]}
)
if __name__ == "__main__":
test_base.main()
| TestFinalInPyi |
python | milvus-io__pymilvus | tests/test_decorators.py | {
"start": 6771,
"end": 11363
} | class ____:
"""Test the traceback functionality in error_handler decorator."""
@patch("pymilvus.decorators.LOGGER")
def test_error_handler_includes_traceback_for_milvus_exception(self, mock_logger):
"""Test that error_handler logs traceback for MilvusException."""
@error_handler(func_name="test_func")
def func_that_raises_milvus_exception():
def inner_func():
raise MilvusException(ErrorCode.UNEXPECTED_ERROR, "test error")
inner_func()
with pytest.raises(MilvusException):
func_that_raises_milvus_exception()
# Verify LOGGER.error was called
assert mock_logger.error.called
# Get the logged message
log_message = mock_logger.error.call_args[0][0]
# Check that traceback information is in the log message
assert "Traceback:" in log_message
assert "inner_func" in log_message
assert "test_func" in log_message
@patch("pymilvus.decorators.LOGGER")
def test_error_handler_includes_traceback_for_grpc_error(self, mock_logger):
"""Test that error_handler logs traceback for grpc.RpcError."""
@error_handler(func_name="test_grpc_func")
def func_that_raises_grpc_error():
def inner_func():
raise MockUnavailableError()
inner_func()
with pytest.raises(grpc.RpcError):
func_that_raises_grpc_error()
# Verify LOGGER.error was called
assert mock_logger.error.called
# Get the logged message
log_message = mock_logger.error.call_args[0][0]
# Check that traceback information is in the log message
assert "Traceback:" in log_message
assert "inner_func" in log_message
assert "test_grpc_func" in log_message
@patch("pymilvus.decorators.LOGGER")
def test_error_handler_includes_traceback_for_generic_exception(self, mock_logger):
"""Test that error_handler logs traceback for generic Exception."""
@error_handler(func_name="test_generic_func")
def func_that_raises_generic_exception():
def inner_func():
raise ValueError("test generic error")
inner_func()
with pytest.raises(MilvusException):
func_that_raises_generic_exception()
# Verify LOGGER.error was called
assert mock_logger.error.called
# Get the logged message
log_message = mock_logger.error.call_args[0][0]
# Check that traceback information is in the log message
assert "Traceback:" in log_message
assert "inner_func" in log_message
assert "ValueError" in log_message
@patch("pymilvus.decorators.LOGGER")
def test_error_handler_traceback_shows_call_stack(self, mock_logger):
"""Test that traceback shows the complete call stack."""
@error_handler(func_name="outer_func")
def outer_function():
def middle_function():
def inner_function():
raise MilvusException(ErrorCode.UNEXPECTED_ERROR, "deep error")
inner_function()
middle_function()
with pytest.raises(MilvusException):
outer_function()
# Verify LOGGER.error was called
assert mock_logger.error.called
# Get the logged message
log_message = mock_logger.error.call_args[0][0]
# Verify the complete call stack is present
assert "Traceback:" in log_message
assert "outer_function" in log_message
assert "middle_function" in log_message
assert "inner_function" in log_message
@pytest.mark.asyncio
@patch("pymilvus.decorators.LOGGER")
async def test_async_error_handler_includes_traceback(self, mock_logger):
"""Test that async error_handler logs traceback."""
@error_handler(func_name="test_async_func")
async def async_func_that_raises():
def inner_func():
raise MilvusException(ErrorCode.UNEXPECTED_ERROR, "async test error")
inner_func()
with pytest.raises(MilvusException):
await async_func_that_raises()
# Verify LOGGER.error was called
assert mock_logger.error.called
# Get the logged message
log_message = mock_logger.error.call_args[0][0]
# Check that traceback information is in the log message
assert "Traceback:" in log_message
assert "inner_func" in log_message
assert "test_async_func" in log_message
| TestErrorHandlerTraceback |
python | weaviate__weaviate-python-client | weaviate/collections/backups/sync.py | {
"start": 186,
"end": 263
} | class ____(_CollectionBackupExecutor[ConnectionSync]):
pass
| _CollectionBackup |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataproc.py | {
"start": 17979,
"end": 18895
} | class ____:
@classmethod
def setup_class(cls):
cls.dagbag = DagBag(dag_folder="/dev/null", include_examples=False)
cls.dag = DAG(
dag_id=TEST_DAG_ID,
schedule=None,
default_args={"owner": "airflow", "start_date": DEFAULT_DATE},
)
def setup_method(self):
self.mock_ti = MagicMock()
self.mock_context = {"ti": self.mock_ti, "task": self.mock_ti.task}
self.extra_links_manager_mock = Mock()
self.extra_links_manager_mock.attach_mock(self.mock_ti, "ti")
def tearDown(self):
self.mock_ti = MagicMock()
self.mock_context = {"ti": self.mock_ti, "task": self.mock_ti.task}
self.extra_links_manager_mock = Mock()
self.extra_links_manager_mock.attach_mock(self.mock_ti, "ti")
@classmethod
def tearDownClass(cls):
clear_db_runs()
clear_db_xcom()
| DataprocTestBase |
python | ApeWorX__ape | src/ape/types/private_mempool.py | {
"start": 6318,
"end": 8324
} | class ____(BaseModel):
"""
Response from the matchmaker after sending a simulation request.
"""
success: bool
"""
Whether the simulation was successful.
"""
error: Optional[str] = None
"""
Error message if the simulation failed.
"""
state_block: Optional[HexInt] = Field(None, alias="stateBlock")
"""
The block number of the simulated block.
"""
mev_gas_price: HexInt = Field(alias="mevGasPrice")
"""
The profit of the simulated block.
"""
profit: HexInt
"""
The profit of the simulated block.
"""
refundable_value: Optional[HexInt] = Field(None, alias="refundableValue")
"""
The refundable value of the simulated block.
"""
gas_used: Optional[HexInt] = Field(None, alias="gasUsed")
"""
The gas used by the simulated block.
"""
logs: Optional[list[SimBundleLogs]] = None
"""
Logs returned by `mev_simBundle`.
"""
exec_error: Optional[str] = Field(None, alias="execError")
"""
Error message if the bundle execution failed.
"""
revert: Optional[HexBytes] = None
"""
Contains the return data if the transaction reverted
"""
def decode_logs(self, *events: EventABI):
try:
ecosystem = ManagerAccessMixin.provider.network.ecosystem
except ProviderNotConnectedError:
# Assume Ethereum (since we are in ape-ethereum after all).
ecosystem = ManagerAccessMixin.network_manager.ethereum
return ecosystem.decode_logs(list(self.transaction_logs), *events)
@property
def transaction_logs(self, *events: EventABI) -> Iterator[dict]:
yield from _get_transaction_logs_from_sim_logs(self.logs or [])
def _get_transaction_logs_from_sim_logs(logs: list[SimBundleLogs]) -> Iterator[dict]:
for bundle_log in logs:
yield from (bundle_log.tx_logs or [])
yield from _get_transaction_logs_from_sim_logs(bundle_log.bundle_logs or [])
| SimulationReport |
python | jina-ai__jina | tests/integration/graphql/test_graphql.py | {
"start": 1117,
"end": 1257
} | class ____(Executor):
@requests(on='/slow')
def foo(self, docs: DocumentArray, **kwargs):
time.sleep(SLOW_EXEC_DELAY)
| SlowExec |
python | scrapy__scrapy | tests/mockserver/dns.py | {
"start": 251,
"end": 817
} | class ____:
"""
Implements twisted.internet.interfaces.IResolver partially
"""
def _resolve(self, name):
record = dns.Record_A(address=b"127.0.0.1")
answer = dns.RRHeader(name=name, payload=record)
return [answer], [], []
def query(self, query, timeout=None):
if query.type == dns.A:
return defer.succeed(self._resolve(query.name.name))
return defer.fail(error.DomainError())
def lookupAllRecords(self, name, timeout=None):
return defer.succeed(self._resolve(name))
| MockDNSResolver |
python | getsentry__sentry | src/sentry/search/eap/columns.py | {
"start": 21642,
"end": 24877
} | class ____:
aggregates: dict[str, AggregateDefinition]
conditional_aggregates: dict[str, ConditionalAggregateDefinition]
formulas: dict[str, FormulaDefinition]
columns: dict[str, ResolvedAttribute]
contexts: dict[str, VirtualColumnDefinition]
trace_item_type: TraceItemType.ValueType
filter_aliases: Mapping[str, Callable[[SnubaParams, SearchFilter], list[SearchFilter]]]
alias_to_column: Callable[[str], str | None] | None
column_to_alias: Callable[[str], str | None] | None
def attribute_key_to_tuple(attribute_key: AttributeKey) -> tuple[str, AttributeKey.Type.ValueType]:
return (attribute_key.name, attribute_key.type)
def count_argument_resolver_optimized(
always_present_attributes: list[AttributeKey],
) -> Callable[[ResolvedArgument], AttributeKey]:
always_present_attributes_set = {
attribute_key_to_tuple(attribute) for attribute in always_present_attributes
}
def count_argument_resolver(resolved_argument: ResolvedArgument) -> AttributeKey:
if not isinstance(resolved_argument, AttributeKey):
raise InvalidSearchQuery("Aggregates accept attribute keys only")
if attribute_key_to_tuple(resolved_argument) in always_present_attributes_set:
return AttributeKey(name="sentry.project_id", type=AttributeKey.Type.TYPE_INT)
return resolved_argument
return count_argument_resolver
def validate_trace_metric_aggregate_arguments(
arguments: list[ValueArgumentDefinition | AttributeArgumentDefinition],
) -> None:
if len(arguments) != 4:
raise InvalidSearchQuery(
f"Trace metric aggregates expects exactly 4 arguments to be defined, got {len(arguments)}"
)
if not isinstance(arguments[0], AttributeArgumentDefinition):
raise InvalidSearchQuery(
"Trace metric aggregates expect argument 0 to be of type AttributeArgumentDefinition"
)
for i in range(1, 4):
if not isinstance(arguments[i], ValueArgumentDefinition):
raise InvalidSearchQuery(
f"Trace metric aggregates expects argument {i} to be of type ValueArgumentDefinition"
)
def extract_trace_metric_aggregate_arguments(
resolved_arguments: ResolvedArguments,
) -> tuple[str | None, MetricType | None, str | None]:
metric_name = None
metric_type = None
metric_unit = None
if all(
isinstance(resolved_argument, str) and resolved_argument != ""
for resolved_argument in resolved_arguments[1:]
):
# a metric was passed
metric_name = cast(str, resolved_arguments[1])
metric_type = cast(MetricType, resolved_arguments[2])
metric_unit = None if resolved_arguments[3] == "-" else cast(str, resolved_arguments[3])
elif all(resolved_argument == "" for resolved_argument in resolved_arguments[1:]):
# no metrics were specified, assume we query all metrics
pass
else:
raise InvalidSearchQuery(
f"Trace metric aggregates expect the full metric to be specified, got name:{resolved_arguments[1]} type:{resolved_arguments[2]} unit:{resolved_arguments[3]}"
)
return metric_name, metric_type, metric_unit
| ColumnDefinitions |
python | catalyst-team__catalyst | catalyst/contrib/data/reader.py | {
"start": 143,
"end": 1245
} | class ____:
"""Reader abstraction for all Readers.
Applies a function to an element of your data.
For example to a row from csv, or to an image, etc.
All inherited classes have to implement `__call__`.
"""
def __init__(self, input_key: str, output_key: str):
"""
Args:
input_key: input key to use from annotation dict
output_key: output key to use to store the result,
default: ``input_key``
"""
self.input_key = input_key
self.output_key = output_key
def __call__(self, element):
"""
Reads a row from your annotations dict and transfer it to data,
needed by your network for example open image by path,
or read string and tokenize it.
Args:
element: elem in your dataset
Returns:
Data object used for your neural network # noqa: DAR202
Raises:
NotImplementedError: you should implement it
"""
raise NotImplementedError("You cannot apply a transformation using `IReader`")
| IReader |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py | {
"start": 5994,
"end": 8492
} | class ____(Benchmark):
r"""
Deceptive objective function.
This class defines the Deceptive [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Deceptive}}(x) = - \left [\frac{1}{n}
\sum_{i=1}^{n} g_i(x_i) \right ]^{\beta}
Where :math:`\beta` is a fixed non-linearity factor; in this exercise,
:math:`\beta = 2`. The function :math:`g_i(x_i)` is given by:
.. math::
g_i(x_i) = \begin{cases}
- \frac{x}{\alpha_i} + \frac{4}{5} &
\textrm{if} \hspace{5pt} 0 \leq x_i \leq \frac{4}{5} \alpha_i \\
\frac{5x}{\alpha_i} -4 &
\textrm{if} \hspace{5pt} \frac{4}{5} \alpha_i \le x_i \leq \alpha_i \\
\frac{5(x - \alpha_i)}{\alpha_i-1} &
\textrm{if} \hspace{5pt} \alpha_i \le x_i \leq \frac{1 + 4\alpha_i}{5} \\
\frac{x - 1}{1 - \alpha_i} &
\textrm{if} \hspace{5pt} \frac{1 + 4\alpha_i}{5} \le x_i \leq 1
\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[0, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -1` for :math:`x_i = \alpha_i` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: this function was taken from the Gavana website. The following code
is based on his code. His code and the website don't match, the equations
are wrong.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))
alpha = arange(1.0, self.N + 1.0) / (self.N + 1.0)
self.global_optimum = [alpha]
self.fglob = -1.0
def fun(self, x, *args):
self.nfev += 1
alpha = arange(1.0, self.N + 1.0) / (self.N + 1.0)
beta = 2.0
g = zeros((self.N, ))
for i in range(self.N):
if x[i] <= 0.0:
g[i] = x[i]
elif x[i] < 0.8 * alpha[i]:
g[i] = -x[i] / alpha[i] + 0.8
elif x[i] < alpha[i]:
g[i] = 5.0 * x[i] / alpha[i] - 4.0
elif x[i] < (1.0 + 4 * alpha[i]) / 5.0:
g[i] = 5.0 * (x[i] - alpha[i]) / (alpha[i] - 1.0) + 1.0
elif x[i] <= 1.0:
g[i] = (x[i] - 1.0) / (1.0 - alpha[i]) + 4.0 / 5.0
else:
g[i] = x[i] - 1.0
return -((1.0 / self.N) * sum(g)) ** beta
| Deceptive |
python | realpython__materials | tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/library/src/tic_tac_toe/game/players_async.py | {
"start": 1560,
"end": 1853
} | class ____(AsyncComputerPlayer):
async def get_computer_move(self, game_state: GameState) -> Move | None:
if game_state.game_not_started:
return game_state.make_random_move()
else:
return find_best_move_precomputed(game_state)
| AsyncMinimaxComputerPlayer |
python | pytorch__pytorch | torch/utils/tensorboard/writer.py | {
"start": 1079,
"end": 6443
} | class ____:
"""Writes protocol buffers to event files to be consumed by TensorBoard.
The `FileWriter` class provides a mechanism to create an event file in a
given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
"""
def __init__(self, log_dir, max_queue=10, flush_secs=120, filename_suffix="") -> None:
"""Create a `FileWriter` and an event file.
On construction the writer creates a new event file in `log_dir`.
The other arguments to the constructor control the asynchronous writes to
the event file.
Args:
log_dir: A string. Directory where event file will be written.
max_queue: Integer. Size of the queue for pending events and
summaries before one of the 'add' calls forces a flush to disk.
Default is ten items.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk. Default is every two minutes.
filename_suffix: A string. Suffix added to all event filenames
in the log_dir directory. More details on filename construction in
tensorboard.summary.writer.event_file_writer.EventFileWriter.
"""
# Sometimes PosixPath is passed in and we need to coerce it to
# a string in all cases
# TODO: See if we can remove this in the future if we are
# actually the ones passing in a PosixPath
log_dir = str(log_dir)
self.event_writer = EventFileWriter(
log_dir, max_queue, flush_secs, filename_suffix
)
def get_logdir(self):
"""Return the directory where event file will be written."""
return self.event_writer.get_logdir()
def add_event(self, event, step=None, walltime=None) -> None:
"""Add an event to the event file.
Args:
event: An `Event` protocol buffer.
step: Number. Optional global step value for training process
to record with the event.
walltime: float. Optional walltime to override the default (current)
walltime (from time.time()) seconds after epoch
"""
event.wall_time = time.time() if walltime is None else walltime
if step is not None:
# Make sure step is converted from numpy or other formats
# since protobuf might not convert depending on version
event.step = int(step)
self.event_writer.add_event(event)
def add_summary(self, summary, global_step=None, walltime=None) -> None:
"""Add a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
Args:
summary: A `Summary` protocol buffer.
global_step: Number. Optional global step value for training process
to record with the summary.
walltime: float. Optional walltime to override the default (current)
walltime (from time.time()) seconds after epoch
"""
event = event_pb2.Event(summary=summary)
self.add_event(event, global_step, walltime)
def add_graph(self, graph_profile, walltime=None) -> None:
"""Add a `Graph` and step stats protocol buffer to the event file.
Args:
graph_profile: A `Graph` and step stats protocol buffer.
walltime: float. Optional walltime to override the default (current)
walltime (from time.time()) seconds after epoch
"""
graph = graph_profile[0]
stepstats = graph_profile[1]
event = event_pb2.Event(graph_def=graph.SerializeToString())
self.add_event(event, None, walltime)
trm = event_pb2.TaggedRunMetadata(
tag="step1", run_metadata=stepstats.SerializeToString()
)
event = event_pb2.Event(tagged_run_metadata=trm)
self.add_event(event, None, walltime)
def add_onnx_graph(self, graph, walltime=None) -> None:
"""Add a `Graph` protocol buffer to the event file.
Args:
graph: A `Graph` protocol buffer.
walltime: float. Optional walltime to override the default (current)
_get_file_writerfrom time.time())
"""
event = event_pb2.Event(graph_def=graph.SerializeToString())
self.add_event(event, None, walltime)
def flush(self) -> None:
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self.event_writer.flush()
def close(self) -> None:
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self.event_writer.close()
def reopen(self) -> None:
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
# pyrefly: ignore [missing-attribute]
self.event_writer.reopen()
| FileWriter |
python | PyCQA__pylint | tests/functional/u/used/used_before_assignment_py313.py | {
"start": 292,
"end": 487
} | class ____[**P = [int, Y]]: ...
type Alias[T = Y] = T | None
# https://github.com/pylint-dev/pylint/issues/9884
def func[T = Y](x: T) -> None: # [redefined-outer-name] FALSE POSITIVE
...
| Good3 |
python | getsentry__sentry | src/sentry_plugins/trello/client.py | {
"start": 405,
"end": 3576
} | class ____(ApiClient):
base_url = "https://api.trello.com/1"
plugin_name = "trello"
def __init__(self, api_key, token=None, **kwargs):
self.api_key = api_key
self.token = token
super().__init__(**kwargs)
def request(self, method="GET", path="", data=None, params=None, **kwargs):
params = {} if params is None else params.copy()
params["token"] = self.token
params["key"] = self.api_key
return self._request(method, path, data=data, params=params, **kwargs)
def get_organization_boards(self, org_id_or_name, fields=None):
"""
Return boards for an organization/team
"""
return self.request(path=ORG_BOARD_PATH % (org_id_or_name), params={"fields": fields})
def get_member_boards(self, fields=None):
"""
Return boards for a user
"""
return self.request(path=MEMBER_BOARD_PATH, params={"fields": fields})
def get_boards(self, org=None):
"""
Return boards for an organization/team if set, otherwise return boards for user
"""
if org:
return self.get_organization_boards(org, fields="name")
return self.get_member_boards(fields="name")
def get_organization_list(self, fields=None):
"""
Return organization list for user
"""
return self.request(path=MEMBER_ORG_PATH, params={"fields": fields})
def get_lists_of_board(self, board_id, fields=None):
"""
Return the lists on a given board
"""
return self.request(path=LISTS_OF_BOARD_PATH % (board_id), params={"fields": fields})
def new_card(self, name, id_list, desc=None):
"""
Create a Trello card
"""
return self.request(
method="POST", path=NEW_CARD_PATH, data={"name": name, "idList": id_list, "desc": desc}
)
def get_organization_options(self):
"""
Return organization options to use in a Django form
"""
organizations = self.get_organization_list(fields="name")
return [(org["id"], org["name"]) for org in organizations]
def get_cards(self, query, org_id=None):
"""
Return the cards matching a query, limited to an org if passed in
"""
params = {
"query": query,
"modelTypes": "cards",
"cards_limit": 100,
"partial": "true",
"card_fields": CARD_FIELDS,
}
if org_id:
params["idOrganizations"] = org_id
response = self.request(path=SEARCH_PATH, params=params)
return response["cards"]
def get_card(self, card_id_or_short_link):
"""
Return a card from an ID or short link
"""
return self.request(
path=SINGLE_CARD_PATH % card_id_or_short_link, params={"fields": CARD_FIELDS}
)
def create_comment(self, card_id_or_short_link, comment):
"""
Create a comment on a card
"""
return self.request(
method="POST", path=ADD_COMMENT_PATH % card_id_or_short_link, params={"text": comment}
)
| TrelloApiClient |
python | doocs__leetcode | lcci/16.17.Contiguous Sequence/Solution.py | {
"start": 0,
"end": 192
} | class ____:
def maxSubArray(self, nums: List[int]) -> int:
ans = f = -inf
for x in nums:
f = max(f, 0) + x
ans = max(ans, f)
return ans
| Solution |
python | run-llama__llama_index | llama-index-finetuning/llama_index/finetuning/callbacks/finetuning_handler.py | {
"start": 5665,
"end": 7576
} | class ____(BaseFinetuningHandler):
"""
Callback handler for Gradient AI fine-tuning.
This handler will collect all messages
sent to the LLM, along with their responses. It will then save these messages
in a `.jsonl` format that can be used for fine-tuning with Gradient AI's API.
"""
def get_finetuning_events(self) -> Dict[str, Dict[str, Any]]:
events_dict = {}
for event_id, event in self._finetuning_events.items():
events_dict[event_id] = {"messages": event[:-1], "response": event[-1]}
return events_dict
def save_finetuning_events(self, path: str) -> None:
"""
Save the finetuning events to a file.
This saved format can be used for fine-tuning with OpenAI's API.
The structure for each json line is as follows:
{
"inputs": "<full_prompt_str>"
},
...
"""
from llama_index.core.base.llms.generic_utils import messages_to_history_str
events_dict = self.get_finetuning_events()
json_strs = []
for event in events_dict.values():
all_messages = event["messages"] + [event["response"]]
# TODO: come up with model-specific message->prompt serialization format
prompt_str = messages_to_history_str(all_messages)
input_dict = {"inputs": prompt_str}
json_strs.append(json.dumps(input_dict))
with open(path, "w") as f:
f.write("\n".join(json_strs))
print(f"Wrote {len(json_strs)} examples to {path}")
def start_trace(self, trace_id: Optional[str] = None) -> None:
"""Run when an overall trace is launched."""
def end_trace(
self,
trace_id: Optional[str] = None,
trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
"""Run when an overall trace is exited."""
| GradientAIFineTuningHandler |
python | davidhalter__jedi | test/completion/complex.py | {
"start": 69,
"end": 546
} | class ____(object):
global time
asdf = time
def asdfy():
return Foo
xorz = getattr(asdfy()(), 'asdf')
#? time
xorz
def args_returner(*args):
return args
#? tuple()
args_returner(1)[:]
#? int()
args_returner(1)[:][0]
def kwargs_returner(**kwargs):
return kwargs
# TODO This is not really correct, needs correction probably at some point, but
# at least it doesn't raise an error.
#? int()
kwargs_returner(a=1)[:]
#?
kwargs_returner(b=1)[:][0]
| Foo |
python | keras-team__keras | keras/src/optimizers/adam_test.py | {
"start": 178,
"end": 3674
} | class ____(testing.TestCase):
def test_config(self):
optimizer = Adam(
learning_rate=0.5,
beta_1=0.5,
beta_2=0.67,
epsilon=1e-5,
amsgrad=True,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adam(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adam(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adam(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adam(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adam(amsgrad=True)
x = backend.Variable(np.ones([10], dtype="float32"))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
golden = np.tile(
[[0.999], [0.9982], [0.9974], [0.9965], [0.9955]], (1, 10)
)
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adam(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adam(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
@pytest.mark.requires_trainable_backend
def test_ema(self):
# TODO: test correctness
model = keras.Sequential([keras.layers.Dense(10)])
model.compile(optimizer=Adam(use_ema=True), loss="mse")
x = keras.ops.zeros((1, 5))
y = keras.ops.zeros((1, 10))
model.fit(x, y)
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The IndexedSlices test can only run with TF backend.",
)
def test_clipnorm_indexed_slices(self):
# https://github.com/keras-team/keras/issues/18985
model = keras.Sequential(
[
keras.layers.Embedding(10, 4),
keras.layers.Flatten(),
keras.layers.Dense(2),
]
)
model.compile(optimizer=Adam(clipnorm=100), loss="mse")
x = keras.ops.ones((8, 5))
y = keras.ops.zeros((8, 2))
model.fit(x, y, verbose=0)
| AdamTest |
python | tensorflow__tensorflow | tensorflow/python/keras/mixed_precision/test_util.py | {
"start": 8077,
"end": 8267
} | class ____(regularizers.Regularizer):
def __call__(self, x):
assert x.dtype == dtypes.float32
return array_ops.identity(x)
def get_config(self):
return {}
| IdentityRegularizer |
python | bokeh__bokeh | src/bokeh/server/auth_provider.py | {
"start": 6237,
"end": 7751
} | class ____(AuthProvider):
''' An AuthProvider configured from a Python module.
The following properties return the corresponding values from the module if
they exist, or None otherwise:
* ``get_login_url``,
* ``get_user``
* ``get_user_async``
* ``login_url``
* ``logout_url``
The ``login_handler`` property will return a ``LoginHandler`` class from the
module, or None otherwise.
The ``logout_handler`` property will return a ``LogoutHandler`` class from
the module, or None otherwise.
'''
def __init__(self, module_path: PathLike) -> None:
if not isfile(module_path):
raise ValueError(f"no file exists at module_path: {module_path!r}")
self._module = load_auth_module(module_path)
super().__init__()
@property
def get_user(self):
return getattr(self._module, 'get_user', None)
@property
def get_user_async(self):
return getattr(self._module, 'get_user_async', None)
@property
def login_url(self):
return getattr(self._module, 'login_url', None)
@property
def get_login_url(self):
return getattr(self._module, 'get_login_url', None)
@property
def login_handler(self):
return getattr(self._module, 'LoginHandler', None)
@property
def logout_url(self):
return getattr(self._module, 'logout_url', None)
@property
def logout_handler(self):
return getattr(self._module, 'LogoutHandler', None)
| AuthModule |
python | doocs__leetcode | solution/3000-3099/3029.Minimum Time to Revert Word to Initial State I/Solution.py | {
"start": 0,
"end": 238
} | class ____:
def minimumTimeToInitialState(self, word: str, k: int) -> int:
n = len(word)
for i in range(k, n, k):
if word[i:] == word[:-i]:
return i // k
return (n + k - 1) // k
| Solution |
python | bokeh__bokeh | release/action.py | {
"start": 605,
"end": 1023
} | class ____:
""""""
kind: ActionResult
ui: UIResultFuncType
def __init__(self, message: str, details: Sequence[str] | None = None) -> None:
self.message = message
self.details = details
def __str__(self) -> str:
return self.ui(self.message, self.details)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.message!r}, details=...)"
| ActionReturn |
python | pytorch__pytorch | test/export/random_dag.py | {
"start": 2054,
"end": 3076
} | class ____:
"""
Abstract base class for generating a nn.Module.
Users should subclass this class and implement the gen_init_body() and
gen_forward_body() methods. The gen_init_body() method should return a
block of code that initializes the nn.Module. The gen_forward_body() method
should return a block of code that defines the forward() of the nn.Module.
"""
def gen_init_body(self, i: int):
raise NotImplementedError
def gen_forward_body(self, i: int):
raise NotImplementedError
def gen_nn_module(self, i: int):
def gen_nn_module_body():
code = Block()
code.new_line("def __init__(self):")
code.new_block(self.gen_init_body(i))
code.new_line("def forward(self, x):")
code.new_block(self.gen_forward_body(i))
return code
code = Block()
code.new_line(f"class N{i}(torch.nn.Module):")
code.new_block(gen_nn_module_body())
return code
| NNModuleGenerator |
python | PyCQA__pylint | tests/test_func.py | {
"start": 1129,
"end": 3465
} | class ____:
INPUT_DIR: str | None = None
DEFAULT_PACKAGE = "input"
package = DEFAULT_PACKAGE
linter = linter
module: str | None = None
depends: list[tuple[str, str]] | None = None
output: str | None = None
def _test_functionality(self) -> None:
tocheck = [self.package + "." + self.module] if self.module else []
if self.depends:
tocheck += [
self.package + f".{name.replace('.py', '')}" for name, _ in self.depends
]
# given that TESTS_DIR could be treated as a namespace package
# when under the current directory, cd to it so that "tests." is not
# prepended to module names in the output of cyclic-import
with _test_cwd(TESTS_DIR):
self._test(tocheck)
def _check_result(self, got: str) -> None:
error_msg = (
f"Wrong output for '{self.output}':\n"
"You can update the expected output automatically with: '"
f"python tests/test_func.py {UPDATE_OPTION}'\n\n"
)
assert self._get_expected() == got, error_msg
def _test(self, tocheck: list[str]) -> None:
if self.module and INFO_TEST_RGX.match(self.module):
self.linter.enable("I")
else:
self.linter.disable("I")
try:
self.linter.check(tocheck)
except Exception as ex:
print(f"Exception: {ex} in {tocheck}:: {', '.join(ex.args)}")
# This is legacy code we're trying to remove, not worth it to type correctly
ex.file = tocheck # type: ignore[attr-defined]
print(ex)
# This is legacy code we're trying to remove, not worth it to type correctly
ex.__str__ = exception_str # type: ignore[assignment]
raise
assert isinstance(self.linter.reporter, GenericTestReporter)
self._check_result(self.linter.reporter.finalize())
def _has_output(self) -> bool:
return isinstance(self.module, str) and not self.module.startswith(
"func_noerror_"
)
def _get_expected(self) -> str:
if self._has_output() and self.output:
with open(self.output, encoding="utf-8") as fobj:
return fobj.read().strip() + "\n"
else:
return ""
| LintTestUsingModule |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/openid/tests.py | {
"start": 472,
"end": 6592
} | class ____(fetchers.Urllib2Fetcher):
def fetch(self, url, body=None, headers=None):
if url == "https://steamcommunity.com/openid":
return fetchers.HTTPResponse(
final_url="https://steamcommunity.com/openid",
status=HTTPStatus.OK,
headers={"content-type": "application/xrds+xml;charset=utf-8"},
body='<?xml version="1.0" encoding="UTF-8"?>\n<xrds:XRDS xmlns:xrds="xri://$xrds" xmlns="xri://$xrd*($v*2.0)">\n\t<XRD>\n\t\t<Service priority="0">\n\t\t\t<Type>http://specs.openid.net/auth/2.0/server</Type>\t\t\n\t\t\t<URI>https://steamcommunity.com/openid/login</URI>\n\t\t</Service>\n\t</XRD>\n</xrds:XRDS>',
)
if url == "https://steamcommunity.com/openid/login":
return fetchers.HTTPResponse(
final_url="https://steamcommunity.com/openid/login",
status=HTTPStatus.OK,
headers={"content-type": "text/plain;charset=utf-8"},
body="ns:http://specs.openid.net/auth/2.0\nerror_code:unsupported-type\nerror:Associations not supported\n",
)
if url == "https://discovery-failure.com/":
raise urllib.error.URLError
ret = super().fetch(url, body=body, headers=headers)
breakpoint()
return ret
@pytest.fixture(autouse=True)
def setup_fetcher():
old_fetcher = fetchers.getDefaultFetcher()
fetchers.setDefaultFetcher(TestFetcher())
yield
fetchers.setDefaultFetcher(old_fetcher)
def test_discovery_failure(client):
"""
This used to generate a server 500:
DiscoveryFailure: No usable OpenID services found
for http://www.google.com/
"""
resp = client.post(
reverse("openid_login"), dict(openid="https://discovery-failure.com/")
)
assert "openid" in resp.context["form"].errors
def test_login(client, db):
# Location: https://s.yimg.com/wm/mbr/html/openid-eol-0.0.1.html
resp = client.post(
reverse(views.login), dict(openid="https://steamcommunity.com/openid")
)
assert "steamcommunity.com/openid/login" in resp["location"]
with patch(
"allauth.socialaccount.providers.openid.views._openid_consumer"
) as consumer_mock:
consumer_client = Mock()
complete = Mock()
consumer_mock.return_value = consumer_client
consumer_client.complete = complete
complete_response = Mock()
complete.return_value = complete_response
complete_response.status = consumer.SUCCESS
complete_response.identity_url = "http://dummy/john/"
with patch(
"allauth.socialaccount.providers.openid.utils.SRegResponse"
) as sr_mock:
with patch(
"allauth.socialaccount.providers.openid.utils.FetchResponse"
) as fr_mock:
sreg_mock = Mock()
ax_mock = Mock()
sr_mock.fromSuccessResponse = sreg_mock
fr_mock.fromSuccessResponse = ax_mock
sreg_mock.return_value = {}
ax_mock.return_value = {AXAttribute.PERSON_FIRST_NAME: ["raymond"]}
resp = client.post(reverse("openid_callback"))
assert resp["location"] == "/accounts/profile/"
get_user_model().objects.get(first_name="raymond")
social_account = SocialAccount.objects.get(
uid=complete_response.identity_url
)
account = social_account.get_provider_account()
assert account.to_str() == complete_response.identity_url
@override_settings(
SOCIALACCOUNT_PROVIDERS={
"openid": {
"SERVERS": [
dict(
id="yahoo",
name="Yahoo",
openid_url="http://me.yahoo.com",
extra_attributes=[
(
"phone",
"http://axschema.org/contact/phone/default",
True,
)
],
)
]
}
}
)
def test_login_with_extra_attributes(client, db):
with patch("allauth.socialaccount.providers.openid.views.QUERY_EMAIL", True):
resp = client.post(
reverse(views.login), dict(openid="https://steamcommunity.com/openid")
)
assert "steamcommunity.com/openid/login" in resp["location"]
with patch(
"allauth.socialaccount.providers.openid.views._openid_consumer"
) as consumer_mock:
consumer_client = Mock()
complete = Mock()
endpoint = Mock()
consumer_mock.return_value = consumer_client
consumer_client.complete = complete
complete_response = Mock()
complete.return_value = complete_response
complete_response.endpoint = endpoint
complete_response.endpoint.server_url = "http://me.yahoo.com"
complete_response.status = consumer.SUCCESS
complete_response.identity_url = "http://dummy/john/"
with patch(
"allauth.socialaccount.providers.openid.utils.SRegResponse"
) as sr_mock:
with patch(
"allauth.socialaccount.providers.openid.utils.FetchResponse"
) as fr_mock:
sreg_mock = Mock()
ax_mock = Mock()
sr_mock.fromSuccessResponse = sreg_mock
fr_mock.fromSuccessResponse = ax_mock
sreg_mock.return_value = {}
ax_mock.return_value = {
AXAttribute.CONTACT_EMAIL: ["raymond@example.com"],
AXAttribute.PERSON_FIRST_NAME: ["raymond"],
"http://axschema.org/contact/phone/default": ["123456789"],
}
resp = client.post(reverse("openid_callback"))
assert resp["location"] == "/accounts/profile/"
socialaccount = SocialAccount.objects.get(user__first_name="raymond")
assert socialaccount.extra_data.get("phone") == "123456789"
| TestFetcher |
python | cython__cython | Demos/benchmarks/chaos.py | {
"start": 1937,
"end": 4607
} | class ____(object):
"""Class for representing B-Splines and NURBS of arbitrary degree"""
def __init__(self, points, degree = 3, knots = None):
"""Creates a Spline. points is a list of GVector, degree is the degree of the Spline."""
if knots is None:
self.knots = GetKnots(points, degree)
else:
if len(points) > len(knots) - degree + 1:
raise ValueError("too many control points")
elif len(points) < len(knots) - degree + 1:
raise ValueError("not enough control points")
last = knots[0]
for cur in knots[1:]:
if cur < last:
raise ValueError("knots not strictly increasing")
last = cur
self.knots = knots
self.points = points
self.degree = degree
def GetDomain(self):
"""Returns the domain of the B-Spline"""
return (self.knots[self.degree - 1],
self.knots[len(self.knots) - self.degree])
@cython.locals(ik=cython.long, ii=cython.long, I=cython.long,
ua=cython.long, ub=cython.long, u=cython.double,
dom=(cython.long, cython.long))
def __call__(self, u):
"""Calculates a point of the B-Spline using de Boors Algorithm"""
dom = self.GetDomain()
if u < dom[0] or u > dom[1]:
raise ValueError("Function value not in domain")
if u == dom[0]:
return self.points[0]
if u == dom[1]:
return self.points[-1]
I = self.GetIndex(u)
d = [self.points[I - self.degree + 1 + ii]
for ii in range(self.degree + 1)]
U = self.knots
for ik in range(1, self.degree + 1):
for ii in range(I - self.degree + ik + 1, I + 2):
ua = U[ii + self.degree - ik]
ub = U[ii - 1]
co1 = (ua - u) / (ua - ub)
co2 = (u - ub) / (ua - ub)
index = ii - I + self.degree - ik - 1
d[index] = d[index].linear_combination(d[index + 1], co1, co2)
return d[0]
@cython.locals(ii=cython.long, I=cython.long, dom=(cython.long, cython.long))
def GetIndex(self, u):
dom = self.GetDomain()
for ii in range(self.degree - 1, len(self.knots) - self.degree):
if self.knots[ii] <= u < self.knots[ii + 1]:
I = ii
break
else:
I = dom[1] - 1
return I
def __len__(self):
return len(self.points)
def __repr__(self):
return "Spline(%r, %r, %r)" % (self.points, self.degree, self.knots)
| Spline |
python | aio-libs__aiohttp | aiohttp/client_reqrep.py | {
"start": 4622,
"end": 4983
} | class ____(NamedTuple):
# the key should contain an information about used proxy / TLS
# to prevent reusing wrong connections from a pool
host: str
port: int | None
is_ssl: bool
ssl: SSLContext | bool | Fingerprint
proxy: URL | None
proxy_auth: BasicAuth | None
proxy_headers_hash: int | None # hash(CIMultiDict)
| ConnectionKey |
python | jazzband__django-pipeline | pipeline/compilers/stylus.py | {
"start": 116,
"end": 485
} | class ____(SubProcessCompiler):
output_extension = "css"
def match_file(self, filename):
return filename.endswith(".styl")
def compile_file(self, infile, outfile, outdated=False, force=False):
command = (settings.STYLUS_BINARY, settings.STYLUS_ARGUMENTS, infile)
return self.execute_command(command, cwd=dirname(infile))
| StylusCompiler |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_delete_field_without_pending_app/migrations/0002_delete_without_pending.py | {
"start": 190,
"end": 499
} | class ____(CheckedMigration):
dependencies = [
("bad_flow_delete_field_without_pending_app", "0001_initial"),
]
operations = [
SafeRemoveField(
model_name="testtable",
name="field",
deletion_action=DeletionAction.DELETE,
),
]
| Migration |
python | spyder-ide__spyder | spyder/plugins/run/api.py | {
"start": 7310,
"end": 7679
} | class ____(TypedDict):
"""Stored run executor options per run configuration settings."""
# Name of the last used run executor for the current run configuration.
executor: Optional[str]
# Unique identifier for the currently selected parameters. None
# if using default or transient settings.
selected: Optional[str]
| StoredRunConfigurationExecutor |
python | keras-team__keras | keras/src/layers/core/lambda_layer.py | {
"start": 300,
"end": 9272
} | class ____(Layer):
"""Wraps arbitrary expressions as a `Layer` object.
The `Lambda` layer exists so that arbitrary expressions can be used
as a `Layer` when constructing Sequential
and Functional API models. `Lambda` layers are best suited for simple
operations or quick experimentation. For more advanced use cases,
prefer writing new subclasses of `Layer`.
WARNING: `Lambda` layers have (de)serialization limitations!
The main reason to subclass `Layer` instead of using a
`Lambda` layer is saving and inspecting a model. `Lambda` layers
are saved by serializing the Python bytecode, which is fundamentally
non-portable and potentially unsafe.
They should only be loaded in the same environment where
they were saved. Subclassed layers can be saved in a more portable way
by overriding their `get_config()` method. Models that rely on
subclassed Layers are also often easier to visualize and reason about.
Example:
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
Args:
function: The function to be evaluated. Takes input tensor as first
argument.
output_shape: Expected output shape from function. This argument
can usually be inferred if not explicitly provided.
Can be a tuple or function. If a tuple, it only specifies
the first dimension onward; sample dimension is assumed
either the same as the input:
`output_shape = (input_shape[0], ) + output_shape` or,
the input is `None` and the sample dimension is also `None`:
`output_shape = (None, ) + output_shape`.
If a function, it specifies the
entire shape as a function of the input shape:
`output_shape = f(input_shape)`.
mask: Either None (indicating no masking) or a callable with the same
signature as the `compute_mask` layer method, or a tensor
that will be returned as output mask regardless
of what the input is.
arguments: Optional dictionary of keyword arguments to be passed to the
function.
"""
def __init__(
self, function, output_shape=None, mask=None, arguments=None, **kwargs
):
super().__init__(**kwargs)
self.arguments = arguments or {}
self.function = function
if mask is not None:
self.supports_masking = True
else:
self.supports_masking = False
self.mask = mask
self._output_shape = output_shape
# Warning on every invocation will be quite irksome in Eager mode.
self._already_warned = False
function_args = inspect.getfullargspec(function).args
self._fn_expects_training_arg = "training" in function_args
self._fn_expects_mask_arg = "mask" in function_args
def compute_output_shape(self, input_shape):
if self._output_shape is None:
# Leverage backend shape inference
try:
inputs = tree.map_shape_structure(
lambda x: backend.KerasTensor(x, dtype=self.compute_dtype),
input_shape,
)
output_spec = backend.compute_output_spec(self.call, inputs)
return tree.map_structure(lambda x: x.shape, output_spec)
except:
raise NotImplementedError(
"We could not automatically infer the shape of "
"the Lambda's output. Please specify the `output_shape` "
"argument for this Lambda layer."
)
if callable(self._output_shape):
return self._output_shape(input_shape)
# Output shapes are passed directly and don't include batch dimension.
batch_size = tree.flatten(input_shape)[0]
def _add_batch(shape):
return (batch_size,) + shape
return tree.map_shape_structure(_add_batch, self._output_shape)
def call(self, inputs, mask=None, training=None):
# We must copy for thread safety,
# but it only needs to be a shallow copy.
kwargs = {k: v for k, v in self.arguments.items()}
if self._fn_expects_mask_arg:
kwargs["mask"] = mask
if self._fn_expects_training_arg:
kwargs["training"] = training
return self.function(inputs, **kwargs)
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
config = {
"function": self._serialize_function_to_config(self.function),
}
if self._output_shape is not None:
if callable(self._output_shape):
output_shape = self._serialize_function_to_config(
self._output_shape
)
else:
output_shape = self._output_shape
config["output_shape"] = output_shape
if self.mask is not None:
if callable(self.mask):
mask = self._serialize_function_to_config(self.mask)
else:
mask = serialization_lib.serialize_keras_object(self.mask)
config["mask"] = mask
config["arguments"] = serialization_lib.serialize_keras_object(
self.arguments
)
base_config = super().get_config()
return {**base_config, **config}
def _serialize_function_to_config(self, fn):
if isinstance(fn, types.LambdaType) and fn.__name__ == "<lambda>":
code, defaults, closure = python_utils.func_dump(fn)
return {
"class_name": "__lambda__",
"config": {
"code": code,
"defaults": defaults,
"closure": closure,
},
}
elif callable(fn):
return serialization_lib.serialize_keras_object(fn)
raise ValueError(
"Invalid input type for serialization. "
f"Received: {fn} of type {type(fn)}."
)
@staticmethod
def _raise_for_lambda_deserialization(safe_mode):
if safe_mode:
raise ValueError(
"Requested the deserialization of a `Lambda` layer whose "
"`function` is a Python lambda. This carries a potential risk "
"of arbitrary code execution and thus it is disallowed by "
"default. If you trust the source of the artifact, you can "
"override this error by passing `safe_mode=False` to the "
"loading function, or calling "
"`keras.config.enable_unsafe_deserialization()."
)
@classmethod
def from_config(cls, config, custom_objects=None, safe_mode=None):
safe_mode = safe_mode or serialization_lib.in_safe_mode()
fn_config = config["function"]
if (
isinstance(fn_config, dict)
and "class_name" in fn_config
and fn_config["class_name"] == "__lambda__"
):
cls._raise_for_lambda_deserialization(safe_mode)
inner_config = fn_config["config"]
fn = python_utils.func_load(
inner_config["code"],
defaults=inner_config["defaults"],
closure=inner_config["closure"],
)
config["function"] = fn
else:
config["function"] = serialization_lib.deserialize_keras_object(
fn_config, custom_objects=custom_objects
)
if "output_shape" in config:
fn_config = config["output_shape"]
if (
isinstance(fn_config, dict)
and "class_name" in fn_config
and fn_config["class_name"] == "__lambda__"
):
cls._raise_for_lambda_deserialization(safe_mode)
inner_config = fn_config["config"]
fn = python_utils.func_load(
inner_config["code"],
defaults=inner_config["defaults"],
closure=inner_config["closure"],
)
config["output_shape"] = fn
else:
output_shape = serialization_lib.deserialize_keras_object(
fn_config, custom_objects=custom_objects
)
if isinstance(output_shape, list) and all(
isinstance(e, (int, type(None))) for e in output_shape
):
output_shape = tuple(output_shape)
config["output_shape"] = output_shape
if "arguments" in config:
config["arguments"] = serialization_lib.deserialize_keras_object(
config["arguments"], custom_objects=custom_objects
)
return cls(**config)
| Lambda |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/enums.py | {
"start": 997,
"end": 1333
} | class ____(enum.Enum):
"""this is enum class"""
#: doc for val1
val1 = 12
val2 = 23 #: doc for val2
val3 = 34
"""doc for val3"""
val4 = 34
def say_hello(self):
"""a method says hello to you."""
@classmethod
def say_goodbye(cls):
"""a classmethod says good-bye to you."""
| EnumCls |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 108904,
"end": 133317
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5,
image_ops.ResizeMethod.GAUSSIAN, image_ops.ResizeMethod.MITCHELLCUBIC
]
# Some resize methods, such as Gaussian, are non-interpolating in that they
# change the image even if there is no scale change, for some test, we only
# check the value on the value preserving methods.
INTERPOLATING_METHODS = [
image_ops.ResizeMethod.BILINEAR, image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC, image_ops.ResizeMethod.AREA,
image_ops.ResizeMethod.LANCZOS3, image_ops.ResizeMethod.LANCZOS5
]
TYPES = [
np.uint8, np.int8, np.uint16, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64
]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images_v2(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images_v2(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, method, nptype):
if (method == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
@test_util.disable_xla("align_corners=False not supported by XLA")
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
self.METHODS[0])
yshape = array_ops.shape(y)
newshape = self.evaluate(yshape)
self.assertAllEqual(single_shape, newshape)
# half_pixel_centers unsupported in ResizeBilinear
@test_util.disable_xla("b/127616992")
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
def resize_func(t, new_size, method):
return image_ops.resize_images_v2(t, new_size, method)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = resize_func(image, [6, 4], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
# Resizing with a single image must leave the shape unchanged also.
with self.cached_session():
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = resize_func(image, [6, 4], self.METHODS[0])
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(single_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = resize_func(image, new_size, image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, 4.0], image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [None, 4], image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = resize_func(image, [6, None], image_ops.ResizeMethod.BILINEAR)
def testReturnDtypeV1(self):
# Shape inference in V1.
with ops.Graph().as_default():
target_shapes = [[6, 4], [3, 2],
[
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int32)
]]
for nptype in self.TYPES:
image = array_ops.placeholder(nptype, shape=[1, 6, 4, 1])
for method in self.METHODS:
for target_shape in target_shapes:
y = image_ops.resize_images_v2(image, target_shape, method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
def testReturnDtypeV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def test_dtype(image, target_shape, target_method):
y = image_ops.resize_images_v2(image, target_shape, target_method)
if method == image_ops.ResizeMethod.NEAREST_NEIGHBOR:
expected_dtype = image.dtype
else:
expected_dtype = dtypes.float32
self.assertEqual(y.dtype, expected_dtype)
target_shapes = [[6, 4],
[3, 2],
[tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)]]
for nptype in self.TYPES:
image = tensor_spec.TensorSpec(shape=[1, 6, 4, 1], dtype=nptype)
for method in self.METHODS:
for target_shape in target_shapes:
with test_util.run_functions_eagerly(run_func_eagerly):
test_dtype.get_concrete_function(image, target_shape, method)
# half_pixel_centers not supported by XLA
@test_util.disable_xla("b/127616992")
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for method in self.METHODS:
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [height, width], method)
yshape = array_ops.shape(y)
resized, newshape = self.evaluate([y, yshape])
self.assertAllEqual(img_shape, newshape)
if method in self.INTERPOLATING_METHODS:
self.assertAllClose(resized, img_np, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [
127, 127, 64, 64, 127, 127, 64, 64, 64, 64, 127, 127, 64, 64, 127, 127,
50, 50, 100, 100, 50, 50, 100, 100
]
expected_data = [127, 64, 64, 127, 50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for method in self.METHODS:
if test.is_gpu_available() and self.shouldRunOnGPU(method, nptype):
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(
image, [target_height, target_width], method)
expected = np.array(expected_data).reshape(target_shape)
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 56.0, 40.0, 32.0, 56.0, 52.0, 44.0, 40.0, 40.0, 44.0, 52.0, 56.0,
36.5, 45.625, 63.875, 73.0, 45.5, 56.875, 79.625, 91.0, 50.0, 62.5,
87.5, 100.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0, 64.0, 64.0, 32.0, 32.0, 32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0, 50.0, 50.0, 100.0, 100.0, 50.0, 50.0, 100.0,
100.0
]
expected_data[image_ops.ResizeMethod.LANCZOS3] = [
75.8294, 59.6281, 38.4313, 22.23, 60.6851, 52.0037, 40.6454, 31.964,
35.8344, 41.0779, 47.9383, 53.1818, 24.6968, 43.0769, 67.1244, 85.5045,
35.7939, 56.4713, 83.5243, 104.2017, 44.8138, 65.1949, 91.8603, 112.2413
]
expected_data[image_ops.ResizeMethod.LANCZOS5] = [
77.5699, 60.0223, 40.6694, 23.1219, 61.8253, 51.2369, 39.5593, 28.9709,
35.7438, 40.8875, 46.5604, 51.7041, 21.5942, 43.5299, 67.7223, 89.658,
32.1213, 56.784, 83.984, 108.6467, 44.5802, 66.183, 90.0082, 111.6109
]
expected_data[image_ops.ResizeMethod.GAUSSIAN] = [
61.1087, 54.6926, 41.3074, 34.8913, 54.6926, 51.4168, 44.5832, 41.3074,
41.696, 45.2456, 52.6508, 56.2004, 39.4273, 47.0526, 62.9602, 70.5855,
47.3008, 57.3042, 78.173, 88.1764, 51.4771, 62.3638, 85.0752, 95.9619
]
expected_data[image_ops.ResizeMethod.BICUBIC] = [
70.1453, 59.0252, 36.9748, 25.8547, 59.3195, 53.3386, 41.4789, 35.4981,
36.383, 41.285, 51.0051, 55.9071, 30.2232, 42.151, 65.8032, 77.731,
41.6492, 55.823, 83.9288, 98.1026, 47.0363, 62.2744, 92.4903, 107.7284
]
expected_data[image_ops.ResizeMethod.MITCHELLCUBIC] = [
66.0382, 56.6079, 39.3921, 29.9618, 56.7255, 51.9603, 43.2611, 38.4959,
39.1828, 43.4664, 51.2864, 55.57, 34.6287, 45.1812, 64.4458, 74.9983,
43.8523, 56.8078, 80.4594, 93.4149, 48.9943, 63.026, 88.6422, 102.6739
]
for nptype in self.TYPES:
for method in expected_data:
with self.cached_session():
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
method)
resized = self.evaluate(y)
expected = np.array(expected_data[method]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-04)
# XLA doesn't implement half_pixel_centers
@test_util.disable_xla("b/127616992")
def testLegacyBicubicMethodsMatchNewMethods(self):
img_shape = [1, 3, 2, 1]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
methods_to_test = ((gen_image_ops.resize_bilinear, "triangle"),
(gen_image_ops.resize_bicubic, "keyscubic"))
for legacy_method, new_method in methods_to_test:
with self.cached_session():
img_np = np.array(data, dtype=np.float32).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
legacy_result = legacy_method(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
half_pixel_centers=True)
scale = (
constant_op.constant([target_height, target_width],
dtype=dtypes.float32) /
math_ops.cast(array_ops.shape(image)[1:3], dtype=dtypes.float32))
new_result = gen_image_ops.scale_and_translate(
image,
constant_op.constant([target_height, target_width],
dtype=dtypes.int32),
scale,
array_ops.zeros([2]),
kernel_type=new_method,
antialias=False)
self.assertAllClose(
self.evaluate(legacy_result), self.evaluate(new_result), atol=1e-04)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [
128, 64, 32, 16, 8, 4, 4, 8, 16, 32, 64, 128, 128, 64, 32, 16, 8, 4, 5,
10, 15, 20, 25, 30, 30, 25, 20, 15, 10, 5, 5, 10, 15, 20, 25, 30
]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [
73, 33, 23, 39, 73, 33, 23, 39, 14, 16, 19, 21, 14, 16, 19, 21
]
with self.cached_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images_v2(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = self.evaluate(y)
self.assertAllClose(resized, expected, atol=1)
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.cached_session():
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
gpu_val = self.evaluate(out_op)
with self.cached_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images_v2(
image, new_size, image_ops.ResizeMethod.NEAREST_NEIGHBOR)
cpu_val = self.evaluate(out_op)
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
@test_util.disable_xla("align_corners=False not supported by XLA")
def testBfloat16MultipleOps(self):
target_height = 8
target_width = 12
img = np.random.uniform(0, 100, size=(30, 10, 2)).astype(np.float32)
img_bf16 = ops.convert_to_tensor(img, dtype="bfloat16")
new_size = constant_op.constant([target_height, target_width])
img_methods = [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR, image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA
]
for method in img_methods:
out_op_bf16 = image_ops.resize_images_v2(img_bf16, new_size, method)
out_op_f32 = image_ops.resize_images_v2(img, new_size, method)
bf16_val = self.evaluate(out_op_bf16)
f32_val = self.evaluate(out_op_f32)
self.assertAllClose(bf16_val, f32_val, rtol=1e-2, atol=1e-2)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
value[use_gpu] = self.evaluate(out_op)
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
# Shape function requires placeholders and a graph.
with ops.Graph().as_default():
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
def testNameScope(self):
# Testing name scope requires placeholders and a graph.
with ops.Graph().as_default():
with self.cached_session():
single_image = array_ops.placeholder(dtypes.float32, shape=[50, 60, 3])
y = image_ops.resize_images(single_image, [55, 66])
self.assertTrue(y.op.name.startswith("resize"))
def _ResizeImageCall(self, x, max_h, max_w, preserve_aspect_ratio,
use_tensor_inputs):
if use_tensor_inputs:
target_max = ops.convert_to_tensor([max_h, max_w])
x_tensor = ops.convert_to_tensor(x)
else:
target_max = (max_h, max_w)
x_tensor = x
def resize_func(t,
target_max=target_max,
preserve_aspect_ratio=preserve_aspect_ratio):
return image_ops.resize_images(
t, ops.convert_to_tensor(target_max),
preserve_aspect_ratio=preserve_aspect_ratio)
with self.cached_session():
return self.evaluate(resize_func(x_tensor))
def _assertResizeEqual(self,
x,
x_shape,
y,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertResizeCheckShape(self,
x,
x_shape,
target_shape,
y_shape,
preserve_aspect_ratio=True,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width = target_shape
x = np.array(x).reshape(x_shape)
y = np.zeros(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageCall(x, target_height, target_width,
preserve_aspect_ratio, use_tensor_inputs)
self.assertShapeEqual(y, ops.convert_to_tensor(y_tf))
def testPreserveAspectRatioMultipleImages(self):
x_shape = [10, 100, 80, 10]
x = np.random.uniform(size=x_shape)
for preserve_aspect_ratio in [True, False]:
with self.subTest(preserve_aspect_ratio=preserve_aspect_ratio):
expect_shape = [10, 250, 200, 10] if preserve_aspect_ratio \
else [10, 250, 250, 10]
self._assertResizeCheckShape(
x,
x_shape, [250, 250],
expect_shape,
preserve_aspect_ratio=preserve_aspect_ratio)
def testPreserveAspectRatioNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeEqual(x, x_shape, x, x_shape)
def testPreserveAspectRatioSmaller(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [50, 50, 10])
def testPreserveAspectRatioSmallerMultipleImages(self):
x_shape = [10, 100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [75, 50], [10, 50, 50, 10])
def testPreserveAspectRatioLarger(self):
x_shape = [100, 100, 10]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [150, 200], [150, 150, 10])
def testPreserveAspectRatioSameRatio(self):
x_shape = [1920, 1080, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [3840, 2160], [3840, 2160, 3])
def testPreserveAspectRatioSquare(self):
x_shape = [299, 299, 3]
x = np.random.uniform(size=x_shape)
self._assertResizeCheckShape(x, x_shape, [320, 320], [320, 320, 3])
def testLargeDim(self):
with self.session():
with self.assertRaises(errors.InvalidArgumentError):
x = np.ones((5, 1, 1, 2))
v = image_ops.resize_images_v2(x, [1610637938, 1610637938],
image_ops.ResizeMethod.BILINEAR)
_ = self.evaluate(v)
| ResizeImagesV2Test |
python | PyCQA__isort | tests/unit/utils.py | {
"start": 64,
"end": 211
} | class ____(TextIOWrapper):
def seek(self, *args, **kwargs):
raise ValueError("underlying stream is not seekable")
| UnseekableTextIOWrapper |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py | {
"start": 61786,
"end": 61874
} | class ____(Qwen3VLMoeTextRotaryEmbedding):
pass
| Qwen3OmniMoeThinkerTextRotaryEmbedding |
python | walkccc__LeetCode | solutions/1790. Check if One String Swap Can Make Strings Equal/1790.py | {
"start": 0,
"end": 401
} | class ____:
# Similar to 859. Buddy Strings
def areAlmostEqual(self, s1: str, s2: str) -> bool:
diffIndices = [i for i, (a, b) in enumerate(zip(s1, s2))
if a != b]
return not diffIndices or (len(diffIndices) == 2 and
s1[diffIndices[0]] == s2[diffIndices[1]] and
s1[diffIndices[1]] == s2[diffIndices[0]])
| Solution |
python | ray-project__ray | doc/source/ray-overview/examples/e2e-xgboost/dist_xgboost/infer.py | {
"start": 675,
"end": 3245
} | class ____:
def __init__(self, loader):
# pass in loader function from the outer scope to
# make it easier to mock during testing
_, self.model = loader()
def __call__(self, batch: pd.DataFrame) -> pd.DataFrame:
# remove the target column for inference
target = batch.pop("target")
dmatrix = xgboost.DMatrix(batch)
predictions = self.model.predict(dmatrix)
results = pd.DataFrame({"prediction": predictions, "target": target})
return results
def confusion_matrix_batch(batch, threshold=0.5):
# apply a threshold to the predictions to get binary labels
batch["prediction"] = (batch["prediction"] > threshold).astype(int)
result = {}
cm = confusion_matrix(batch["target"], batch["prediction"], labels=[0, 1])
result["TN"] = cm[0, 0]
result["FP"] = cm[0, 1]
result["FN"] = cm[1, 0]
result["TP"] = cm[1, 1]
return pd.DataFrame(result, index=[0])
def main():
_, _, test_dataset = prepare_data()
preprocessor, _ = load_model_and_preprocessor()
# Apply the transformation to each batch
test_dataset = test_dataset.map_batches(
transform_with_preprocessor,
fn_kwargs={"preprocessor": preprocessor},
batch_format="pandas",
batch_size=1000,
)
# Make predictions
test_predictions = test_dataset.map_batches(
Validator,
fn_constructor_kwargs={"loader": load_model_and_preprocessor},
compute=ray.data.ActorPoolStrategy(size=4), # Number of model replicas
batch_format="pandas",
)
# Calculate confusion matrix
test_results = test_predictions.map_batches(
confusion_matrix_batch, batch_format="pandas", batch_size=1000
)
# Calculate metrics
# Sum all confusion matrix values across batches
cm_sums = test_results.sum(["TN", "FP", "FN", "TP"])
# Extract confusion matrix components
tn = cm_sums["sum(TN)"]
fp = cm_sums["sum(FP)"]
fn = cm_sums["sum(FN)"]
tp = cm_sums["sum(TP)"]
# Calculate metrics
accuracy = (tp + tn) / (tp + tn + fp + fn)
precision = tp / (tp + fp) if (tp + fp) > 0 else 0
recall = tp / (tp + fn) if (tp + fn) > 0 else 0
f1 = (
2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
)
metrics = {"precision": precision, "recall": recall, "f1": f1, "accuracy": accuracy}
print("Validation results:")
for key, value in metrics.items():
print(f"{key}: {value:.4f}")
if __name__ == "__main__":
main()
| Validator |
python | dask__dask | dask/rewrite.py | {
"start": 2472,
"end": 3011
} | class ____(tuple):
"""A Discrimination Net node."""
__slots__ = ()
def __new__(cls, edges=None, patterns=None):
edges = edges if edges else {}
patterns = patterns if patterns else []
return tuple.__new__(cls, (edges, patterns))
@property
def edges(self):
"""A dictionary, where the keys are edges, and the values are nodes"""
return self[0]
@property
def patterns(self):
"""A list of all patterns that currently match at this node"""
return self[1]
| Node |
python | encode__django-rest-framework | tests/test_validation.py | {
"start": 7696,
"end": 8049
} | class ____(serializers.Serializer):
pin = serializers.CharField(
validators=[RegexValidator(regex=re.compile('^[0-9]{4,6}$'),
message='A PIN is 4-6 digits')])
expected_repr = """
RegexSerializer():
pin = CharField(validators=[<django.core.validators.RegexValidator object>])
""".strip()
| RegexSerializer |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 10279,
"end": 10450
} | class ____:
def test_month_abbreviation(self):
assert "juill" in self.locale.month_abbreviations
@pytest.mark.usefixtures("lang_locale")
| TestFrenchCanadianLocale |
python | kamyu104__LeetCode-Solutions | Python/set-mismatch.py | {
"start": 29,
"end": 594
} | class ____(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
x_xor_y = 0
for i in xrange(len(nums)):
x_xor_y ^= nums[i] ^ (i+1)
bit = x_xor_y & ~(x_xor_y-1)
result = [0] * 2
for i, num in enumerate(nums):
result[bool(num & bit)] ^= num
result[bool((i+1) & bit)] ^= i+1
if result[0] not in nums:
result[0], result[1] = result[1], result[0]
return result
# Time: O(n)
# Space: O(1)
| Solution |
python | python__mypy | mypy/fixup.py | {
"start": 9118,
"end": 16600
} | class ____(TypeVisitor[None]):
def __init__(self, modules: dict[str, MypyFile], allow_missing: bool) -> None:
self.modules = modules
self.allow_missing = allow_missing
def visit_instance(self, inst: Instance) -> None:
# TODO: Combine Instances that are exactly the same?
type_ref = inst.type_ref
if type_ref is None:
return # We've already been here.
inst.type_ref = None
inst.type = lookup_fully_qualified_typeinfo(
self.modules, type_ref, allow_missing=self.allow_missing
)
# TODO: Is this needed or redundant?
# Also fix up the bases, just in case.
for base in inst.type.bases:
if base.type is NOT_READY:
base.accept(self)
for a in inst.args:
a.accept(self)
if inst.last_known_value is not None:
inst.last_known_value.accept(self)
if inst.extra_attrs:
for v in inst.extra_attrs.attrs.values():
v.accept(self)
def visit_type_alias_type(self, t: TypeAliasType) -> None:
type_ref = t.type_ref
if type_ref is None:
return # We've already been here.
t.type_ref = None
t.alias = lookup_fully_qualified_alias(
self.modules, type_ref, allow_missing=self.allow_missing
)
for a in t.args:
a.accept(self)
def visit_any(self, o: Any) -> None:
pass # Nothing to descend into.
def visit_callable_type(self, ct: CallableType) -> None:
if ct.fallback:
ct.fallback.accept(self)
for argt in ct.arg_types:
# argt may be None, e.g. for __self in NamedTuple constructors.
if argt is not None:
argt.accept(self)
if ct.ret_type is not None:
ct.ret_type.accept(self)
for v in ct.variables:
v.accept(self)
if ct.type_guard is not None:
ct.type_guard.accept(self)
if ct.type_is is not None:
ct.type_is.accept(self)
def visit_overloaded(self, t: Overloaded) -> None:
for ct in t.items:
ct.accept(self)
def visit_erased_type(self, o: Any) -> None:
# This type should exist only temporarily during type inference
raise RuntimeError("Shouldn't get here", o)
def visit_deleted_type(self, o: Any) -> None:
pass # Nothing to descend into.
def visit_none_type(self, o: Any) -> None:
pass # Nothing to descend into.
def visit_uninhabited_type(self, o: Any) -> None:
pass # Nothing to descend into.
def visit_partial_type(self, o: Any) -> None:
raise RuntimeError("Shouldn't get here", o)
def visit_tuple_type(self, tt: TupleType) -> None:
if tt.items:
for it in tt.items:
it.accept(self)
if tt.partial_fallback is not None:
tt.partial_fallback.accept(self)
def visit_typeddict_type(self, tdt: TypedDictType) -> None:
if tdt.items:
for it in tdt.items.values():
it.accept(self)
if tdt.fallback is not None:
if tdt.fallback.type_ref is not None:
if (
lookup_fully_qualified(
tdt.fallback.type_ref,
self.modules,
raise_on_missing=not self.allow_missing,
)
is None
):
# We reject fake TypeInfos for TypedDict fallbacks because
# the latter are used in type checking and must be valid.
tdt.fallback.type_ref = "typing._TypedDict"
tdt.fallback.accept(self)
def visit_literal_type(self, lt: LiteralType) -> None:
lt.fallback.accept(self)
def visit_type_var(self, tvt: TypeVarType) -> None:
if tvt.values:
for vt in tvt.values:
vt.accept(self)
tvt.upper_bound.accept(self)
tvt.default.accept(self)
def visit_param_spec(self, p: ParamSpecType) -> None:
p.upper_bound.accept(self)
p.default.accept(self)
p.prefix.accept(self)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> None:
t.tuple_fallback.accept(self)
t.upper_bound.accept(self)
t.default.accept(self)
def visit_unpack_type(self, u: UnpackType) -> None:
u.type.accept(self)
def visit_parameters(self, p: Parameters) -> None:
for argt in p.arg_types:
if argt is not None:
argt.accept(self)
for var in p.variables:
var.accept(self)
def visit_unbound_type(self, o: UnboundType) -> None:
for a in o.args:
a.accept(self)
def visit_union_type(self, ut: UnionType) -> None:
if ut.items:
for it in ut.items:
it.accept(self)
def visit_type_type(self, t: TypeType) -> None:
t.item.accept(self)
def lookup_fully_qualified_typeinfo(
modules: dict[str, MypyFile], name: str, *, allow_missing: bool
) -> TypeInfo:
stnode = lookup_fully_qualified(name, modules, raise_on_missing=not allow_missing)
node = stnode.node if stnode else None
if isinstance(node, TypeInfo):
return node
else:
# Looks like a missing TypeInfo during an initial daemon load, put something there
assert (
allow_missing
), "Should never get here in normal mode, got {}:{} instead of TypeInfo".format(
type(node).__name__, node.fullname if node else ""
)
return missing_info(modules)
def lookup_fully_qualified_alias(
modules: dict[str, MypyFile], name: str, *, allow_missing: bool
) -> TypeAlias:
stnode = lookup_fully_qualified(name, modules, raise_on_missing=not allow_missing)
node = stnode.node if stnode else None
if isinstance(node, TypeAlias):
return node
elif isinstance(node, TypeInfo):
if node.special_alias:
# Already fixed up.
return node.special_alias
if node.tuple_type:
alias = TypeAlias.from_tuple_type(node)
elif node.typeddict_type:
alias = TypeAlias.from_typeddict_type(node)
else:
assert allow_missing
return missing_alias()
node.special_alias = alias
return alias
else:
# Looks like a missing TypeAlias during an initial daemon load, put something there
assert (
allow_missing
), "Should never get here in normal mode, got {}:{} instead of TypeAlias".format(
type(node).__name__, node.fullname if node else ""
)
return missing_alias()
_SUGGESTION: Final = "<missing {}: *should* have gone away during fine-grained update>"
def missing_info(modules: dict[str, MypyFile]) -> TypeInfo:
suggestion = _SUGGESTION.format("info")
dummy_def = ClassDef(suggestion, Block([]))
dummy_def.fullname = suggestion
info = TypeInfo(SymbolTable(), dummy_def, "<missing>")
obj_type = lookup_fully_qualified_typeinfo(modules, "builtins.object", allow_missing=False)
info.bases = [Instance(obj_type, [])]
info.mro = [info, obj_type]
return info
def missing_alias() -> TypeAlias:
suggestion = _SUGGESTION.format("alias")
return TypeAlias(AnyType(TypeOfAny.special_form), suggestion, "<missing>", line=-1, column=-1)
| TypeFixer |
python | getsentry__sentry | src/sentry/monitors/serializers.py | {
"start": 1540,
"end": 1755
} | class ____(TypedDict):
startingTimestamp: datetime
resolvingTimestamp: datetime
brokenNotice: MonitorEnvBrokenDetectionSerializerResponse | None
@register(MonitorIncident)
| MonitorIncidentSerializerResponse |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofworkv2.py | {
"start": 113195,
"end": 116027
} | class ____(fixtures.MappedTest, testing.AssertsExecutionResults):
"""test support for custom datatypes that return a non-__bool__ value
when compared via __eq__(), eg. ticket 3469"""
@classmethod
def define_tables(cls, metadata):
from sqlalchemy import TypeDecorator
class NoBool:
def __nonzero__(self):
raise NotImplementedError("not supported")
class MyWidget:
def __init__(self, text):
self.text = text
def __eq__(self, other):
return NoBool()
cls.MyWidget = MyWidget
class MyType(TypeDecorator):
impl = String(50)
cache_ok = True
def process_bind_param(self, value, dialect):
if value is not None:
value = value.text
return value
def process_result_value(self, value, dialect):
if value is not None:
value = MyWidget(value)
return value
Table(
"test",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("value", MyType),
Column("unrelated", String(50)),
)
@classmethod
def setup_classes(cls):
class Thing(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Thing = cls.classes.Thing
cls.mapper_registry.map_imperatively(Thing, cls.tables.test)
def test_update_against_none(self):
Thing = self.classes.Thing
s = fixture_session()
s.add(Thing(value=self.MyWidget("foo")))
s.commit()
t1 = s.query(Thing).first()
t1.value = None
s.commit()
eq_(s.query(Thing.value).scalar(), None)
def test_update_against_something_else(self):
Thing = self.classes.Thing
s = fixture_session()
s.add(Thing(value=self.MyWidget("foo")))
s.commit()
t1 = s.query(Thing).first()
t1.value = self.MyWidget("bar")
s.commit()
eq_(s.query(Thing.value).scalar().text, "bar")
def test_no_update_no_change(self):
Thing = self.classes.Thing
s = fixture_session()
s.add(Thing(value=self.MyWidget("foo"), unrelated="unrelated"))
s.commit()
t1 = s.query(Thing).first()
t1.unrelated = "something else"
self.assert_sql_execution(
testing.db,
s.commit,
CompiledSQL(
"UPDATE test SET unrelated=:unrelated "
"WHERE test.id = :test_id",
[{"test_id": 1, "unrelated": "something else"}],
),
)
eq_(s.query(Thing.value).scalar().text, "foo")
| TypeWoBoolTest |
python | django__django | tests/queries/tests.py | {
"start": 173080,
"end": 173450
} | class ____(TestCase):
def test_ticket_21203(self):
p = Ticket21203Parent.objects.create(parent_bool=True)
c = Ticket21203Child.objects.create(parent=p)
qs = Ticket21203Child.objects.select_related("parent").defer("parent__created")
self.assertSequenceEqual(qs, [c])
self.assertIs(qs[0].parent.parent_bool, True)
| Ticket21203Tests |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0122_add_httpheader_option.py | {
"start": 150,
"end": 1212
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0121_remove_requirements_file"),
]
operations = [
migrations.AlterField(
model_name="httpheader",
name="name",
field=models.CharField(
choices=[
("access_control_allow_origin", "Access-Control-Allow-Origin"),
("access_control_allow_headers", "Access-Control-Allow-Headers"),
("access_control_expose_headers", "Access-Control-Expose-Headers"),
("content_security_policy", "Content-Security-Policy"),
("feature_policy", "Feature-Policy"),
("permissions_policy", "Permissions-Policy"),
("referrer_policy", "Referrer-Policy"),
("x_frame_options", "X-Frame-Options"),
("x_content_type_options", "X-Content-Type-Options"),
],
max_length=128,
),
),
]
| Migration |
python | gabrielfalcao__HTTPretty | httpretty/core.py | {
"start": 5666,
"end": 11505
} | class ____(BaseHTTPRequestHandler, BaseClass):
r"""Represents a HTTP request. It takes a valid multi-line,
``\r\n`` separated string with HTTP headers and parse them out using
the internal `parse_request` method.
It also replaces the `rfile` and `wfile` attributes with :py:class:`io.BytesIO`
instances so that we guarantee that it won't make any I/O, neither
for writing nor reading.
It has some convenience attributes:
``headers`` -> a mimetype object that can be cast into a dictionary,
contains all the request headers
``protocol`` -> the protocol of this host, inferred from the port
of the underlying fake TCP socket.
``host`` -> the hostname of this request.
``url`` -> the full url of this request.
``path`` -> the path of the request.
``method`` -> the HTTP method used in this request.
``querystring`` -> a dictionary containing lists with the
attributes. Please notice that if you need a single value from a
query string you will need to get it manually like:
``body`` -> the request body as a string.
``parsed_body`` -> the request body parsed by ``parse_request_body``.
.. testcode::
>>> request.querystring
{'name': ['Gabriel Falcao']}
>>> print request.querystring['name'][0]
"""
def __init__(self, headers, body='', sock=None, path_encoding = 'iso-8859-1'):
# first of all, lets make sure that if headers or body are
# unicode strings, it must be converted into a utf-8 encoded
# byte string
self.created_at = time.time()
self.raw_headers = utf8(headers.strip())
self._body = utf8(body)
self.connection = sock
# Now let's concatenate the headers with the body, and create
# `rfile` based on it
self.rfile = io.BytesIO(b'\r\n\r\n'.join([self.raw_headers, self.body]))
# Creating `wfile` as an empty BytesIO, just to avoid any
# real I/O calls
self.wfile = io.BytesIO()
# parsing the request line preemptively
self.raw_requestline = self.rfile.readline()
# initiating the error attributes with None
self.error_code = None
self.error_message = None
# Parse the request based on the attributes above
if not self.parse_request():
return
# Now 2 convenient attributes for the HTTPretty API:
# - `path`
# - `querystring` holds a dictionary with the parsed query string
# - `parsed_body` a string
try:
self.path = self.path.encode(path_encoding)
except UnicodeDecodeError:
pass
self.path = decode_utf8(self.path)
qstring = self.path.split("?", 1)[-1]
self.querystring = self.parse_querystring(qstring)
# And the body will be attempted to be parsed as
# `application/json` or `application/x-www-form-urlencoded`
"""a dictionary containing parsed request body or None if
HTTPrettyRequest doesn't know how to parse it. It currently
supports parsing body data that was sent under the
``content`-type` headers values: ``application/json`` or
``application/x-www-form-urlencoded``
"""
self.parsed_body = self.parse_request_body(self._body)
@property
def method(self):
"""the HTTP method used in this request"""
return self.command
@property
def protocol(self):
"""the protocol used in this request"""
proto = ''
if not self.connection:
return ''
elif self.connection.is_http:
proto = 'http'
if self.connection.is_secure:
proto = 'https'
return proto
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = utf8(value)
# And the body will be attempted to be parsed as
# `application/json` or `application/x-www-form-urlencoded`
self.parsed_body = self.parse_request_body(self._body)
def __nonzero__(self):
return bool(self.body) or bool(self.raw_headers)
@property
def url(self):
"""the full url of this recorded request"""
return "{}://{}{}".format(self.protocol, self.host, self.path)
@property
def host(self):
return self.headers.get('Host') or '<unknown>'
def __str__(self):
tmpl = '<HTTPrettyRequest("{}", "{}", headers={}, body={})>'
return tmpl.format(
self.method,
self.url,
dict(self.headers),
len(self.body),
)
def parse_querystring(self, qs):
"""parses an UTF-8 encoded query string into a dict of string lists
:param qs: a querystring
:returns: a dict of lists
"""
expanded = unquote_utf8(qs)
parsed = parse_qs(expanded)
result = {}
for k in parsed:
result[k] = list(map(decode_utf8, parsed[k]))
return result
def parse_request_body(self, body):
"""Attempt to parse the post based on the content-type passed.
Return the regular body if not
:param body: string
:returns: a python object such as dict or list in case the deserialization suceeded. Else returns the given param ``body``
"""
PARSING_FUNCTIONS = {
'application/json': json.loads,
'text/json': json.loads,
'application/x-www-form-urlencoded': self.parse_querystring,
}
content_type = self.headers.get('content-type', '')
do_parse = PARSING_FUNCTIONS.get(content_type, FALLBACK_FUNCTION)
try:
body = decode_utf8(body)
return do_parse(body)
except Exception:
return body
| HTTPrettyRequest |
python | pyodide__pyodide | src/py/_pyodide/jsbind.py | {
"start": 1868,
"end": 9736
} | class ____:
def unpack_generic_alias(self, x: _GenericAlias) -> Any:
if isinstance(x, _UnionGenericAlias):
if len(x.__args__) != 2:
return None
e0 = x.__args__[0]
e1 = x.__args__[1]
e0isNone = e0 == type(None) # noqa: E721
e1isNone = e1 == type(None) # noqa: E721
if (not e0isNone) and (not e1isNone):
return None
if e0isNone:
x = e1
if e1isNone:
x = e0
if isinstance(x, GenericAlias) and x.__name__ in ["Future", "Awaitable"]:
arg = x.__args__[0]
return create_promise_converter(self.js2py_annotation(arg))
if isinstance(x, _AnnotatedAlias):
return x.__metadata__[0]
return None
def js2py_annotation(self, annotation: Any) -> "Js2PyConverter":
if isinstance(annotation, (_GenericAlias, GenericAlias)): # noqa: UP038
annotation = self.unpack_generic_alias(annotation)
if annotation is None:
return None
if isinstance(annotation, Js2PyConverter):
return annotation
res = getattr(annotation, "js2py", None)
if res:
return res
if issubclass(annotation, BindClass):
return js2py_bind(annotation)
return None
def py2js_annotation(self, annotation: Any) -> "Py2JsConverter":
if isinstance(annotation, _GenericAlias):
annotation = self.unpack_generic_alias(annotation)
if annotation is None:
return None
if isinstance(annotation, Py2JsConverter):
return annotation
res = getattr(annotation, "py2js", None)
if res:
return res
return None
type_converter = TypeConverter()
def get_attr_sig_prop(attr_sig):
"""Helper for get_attr_sig in case that the attribute we're looking up is a
property with annotation.
"""
# If the attribute is marked with BindClass, then we should attach bind it
# to the resulting proxy.
if isinstance(attr_sig, BindClass):
return (False, attr_sig)
# Otherwise, make it into a converter.
if converter := type_converter.js2py_annotation(attr_sig):
return (True, converter)
return (False, None)
def get_attr_sig_method_helper(sig, attr):
"""Check if sig has a method named attr. If so, get the appropriate
signature.
Returns: None or a valid get_attr_sig return value.
"""
res_attr = getattr_static(sig, attr, None)
# If it isn't a static method, it has one too many arguments. Easiest way to
# communicate this to func_to_sig is to use __get__ to bind an argument. We
# have to do this manually because `sig` is a class not an instance.
if res_attr and callable(res_attr):
# The argument to __get__ doesn't matter.
res_attr = res_attr.__get__(sig)
if res_attr:
return res_attr
sig_getattr = getattr(sig, "__getattr__", None)
if not sig_getattr:
return None
if not hasattr(sig_getattr, "_type_hints"):
sig_getattr._type_hints = get_type_hints(sig_getattr)
if not sig_getattr._type_hints:
return None
attr_sig = sig_getattr._type_hints.get("return")
if not attr_sig:
return None
return attr_sig
def get_attr_sig_method(sig, attr):
if not hasattr(sig, "_method_cache"):
sig._method_cache = {}
if res_attr := sig._method_cache.get(attr, None):
return res_attr
res = get_attr_sig_method_helper(sig, attr)
sig._method_cache[attr] = res
return res
def get_attr_sig(sig, attr):
"""Called from JsProxy_GetAttr when the proxy has a signature.
Must return a pair:
(False, sig) -- if the result is a JsProxy bind sig to it
(True, converter) -- apply converter to the result
"""
# Look up type hints and cache them if we haven't yet. We could use
# `functools.cache` for this, but it seems to keep `sig` alive for longer
# than necessary.
# TODO: Make a cache decorator that uses a weakmap.
if not hasattr(sig, "_type_hints"):
sig._type_hints = get_type_hints(sig, include_extras=True)
# See if there is an attribute type hint
if prop_sig := sig._type_hints.get(attr, None):
return get_attr_sig_prop(prop_sig)
if res := get_attr_sig_method(sig, attr):
return (False, res)
return (False, None)
no_default = Parameter.empty
def func_to_sig(f):
"""Called from jsproxy_call.c when we're about to call a callable.
Has to return an appropriate JsFuncSignature.
"""
cache_name = "_js_sig"
if getattr(f, "__qualname__", None) == "type":
cls = f.__args__[0]
cache = cls
else:
if isclass(f):
cache = f.__call__
f = f.__call__.__get__(f)
else:
cache = f
if ismethod(cache):
# We can't add extra attributes to a methodwrapper.
cache = cache.__func__
cache_name = "_js_meth_sig"
cls = None
if res := getattr(cache, cache_name, None):
return res
if cls:
f = cls.__init__.__get__(cls)
res = func_to_sig_inner(f, cls)
setattr(cache, cache_name, res)
return res
def func_to_sig_inner(f, cls):
sig = signature(f)
posparams = []
posparams_defaults = []
posparams_nmandatory = 0
varpos = None
kwparam_names = []
kwparam_converters = []
kwparam_defaults = []
varkwd = None
types = get_type_hints(f, include_extras=True)
should_construct = bool(cls)
for p in sig.parameters.values():
converter = (
type_converter.py2js_annotation(types.get(p.name, None)) or py2js_default
)
match p.kind:
case Parameter.POSITIONAL_ONLY:
posparams.append(converter)
if p.default == Parameter.empty:
posparams_nmandatory += 1
else:
posparams_defaults.append(p.default)
case Parameter.POSITIONAL_OR_KEYWORD:
raise RuntimeError("Don't currently handle POS_OR_KWD args")
case Parameter.KEYWORD_ONLY:
kwparam_names.append(p.name)
kwparam_converters.append(converter)
kwparam_defaults.append(p.default)
case Parameter.VAR_POSITIONAL:
varpos = converter
case Parameter.VAR_KEYWORD:
varkwd = converter
case _:
raise RuntimeError("Unreachable")
if len(kwparam_names) > 64:
# We use a bitflag to check which kwparams have been passed to fill in
# defaults / raise type error.
raise RuntimeError("Cannot handle function with more than 64 kwonly args")
result = type_converter.js2py_annotation(types.get("return", cls))
if iscoroutinefunction(f):
if result is None:
result = js2py_default
result = create_promise_converter(result)
elif result is None:
result = js2py_default_call_result
return JsFuncSignature(
f,
should_construct,
posparams_nmandatory,
tuple(posparams),
tuple(posparams_defaults),
varpos,
tuple(kwparam_names),
tuple(kwparam_converters),
tuple(kwparam_defaults),
varkwd,
result,
)
def _default_sig_stencil(*args, **kwargs):
pass
default_signature = func_to_sig_inner(_default_sig_stencil, None)
def bind_class_sig(sig):
"""Called from JsProxy_bind_class.
Just replace sig with type[sig]. This is consistent with what we'd get from
a function return value: if a function returns a class then it should be typed:
def f() -> type[A]:
...
"""
return type[sig]
| TypeConverter |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-gridly/source_gridly/helpers.py | {
"start": 319,
"end": 4896
} | class ____(object):
base_url = "https://api.gridly.com/v1/"
@staticmethod
def view_detail_url(view_id: str) -> str:
return Helpers.base_url + f"views/{view_id}"
@staticmethod
def view_list_url(grid_id: str) -> str:
return Helpers.base_url + f"views?gridId={grid_id}"
@staticmethod
def grid_detail_url(grid_id: str) -> str:
return Helpers.base_url + f"grids/{grid_id}"
@staticmethod
def get_views(auth: TokenAuthenticator, grid_id: str) -> Dict[str, Any]:
url = Helpers.view_list_url(grid_id)
try:
response = requests.get(url, headers=auth.get_auth_header())
response.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
raise Exception("Invalid API Key")
elif e.response.status_code == 404:
raise Exception(f"Grid id '{grid_id}' not found")
else:
raise Exception(f"Error getting listing views of grid '{grid_id}'")
return response.json()
@staticmethod
def get_grid(auth: TokenAuthenticator, grid_id: str) -> Dict[str, Any]:
url = Helpers.grid_detail_url(grid_id)
try:
response = requests.get(url, headers=auth.get_auth_header())
response.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
raise Exception("Invalid API Key")
elif e.response.status_code == 404:
raise Exception(f"Grid '{grid_id}' not found")
else:
raise Exception(f"Error getting grid {grid_id}: {e}")
return response.json()
@staticmethod
def get_view(auth: TokenAuthenticator, view_id: str) -> Dict[str, Any]:
url = Helpers.view_detail_url(view_id)
try:
response = requests.get(url, headers=auth.get_auth_header())
response.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
raise Exception("Invalid API Key")
elif e.response.status_code == 404:
raise Exception(f"View '{view_id}' not found")
else:
raise Exception(f"Error getting view {view_id}: {e}")
return response.json()
@staticmethod
def to_airbyte_data_type(column_type: str) -> str:
if column_type == "number":
return "number"
elif column_type == "boolean":
return "boolean"
else:
return "string"
@staticmethod
def get_json_schema(view: Dict[str, Any]) -> Dict[str, str]:
columns = view.get("columns", {})
properties = {}
for column in columns:
column_id = column.get("id")
column_type = column.get("type", "singleLine")
properties[column_id] = {"type": ["null", Helpers.to_airbyte_data_type(column_type)]}
json_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": properties,
}
return json_schema
@staticmethod
def get_airbyte_stream(view: Dict[str, Any]) -> AirbyteStream:
view_name = view.get("name")
columns = view.get("columns", {})
properties = {}
for column in columns:
column_id = column.get("id")
column_type = column.get("type", "singleLine")
properties[column_id] = {"type": ["null", Helpers.to_airbyte_data_type(column_type)]}
json_schema = Helpers.get_json_schema(view)
return AirbyteStream(
name=view_name,
json_schema=json_schema,
supported_sync_modes=[SyncMode.full_refresh],
supported_destination_sync_modes=[DestinationSyncMode.overwrite, DestinationSyncMode.append_dedup],
)
@staticmethod
def transform_record(record: Dict[str, Any], schema: Dict[str, Any]) -> Dict[str, Any]:
schema_properties = schema.get("properties")
columns = [k for k, v in schema_properties.items()]
cells = record.get("cells")
transformed_record = {}
if "_recordId" in columns:
transformed_record["_recordId"] = record.get("id")
if "_path" in columns:
transformed_record["_path"] = record.get("path", "")
for cell in cells:
transformed_record[cell.get("columnId")] = cell.get("value")
return transformed_record
| Helpers |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.