language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | astropy__astropy | astropy/coordinates/attributes.py | {
"start": 15409,
"end": 17631
} | class ____(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
`~astropy.coordinates.SkyCoord` or a low-level frame instance. If a
low-level frame instance is provided, it will always be upgraded to be a
`~astropy.coordinates.SkyCoord` to ensure consistent transformation
behavior. The coordinate object will always be returned as a low-level
frame instance when accessed.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
doc : str
Description of the frame attribute for help and documentation
"""
def __init__(self, frame, default=None, secondary_attribute="", **kwargs):
self._frame = frame
super().__init__(default, secondary_attribute, **kwargs)
def convert_input(self, value):
"""
Checks that the input is a SkyCoord with the necessary units (or the
special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from .sky_coordinate import SkyCoord
if value is None:
return None, False
elif isinstance(value, SkyCoord) and isinstance(value.frame, self._frame):
return value.frame, True
elif isinstance(value, self._frame):
return value, False
else:
value = SkyCoord(value) # always make the value a SkyCoord
transformedobj = value.transform_to(self._frame)
return transformedobj.frame, True
| CoordinateAttribute |
python | apache__airflow | providers/apache/kafka/tests/integration/apache/kafka/operators/test_consume.py | {
"start": 2752,
"end": 4954
} | class ____:
"""
test ConsumeFromTopicOperator
"""
def test_consumer_operator_test_1(self):
"""test consumer works with string import"""
TOPIC = "operator.consumer.test.integration.test_1"
p = Producer(**{"bootstrap.servers": "broker:29092"})
p.produce(TOPIC, TOPIC)
assert len(p) == 1
x = p.flush()
assert x == 0
operator = ConsumeFromTopicOperator(
kafka_config_id=TOPIC,
topics=[TOPIC],
apply_function="integration.apache.kafka.operators.test_consume._basic_message_tester",
apply_function_kwargs={"test": TOPIC},
task_id="test",
poll_timeout=10,
)
x = operator.execute(context={})
def test_consumer_operator_test_2(self):
"""test consumer works with direct binding"""
TOPIC = "operator.consumer.test.integration.test_2"
p = Producer(**{"bootstrap.servers": "broker:29092"})
p.produce(TOPIC, TOPIC)
assert len(p) == 1
x = p.flush()
assert x == 0
operator = ConsumeFromTopicOperator(
kafka_config_id=TOPIC,
topics=[TOPIC],
apply_function=_basic_message_tester,
apply_function_kwargs={"test": TOPIC},
task_id="test",
poll_timeout=10,
)
x = operator.execute(context={})
def test_consumer_operator_test_3(self):
"""test consumer works in batch mode"""
TOPIC = "operator.consumer.test.integration.test_3"
p = Producer(**{"bootstrap.servers": "broker:29092"})
for _ in range(20):
p.produce(TOPIC, TOPIC)
assert len(p) == 20
x = p.flush()
assert x == 0
operator = ConsumeFromTopicOperator(
kafka_config_id=TOPIC,
topics=[TOPIC],
apply_function_batch=_batch_tester,
apply_function_kwargs={"test_string": TOPIC},
task_id="test",
poll_timeout=10,
commit_cadence="end_of_batch",
max_messages=30,
max_batch_size=10,
)
x = operator.execute(context={})
| TestConsumeFromTopic |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 506217,
"end": 507516
} | class ____(sgqlc.types.Type):
"""A calendar of contributions made on GitHub by a user."""
__schema__ = github_schema
__field_names__ = ("colors", "is_halloween", "months", "total_contributions", "weeks")
colors = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(String))), graphql_name="colors")
"""A list of hex color codes used in this calendar. The darker the
color, the more contributions it represents.
"""
is_halloween = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isHalloween")
"""Determine if the color set was chosen because it's currently
Halloween.
"""
months = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null("ContributionCalendarMonth"))), graphql_name="months"
)
"""A list of the months of contributions in this calendar."""
total_contributions = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalContributions")
"""The count of total contributions in the calendar."""
weeks = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null("ContributionCalendarWeek"))), graphql_name="weeks"
)
"""A list of the weeks of contributions in this calendar."""
| ContributionCalendar |
python | lepture__authlib | authlib/integrations/base_client/sync_app.py | {
"start": 3938,
"end": 5916
} | class ____(_RequestMixin, OAuth1Base):
def request(self, method, url, token=None, **kwargs):
with self._get_oauth_client() as session:
return self._send_token_request(session, method, url, token, kwargs)
def create_authorization_url(self, redirect_uri=None, **kwargs):
"""Generate the authorization url and state for HTTP redirect.
:param redirect_uri: Callback or redirect URI for authorization.
:param kwargs: Extra parameters to include.
:return: dict
"""
if not self.authorize_url:
raise RuntimeError('Missing "authorize_url" value')
if self.authorize_params:
kwargs.update(self.authorize_params)
with self._get_oauth_client() as client:
client.redirect_uri = redirect_uri
params = self.request_token_params or {}
request_token = client.fetch_request_token(self.request_token_url, **params)
log.debug(f"Fetch request token: {request_token!r}")
url = client.create_authorization_url(self.authorize_url, **kwargs)
state = request_token["oauth_token"]
return {"url": url, "request_token": request_token, "state": state}
def fetch_access_token(self, request_token=None, **kwargs):
"""Fetch access token in one step.
:param request_token: A previous request token for OAuth 1.
:param kwargs: Extra parameters to fetch access token.
:return: A token dict.
"""
with self._get_oauth_client() as client:
if request_token is None:
raise MissingRequestTokenError()
# merge request token with verifier
token = {}
token.update(request_token)
token.update(kwargs)
client.token = token
params = self.access_token_params or {}
token = client.fetch_access_token(self.access_token_url, **params)
return token
| OAuth1Mixin |
python | realpython__materials | web-scraping-with-scrapy-and-mongodb/books/books/spiders/book.py | {
"start": 50,
"end": 1362
} | class ____(scrapy.Spider):
name = "book"
allowed_domains = ["books.toscrape.com"]
start_urls = ["https://books.toscrape.com/"]
def start_requests(self):
for url in self.start_urls:
yield scrapy.Request(
url, callback=self.parse, errback=self.log_error
)
def parse(self, response):
"""
@url https://books.toscrape.com
@returns items 20 20
@returns request 1 50
@scrapes url title price
"""
for book in response.css("article.product_pod"):
item = BooksItem()
item["url"] = book.css("h3 > a::attr(href)").get()
item["title"] = book.css("h3 > a::attr(title)").get()
item["price"] = book.css(".price_color::text").get()
yield item
next_page = response.css("li.next > a::attr(href)").get()
if next_page:
next_page_url = response.urljoin(next_page)
self.logger.info(
f"Navigating to next page with URL {next_page_url}."
)
yield scrapy.Request(
url=next_page_url,
callback=self.parse,
errback=self.log_error,
)
def log_error(self, failure):
self.logger.error(repr(failure))
| BookSpider |
python | walkccc__LeetCode | solutions/2477. Minimum Fuel Cost to Report to the Capital/2477.py | {
"start": 0,
"end": 491
} | class ____:
def minimumFuelCost(self, roads: list[list[int]], seats: int) -> int:
ans = 0
tree = [[] for _ in range(len(roads) + 1)]
for u, v in roads:
tree[u].append(v)
tree[v].append(u)
def dfs(u: int, prev: int) -> int:
nonlocal ans
people = 1 + sum(dfs(v, u) for v in tree[u] if v != prev)
if u > 0:
# the number of cars needed
ans += int(math.ceil(people / seats))
return people
dfs(0, -1)
return ans
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pytest_style/is_pytest_test.py | {
"start": 79,
"end": 237
} | class ____:
def test_this_too_is_a_test(self, a=1): ...
def testAndOfCourseThis(self, a=1): ...
# No errors
def this_is_not_a_test(a=1): ...
| TestClass |
python | PyCQA__pylint | tests/functional/t/typing_generic.py | {
"start": 271,
"end": 330
} | class ____(Generic[T], metaclass=ABCMeta):
"""Base"""
| Base |
python | openai__openai-python | src/openai/types/beta/realtime/response_function_call_arguments_done_event.py | {
"start": 216,
"end": 793
} | class ____(BaseModel):
arguments: str
"""The final arguments as a JSON string."""
call_id: str
"""The ID of the function call."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the function call item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
type: Literal["response.function_call_arguments.done"]
"""The event type, must be `response.function_call_arguments.done`."""
| ResponseFunctionCallArgumentsDoneEvent |
python | pennersr__django-allauth | allauth/usersessions/app_settings.py | {
"start": 0,
"end": 820
} | class ____:
def __init__(self, prefix):
self.prefix = prefix
def _setting(self, name, dflt):
from allauth.utils import get_setting
return get_setting(self.prefix + name, dflt)
@property
def ADAPTER(self):
return self._setting(
"ADAPTER", "allauth.usersessions.adapter.DefaultUserSessionsAdapter"
)
@property
def TRACK_ACTIVITY(self):
"""Whether or not sessions are to be actively tracked. When tracking is
enabled, the last seen IP address and last seen timestamp will be kept
track of.
"""
return self._setting("TRACK_ACTIVITY", False)
_app_settings = AppSettings("USERSESSIONS_")
def __getattr__(name):
# See https://peps.python.org/pep-0562/
return getattr(_app_settings, name)
| AppSettings |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 8342,
"end": 9022
} | class ____(Web3Exception):
"""
Raised when a JSON-RPC response contains an error field.
"""
def __init__(
self,
message: str,
rpc_response: RPCResponse | None = None,
user_message: str | None = None,
) -> None:
if user_message is None:
user_message = (
"An RPC error was returned by the node. Check the message provided in "
"the error and any available logs for more information."
)
super().__init__(
message,
user_message=user_message,
)
self.message = message
self.rpc_response = rpc_response
| Web3RPCError |
python | doocs__leetcode | solution/1200-1299/1243.Array Transformation/Solution.py | {
"start": 0,
"end": 450
} | class ____:
def transformArray(self, arr: List[int]) -> List[int]:
f = True
while f:
f = False
t = arr[:]
for i in range(1, len(t) - 1):
if t[i] > t[i - 1] and t[i] > t[i + 1]:
arr[i] -= 1
f = True
if t[i] < t[i - 1] and t[i] < t[i + 1]:
arr[i] += 1
f = True
return arr
| Solution |
python | fastai__fastai | fastai/data/transforms.py | {
"start": 15309,
"end": 16169
} | class ____(DisplayedTransform):
"Transform image to float tensor, optionally dividing by 255 (e.g. for images)."
order = 10 #Need to run after PIL transforms on the GPU
def __init__(self, div=255., div_mask=1): store_attr()
def encodes(self, o:TensorImage): return o.float().div_(self.div)
def encodes(self, o:TensorMask ): return (o.long() / self.div_mask).long()
def decodes(self, o:TensorImage): return ((o.clamp(0., 1.) * self.div).long()) if self.div else o
# %% ../../nbs/05_data.transforms.ipynb 114
def broadcast_vec(dim, ndim, *t, cuda=True):
"Make a vector broadcastable over `dim` (out of `ndim` total) by prepending and appending unit axes"
v = [1]*ndim
v[dim] = -1
f = to_device if cuda else noop
return [f(tensor(o).view(*v)) for o in t]
# %% ../../nbs/05_data.transforms.ipynb 115
@docs
| IntToFloatTensor |
python | getsentry__sentry | src/sentry/preprod/api/endpoints/project_preprod_check_for_updates.py | {
"start": 1242,
"end": 8897
} | class ____(ProjectEndpoint):
owner = ApiOwner.EMERGE_TOOLS
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
permission_classes = (ProjectDistributionPermission,)
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.ORGANIZATION: RateLimit(limit=100, window=60),
}
}
)
def get(self, request: Request, project: Project) -> Response:
"""
Check for updates for a preprod artifact
"""
provided_main_binary_identifier = request.GET.get("main_binary_identifier")
provided_app_id = request.GET.get("app_id")
provided_platform = request.GET.get("platform")
provided_build_version = request.GET.get("build_version")
provided_build_number = request.GET.get("build_number")
provided_build_configuration = request.GET.get("build_configuration")
provided_codesigning_type = request.GET.get("codesigning_type")
if not provided_app_id or not provided_platform or not provided_build_version:
return Response({"error": "Missing required parameters"}, status=400)
if not provided_main_binary_identifier and not provided_build_number:
return Response(
{"error": "Either main_binary_identifier or build_number must be provided"},
status=400,
)
build_configuration = None
if provided_build_configuration:
try:
build_configuration = PreprodBuildConfiguration.objects.get(
project=project,
name=provided_build_configuration,
)
except PreprodBuildConfiguration.DoesNotExist:
return Response({"error": "Invalid build configuration"}, status=400)
preprod_artifact = None
current = None
update = None
# Common filter logic
def get_base_filters() -> dict[str, Any]:
filter_kwargs: dict[str, Any] = {
"project": project,
"app_id": provided_app_id,
}
if provided_platform == "android":
filter_kwargs["artifact_type__in"] = [
PreprodArtifact.ArtifactType.AAB,
PreprodArtifact.ArtifactType.APK,
]
elif provided_platform == "ios":
filter_kwargs["artifact_type"] = PreprodArtifact.ArtifactType.XCARCHIVE
if provided_codesigning_type:
filter_kwargs["extras__codesigning_type"] = provided_codesigning_type
return filter_kwargs
try:
current_filter_kwargs = get_base_filters()
current_filter_kwargs.update(
{
"build_version": provided_build_version,
}
)
# Add main_binary_identifier filter if provided
if provided_main_binary_identifier:
current_filter_kwargs["main_binary_identifier"] = provided_main_binary_identifier
# Add build_number filter if provided
if provided_build_number is not None:
try:
current_filter_kwargs["build_number"] = int(provided_build_number)
except ValueError:
return Response({"error": "Invalid build_number format"}, status=400)
if build_configuration:
current_filter_kwargs["build_configuration"] = build_configuration
preprod_artifact = PreprodArtifact.objects.filter(**current_filter_kwargs).latest(
"date_added"
)
except PreprodArtifact.DoesNotExist:
logger.warning(
"No artifact found for binary identifier with version %s", provided_build_version
)
if preprod_artifact and preprod_artifact.build_version and preprod_artifact.build_number:
current = InstallableBuildDetails(
id=str(preprod_artifact.id),
build_version=preprod_artifact.build_version,
build_number=preprod_artifact.build_number,
release_notes=(
preprod_artifact.extras.get("release_notes")
if preprod_artifact.extras
else None
),
app_name=preprod_artifact.app_name,
download_url=get_download_url_for_artifact(preprod_artifact),
created_date=preprod_artifact.date_added.isoformat(),
)
# Get the update object - find the highest version available
# Get all build versions for this app and platform
new_build_filter_kwargs = get_base_filters()
if preprod_artifact:
new_build_filter_kwargs["build_configuration"] = preprod_artifact.build_configuration
if preprod_artifact.extras:
codesigning_type = preprod_artifact.extras.get("codesigning_type")
if codesigning_type:
new_build_filter_kwargs["extras__codesigning_type"] = codesigning_type
elif build_configuration:
new_build_filter_kwargs["build_configuration"] = build_configuration
all_versions = (
PreprodArtifact.objects.filter(**new_build_filter_kwargs)
.values_list("build_version", flat=True)
.distinct()
)
# Find the highest semver version
highest_version = None
for version in all_versions:
if version:
try:
parsed_version = parse_version(version)
if highest_version is None or parsed_version > parse_version(highest_version):
highest_version = version
except Exception:
# Skip invalid version strings
continue
# Get all artifacts for the highest version
if highest_version:
new_build_filter_kwargs["build_version"] = highest_version
potential_artifacts = PreprodArtifact.objects.filter(**new_build_filter_kwargs)
# Filter for installable artifacts and get the one with highest build_number
installable_artifacts = [
artifact for artifact in potential_artifacts if is_installable_artifact(artifact)
]
if len(installable_artifacts) > 0:
best_artifact = max(
installable_artifacts, key=lambda a: (a.build_number, a.date_added)
)
if not preprod_artifact or preprod_artifact.id != best_artifact.id:
if best_artifact.build_version and best_artifact.build_number:
update = InstallableBuildDetails(
id=str(best_artifact.id),
build_version=best_artifact.build_version,
build_number=best_artifact.build_number,
release_notes=(
best_artifact.extras.get("release_notes")
if best_artifact.extras
else None
),
app_name=best_artifact.app_name,
download_url=get_download_url_for_artifact(best_artifact),
created_date=best_artifact.date_added.isoformat(),
)
return Response(CheckForUpdatesApiResponse(current=current, update=update).dict())
| ProjectPreprodArtifactCheckForUpdatesEndpoint |
python | pytorch__pytorch | test/distributed/_composable/test_replicate_training.py | {
"start": 30102,
"end": 31967
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(4, torch.get_device_module(device_type).device_count())
@skip_if_lt_x_gpu(2)
def test_train_parity_with_shared_params(self):
self.run_subtests(
{
"use_activation_checkpointing": [False, True],
},
self._test_train_shared_params,
)
def _test_train_shared_params(
self,
use_activation_checkpointing: bool,
):
torch.manual_seed(42)
model_args = ModelArgs(n_layers=3, dropout_p=0.0, weight_tying=True)
model = Transformer(model_args)
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
for module in model.modules():
if isinstance(module, TransformerBlock):
if use_activation_checkpointing:
checkpoint(module)
replicate(module)
replicate(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
torch.manual_seed(42 + self.rank + 1)
for iter_idx in range(10):
inp = torch.randint(
0, model_args.vocab_size, (2, 16), device=device_type.type
)
losses: list[torch.Tensor] = []
for _model in (ref_model, model):
losses.append(_model(inp).sum())
losses[-1].backward()
for param in ref_model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad)
param.grad.div_(self.world_size)
for _optim in (ref_optim, optim):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
_optim.step()
self.assertEqual(losses[0], losses[1])
| TestReplicateSharedParams |
python | dagster-io__dagster | python_modules/dagster-test/dagster_test/toys/user_computed_data_versions/__init__.py | {
"start": 5546,
"end": 6326
} | class ____(TypedDict):
assets: Sequence[AssetInfo]
source_assets: Sequence[SourceAssetInfo]
SCHEMA: Schema = {
"assets": [
{"key": "alpha", "code_version": "lib/v1", "dependencies": set()},
{"key": "beta", "code_version": "lib/v1", "dependencies": {"alpha"}},
{"key": "epsilon", "code_version": "lib/v1", "dependencies": {"delta"}},
],
"source_assets": [
{"key": "delta"},
],
}
assets = [external_asset(asset_spec) for asset_spec in SCHEMA["assets"]]
source_assets = [
external_source_asset(source_asset_spec) for source_asset_spec in SCHEMA["source_assets"]
]
defs = Definitions(
assets=[*assets, *source_assets],
jobs=[define_asset_job("external_system_job", AssetSelection.assets("alpha", "beta"))],
)
| Schema |
python | jmcnamara__XlsxWriter | xlsxwriter/sharedstrings.py | {
"start": 369,
"end": 2824
} | class ____(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX sharedStrings file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
self.string_table = None
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self) -> None:
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
# Write the sst element.
self._write_sst()
# Write the sst strings.
self._write_sst_strings()
# Close the sst tag.
self._xml_end_tag("sst")
# Close the file.
self._xml_close()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_sst(self) -> None:
# Write the <sst> element.
xmlns = "http://schemas.openxmlformats.org/spreadsheetml/2006/main"
attributes = [
("xmlns", xmlns),
("count", self.string_table.count),
("uniqueCount", self.string_table.unique_count),
]
self._xml_start_tag("sst", attributes)
def _write_sst_strings(self) -> None:
# Write the sst string elements.
for string in self.string_table.string_array:
self._write_si(string)
def _write_si(self, string) -> None:
# Write the <si> element.
attributes = []
# Convert control character to a _xHHHH_ escape.
string = self._escape_control_characters(string)
# Add attribute to preserve leading or trailing whitespace.
if _preserve_whitespace(string):
attributes.append(("xml:space", "preserve"))
# Write any rich strings without further tags.
if string.startswith("<r>") and string.endswith("</r>"):
self._xml_rich_si_element(string)
else:
self._xml_si_element(string, attributes)
# A metadata class to store Excel strings between worksheets.
| SharedStrings |
python | tiangolo__fastapi | docs_src/handling_errors/tutorial005.py | {
"start": 500,
"end": 626
} | class ____(BaseModel):
title: str
size: int
@app.post("/items/")
async def create_item(item: Item):
return item
| Item |
python | getsentry__sentry | src/sentry/tasks/summaries/utils.py | {
"start": 2432,
"end": 4452
} | class ____:
accepted_error_count = 0
dropped_error_count = 0
accepted_transaction_count = 0
dropped_transaction_count = 0
accepted_replay_count = 0
dropped_replay_count = 0
new_substatus_count = 0
ongoing_substatus_count = 0
escalating_substatus_count = 0
regression_substatus_count = 0
total_substatus_count = 0
def __init__(self, project):
self.project = project
self.key_errors_by_id: list[tuple[int, int]] = []
self.key_errors_by_group: list[tuple[Group, int]] = []
# Array of (transaction_name, count_this_week, p95_this_week, count_last_week, p95_last_week)
self.key_transactions = []
# Array of (Group, count)
self.key_performance_issues = []
self.key_replay_events = []
# Dictionary of { timestamp: count }
self.error_count_by_day = {}
# Dictionary of { timestamp: count }
self.transaction_count_by_day = {}
# Dictionary of { timestamp: count }
self.replay_count_by_day = {}
def __repr__(self) -> str:
return "\n".join(
[
f"{self.key_errors_by_group}, ",
f"Errors: [Accepted {self.accepted_error_count}, Dropped {self.dropped_error_count}]",
f"Transactions: [Accepted {self.accepted_transaction_count} Dropped {self.dropped_transaction_count}]",
f"Replays: [Accepted {self.accepted_replay_count} Dropped {self.dropped_replay_count}]",
]
)
def check_if_project_is_empty(self):
return (
not self.key_errors_by_group
and not self.key_transactions
and not self.key_performance_issues
and not self.accepted_error_count
and not self.dropped_error_count
and not self.accepted_transaction_count
and not self.dropped_transaction_count
and not self.accepted_replay_count
and not self.dropped_replay_count
)
| ProjectContext |
python | cython__cython | Cython/Compiler/Code.py | {
"start": 143761,
"end": 144582
} | class ____:
def __init__(self, klass):
self.klass = klass
self.temps_allocated = {}
self.temps_free = {}
self.temps_count = 0
def reset(self):
for type, cnames in self.temps_allocated.items():
self.temps_free[type] = list(cnames)
def allocate_temp(self, type):
if type not in self.temps_allocated:
self.temps_allocated[type] = []
self.temps_free[type] = []
elif self.temps_free[type]:
return self.temps_free[type].pop(0)
cname = '%s%d' % (Naming.codewriter_temp_prefix, self.temps_count)
self.klass.declare_var(pos=None, name=cname, cname=cname, type=type, is_cdef=True)
self.temps_allocated[type].append(cname)
self.temps_count += 1
return cname
| ClosureTempAllocator |
python | davidhalter__parso | parso/python/tree.py | {
"start": 20764,
"end": 22146
} | class ____(Flow):
type = 'if_stmt'
__slots__ = ()
def get_test_nodes(self):
"""
E.g. returns all the `test` nodes that are named as x, below:
if x:
pass
elif x:
pass
"""
for i, c in enumerate(self.children):
if c in ('elif', 'if'):
yield self.children[i + 1]
def get_corresponding_test_node(self, node):
"""
Searches for the branch in which the node is and returns the
corresponding test node (see function above). However if the node is in
the test node itself and not in the suite return None.
"""
start_pos = node.start_pos
for check_node in reversed(list(self.get_test_nodes())):
if check_node.start_pos < start_pos:
if start_pos < check_node.end_pos:
return None
# In this case the node is within the check_node itself,
# not in the suite
else:
return check_node
def is_node_after_else(self, node):
"""
Checks if a node is defined after `else`.
"""
for c in self.children:
if c == 'else':
if node.start_pos > c.start_pos:
return True
else:
return False
| IfStmt |
python | pydata__xarray | asv_bench/benchmarks/groupby.py | {
"start": 4709,
"end": 4943
} | class ____(Resample):
def setup(self, *args, **kwargs):
requires_dask()
super().setup(**kwargs)
self.ds1d = self.ds1d.chunk({"time": 50})
self.ds2d = self.ds2d.chunk({"time": 50, "z": 4})
| ResampleDask |
python | huggingface__transformers | src/transformers/models/lxmert/modeling_lxmert.py | {
"start": 28693,
"end": 29926
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = LxmertPredictionHeadTransform(config)
# Decide the use of visual losses
visual_losses = {}
if config.visual_obj_loss:
visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
if config.visual_attr_loss:
visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
if config.visual_feat_loss:
visual_losses["feat"] = {
"shape": (-1, config.visual_feat_dim),
"num": config.visual_feat_dim,
}
self.visual_losses = visual_losses
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder_dict = nn.ModuleDict(
{key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
)
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
output = {}
for key in self.visual_losses:
output[key] = self.decoder_dict[key](hidden_states)
return output
| LxmertVisualObjHead |
python | facebook__pyre-check | tools/pysa_integration_tests/runner_lib.py | {
"start": 921,
"end": 12303
} | class ____(enum.IntEnum):
# 1-29 reserved for pyre and pysa client, see client/commands/commands.py
TEST_COMPARISON_DIFFERS = 30
TEST_MODEL_VERIFICATION_ERROR = 31
def is_test_function(define: str, code: int) -> bool:
return f"test_{code}_" in define
def is_test_class_method(define: str, code: int) -> bool:
define_split = define.split(".")
if len(define_split) < 2:
return False
return f"Test{code}" in define_split[-2]
def validate_test_functions_and_class_names(current_directory: Path) -> None:
LOG.info(
"Ensure all functions and classes in test_XXX files meet the expected format"
)
test_function_pattern = re.compile(r"test_\d{4}(_no)?_flag_\w+")
helper_function_pattern = re.compile(r"helper\w+")
test_class_pattern = re.compile(r"Test\d{4}\w+")
helper_class_pattern = re.compile(r"Helper\w+")
test_paths = [
path
for path in current_directory.glob("**/*.py")
if re.match(r"test(_\w+)?\.py$", path.name)
]
for test_path in test_paths:
parsed_ast = ast.parse(test_path.read_text())
functions = []
classes = []
for node in parsed_ast.body:
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
functions.append(node)
elif isinstance(node, ast.ClassDef):
classes.append(node)
# Note: this only iterates on top level functions, excluding methods
# and nested functions.
for function in functions:
function_name = function.name
LOG.debug(f"Validating function: {function_name}")
if helper_function_pattern.match(function_name):
continue
if test_function_pattern.match(function_name):
# Sanity check that there is at least one test annotation.
if not any(
isinstance(decorator, ast.Call)
and isinstance(decorator.func, ast.Name)
and decorator.func.id in ("ExpectIssue", "ExpectNoIssue")
for decorator in function.decorator_list
):
raise TestConfigurationException(
f"Test function {function_name} does NOT have any test annotation (`ExpectIssue`, `ExpectNoIssue`)"
)
continue
raise TestConfigurationException(
f"Expected test function {function_name} to have the "
+ "format test_####_flag_XXXX or test_####_no_flag_XXXX, "
+ "to indicate that issue #### is being tested, or "
+ "helperXXX to indicate it is an unrelated helper "
+ "function."
)
for klass in classes:
class_name = klass.name
LOG.debug(f"Validating class: {class_name}")
if not (
test_class_pattern.match(class_name)
or helper_class_pattern.match(class_name)
):
raise TestConfigurationException(
f"Expected test class {class_name} to have the "
+ "format Test####XXXX to indicate that issue #### "
+ "is being tested, or HelperXXXX to indicate it is "
+ "an unrelated helper class."
)
def normalized_json_dump(
results: str, ignore_positions: bool, filter_issues: bool
) -> str:
"""
Returns a normalised JSON string from results keeping only essential items.
Removes all keys that are not salient to determining if results have changed
when `ignore_positions` is true. Filters issues down to issues that have
the code we intend to test for if `filter_issues` is true.
"""
normalized = json.loads(results)
if "errors" in normalized:
pretty_error = json.dumps(normalized, sort_keys=True, indent=2)
raise PyreErrorException(
f"Errors were found when processing analysis results:\n{pretty_error}"
)
if filter_issues:
# Filter down to only issues that have the code that we intended to
# test for. This prevents the introduction of new rules or false
# positives from breaking existing tests.
normalized = [
issue
for issue in normalized
if is_test_function(issue["define"], issue["code"])
or is_test_class_method(issue["define"], issue["code"])
]
normalized = sorted(
normalized,
key=lambda issue: (
issue["code"],
issue["path"],
issue["line"],
issue["column"],
),
)
if ignore_positions:
salient_keys = {"code", "define", "description", "path", "name"}
stripped_issues = []
for issue in normalized:
stripped_issue = {
key: value for key, value in issue.items() if key in salient_keys
}
if set(stripped_issue.keys()) != salient_keys:
raise KeyError(
f"Expected issue to contain {salient_keys} keys, "
+ f"but instead found: {issue}"
)
stripped_issues.append(stripped_issue)
normalized = stripped_issues
return json.dumps(normalized, sort_keys=True, indent=2) + "\n"
def run_pysa(
    *,
    save_results_to: Optional[Path] = None,
    save_errors_to: Optional[Path] = None,
    target: Optional[str] = None,
    number_of_workers: Optional[int] = None,
    skip_model_verification: bool = False,
    isolation_prefix: Optional[str] = None,
    repository_root: Optional[Path] = None,
    excludes: Optional[Sequence[str]] = None,
    run_from_source: bool = False,
    typeshed: Optional[Path] = None,
    compact_ocaml_heap: bool = False,
    check_invariants: bool = False,
    maximum_trace_length: Optional[int] = None,
    maximum_tito_depth: Optional[int] = None,
    passthrough_args: Optional[Sequence[str]] = None,
    working_directory: Optional[Path] = None,
    silent: bool = False,
    shard_taint_output: bool = False,
    error_help: Optional[str] = None,
) -> str:
    """Run pysa for the given test and produce a list of errors in JSON.

    Builds a `pyre ... analyze ...` command line from the keyword options,
    runs it, and returns the analysis errors as a JSON string. If
    `save_results_to` is given, errors are read from the `errors.json` the
    analysis wrote there; otherwise they come from the process's stdout.
    On a non-zero exit, the captured stdout (and `error_help`, if any) is
    echoed and the process exits with the same return code.
    """
    if run_from_source:
        # Bug fix: the original `"-m" "pyre-check.client.pyre"` had a missing
        # comma, silently concatenating into the single argv element
        # "-mpyre-check.client.pyre". Passing "-m" and the module name as
        # separate arguments is the intended, explicit form.
        command = [
            "python",
            "-m",
            "pyre-check.client.pyre",
        ]
    else:
        command = ["pyre"]
    command.append("--noninteractive")

    # Options that must come before the `analyze` subcommand.
    if isolation_prefix is not None:
        command.extend(["--isolation-prefix", isolation_prefix])
    if number_of_workers is not None:
        command.append(f"--number-of-workers={number_of_workers}")
    if typeshed is not None:
        command.extend(["--typeshed", typeshed.absolute().as_posix()])
    if target is not None:
        command.append(f"--target={target}")
    if excludes is not None:
        for exclude in excludes:
            command.extend(["--exclude", exclude])

    command.append("analyze")

    # `analyze` subcommand options.
    if skip_model_verification:
        command.append("--no-verify")
    if repository_root is not None:
        command.extend(["--repository-root", str(repository_root)])
    if save_results_to is not None:
        command.extend(["--save-results-to", str(save_results_to)])
    if compact_ocaml_heap:
        command.append("--compact-ocaml-heap")
    if check_invariants:
        command.append("--check-invariants")
    if maximum_trace_length is not None:
        command.append(f"--maximum-trace-length={maximum_trace_length}")
    if maximum_tito_depth is not None:
        command.append(f"--maximum-tito-depth={maximum_tito_depth}")
    if shard_taint_output:
        command.append("--output-format=sharded-json")
    if passthrough_args is not None:
        command.extend(passthrough_args)

    LOG.info(f"Running `{' '.join(command)}`")
    try:
        process = subprocess.run(
            command,
            check=True,
            text=True,
            stdout=subprocess.PIPE,
            stderr=(subprocess.DEVNULL if silent else None),
            cwd=working_directory,
        )
    except subprocess.CalledProcessError as exception:
        LOG.error(f"`pyre analyze` failed with return code {exception.returncode}")
        sys.stdout.write(exception.stdout)
        if error_help is not None:
            sys.stdout.write("\n")
            sys.stdout.write(error_help)
        sys.exit(exception.returncode)

    if save_results_to is not None:
        # The analysis wrote full results to disk; read the errors from there.
        errors = (save_results_to / "errors.json").read_text()
    else:
        errors = process.stdout
    if save_errors_to is not None:
        save_errors_to.write_text(errors)
    return errors
def compare_to_expected_json(
    *,
    actual_results: str,
    expected_results_path: Path,
    test_result_directory: Path,
    filter_issues: bool,
    ignore_positions: bool,
    write_actual_results_on_failure: bool,
    error_help: Optional[str] = None,
) -> None:
    """
    Compare the errors from `run_pysa` to a set of expected
    errors from a JSON file.

    Both sides are normalized with `normalized_json_dump` before comparison.
    On a match the function returns; on a mismatch it writes the actual
    results (raw and normalized) into `test_result_directory`, prints a
    unified diff against the expected results, and exits with
    `ExitCode.TEST_COMPARISON_DIFFERS`.
    """
    if not os.path.isfile(expected_results_path):
        raise FileNotFoundError(
            f"Could NOT find expected result file `{expected_results_path}`"
        )
    expected_results = expected_results_path.read_text()

    normalized_pysa_results = normalized_json_dump(
        actual_results, ignore_positions=ignore_positions, filter_issues=filter_issues
    )
    normalized_expected_results = normalized_json_dump(
        expected_results, ignore_positions=ignore_positions, filter_issues=filter_issues
    )
    if normalized_pysa_results == normalized_expected_results:
        LOG.info("Run produced expected results")
        return

    # Mismatch: persist artifacts so the failure can be inspected (and the
    # expected file updated if the new output is correct).
    actual_results_path = test_result_directory / "result.actual"
    (test_result_directory / "full_result.json").write_text(actual_results)
    actual_results_path.write_text(
        normalized_json_dump(
            actual_results, ignore_positions=False, filter_issues=filter_issues
        )
    )
    if ignore_positions:
        # Diff the position-stripped versions, since that is what was compared.
        actual_invariant_results_path = (
            test_result_directory / "position_invariant_result.actual"
        )
        actual_invariant_results_path.write_text(normalized_pysa_results)
        expected_invariant_results_path = (
            test_result_directory / "position_invariant_result.json"
        )
        expected_invariant_results_path.write_text(normalized_expected_results)
    else:
        actual_invariant_results_path = test_result_directory / "result.actual"
        expected_invariant_results_path = expected_results_path

    # Bug fix: the two messages were swapped. "(after stripping locations)"
    # applies exactly when positions were ignored during normalization.
    if ignore_positions:
        sys.stdout.write("Output differs from expected (after stripping locations):\n")
    else:
        sys.stdout.write("Output differs from expected:\n")
    sys.stdout.flush()
    subprocess.run(
        [
            "diff",
            "-u",
            expected_invariant_results_path,
            actual_invariant_results_path,
        ]
    )
    if error_help is not None:
        sys.stdout.write("\n")
        sys.stdout.write(error_help)
    if write_actual_results_on_failure:
        sys.stdout.write("Contents of result.actual:\n")
        sys.stdout.write("---\n")
        sys.stdout.write(actual_results_path.read_text())
        sys.stdout.write("---\n")
    sys.exit(ExitCode.TEST_COMPARISON_DIFFERS.value)
@dataclass(frozen=True)
| ExitCode |
python | gevent__gevent | src/greentest/3.9/test_socket.py | {
"start": 230402,
"end": 239676
} | class ____(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = support.LOOPBACK_TIMEOUT
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(support.LONG_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(support.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
time.sleep(1)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
| SendfileUsingSendTest |
python | OmkarPathak__pygorithm | tests/test_pathing.py | {
"start": 2224,
"end": 2624
} | class ____(SimplePathfindingTestCaseTimed):
def find_path(self, my_graph, v1, v2):
my_pathfinder = astar.BiDirectionalAStar()
def my_heuristic(graph, v1, v2):
dx = v2[0] - v1[0]
dy = v2[1] - v1[1]
return math.sqrt(dx * dx + dy * dy)
return my_pathfinder.find_path(my_graph, v1, v2, my_heuristic) | TestAStarBiDirectionalTimed |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_to_be_nonempty_geometries.py | {
"start": 1696,
"end": 6278
} | class ____(ColumnMapExpectation):
"""Expect values in a column to be shapely geometries that aren't empty (however, they can be null).
Args:
column (str): \
The column name.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"nonempty_geometries": [
mapping(Point(0, 0)),
mapping(Polygon([(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)])),
None,
],
"empty_geometries": [
mapping(MultiPolygon([])),
mapping(Polygon([])),
mapping(LineString([])),
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "nonempty_geometries",
"mostly": 1,
},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "empty_geometries",
"mostly": 0.2,
},
"out": {"success": False},
},
],
},
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.nonempty_geometries"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {
"mostly": 1,
}
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"geospatial",
"hackathon-2022",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@mmi333", # Don't forget to add your github handle here!
],
"requirements": ["shapely"],
}
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: ExpectationConfiguration = None,
result: ExpectationValidationResult = None,
runtime_configuration: dict = None,
**kwargs,
) -> List[
Union[
dict,
str,
RenderedStringTemplateContent,
RenderedTableContent,
RenderedBulletListContent,
RenderedGraphContent,
Any,
]
]:
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"mostly",
],
)
if params["mostly"] is None:
template_str = "values must be shapely geometries that aren't empty"
else:
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if include_column_name:
template_str = f"$column {template_str}"
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
if __name__ == "__main__":
ExpectColumnValuesToBeNonemptyGeometries().print_diagnostic_checklist()
| ExpectColumnValuesToBeNonemptyGeometries |
python | openai__openai-python | src/openai/resources/audio/transcriptions.py | {
"start": 49666,
"end": 49925
} | class ____:
def __init__(self, transcriptions: Transcriptions) -> None:
self._transcriptions = transcriptions
self.create = _legacy_response.to_raw_response_wrapper(
transcriptions.create,
)
| TranscriptionsWithRawResponse |
python | pypa__pipenv | pipenv/patched/pip/_internal/commands/search.py | {
"start": 979,
"end": 1108
} | class ____(TypedDict):
name: str
summary: str
versions: List[str]
logger = logging.getLogger(__name__)
| TransformedHit |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc.py | {
"start": 6843,
"end": 7330
} | class ____(Benchmark):
""" Benchmark for the methods which take an argument
"""
params = [['__and__', '__or__', '__xor__'],
['int16', 'int32', 'int64']]
param_names = ['methods', 'npdtypes']
timeout = 10
def setup(self, methname, npdtypes):
values = get_squares_().get(npdtypes)
self.xargs = [values[0], values[1]]
def time_ndarray_meth(self, methname, npdtypes):
getattr(operator, methname)(*self.xargs)
| MethodsV1IntOnly |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/sanitize.py | {
"start": 1345,
"end": 19550
} | class ____:
attribute = ...
def __init__(self, value):
self.instance = value
self.attribute = value
def return_taint_sanitize(arg: T) -> T:
"""Identity function that returns the argument unmodified, but is marked as
'Sanitize' in the accompanying .pysa file
"""
return arg
def test1():
tainted = object()
tainted.id = _test_source()
test2(tainted)
test3(tainted)
def test2(tainted_other):
tainted = return_taint_sanitize(tainted_other)
_test_sink(tainted.id)
def test3(colliding_name):
colliding_name = return_taint_sanitize(colliding_name)
_test_sink(colliding_name.id)
def source_with_tito(x):
return x
def sanitize_sources(x):
_test_sink(x)
return source_with_tito(x)
def sanitize_sinks(x):
_test_sink(x)
return source_with_tito(x)
def sanitize_tito(x):
_test_sink(x)
return source_with_tito(x)
def a_source():
...
def b_source():
...
def sanitize_test_a_source():
if 1 > 2:
x = a_source()
else:
x = b_source()
return x
def sanitize_test_b_source():
if 1 > 2:
x = a_source()
else:
x = b_source()
return x
def sanitize_a_and_b_source():
if 1 > 2:
x = a_source()
else:
x = b_source()
return x
def a_sink(x):
...
def b_sink(x):
...
def sanitize_a_sink(x):
if 1 > 2:
a_sink(x)
else:
b_sink(x)
def sanitize_b_sink(x):
if 1 > 2:
a_sink(x)
else:
b_sink(x)
def sanitize_a_and_b_sinks(x):
if 1 > 2:
a_sink(x)
else:
b_sink(x)
def sanitize_a_source_tito(x):
return x
def no_propagation_with_sanitize_a_source_tito():
a = a_source()
b = sanitize_a_source_tito(a)
return b
def propagation_of_b_with_sanitize_a_source_tito():
b = b_source()
tito = sanitize_a_source_tito(b)
return tito
def propagation_of_sanitize_a_source_tito(x):
return sanitize_a_source_tito(x)
def no_issue_through_propagation_of_sanitize_a_source_tito():
x = a_source()
y = propagation_of_sanitize_a_source_tito(x)
a_sink(y)
def propagation_of_sanitize_a_source_in_sink_trace(x):
y = propagation_of_sanitize_a_source_tito(x)
a_sink(y)
def no_issue_propagation_of_sanitize_a_source_in_sink_trace():
x = a_source()
propagation_of_sanitize_a_source_in_sink_trace(x)
def sanitize_b_source_tito(x):
return x
def sanitize_test_source_tito(x):
return x
def combine_sanitize_a_source_b_source_in_sink_trace(x):
y = sanitize_b_source_tito(x)
propagation_of_sanitize_a_source_in_sink_trace(y)
def sanitize_a_sink_tito(x):
return x
def no_propagation_of_a_sink(x):
y = sanitize_a_sink_tito(x)
a_sink(y)
def propagation_of_b_sink(x):
y = sanitize_a_sink_tito(x)
b_sink(y)
def combine_sanitize_a_source_a_sink_tito(x):
y = sanitize_a_source_tito(x)
z = sanitize_a_sink_tito(y)
return z
def no_issue_through_combine_sanitize_a_source_a_sink_tito():
x = a_source()
y = combine_sanitize_a_source_a_sink_tito(x)
a_sink(y)
def propagation_of_sanitize_a_sink_in_source_trace():
x = a_source()
y = sanitize_a_sink_tito(x)
return y
def no_issue_propagation_of_sanitize_a_sink_in_source_trace():
x = propagation_of_sanitize_a_sink_in_source_trace()
a_sink(x)
def sanitize_b_sink_tito(x):
return x
def combine_sanitize_a_sink_b_sink_in_source_trace():
x = propagation_of_sanitize_a_sink_in_source_trace()
y = sanitize_b_sink_tito(x)
return y
def sanitize_a_source_tito_with_sink(x):
a_sink(x)
return x
def sanitize_with_user_declared_source():
return 0
def sanitize_with_user_declared_sink(x):
return
def test4():
x = a_source()
y = sanitize_a_source_tito_with_sink(x) # flow here
a_sink(y) # no flow here
def sanitize_b_sink_tito(x):
return x
def no_issue_fixpoint_sanitize_sources():
if 1 > 2:
x = a_source()
return sanitize_a_sink_tito(x)
else:
x = _test_source()
y = sanitize_a_sink_tito(x)
return sanitize_b_sink_tito(y)
def no_issue_fixpoint_sanitize_sinks(x):
if 1 > 2:
a_sink(x)
else:
y = sanitize_a_source_tito(x)
b_sink(y)
def no_issue_fixpoint_sanitize():
# Example where we need a fixpoint in the implementation to sanitize everything.
# Sources: {NotSink[TestA]@TestA, NotSink[TestB]:NotSink[TestA]@Test}
# Sinks: {TestA, NotSource[A]@TestB}
x = no_issue_fixpoint_sanitize_sources()
no_issue_fixpoint_sanitize_sinks(x)
def partial_issue_sources():
if 1 > 2:
x = a_source()
return sanitize_a_sink_tito(x)
else:
return a_source()
def partial_issue_sinks(x):
if 1 > 2:
a_sink(x)
else:
y = sanitize_a_source_tito(x)
a_sink(y)
def partial_issue_sanitize():
# Sources: {NotSink[TestA]@TestA, TestA}
# Sinks: {TestA, NotSource[testA]@TestA}
x = partial_issue_sources()
partial_issue_sinks(x)
def sanitize_test_a_source_attribute():
if 1 > 2:
x = a_source()
else:
x = b_source()
c = C_sanitized_a_source(x)
_test_sink(c.attribute)
def sanitize_test_a_source_attribute_in_sink_trace(x):
c = C_sanitized_a_source(x)
_test_sink(c.attribute)
def no_issue_sanitize_test_a_source_attribute_in_sink_trace():
x = a_source()
sanitize_test_a_source_attribute_in_sink_trace(x)
def issue_sanitize_test_a_source_attribute_in_sink_trace():
x = b_source()
sanitize_test_a_source_attribute_in_sink_trace(x)
def sanitize_test_a_source_attribute_in_tito(x):
c = C_sanitized_a_source(x)
return c.attribute
def sanitize_test_b_source_attribute():
if 1 > 2:
x = a_source()
else:
x = b_source()
c = C_sanitized_b_source(x)
_test_sink(c.attribute)
def sanitize_test_ab_sources_attribute():
if 1 > 2:
x = a_source()
else:
x = b_source()
c = C_sanitized_ab_sources(x)
_test_sink(c.attribute) # should only trigger Test -> Test
def sanitize_test_all_sources_attribute():
if 1 > 2:
x = a_source()
elif 2 > 3:
x = b_source()
else:
x = _test_source()
c = C_sanitized_all_sources(x)
_test_sink(c.attribute)
def sanitize_test_a_source_instance():
if 1 > 2:
x = a_source()
else:
x = b_source()
c = C_sanitized_a_source(x)
_test_sink(c.instance)
def sanitize_test_b_source_instance():
if 1 > 2:
x = a_source()
else:
x = b_source()
c = C_sanitized_b_source(x)
_test_sink(c.instance)
def sanitize_test_ab_sources_instance():
if 1 > 2:
x = a_source()
else:
x = b_source()
c = C_sanitized_ab_sources(x)
_test_sink(c.instance) # should only trigger Test -> Test
def sanitize_test_all_sources_instance():
if 1 > 2:
x = a_source()
elif 2 > 3:
x = b_source()
else:
x = _test_source()
c = C_sanitized_all_sources(x)
_test_sink(c.instance)
def sanitize_a_sink_attribute(c: C_sanitized_a_sink):
if 1 > 2:
a_sink(c.attribute)
else:
b_sink(c.attribute)
def sanitize_a_sink_attribute_in_source_trace():
x = a_source()
y = C_sanitized_a_sink(x)
return y.attribute
def no_issue_sanitize_a_sink_attribute_in_source_trace():
x = sanitize_a_sink_attribute_in_source_trace()
a_sink(x)
def issue_sanitize_a_sink_attribute_in_source_trace():
x = sanitize_a_sink_attribute_in_source_trace()
b_sink(x)
def sanitize_b_sink_attribute(c: C_sanitized_b_sink):
if 1 > 2:
a_sink(c.attribute)
else:
b_sink(c.attribute)
def sanitize_ab_sinks_attribute(c: C_sanitized_ab_sinks):
if 1 > 2:
a_sink(c.attribute)
else:
b_sink(c.attribute)
def sanitize_all_sinks_attribute(c: C_sanitized_all_sinks):
if 1 > 2:
a_sink(c.attribute)
elif 2 > 3:
b_sink(c.attribute)
else:
_test_sink(c.attribute)
def sanitize_a_sink_instance(c: C_sanitized_a_sink):
if 1 > 2:
a_sink(c.instance)
else:
b_sink(c.instance)
def sanitize_b_sink_instance(c: C_sanitized_b_sink):
if 1 > 2:
a_sink(c.instance)
else:
b_sink(c.instance)
def sanitize_ab_sinks_instance(c: C_sanitized_ab_sinks):
if 1 > 2:
a_sink(c.instance)
else:
b_sink(c.instance)
def sanitize_all_sinks_instance(c: C_sanitized_all_sinks):
if 1 > 2:
a_sink(c.instance)
elif 2 > 3:
b_sink(c.instance)
else:
_test_sink(c.instance)
def sanitize_test_a_sink_attribute():
sanitize_a_sink_attribute(_test_source())
def sanitize_test_b_sink_attribute():
sanitize_b_sink_attribute(_test_source())
def sanitize_test_ab_sinks_attribute():
sanitize_ab_sinks_attribute(_test_source())
def sanitize_test_all_sinks_attribute():
sanitize_all_sinks_attribute(_test_source()) # should not trigger
c = C_sanitized_all_sinks({})
c.attribute = _test_source() # should trigger Test -> Test
def sanitize_test_a_sink_instance():
sanitize_a_sink_instance(_test_source())
def sanitize_test_b_sink_instance():
sanitize_b_sink_instance(_test_source())
def sanitize_test_ab_sinks_instance():
sanitize_ab_sinks_instance(_test_source())
def sanitize_test_all_sinks_instance():
sanitize_all_sinks_instance(_test_source()) # should not trigger
c = C_sanitized_all_sinks({})
c.instance = _test_source() # should trigger Test -> Test
def sanitize_parameter(x, y):
_test_sink(x)
_test_sink(y)
return source_with_tito(x) + source_with_tito(y)
def sanitize_parameter_all_tito(x, y):
_test_sink(x)
_test_sink(y)
return source_with_tito(x) + source_with_tito(y)
def sanitize_parameter_no_user_controlled(x, y):
if 1 > 2:
return x
elif 2 > 3:
return y
elif 3 > 4:
_sql(x)
else:
_rce(y)
def propagation_of_sanitize_parameter_no_user_controlled(a, b):
sanitize_parameter_no_user_controlled(b, a)
def no_issue_propagation_of_sanitize_parameter_no_user_controlled():
x = _user_controlled()
propagation_of_sanitize_parameter_no_user_controlled(0, x)
def issue_propagation_of_sanitize_parameter_no_user_controlled():
x = _cookies()
propagation_of_sanitize_parameter_no_user_controlled(0, x)
def sanitize_parameter_no_sql(x):
if 1 > 2:
_sql(x)
elif 2 > 3:
_rce(x)
else:
return x
def sanitize_parameter_no_rce(x):
if 1 > 2:
_sql(x)
elif 2 > 3:
_rce(x)
else:
return x
def sanitize_parameter_no_user_controlled_tito(x, y):
if 1 > 2:
return x
else:
return y
def no_propagation_with_sanitize_parameter_no_user_controlled_tito():
a = _user_controlled()
b = sanitize_parameter_no_user_controlled_tito(a, 0)
return b
def propagation_of_cookies_with_sanitize_parameter_no_user_controlled_tito():
b = _cookies()
tito = sanitize_parameter_no_user_controlled_tito(b, 0)
return tito
def propagation_of_sanitize_parameter_no_user_controlled_tito(a, b):
return sanitize_parameter_no_user_controlled_tito(b, a)
def propagation_of_sanitize_parameter_no_user_controlled_tito_in_sink_trace(x):
y = propagation_of_sanitize_parameter_no_user_controlled_tito(0, x)
_sql(y)
def no_issue_propagation_of_sanitize_parameter_no_user_controlled_tito_in_sink_trace():
x = _user_controlled()
propagation_of_sanitize_parameter_no_user_controlled_tito_in_sink_trace(x)
def issue_propagation_of_sanitize_parameter_no_user_controlled_tito_in_sink_trace():
x = _cookies()
propagation_of_sanitize_parameter_no_user_controlled_tito_in_sink_trace(x)
def sanitize_parameter_no_sql_tito(x, y):
if 1 > 2:
return x
else:
return y
def no_propagation_with_sanitize_parameter_no_sql_tito(x):
y = sanitize_parameter_no_sql_tito(x, 0)
_sql(y)
def propagation_of_rce_with_sanitize_parameter_no_sql_tito(x):
y = sanitize_parameter_no_sql_tito(x, 0)
_rce(y)
def propagation_of_sanitize_parameter_no_sql_tito(a, b):
return sanitize_parameter_no_sql_tito(b, a)
def propagation_of_sanitize_parameter_no_sql_tito_in_source_trace():
x = _user_controlled()
return propagation_of_sanitize_parameter_no_sql_tito(0, x)
def no_issue_propagation_of_sanitize_parameter_no_sql_tito_in_source_trace():
x = propagation_of_sanitize_parameter_no_sql_tito_in_source_trace()
_sql(x)
def issue_propagation_of_sanitize_parameter_no_sql_tito_in_source_trace():
x = propagation_of_sanitize_parameter_no_sql_tito_in_source_trace()
_rce(x)
def sanitize_parameter_with_user_declared_sink(x):
return
def sanitize_return(x):
_test_sink(x)
return source_with_tito(x)
def sanitize_return_no_user_controlled(x):
if 1 > 2:
return _user_controlled()
elif 2 > 3:
return _cookies()
else:
return x
def sanitize_return_no_sql(x):
return x
def propagation_of_sanitize_return_no_sql(x):
return sanitize_return_no_sql(x)
def propagation_of_sanitize_return_no_sql_in_source_trace():
x = _user_controlled()
y = propagation_of_sanitize_return_no_sql(x)
return y
def no_issue_propagation_of_sanitize_return_no_sql_in_source_trace():
x = propagation_of_sanitize_return_no_sql_in_source_trace()
_sql(x)
def issue_propagation_of_sanitize_return_no_sql_in_source_trace():
x = propagation_of_sanitize_return_no_sql_in_source_trace()
_rce(x)
def sanitize_return_no_cookies():
if 1 > 2:
x = _user_controlled()
else:
x = _cookies()
return x
def sanitize_return_no_user_controlled_cookies():
if 1 > 2:
x = _user_controlled()
else:
x = _cookies()
return x
def sanitize_return_no_rce():
return _user_controlled()
def propagation_of_sanitize_return_no_rce():
return sanitize_return_no_rce()
def no_issue_propagation_of_sanitize_return_no_rce():
x = propagation_of_sanitize_return_no_rce()
_rce(x)
def issue_propagation_of_sanitize_return_no_rce():
x = propagation_of_sanitize_return_no_rce()
_sql(x)
def sanitize_return_with_user_declared_source(x):
return 0
def sanitize_all_parameters(x, y):
_test_sink(x)
_test_sink(y)
return source_with_tito(x) + source_with_tito(y)
def sanitize_all_parameters_all_tito(x, y):
_test_sink(x)
_test_sink(y)
return source_with_tito(x) + source_with_tito(y)
def sanitize_all_parameters_no_user_controlled(x):
_test_sink(x)
return x
def propagation_of_sanitize_all_parameters_no_user_controlled(x):
sanitize_all_parameters_no_user_controlled(x)
def no_issue_propagation_of_sanitize_all_parameters_no_user_controlled():
x = _user_controlled()
propagation_of_sanitize_all_parameters_no_user_controlled(x)
def issue_propagation_of_sanitize_all_parameters_no_user_controlled():
x = _cookies()
propagation_of_sanitize_all_parameters_no_user_controlled(x)
def sanitize_all_parameters_no_sql(x):
if 1 > 2:
_sql(x)
elif 2 > 3:
_rce(x)
else:
return x
def sanitize_all_parameters_no_rce(x):
if 1 > 2:
_sql(x)
elif 2 > 3:
_rce(x)
else:
return x
def sanitize_all_parameters_no_user_controlled_tito(x):
return x
def no_propagation_with_sanitize_all_parameters_no_user_controlled_tito():
a = _user_controlled()
b = sanitize_all_parameters_no_user_controlled_tito(a)
return b
def propagation_of_cookies_with_sanitize_all_parameters_no_user_controlled_tito():
b = _cookies()
tito = sanitize_all_parameters_no_user_controlled_tito(b)
return tito
def propagation_of_sanitize_user_controlled_tito_in_sink_trace(x):
y = sanitize_all_parameters_no_user_controlled_tito(x)
_sql(y)
def sanitize_all_parameters_no_sql_tito(x):
return x
def no_propagation_with_sanitize_all_parameters_no_sql_tito(x):
y = sanitize_all_parameters_no_sql_tito(x)
_sql(y)
def propagation_of_rce_with_sanitize_all_parameters_no_sql_tito(x):
y = sanitize_all_parameters_no_sql_tito(x)
_rce(y)
def propagation_of_sanitize_sql_tito_in_source_trace():
x = _user_controlled()
y = sanitize_all_parameters_no_sql_tito(x)
return y
def no_issue_propagation_of_sanitize_sql_tito_in_source_trace():
x = propagation_of_sanitize_sql_tito_in_source_trace()
_sql(x)
def sanitize_all_parameters_no_cookies_sql_tito(x):
return x
def no_propagation_of_cookies_with_sanitize_all_parameters_no_cookies_sql_tito():
a = _cookies()
b = sanitize_all_parameters_no_cookies_sql_tito(a)
return b
def propagation_of_user_controlled_with_sanitize_all_parameters_no_cookies_sql_tito():
b = _user_controlled()
tito = sanitize_all_parameters_no_cookies_sql_tito(b)
return tito
def no_propagation_of_sql_with_sanitize_all_parameters_no_cookies_sql_tito(x):
y = sanitize_all_parameters_no_cookies_sql_tito(x)
_sql(y)
def propagation_of_rce_with_sanitize_all_parameters_no_cookies_sql_tito(x):
y = sanitize_all_parameters_no_cookies_sql_tito(x)
_rce(y)
def sanitize_all_parameters_with_user_declared_sink(x):
return x
# Frame linking test
def sink_taint_sanitize_a(arg):
arg = sanitize_a_source_tito(arg)
_rce(arg)
def sink_taint_sanitize_a_sanitize_b(arg):
arg = sanitize_b_source_tito(arg)
sink_taint_sanitize_a(arg)
def sink_taint_sanitize_a_sanitize_b_santize_test(arg):
arg = sanitize_test_source_tito(arg)
sink_taint_sanitize_a_sanitize_b(arg)
def sink_taint_sanitize_b(arg):
arg = sanitize_b_source_tito(arg)
_rce(arg)
def sink_taint_sanitize_b_sanitize_a(arg):
arg = sanitize_a_source_tito(arg)
sink_taint_sanitize_b(arg)
def sink_taint_sanitize_b_sanitize_a_santize_test(arg):
arg = sanitize_test_source_tito(arg)
sink_taint_sanitize_b_sanitize_a(arg)
def sanitize_single_argument_tito(x, y):
if 1 > 1:
return x
else:
return y
| C_sanitized_all_sinks |
python | doocs__leetcode | solution/1600-1699/1631.Path With Minimum Effort/Solution2.py | {
"start": 0,
"end": 969
} | class ____:
def minimumEffortPath(self, heights: List[List[int]]) -> int:
def check(h: int) -> bool:
q = deque([(0, 0)])
vis = {(0, 0)}
dirs = (-1, 0, 1, 0, -1)
while q:
for _ in range(len(q)):
i, j = q.popleft()
if i == m - 1 and j == n - 1:
return True
for a, b in pairwise(dirs):
x, y = i + a, j + b
if (
0 <= x < m
and 0 <= y < n
and (x, y) not in vis
and abs(heights[i][j] - heights[x][y]) <= h
):
q.append((x, y))
vis.add((x, y))
return False
m, n = len(heights), len(heights[0])
return bisect_left(range(10**6), True, key=check)
| Solution |
python | numba__numba | numba/tests/npyufunc/test_ufunc.py | {
"start": 813,
"end": 4426
} | class ____(TestCase):
def _test_ufunc_attributes(self, cls, a, b, *args):
"Test ufunc attributes"
vectorizer = cls(add, *args)
vectorizer.add(float32(float32, float32))
ufunc = vectorizer.build_ufunc()
info = (cls, a.ndim)
self.assertPreciseEqual(ufunc(a, b), a + b, msg=info)
self.assertPreciseEqual(ufunc_reduce(ufunc, a), np.sum(a), msg=info)
self.assertPreciseEqual(ufunc.accumulate(a), np.add.accumulate(a),
msg=info)
self.assertPreciseEqual(ufunc.outer(a, b), np.add.outer(a, b), msg=info)
def _test_broadcasting(self, cls, a, b, c, d):
"Test multiple args"
vectorizer = cls(add_multiple_args)
vectorizer.add(float32(float32, float32, float32, float32))
ufunc = vectorizer.build_ufunc()
info = (cls, a.shape)
self.assertPreciseEqual(ufunc(a, b, c, d), a + b + c + d, msg=info)
def test_ufunc_attributes(self):
for v in vectorizers: # 1D
self._test_ufunc_attributes(v, a[0], b[0])
for v in vectorizers: # 2D
self._test_ufunc_attributes(v, a, b)
for v in vectorizers: # 3D
self._test_ufunc_attributes(v, a[:, np.newaxis, :],
b[np.newaxis, :, :])
def test_broadcasting(self):
for v in vectorizers: # 1D
self._test_broadcasting(v, a[0], b[0], c[0], d[0])
for v in vectorizers: # 2D
self._test_broadcasting(v, a, b, c, d)
for v in vectorizers: # 3D
self._test_broadcasting(v, a[:, np.newaxis, :], b[np.newaxis, :, :],
c[:, np.newaxis, :], d[np.newaxis, :, :])
def test_implicit_broadcasting(self):
for v in vectorizers:
vectorizer = v(add)
vectorizer.add(float32(float32, float32))
ufunc = vectorizer.build_ufunc()
broadcasting_b = b[np.newaxis, :, np.newaxis, np.newaxis, :]
self.assertPreciseEqual(ufunc(a, broadcasting_b),
a + broadcasting_b)
def test_ufunc_exception_on_write_to_readonly(self):
z = np.ones(10)
z.flags.writeable = False # flip write bit
tests = []
expect = "ufunc 'sin' called with an explicit output that is read-only"
tests.append((jit(nopython=True), TypingError, expect))
tests.append((jit(forceobj=True), ValueError,
"output array is read-only"))
for dec, exc, msg in tests:
def test(x):
a = np.ones(x.shape, x.dtype) # do not copy RO attribute from x
np.sin(a, x)
with self.assertRaises(exc) as raises:
dec(test)(z)
self.assertIn(msg, str(raises.exception))
def test_optional_type_handling(self):
# Tests ufunc compilation with Optional type
@njit
def inner(x, y):
if y > 2:
z = None
else:
z = np.ones(4)
return np.add(x, z)
# This causes `z` to be np.ones(4) at runtime, success
self.assertPreciseEqual(inner(np.arange(4), 1),
np.arange(1, 5).astype(np.float64))
with self.assertRaises(TypeError) as raises:
# This causes `z` to be None at runtime, TypeError raised on the
# type cast of the Optional.
inner(np.arange(4), 3)
msg = "expected array(float64, 1d, C), got None"
self.assertIn(msg, str(raises.exception))
| TestUFuncs |
python | Delgan__loguru | loguru/_recattrs.py | {
"start": 1385,
"end": 2481
} | class ____:
"""A class representing a file record with name and path.
Attributes
----------
name : str
The name of the file
path : str
The path to the file
"""
__slots__ = ("name", "path")
def __init__(self, name, path):
"""Initialize a RecordFile instance.
Parameters
----------
name : str
The name of the file
path : str
The path to the file
"""
self.name = name
self.path = path
def __repr__(self):
"""Return string representation of RecordFile.
Returns
-------
str
Formatted string with name and path
"""
return "(name=%r, path=%r)" % (self.name, self.path)
def __format__(self, spec):
"""Format the RecordFile instance.
Parameters
----------
spec : str
Format specification
Returns
-------
str
Formatted name according to specification
"""
return self.name.__format__(spec)
| RecordFile |
python | django__django | tests/queries/tests.py | {
"start": 165108,
"end": 165524
} | class ____(SimpleTestCase):
def test_ticket_18785(self):
# Test join trimming from ticket18785
qs = (
Item.objects.exclude(note__isnull=False)
.filter(name="something", creator__extra__isnull=True)
.order_by()
)
self.assertEqual(1, str(qs.query).count("INNER JOIN"))
self.assertEqual(0, str(qs.query).count("OUTER JOIN"))
| Ticket18785Tests |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/tags.py | {
"start": 589,
"end": 9771
} | class ____:
"""Helper object that keeps track of when the tag concurrency limits are met."""
_key_limits: dict[str, int]
_key_value_limits: dict[tuple[str, str], int]
_unique_value_limits: dict[str, int]
_key_counts: dict[str, int]
_key_value_counts: dict[tuple[str, str], int]
_unique_value_counts: dict[tuple[str, str], int]
def __init__(
self,
tag_concurrency_limits: Sequence[Mapping[str, Any]],
in_progress_tagged_items: Sequence[Union["DagsterRun", "ExecutionStep"]],
):
check.opt_list_param(tag_concurrency_limits, "tag_concurrency_limits", of_type=dict)
check.list_param(in_progress_tagged_items, "in_progress_tagged_items")
self._key_limits = {}
self._key_value_limits = {}
self._unique_value_limits = {}
for tag_limit in tag_concurrency_limits:
key = tag_limit["key"]
value = tag_limit.get("value")
limit = tag_limit["limit"]
if isinstance(value, str):
self._key_value_limits[(key, value)] = limit
elif not value or not value["applyLimitPerUniqueValue"]:
self._key_limits[key] = limit
else:
self._unique_value_limits[key] = limit
self._key_counts = defaultdict(lambda: 0)
self._key_value_counts = defaultdict(lambda: 0)
self._unique_value_counts = defaultdict(lambda: 0)
# initialize counters based on current in progress item
for item in in_progress_tagged_items:
self.update_counters_with_launched_item(item)
def is_blocked(self, item: Union["DagsterRun", "ExecutionStep"]) -> bool:
"""True if there are in progress item which are blocking this item based on tag limits."""
for key, value in item.tags.items():
if key in self._key_limits and self._key_counts[key] >= self._key_limits[key]:
return True
tag_tuple = (key, value)
if (
tag_tuple in self._key_value_limits
and self._key_value_counts[tag_tuple] >= self._key_value_limits[tag_tuple]
):
return True
if (
key in self._unique_value_limits
and self._unique_value_counts[tag_tuple] >= self._unique_value_limits[key]
):
return True
return False
def update_counters_with_launched_item(
self, item: Union["DagsterRun", "ExecutionStep"]
) -> None:
"""Add a new in progress item to the counters."""
for key, value in item.tags.items():
if key in self._key_limits:
self._key_counts[key] += 1
tag_tuple = (key, value)
if tag_tuple in self._key_value_limits:
self._key_value_counts[tag_tuple] += 1
if key in self._unique_value_limits:
self._unique_value_counts[tag_tuple] += 1
def get_boolean_tag_value(tag_value: Optional[str], default_value: bool = False) -> bool:
if tag_value is None:
return default_value
return get_boolean_string_value(tag_value)
# ########################
# ##### NORMALIZATION
# ########################
# Tag key constraints are inspired by allowed Kubernetes labels:
# https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
# We allow in some cases for users to specify multi-level namespaces for tags,
# right now we only allow this for the `dagster/kind` namespace, which is how asset kinds are
# encoded under the hood.
VALID_NESTED_NAMESPACES_TAG_KEYS = r"dagster/kind/"
VALID_TAG_KEY_REGEX = re.compile(
r"^([A-Za-z0-9_.-]{1,63}/|" + VALID_NESTED_NAMESPACES_TAG_KEYS + r")?[A-Za-z0-9_.-]{1,63}$"
)
VALID_TAG_KEY_EXPLANATION = (
"Allowed characters: alpha-numeric, '_', '-', '.'. "
"Tag keys can also contain a namespace section, separated by a '/'. Each section "
"must have <= 63 characters."
)
VALID_STRICT_TAG_VALUE_REGEX = re.compile(r"^[A-Za-z0-9_.-]{0,63}$")
def normalize_tags(
tags: Optional[Mapping[str, Any]],
strict: bool = False,
allow_private_system_tags: bool = True,
warning_stacklevel: int = 4,
) -> Mapping[str, str]:
"""Normalizes key-value tags attached to definitions throughout Dagster.
Tag normalization is complicated for backcompat reasons. In the past, tags were permitted to be
arbitrary and potentially large JSON-serializable objects. This is inconsistent with the vision
we have for tags going forward, which is as short string labels used for filtering and grouping
in the UI.
The `strict` flag controls whether to normalize/validate tags according to the new vision or the
old. `strict` should be set whenever we are normalizing a tags parameter that is newly added. It
should not be set if we are normalizing an older tags parameter for which we are maintaining old
behavior.
Args:
strict (bool):
If `strict=True`, we accept a restricted character set and impose length restrictions
(<=63 characters) for string keys and values. Violations of these constraints raise
errors. If `strict=False` then we run the same test but only warn for keys. Values are
permitted to be any JSON-serializable object that is unaffected by JSON round-trip.
Unserializable or round-trip-unequal values raise errors. Values are normalized to the
JSON string representation in the return value.
allow_private_system_tags (bool):
Whether to allow non-whitelisted tags that start with the system tag prefix. This should
be set to False whenever we are dealing with exclusively user-provided tags.
warning_stacklevel (int):
The stacklevel to use for warnings. This should be set to the calling function's
stacklevel.
Returns:
Mapping[str, str]: A dictionary of normalized tags.
"""
normalized_tags: dict[str, str] = {}
invalid_tag_keys = []
for key, value in check.opt_mapping_param(tags, "tags", key_type=str).items():
# Validate the key
if not isinstance(key, str):
raise DagsterInvalidDefinitionError("Tag keys must be strings")
elif (not allow_private_system_tags) and is_private_system_tag_key(key):
raise DagsterInvalidDefinitionError(
f"Attempted to set tag with reserved system prefix: {key}"
)
elif not is_valid_tag_key(key):
invalid_tag_keys.append(key)
# Normalize the value
if not isinstance(value, str):
if strict:
raise DagsterInvalidDefinitionError(
f"Tag values must be strings, got type {type(value)} at key {key}."
)
else:
normalized_tags[key] = _normalize_value(value, key)
else:
if strict and not is_valid_strict_tag_value(value):
raise DagsterInvalidDefinitionError(
f"Invalid tag value: {value}, for key: {key}. Allowed characters: alpha-numeric, '_', '-', '.'. "
"Must have <= 63 characters."
)
normalized_tags[key] = value
# Issue errors (strict=True) or warnings (strict=False) for any invalid tag keys that are too
# long or contain invalid characters.
if invalid_tag_keys:
invalid_tag_keys_sample = invalid_tag_keys[: min(5, len(invalid_tag_keys))]
if strict:
raise DagsterInvalidDefinitionError(
f"Found invalid tag keys: {invalid_tag_keys_sample}. {VALID_TAG_KEY_EXPLANATION}"
)
else:
warnings.warn(
f"Non-compliant tag keys like {invalid_tag_keys_sample} are deprecated. {VALID_TAG_KEY_EXPLANATION}",
category=DeprecationWarning,
stacklevel=warning_stacklevel,
)
return normalized_tags
def _normalize_value(value: Any, key: str) -> str:
error = None
try:
serialized_value = seven.json.dumps(value)
except TypeError:
error = 'Could not JSON encode value "{value}"'
if not error and not seven.json.loads(serialized_value) == value: # pyright: ignore[reportPossiblyUnboundVariable]
error = f'JSON encoding "{serialized_value}" of value "{value}" is not equivalent to original value' # pyright: ignore[reportPossiblyUnboundVariable]
if error:
raise DagsterInvalidDefinitionError(
f'Invalid value for tag "{key}", {error}. Tag values must be strings '
"or meet the constraint that json.loads(json.dumps(value)) == value."
)
return serialized_value # pyright: ignore[reportPossiblyUnboundVariable]
def is_private_system_tag_key(tag) -> bool:
return tag.startswith(SYSTEM_TAG_PREFIX) and tag not in USER_EDITABLE_SYSTEM_TAGS
def is_valid_tag_key(key: str) -> bool:
return bool(VALID_TAG_KEY_REGEX.match(key))
def is_valid_strict_tag_value(key: str) -> bool:
return bool(VALID_STRICT_TAG_VALUE_REGEX.match(key))
| TagConcurrencyLimitsCounter |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_bigquery.py | {
"start": 61771,
"end": 63895
} | class ____(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_table(self, mock_client, mock_table):
description_patched = "Test description."
expiration_time_patched = 2524608000000
friendly_name_patched = "Test friendly name."
labels_patched = {"label1": "test1", "label2": "test2"}
schema_patched = [
{"name": "id", "type": "STRING", "mode": "REQUIRED"},
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
{"name": "balance", "type": "FLOAT", "mode": "NULLABLE"},
{"name": "new_field", "type": "STRING", "mode": "NULLABLE"},
]
time_partitioning_patched = {"expirationMs": 10000000}
require_partition_filter_patched = True
view_patched = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
"useLegacySql": False,
}
body = {
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {"fields": schema_patched},
"timePartitioning": time_partitioning_patched,
"view": view_patched,
"requirePartitionFilter": require_partition_filter_patched,
}
fields = list(body.keys())
self.hook.update_table(
table_resource=body,
fields=fields,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
)
mock_table.from_api_repr.assert_called_once_with(body)
mock_client.return_value.update_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, fields=fields
)
| TestBigQueryWithKMS |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 982923,
"end": 983329
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("SponsorshipNewsletter", graphql_name="node")
"""The item at the end of the edge."""
| SponsorshipNewsletterEdge |
python | spyder-ide__spyder | spyder/plugins/layout/container.py | {
"start": 770,
"end": 1996
} | class ____:
DefaultLayout = 'default_layout_action'
MatlabLayout = 'matlab_layout_action'
RStudio = 'rstudio_layout_action'
HorizontalSplit = 'horizontal_split_layout_action'
VerticalSplit = 'vertical_split_layout_action'
SaveLayoutAction = 'save_layout_action'
ShowLayoutPreferencesAction = 'show_layout_preferences_action'
ResetLayout = 'reset_layout_action'
# Needs to have 'Maximize pane' as name to properly register
# the action shortcut
MaximizeCurrentDockwidget = 'Maximize pane'
# Needs to have 'Fullscreen mode' as name to properly register
# the action shortcut
Fullscreen = 'Fullscreen mode'
# Needs to have 'Use next layout' as name to properly register
# the action shortcut
NextLayout = 'Use next layout'
# Needs to have 'Use previous layout' as name to properly register
# the action shortcut
PreviousLayout = 'Use previous layout'
# Needs to have 'Close pane' as name to properly register
# the action shortcut
CloseCurrentDockwidget = 'Close pane'
# Needs to have 'Lock unlock panes' as name to properly register
# the action shortcut
LockDockwidgetsAndToolbars = 'Lock unlock panes'
| LayoutContainerActions |
python | pydantic__pydantic | pydantic/v1/dataclasses.py | {
"start": 8295,
"end": 18172
} | class ____:
__slots__ = '__dataclass__'
def __init__(self, dc_cls: Type['Dataclass']) -> None:
object.__setattr__(self, '__dataclass__', dc_cls)
def __call__(self, *args: Any, **kwargs: Any) -> Any:
with set_validation(self.__dataclass__, True):
return self.__dataclass__(*args, **kwargs)
def __getattr__(self, name: str) -> Any:
return getattr(self.__dataclass__, name)
def __setattr__(self, __name: str, __value: Any) -> None:
return setattr(self.__dataclass__, __name, __value)
def __instancecheck__(self, instance: Any) -> bool:
return isinstance(instance, self.__dataclass__)
def __copy__(self) -> 'DataclassProxy':
return DataclassProxy(copy.copy(self.__dataclass__))
def __deepcopy__(self, memo: Any) -> 'DataclassProxy':
return DataclassProxy(copy.deepcopy(self.__dataclass__, memo))
def _add_pydantic_validation_attributes( # noqa: C901 (ignore complexity)
dc_cls: Type['Dataclass'],
config: Type[BaseConfig],
validate_on_init: bool,
dc_cls_doc: str,
) -> None:
"""
We need to replace the right method. If no `__post_init__` has been set in the stdlib dataclass
it won't even exist (code is generated on the fly by `dataclasses`)
By default, we run validation after `__init__` or `__post_init__` if defined
"""
init = dc_cls.__init__
@wraps(init)
def handle_extra_init(self: 'Dataclass', *args: Any, **kwargs: Any) -> None:
if config.extra == Extra.ignore:
init(self, *args, **{k: v for k, v in kwargs.items() if k in self.__dataclass_fields__})
elif config.extra == Extra.allow:
for k, v in kwargs.items():
self.__dict__.setdefault(k, v)
init(self, *args, **{k: v for k, v in kwargs.items() if k in self.__dataclass_fields__})
else:
init(self, *args, **kwargs)
if hasattr(dc_cls, '__post_init__'):
try:
post_init = dc_cls.__post_init__.__wrapped__ # type: ignore[attr-defined]
except AttributeError:
post_init = dc_cls.__post_init__
@wraps(post_init)
def new_post_init(self: 'Dataclass', *args: Any, **kwargs: Any) -> None:
if config.post_init_call == 'before_validation':
post_init(self, *args, **kwargs)
if self.__class__.__pydantic_run_validation__:
self.__pydantic_validate_values__()
if hasattr(self, '__post_init_post_parse__'):
self.__post_init_post_parse__(*args, **kwargs)
if config.post_init_call == 'after_validation':
post_init(self, *args, **kwargs)
setattr(dc_cls, '__init__', handle_extra_init)
setattr(dc_cls, '__post_init__', new_post_init)
else:
@wraps(init)
def new_init(self: 'Dataclass', *args: Any, **kwargs: Any) -> None:
handle_extra_init(self, *args, **kwargs)
if self.__class__.__pydantic_run_validation__:
self.__pydantic_validate_values__()
if hasattr(self, '__post_init_post_parse__'):
# We need to find again the initvars. To do that we use `__dataclass_fields__` instead of
# public method `dataclasses.fields`
# get all initvars and their default values
initvars_and_values: Dict[str, Any] = {}
for i, f in enumerate(self.__class__.__dataclass_fields__.values()):
if f._field_type is dataclasses._FIELD_INITVAR: # type: ignore[attr-defined]
try:
# set arg value by default
initvars_and_values[f.name] = args[i]
except IndexError:
initvars_and_values[f.name] = kwargs.get(f.name, f.default)
self.__post_init_post_parse__(**initvars_and_values)
setattr(dc_cls, '__init__', new_init)
setattr(dc_cls, '__pydantic_run_validation__', ClassAttribute('__pydantic_run_validation__', validate_on_init))
setattr(dc_cls, '__pydantic_initialised__', False)
setattr(dc_cls, '__pydantic_model__', create_pydantic_model_from_dataclass(dc_cls, config, dc_cls_doc))
setattr(dc_cls, '__pydantic_validate_values__', _dataclass_validate_values)
setattr(dc_cls, '__validate__', classmethod(_validate_dataclass))
setattr(dc_cls, '__get_validators__', classmethod(_get_validators))
if dc_cls.__pydantic_model__.__config__.validate_assignment and not dc_cls.__dataclass_params__.frozen:
setattr(dc_cls, '__setattr__', _dataclass_validate_assignment_setattr)
def _get_validators(cls: 'DataclassClassOrWrapper') -> 'CallableGenerator':
yield cls.__validate__
def _validate_dataclass(cls: Type['DataclassT'], v: Any) -> 'DataclassT':
with set_validation(cls, True):
if isinstance(v, cls):
v.__pydantic_validate_values__()
return v
elif isinstance(v, (list, tuple)):
return cls(*v)
elif isinstance(v, dict):
return cls(**v)
else:
raise DataclassTypeError(class_name=cls.__name__)
def create_pydantic_model_from_dataclass(
dc_cls: Type['Dataclass'],
config: Type[Any] = BaseConfig,
dc_cls_doc: Optional[str] = None,
) -> Type['BaseModel']:
field_definitions: Dict[str, Any] = {}
for field in dataclasses.fields(dc_cls):
default: Any = Undefined
default_factory: Optional['NoArgAnyCallable'] = None
field_info: FieldInfo
if field.default is not dataclasses.MISSING:
default = field.default
elif field.default_factory is not dataclasses.MISSING:
default_factory = field.default_factory
else:
default = Required
if isinstance(default, FieldInfo):
field_info = default
dc_cls.__pydantic_has_field_info_default__ = True
else:
field_info = Field(default=default, default_factory=default_factory, **field.metadata)
field_definitions[field.name] = (field.type, field_info)
validators = gather_all_validators(dc_cls)
model: Type['BaseModel'] = create_model(
dc_cls.__name__,
__config__=config,
__module__=dc_cls.__module__,
__validators__=validators,
__cls_kwargs__={'__resolve_forward_refs__': False},
**field_definitions,
)
model.__doc__ = dc_cls_doc if dc_cls_doc is not None else dc_cls.__doc__ or ''
return model
if sys.version_info >= (3, 8):
def _is_field_cached_property(obj: 'Dataclass', k: str) -> bool:
return isinstance(getattr(type(obj), k, None), cached_property)
else:
def _is_field_cached_property(obj: 'Dataclass', k: str) -> bool:
return False
def _dataclass_validate_values(self: 'Dataclass') -> None:
# validation errors can occur if this function is called twice on an already initialised dataclass.
# for example if Extra.forbid is enabled, it would consider __pydantic_initialised__ an invalid extra property
if getattr(self, '__pydantic_initialised__'):
return
if getattr(self, '__pydantic_has_field_info_default__', False):
# We need to remove `FieldInfo` values since they are not valid as input
# It's ok to do that because they are obviously the default values!
input_data = {
k: v
for k, v in self.__dict__.items()
if not (isinstance(v, FieldInfo) or _is_field_cached_property(self, k))
}
else:
input_data = {k: v for k, v in self.__dict__.items() if not _is_field_cached_property(self, k)}
d, _, validation_error = validate_model(self.__pydantic_model__, input_data, cls=self.__class__)
if validation_error:
raise validation_error
self.__dict__.update(d)
object.__setattr__(self, '__pydantic_initialised__', True)
def _dataclass_validate_assignment_setattr(self: 'Dataclass', name: str, value: Any) -> None:
if self.__pydantic_initialised__:
d = dict(self.__dict__)
d.pop(name, None)
known_field = self.__pydantic_model__.__fields__.get(name, None)
if known_field:
value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__)
if error_:
raise ValidationError([error_], self.__class__)
object.__setattr__(self, name, value)
def is_builtin_dataclass(_cls: Type[Any]) -> bool:
"""
Whether a class is a stdlib dataclass
(useful to discriminated a pydantic dataclass that is actually a wrapper around a stdlib dataclass)
we check that
- `_cls` is a dataclass
- `_cls` is not a processed pydantic dataclass (with a basemodel attached)
- `_cls` is not a pydantic dataclass inheriting directly from a stdlib dataclass
e.g.
```
@dataclasses.dataclass
class A:
x: int
@pydantic.dataclasses.dataclass
class B(A):
y: int
```
In this case, when we first check `B`, we make an extra check and look at the annotations ('y'),
which won't be a superset of all the dataclass fields (only the stdlib fields i.e. 'x')
"""
return (
dataclasses.is_dataclass(_cls)
and not hasattr(_cls, '__pydantic_model__')
and set(_cls.__dataclass_fields__).issuperset(set(getattr(_cls, '__annotations__', {})))
)
def make_dataclass_validator(dc_cls: Type['Dataclass'], config: Type[BaseConfig]) -> 'CallableGenerator':
"""
Create a pydantic.dataclass from a builtin dataclass to add type validation
and yield the validators
It retrieves the parameters of the dataclass and forwards them to the newly created dataclass
"""
yield from _get_validators(dataclass(dc_cls, config=config, use_proxy=True))
| DataclassProxy |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1599468,
"end": 1599664
} | class ____(sgqlc.types.Union):
"""A record that can be featured on a GitHub Sponsors profile."""
__schema__ = github_schema
__types__ = (Repository, User)
| SponsorsListingFeatureableItem |
python | fastapi__sqlmodel | docs_src/tutorial/offset_and_limit/tutorial004_py310.py | {
"start": 71,
"end": 1614
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.age > 32).offset(1).limit(2)
results = session.exec(statement)
heroes = results.all()
print(heroes)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | ray-project__ray | python/ray/serve/_private/test_utils.py | {
"start": 5104,
"end": 5473
} | class ____:
def __init__(self):
self._init_args = ()
self._options = dict()
def options(self, **kwargs):
res = copy(self)
for k, v in kwargs.items():
res._options[k] = v
return res
def remote(self, *args) -> MockActorHandle:
return MockActorHandle(init_args=args, **self._options)
| MockActorClass |
python | PrefectHQ__prefect | src/prefect/settings/models/server/database.py | {
"start": 2945,
"end": 4600
} | class ____(PrefectBaseSettings):
"""
Settings for controlling SQLAlchemy behavior; note that these settings only take effect when
using a PostgreSQL database.
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(
("server", "database", "sqlalchemy")
)
connect_args: SQLAlchemyConnectArgsSettings = Field(
default_factory=SQLAlchemyConnectArgsSettings,
description="Settings for controlling SQLAlchemy connection behavior",
)
pool_size: int = Field(
default=5,
description="Controls connection pool size of database connection pools from the Prefect backend.",
validation_alias=AliasChoices(
AliasPath("pool_size"),
"prefect_server_database_sqlalchemy_pool_size",
"prefect_sqlalchemy_pool_size",
),
)
pool_recycle: int = Field(
default=3600,
description="This setting causes the pool to recycle connections after the given number of seconds has passed; set it to -1 to avoid recycling entirely.",
)
pool_timeout: Optional[float] = Field(
default=30.0,
description="Number of seconds to wait before giving up on getting a connection from the pool. Defaults to 30 seconds.",
)
max_overflow: int = Field(
default=10,
description="Controls maximum overflow of the connection pool. To prevent overflow, set to -1.",
validation_alias=AliasChoices(
AliasPath("max_overflow"),
"prefect_server_database_sqlalchemy_max_overflow",
"prefect_sqlalchemy_max_overflow",
),
)
| SQLAlchemySettings |
python | getsentry__sentry | src/sentry/monitors/validators.py | {
"start": 3355,
"end": 3720
} | class ____(serializers.Serializer):
environment = serializers.CharField(
max_length=64, required=False, allow_null=True, help_text="Name of the environment"
)
targets = MonitorAlertRuleTargetValidator(
many=True,
help_text="Array of dictionaries with information of the user or team to be notified",
)
| MonitorAlertRuleValidator |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 128648,
"end": 129356
} | class ____(test_util.TensorFlowTestCase):
def testNoDeadlineSet(self):
with ops.Graph().as_default() as g:
get_deadline = test_ops.get_deadline()
with self.session(graph=g) as sess:
run_options = config_pb2.RunOptions()
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_deadline, options=run_options)
def testDeadlineSetTimesOut(self):
with ops.Graph().as_default() as g:
sleep_op = test_ops.sleep_op(10)
with self.session(graph=g) as sess:
run_options = config_pb2.RunOptions(timeout_in_ms=3_000)
with self.assertRaises(errors.DeadlineExceededError):
sess.run(sleep_op, options=run_options)
| DeadlineTest |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 870,
"end": 1131
} | class ____(Message):
message = 'import %r from line %r shadowed by loop variable'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
| ImportShadowedByLoopVar |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 68937,
"end": 69508
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
self.quant = torch.ao.quantization.QuantStub()
self.hardswish = torch.nn.Hardswish().to(dtype=torch.float)
self.elu = torch.nn.ELU().to(dtype=torch.float)
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.hardswish(x)
x = self.elu(x)
x = self.dequant(x)
return x
| ActivationsTestModel |
python | kamyu104__LeetCode-Solutions | Python/the-score-of-students-solving-math-expression.py | {
"start": 39,
"end": 982
} | class ____(object):
def scoreOfStudents(self, s, answers):
"""
:type s: str
:type answers: List[int]
:rtype: int
"""
MAX_ANS = 1000
n = (len(s)+1)//2
dp = [[set() for _ in xrange(n)] for _ in xrange(n)]
for i in xrange(n):
dp[i][i].add(int(s[i*2]))
for l in xrange(1, n):
for left in xrange(n-l):
right = left+l
for k in xrange(left, right):
if s[2*k+1] == '+':
dp[left][right].update((x+y for x in dp[left][k] for y in dp[k+1][right] if x+y <= MAX_ANS))
else:
dp[left][right].update((x*y for x in dp[left][k] for y in dp[k+1][right] if x*y <= MAX_ANS))
target = eval(s)
return sum(5 if ans == target else 2 if ans in dp[0][-1] else 0 for ans in answers)
# Time: O(n^3 * a^2)
# Space: O(n^2)
| Solution |
python | openai__openai-python | tests/api_resources/test_files.py | {
"start": 575,
"end": 10073
} | class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create(self, client: OpenAI) -> None:
file = client.files.create(
file=b"raw file contents",
purpose="assistants",
)
assert_matches_type(FileObject, file, path=["response"])
@parametrize
def test_method_create_with_all_params(self, client: OpenAI) -> None:
file = client.files.create(
file=b"raw file contents",
purpose="assistants",
expires_after={
"anchor": "created_at",
"seconds": 3600,
},
)
assert_matches_type(FileObject, file, path=["response"])
@parametrize
def test_raw_response_create(self, client: OpenAI) -> None:
response = client.files.with_raw_response.create(
file=b"raw file contents",
purpose="assistants",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(FileObject, file, path=["response"])
@parametrize
def test_streaming_response_create(self, client: OpenAI) -> None:
with client.files.with_streaming_response.create(
file=b"raw file contents",
purpose="assistants",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(FileObject, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
file = client.files.retrieve(
"string",
)
assert_matches_type(FileObject, file, path=["response"])
@parametrize
def test_raw_response_retrieve(self, client: OpenAI) -> None:
response = client.files.with_raw_response.retrieve(
"string",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(FileObject, file, path=["response"])
@parametrize
def test_streaming_response_retrieve(self, client: OpenAI) -> None:
with client.files.with_streaming_response.retrieve(
"string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(FileObject, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.files.with_raw_response.retrieve(
"",
)
@parametrize
def test_method_list(self, client: OpenAI) -> None:
file = client.files.list()
assert_matches_type(SyncCursorPage[FileObject], file, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
file = client.files.list(
after="after",
limit=0,
order="asc",
purpose="purpose",
)
assert_matches_type(SyncCursorPage[FileObject], file, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
response = client.files.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(SyncCursorPage[FileObject], file, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
with client.files.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(SyncCursorPage[FileObject], file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_delete(self, client: OpenAI) -> None:
file = client.files.delete(
"string",
)
assert_matches_type(FileDeleted, file, path=["response"])
@parametrize
def test_raw_response_delete(self, client: OpenAI) -> None:
response = client.files.with_raw_response.delete(
"string",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(FileDeleted, file, path=["response"])
@parametrize
def test_streaming_response_delete(self, client: OpenAI) -> None:
with client.files.with_streaming_response.delete(
"string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(FileDeleted, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_delete(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.files.with_raw_response.delete(
"",
)
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_method_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
file = client.files.content(
"string",
)
assert isinstance(file, _legacy_response.HttpxBinaryResponseContent)
assert file.json() == {"foo": "bar"}
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_raw_response_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = client.files.with_raw_response.content(
"string",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(_legacy_response.HttpxBinaryResponseContent, file, path=["response"])
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_streaming_response_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
with client.files.with_streaming_response.content(
"string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(bytes, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_path_params_content(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.files.with_raw_response.content(
"",
)
@parametrize
def test_method_retrieve_content(self, client: OpenAI) -> None:
with pytest.warns(DeprecationWarning):
file = client.files.retrieve_content(
"string",
)
assert_matches_type(str, file, path=["response"])
@parametrize
def test_raw_response_retrieve_content(self, client: OpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = client.files.with_raw_response.retrieve_content(
"string",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(str, file, path=["response"])
@parametrize
def test_streaming_response_retrieve_content(self, client: OpenAI) -> None:
with pytest.warns(DeprecationWarning):
with client.files.with_streaming_response.retrieve_content(
"string",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file = response.parse()
assert_matches_type(str, file, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_retrieve_content(self, client: OpenAI) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
client.files.with_raw_response.retrieve_content(
"",
)
| TestFiles |
python | huggingface__transformers | src/transformers/models/clipseg/configuration_clipseg.py | {
"start": 5381,
"end": 9395
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to instantiate an
CLIPSeg model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the CLIPSeg
[CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import CLIPSegVisionConfig, CLIPSegVisionModel
>>> # Initializing a CLIPSegVisionConfig with CIDAS/clipseg-rd64 style configuration
>>> configuration = CLIPSegVisionConfig()
>>> # Initializing a CLIPSegVisionModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
>>> model = CLIPSegVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "clipseg_vision_model"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=768,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=12,
num_channels=3,
image_size=224,
patch_size=32,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
| CLIPSegVisionConfig |
python | weaviate__weaviate-python-client | weaviate/rbac/sync.py | {
"start": 160,
"end": 215
} | class ____(_RolesExecutor[ConnectionSync]):
pass
| _Roles |
python | doocs__leetcode | solution/2300-2399/2368.Reachable Nodes With Restrictions/Solution.py | {
"start": 0,
"end": 407
} | class ____:
def reachableNodes(
self, n: int, edges: List[List[int]], restricted: List[int]
) -> int:
def dfs(i: int) -> int:
vis.add(i)
return 1 + sum(j not in vis and dfs(j) for j in g[i])
g = defaultdict(list)
for a, b in edges:
g[a].append(b)
g[b].append(a)
vis = set(restricted)
return dfs(0)
| Solution |
python | ansible__ansible | lib/ansible/_internal/_templating/_jinja_common.py | {
"start": 7401,
"end": 7547
} | class ____(Marker):
"""A `Marker` value that represents an undefined value encountered during templating."""
__slots__ = ()
| UndefinedMarker |
python | google__jax | tests/shard_map_test.py | {
"start": 178134,
"end": 178876
} | class ____(jtu.JaxTestCase):
# Verify we can lower to a `ManualComputationOp`.
def test_shardy_collective_permute(self):
mesh = jtu.create_mesh((2,), ('x',))
a = jax.device_put(
jnp.arange(8 * 8).reshape((8, 8)),
jax.sharding.NamedSharding(mesh, P('x', None)),
)
@jax.jit
@partial(
shard_map, mesh=mesh, in_specs=(P('x', None),), out_specs=P('x', None)
)
def fwd(a):
axis_size = lax.axis_size('x')
perm = [(j, (j + 1) % axis_size) for j in range(axis_size)]
return lax.ppermute(a, 'x', perm=perm)
self.assertIn('sdy.manual_computation', jax.jit(fwd).lower(a).as_text())
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| SdyIntegrationTest |
python | django__django | tests/update/models.py | {
"start": 557,
"end": 667
} | class ____(models.Model):
a = models.ForeignKey(A, models.CASCADE)
y = models.IntegerField(default=10)
| B |
python | ray-project__ray | rllib/core/models/torch/primitives.py | {
"start": 404,
"end": 8853
} | class ____(nn.Module):
"""A multi-layer perceptron with N dense layers.
All layers (except for an optional additional extra output layer) share the same
activation function, bias setup (use bias or not), and LayerNorm setup
(use layer normalization or not).
If `output_dim` (int) is not None, an additional, extra output dense layer is added,
which might have its own activation function (e.g. "linear"). However, the output
layer does NOT use layer normalization.
"""
def __init__(
self,
*,
input_dim: int,
hidden_layer_dims: List[int],
hidden_layer_activation: Union[str, Callable] = "relu",
hidden_layer_use_bias: bool = True,
hidden_layer_use_layernorm: bool = False,
hidden_layer_weights_initializer: Optional[Union[str, Callable]] = None,
hidden_layer_weights_initializer_config: Optional[Union[str, Callable]] = None,
hidden_layer_bias_initializer: Optional[Union[str, Callable]] = None,
hidden_layer_bias_initializer_config: Optional[Dict] = None,
output_dim: Optional[int] = None,
output_use_bias: bool = True,
output_activation: Union[str, Callable] = "linear",
output_weights_initializer: Optional[Union[str, Callable]] = None,
output_weights_initializer_config: Optional[Dict] = None,
output_bias_initializer: Optional[Union[str, Callable]] = None,
output_bias_initializer_config: Optional[Dict] = None,
):
"""Initialize a TorchMLP object.
Args:
input_dim: The input dimension of the network. Must not be None.
hidden_layer_dims: The sizes of the hidden layers. If an empty list, only a
single layer will be built of size `output_dim`.
hidden_layer_use_layernorm: Whether to insert a LayerNormalization
functionality in between each hidden layer's output and its activation.
hidden_layer_use_bias: Whether to use bias on all dense layers (excluding
the possible separate output layer).
hidden_layer_activation: The activation function to use after each layer
(except for the output). Either a torch.nn.[activation fn] callable or
the name thereof, or an RLlib recognized activation name,
e.g. "ReLU", "relu", "tanh", "SiLU", or "linear".
hidden_layer_weights_initializer: The initializer function or class to use
forweights initialization in the hidden layers. If `None` the default
initializer of the respective dense layer is used. Note, only the
in-place initializers, i.e. ending with an underscore "_" are allowed.
hidden_layer_weights_initializer_config: Configuration to pass into the
initializer defined in `hidden_layer_weights_initializer`.
hidden_layer_bias_initializer: The initializer function or class to use for
bias initialization in the hidden layers. If `None` the default
initializer of the respective dense layer is used. Note, only the
in-place initializers, i.e. ending with an underscore "_" are allowed.
hidden_layer_bias_initializer_config: Configuration to pass into the
initializer defined in `hidden_layer_bias_initializer`.
output_dim: The output dimension of the network. If None, no specific output
layer will be added and the last layer in the stack will have
size=`hidden_layer_dims[-1]`.
output_use_bias: Whether to use bias on the separate output layer,
if any.
output_activation: The activation function to use for the output layer
(if any). Either a torch.nn.[activation fn] callable or
the name thereof, or an RLlib recognized activation name,
e.g. "ReLU", "relu", "tanh", "SiLU", or "linear".
output_layer_weights_initializer: The initializer function or class to use
for weights initialization in the output layers. If `None` the default
initializer of the respective dense layer is used. Note, only the
in-place initializers, i.e. ending with an underscore "_" are allowed.
output_layer_weights_initializer_config: Configuration to pass into the
initializer defined in `output_layer_weights_initializer`.
output_layer_bias_initializer: The initializer function or class to use for
bias initialization in the output layers. If `None` the default
initializer of the respective dense layer is used. Note, only the
in-place initializers, i.e. ending with an underscore "_" are allowed.
output_layer_bias_initializer_config: Configuration to pass into the
initializer defined in `output_layer_bias_initializer`.
"""
super().__init__()
assert input_dim > 0
self.input_dim = input_dim
hidden_activation = get_activation_fn(
hidden_layer_activation, framework="torch"
)
hidden_weights_initializer = get_initializer_fn(
hidden_layer_weights_initializer, framework="torch"
)
hidden_bias_initializer = get_initializer_fn(
hidden_layer_bias_initializer, framework="torch"
)
output_weights_initializer = get_initializer_fn(
output_weights_initializer, framework="torch"
)
output_bias_initializer = get_initializer_fn(
output_bias_initializer, framework="torch"
)
layers = []
dims = (
[self.input_dim]
+ list(hidden_layer_dims)
+ ([output_dim] if output_dim else [])
)
for i in range(0, len(dims) - 1):
# Whether we are already processing the last (special) output layer.
is_output_layer = output_dim is not None and i == len(dims) - 2
layer = nn.Linear(
dims[i],
dims[i + 1],
bias=output_use_bias if is_output_layer else hidden_layer_use_bias,
)
# Initialize layers, if necessary.
if is_output_layer:
# Initialize output layer weigths if necessary.
if output_weights_initializer:
output_weights_initializer(
layer.weight, **output_weights_initializer_config or {}
)
# Initialize output layer bias if necessary.
if output_bias_initializer:
output_bias_initializer(
layer.bias, **output_bias_initializer_config or {}
)
# Must be hidden.
else:
# Initialize hidden layer weights if necessary.
if hidden_layer_weights_initializer:
hidden_weights_initializer(
layer.weight, **hidden_layer_weights_initializer_config or {}
)
# Initialize hidden layer bias if necessary.
if hidden_layer_bias_initializer:
hidden_bias_initializer(
layer.bias, **hidden_layer_bias_initializer_config or {}
)
layers.append(layer)
# We are still in the hidden layer section: Possibly add layernorm and
# hidden activation.
if not is_output_layer:
# Insert a layer normalization in between layer's output and
# the activation.
if hidden_layer_use_layernorm:
# We use an epsilon of 0.001 here to mimick the Tf default behavior.
layers.append(nn.LayerNorm(dims[i + 1], eps=0.001))
# Add the activation function.
if hidden_activation is not None:
layers.append(hidden_activation())
# Add output layer's (if any) activation.
output_activation = get_activation_fn(output_activation, framework="torch")
if output_dim is not None and output_activation is not None:
layers.append(output_activation())
self.mlp = nn.Sequential(*layers)
def forward(self, x):
return self.mlp(x)
| TorchMLP |
python | neetcode-gh__leetcode | python/0075-sort-colors.py | {
"start": 0,
"end": 480
} | class ____:
def sortColors(self, nums: List[int]) -> None:
low = 0
high = len(nums) - 1
mid = 0
while mid <= high:
if nums[mid] == 0:
nums[low], nums[mid] = nums[mid], nums[low]
low += 1
mid += 1
elif nums[mid] == 1:
mid +=1
else:
nums[mid], nums[high] = nums[high], nums[mid]
high -= 1
return nums
| Solution |
python | huggingface__transformers | src/transformers/models/t5gemma/modeling_t5gemma.py | {
"start": 42699,
"end": 47717
} | class ____(T5GemmaPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.out_proj.weight": "model.decoder.embed_tokens.weight"}
_tp_plan = {"lm_head.out_proj": "colwise_rep"}
_pp_plan = {"lm_head.out_proj": (["hidden_states"], ["logits"])}
def __init__(self, config: T5GemmaConfig):
config.is_encoder_decoder = True
super().__init__(config)
self.model = T5GemmaModel(config)
self.vocab_size = config.decoder.vocab_size
self.lm_head = T5GemmaLMHead(config.decoder.hidden_size, self.vocab_size)
self.loss_type = "ForMaskedLM"
self.post_init()
def set_output_embeddings(self, new_embeddings):
self.lm_head.out_proj = new_embeddings
def get_output_embeddings(self):
return self.lm_head.out_proj
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
decoder_position_ids: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[BaseModelOutput] = None,
past_key_values: Optional[EncoderDecoderCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]:
r"""
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
decoder_outputs: Seq2SeqModelOutput = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = decoder_outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
decoder_config = self.get_decoder().config
if decoder_config.final_logit_softcapping is not None:
logits = logits / decoder_config.final_logit_softcapping
logits = torch.tanh(logits)
logits = logits * decoder_config.final_logit_softcapping
loss = None
if labels is not None:
# Input has right-shifted so we directly perform masked lm loss
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
return Seq2SeqLMOutput(
loss=loss,
logits=logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.decoder_hidden_states,
decoder_attentions=decoder_outputs.decoder_attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=decoder_outputs.encoder_last_hidden_state,
encoder_hidden_states=decoder_outputs.encoder_hidden_states,
encoder_attentions=decoder_outputs.encoder_attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
@auto_docstring
| T5GemmaForConditionalGeneration |
python | spyder-ide__spyder | spyder/plugins/appearance/plugin.py | {
"start": 725,
"end": 2164
} | class ____(SpyderPluginV2):
"""
Appearance Plugin.
"""
NAME = "appearance"
# TODO: Fix requires to reflect the desired order in the preferences
REQUIRES = [Plugins.Preferences]
CONTAINER_CLASS = None
CONF_SECTION = NAME
CONF_WIDGET_CLASS = AppearanceConfigPage
CONF_FILE = False
CAN_BE_DISABLED = False
# ---- SpyderPluginV2 API
# -------------------------------------------------------------------------
@staticmethod
def get_name():
return _("Appearance")
@staticmethod
def get_description():
return _("Manage application appearance and themes.")
@classmethod
def get_icon(cls):
return cls.create_icon('eyedropper')
def on_initialize(self):
# NOTES:
# 1. This avoids applying the color scheme twice at startup, which is
# quite resource intensive.
# 2. Notifications for this option are restored when creating the
# config page.
self.disable_conf('ui_theme')
@on_plugin_available(plugin=Plugins.Preferences)
def register_preferences(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.register_plugin_preferences(self)
@on_plugin_teardown(plugin=Plugins.Preferences)
def deregister_preferences(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.deregister_plugin_preferences(self)
| Appearance |
python | google__jax | jax/_src/lax/fft.py | {
"start": 1135,
"end": 7259
} | class ____(enum.IntEnum):
"Describes which FFT operation to perform."
FFT = 0
"Forward complex-to-complex FFT."
IFFT = 1
"Inverse complex-to-complex FFT."
RFFT = 2
"Forward real-to-complex FFT."
IRFFT = 3
"Inverse real-to-complex FFT."
def _str_to_fft_type(s: str) -> FftType:
if s in ("fft", "FFT"):
return FftType.FFT
elif s in ("ifft", "IFFT"):
return FftType.IFFT
elif s in ("rfft", "RFFT"):
return FftType.RFFT
elif s in ("irfft", "IRFFT"):
return FftType.IRFFT
else:
raise ValueError(f"Unknown FFT type '{s}'")
@jit(static_argnums=(1, 2))
def fft(x, fft_type: FftType | str, fft_lengths: Sequence[int]):
if isinstance(fft_type, str):
typ = _str_to_fft_type(fft_type)
elif isinstance(fft_type, FftType):
typ = fft_type
else:
raise TypeError(f"Unknown FFT type value '{fft_type}'")
if typ == FftType.RFFT:
if np.iscomplexobj(x):
raise ValueError("only real valued inputs supported for rfft")
x = lax.convert_element_type(x, dtypes.to_inexact_dtype(dtypes.dtype(x)))
else:
x = lax.convert_element_type(x, dtypes.to_complex_dtype(dtypes.dtype(x)))
if len(fft_lengths) == 0:
# XLA FFT doesn't support 0-rank.
return x
fft_lengths = tuple(fft_lengths)
return fft_p.bind(x, fft_type=typ, fft_lengths=fft_lengths)
def _fft_impl(x, fft_type, fft_lengths):
return dispatch.apply_primitive(fft_p, x, fft_type=fft_type, fft_lengths=fft_lengths)
_complex_dtype = lambda dtype: (np.zeros((), dtype) + np.zeros((), np.complex64)).dtype
_real_dtype = lambda dtype: np.finfo(dtype).dtype
def fft_abstract_eval(x, fft_type, fft_lengths):
if len(fft_lengths) > x.ndim:
raise ValueError(f"FFT input shape {x.shape} must have at least as many "
f"input dimensions as fft_lengths {fft_lengths}.")
if fft_type == FftType.RFFT:
if x.dtype not in (np.float32, np.float64):
raise ValueError(f"RFFT input must be float32 or float64, got {x.dtype}")
if x.shape[-len(fft_lengths):] != fft_lengths:
raise ValueError(f"RFFT input shape {x.shape} minor dimensions must "
f"be equal to fft_lengths {fft_lengths}")
shape = (x.shape[:-len(fft_lengths)] + fft_lengths[:-1]
+ (fft_lengths[-1] // 2 + 1,))
dtype = _complex_dtype(x.dtype)
elif fft_type == FftType.IRFFT:
if not np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("IRFFT input must be complex64 or complex128, got "
f"{x.dtype}")
if x.shape[-len(fft_lengths):-1] != fft_lengths[:-1]:
raise ValueError(f"IRFFT input shape {x.shape} minor dimensions must "
"be equal to all except the last fft_length, got "
f"{fft_lengths=}")
shape = x.shape[:-len(fft_lengths)] + fft_lengths
dtype = _real_dtype(x.dtype)
else:
if not np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("FFT input must be complex64 or complex128, got "
f"{x.dtype}")
if x.shape[-len(fft_lengths):] != fft_lengths:
raise ValueError(f"FFT input shape {x.shape} minor dimensions must "
f"be equal to fft_lengths {fft_lengths}")
shape = x.shape
dtype = x.dtype
return x.update(shape=shape, dtype=dtype, vma=x.vma)
def _fft_lowering(ctx, x, *, fft_type, fft_lengths):
if not is_constant_shape(fft_lengths):
# TODO: https://github.com/openxla/stablehlo/issues/1366
raise NotImplementedError("Shape polymorphism for FFT with non-constant fft_length is not implemented for TPU and GPU")
return [
hlo.FftOp(x, hlo.FftTypeAttr.get(fft_type.name),
mlir.dense_int_array(fft_lengths)).result
]
def _naive_rfft(x, fft_lengths):
y = fft(x, FftType.FFT, fft_lengths)
n = fft_lengths[-1]
return y[..., : n//2 + 1]
@jit(static_argnums=1)
def _rfft_transpose(t, fft_lengths):
# The transpose of RFFT can't be expressed only in terms of irfft. Instead of
# manually building up larger twiddle matrices (which would increase the
# asymptotic complexity and is also rather complicated), we rely JAX to
# transpose a naive RFFT implementation.
dummy_shape = t.shape[:-len(fft_lengths)] + fft_lengths
dummy_primal = ShapeDtypeStruct(dummy_shape, _real_dtype(t.dtype))
transpose = linear_transpose(
partial(_naive_rfft, fft_lengths=fft_lengths), dummy_primal)
result, = transpose(t)
assert result.dtype == _real_dtype(t.dtype), (result.dtype, t.dtype)
return result
def _irfft_transpose(t, fft_lengths):
# The transpose of IRFFT is the RFFT of the cotangent times a scaling
# factor and a mask. The mask scales the cotangent for the Hermitian
# symmetric components of the RFFT by a factor of two, since these components
# are de-duplicated in the RFFT.
x = fft(t, FftType.RFFT, fft_lengths)
n = x.shape[-1]
is_odd = fft_lengths[-1] % 2
full = partial(lax.full_like, t, dtype=x.dtype)
mask = lax.concatenate(
[full(1.0, shape=(1,)),
full(2.0, shape=(n - 2 + is_odd,)),
full(1.0, shape=(1 - is_odd,))],
dimension=0)
scale = 1 / math.prod(fft_lengths)
out = scale * lax.expand_dims(mask, range(x.ndim - 1)) * x
assert out.dtype == _complex_dtype(t.dtype), (out.dtype, t.dtype)
# Use JAX's convention for complex gradients
# https://github.com/jax-ml/jax/issues/6223#issuecomment-807740707
return lax.conj(out)
def _fft_transpose_rule(t, operand, fft_type, fft_lengths):
if fft_type == FftType.RFFT:
result = _rfft_transpose(t, fft_lengths)
elif fft_type == FftType.IRFFT:
result = _irfft_transpose(t, fft_lengths)
else:
result = fft(t, fft_type, fft_lengths)
return result,
def _fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths):
x, = batched_args
bd, = batch_dims
x = batching.moveaxis(x, bd, 0)
return fft(x, fft_type, fft_lengths), 0
fft_p = Primitive('fft')
fft_p.def_impl(_fft_impl)
fft_p.def_abstract_eval(fft_abstract_eval)
mlir.register_lowering(fft_p, _fft_lowering)
ad.deflinear2(fft_p, _fft_transpose_rule)
batching.primitive_batchers[fft_p] = _fft_batching_rule
| FftType |
python | numba__numba | numba/tests/enum_usecases.py | {
"start": 714,
"end": 803
} | class ____(IntEnum):
dummy = 2
not_found = 404
internal_error = 500
| RequestError |
python | tornadoweb__tornado | tornado/log.py | {
"start": 2462,
"end": 12547
} | class ____(logging.Formatter):
"""Log formatter used in Tornado.
Key features of this formatter are:
* Color support when logging to a terminal that supports it.
* Timestamps on every log line.
* Robust against str/bytes encoding problems.
This formatter is enabled automatically by
`tornado.options.parse_command_line` or `tornado.options.parse_config_file`
(unless ``--logging=none`` is used).
Color support on Windows versions that do not support ANSI color codes is
enabled by use of the colorama__ library. Applications that wish to use
this must first initialize colorama with a call to ``colorama.init``.
See the colorama documentation for details.
__ https://pypi.python.org/pypi/colorama
.. versionchanged:: 4.5
Added support for ``colorama``. Changed the constructor
signature to be compatible with `logging.config.dictConfig`.
"""
DEFAULT_FORMAT = "%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s" # noqa: E501
DEFAULT_DATE_FORMAT = "%y%m%d %H:%M:%S"
DEFAULT_COLORS = {
logging.DEBUG: 4, # Blue
logging.INFO: 2, # Green
logging.WARNING: 3, # Yellow
logging.ERROR: 1, # Red
logging.CRITICAL: 5, # Magenta
}
def __init__(
self,
fmt: str = DEFAULT_FORMAT,
datefmt: str = DEFAULT_DATE_FORMAT,
style: str = "%",
color: bool = True,
colors: Dict[int, int] = DEFAULT_COLORS,
) -> None:
r"""
:arg bool color: Enables color support.
:arg str fmt: Log message format.
It will be applied to the attributes dict of log records. The
text between ``%(color)s`` and ``%(end_color)s`` will be colored
depending on the level if color support is on.
:arg dict colors: color mappings from logging level to terminal color
code
:arg str datefmt: Datetime format.
Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
.. versionchanged:: 3.2
Added ``fmt`` and ``datefmt`` arguments.
"""
logging.Formatter.__init__(self, datefmt=datefmt)
self._fmt = fmt
self._colors = {} # type: Dict[int, str]
if color and _stderr_supports_color():
if curses is not None:
fg_color = curses.tigetstr("setaf") or curses.tigetstr("setf") or b""
for levelno, code in colors.items():
# Convert the terminal control characters from
# bytes to unicode strings for easier use with the
# logging module.
self._colors[levelno] = unicode_type(
curses.tparm(fg_color, code), "ascii"
)
normal = curses.tigetstr("sgr0")
if normal is not None:
self._normal = unicode_type(normal, "ascii")
else:
self._normal = ""
else:
# If curses is not present (currently we'll only get here for
# colorama on windows), assume hard-coded ANSI color codes.
for levelno, code in colors.items():
self._colors[levelno] = "\033[2;3%dm" % code
self._normal = "\033[0m"
else:
self._normal = ""
def format(self, record: Any) -> str:
try:
message = record.getMessage()
assert isinstance(message, basestring_type) # guaranteed by logging
# Encoding notes: The logging module prefers to work with character
# strings, but only enforces that log messages are instances of
# basestring. In python 2, non-ascii bytestrings will make
# their way through the logging framework until they blow up with
# an unhelpful decoding error (with this formatter it happens
# when we attach the prefix, but there are other opportunities for
# exceptions further along in the framework).
#
# If a byte string makes it this far, convert it to unicode to
# ensure it will make it out to the logs. Use repr() as a fallback
# to ensure that all byte strings can be converted successfully,
# but don't do it by default so we don't add extra quotes to ascii
# bytestrings. This is a bit of a hacky place to do this, but
# it's worth it since the encoding errors that would otherwise
# result are so useless (and tornado is fond of using utf8-encoded
# byte strings wherever possible).
record.message = _safe_unicode(message)
except Exception as e:
record.message = f"Bad message ({e!r}): {record.__dict__!r}"
record.asctime = self.formatTime(record, cast(str, self.datefmt))
if record.levelno in self._colors:
record.color = self._colors[record.levelno]
record.end_color = self._normal
else:
record.color = record.end_color = ""
formatted = self._fmt % record.__dict__
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
# exc_text contains multiple lines. We need to _safe_unicode
# each line separately so that non-utf8 bytes don't cause
# all the newlines to turn into '\n'.
lines = [formatted.rstrip()]
lines.extend(_safe_unicode(ln) for ln in record.exc_text.split("\n"))
formatted = "\n".join(lines)
return formatted.replace("\n", "\n ")
def enable_pretty_logging(
options: Any = None, logger: Optional[logging.Logger] = None
) -> None:
"""Turns on formatted logging output as configured.
This is called automatically by `tornado.options.parse_command_line`
and `tornado.options.parse_config_file`.
"""
if options is None:
import tornado.options
options = tornado.options.options
if options.logging is None or options.logging.lower() == "none":
return
if logger is None:
logger = logging.getLogger()
logger.setLevel(getattr(logging, options.logging.upper()))
if options.log_file_prefix:
rotate_mode = options.log_rotate_mode
if rotate_mode == "size":
channel = logging.handlers.RotatingFileHandler(
filename=options.log_file_prefix,
maxBytes=options.log_file_max_size,
backupCount=options.log_file_num_backups,
encoding="utf-8",
) # type: logging.Handler
elif rotate_mode == "time":
channel = logging.handlers.TimedRotatingFileHandler(
filename=options.log_file_prefix,
when=options.log_rotate_when,
interval=options.log_rotate_interval,
backupCount=options.log_file_num_backups,
encoding="utf-8",
)
else:
error_message = (
"The value of log_rotate_mode option should be "
+ '"size" or "time", not "%s".' % rotate_mode
)
raise ValueError(error_message)
channel.setFormatter(LogFormatter(color=False))
logger.addHandler(channel)
if options.log_to_stderr or (options.log_to_stderr is None and not logger.handlers):
# Set up color if we are in a tty and curses is installed
channel = logging.StreamHandler()
channel.setFormatter(LogFormatter())
logger.addHandler(channel)
def define_logging_options(options: Any = None) -> None:
"""Add logging-related flags to ``options``.
These options are present automatically on the default options instance;
this method is only necessary if you have created your own `.OptionParser`.
.. versionadded:: 4.2
This function existed in prior versions but was broken and undocumented until 4.2.
"""
if options is None:
# late import to prevent cycle
import tornado.options
options = tornado.options.options
options.define(
"logging",
default="info",
help=(
"Set the Python log level. If 'none', tornado won't touch the "
"logging configuration."
),
metavar="debug|info|warning|error|none",
)
options.define(
"log_to_stderr",
type=bool,
default=None,
help=(
"Send log output to stderr (colorized if possible). "
"By default use stderr if --log_file_prefix is not set and "
"no other logging is configured."
),
)
options.define(
"log_file_prefix",
type=str,
default=None,
metavar="PATH",
help=(
"Path prefix for log files. "
"Note that if you are running multiple tornado processes, "
"log_file_prefix must be different for each of them (e.g. "
"include the port number)"
),
)
options.define(
"log_file_max_size",
type=int,
default=100 * 1000 * 1000,
help="max size of log files before rollover",
)
options.define(
"log_file_num_backups", type=int, default=10, help="number of log files to keep"
)
options.define(
"log_rotate_when",
type=str,
default="midnight",
help=(
"specify the type of TimedRotatingFileHandler interval "
"other options:('S', 'M', 'H', 'D', 'W0'-'W6')"
),
)
options.define(
"log_rotate_interval",
type=int,
default=1,
help="The interval value of timed rotating",
)
options.define(
"log_rotate_mode",
type=str,
default="size",
help="The mode of rotating files(time or size)",
)
options.add_parse_callback(lambda: enable_pretty_logging(options))
| LogFormatter |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/task_instances.py | {
"start": 1474,
"end": 2817
} | class ____(BaseModel):
"""TaskInstance serializer for responses."""
id: str
task_id: str
dag_id: str
run_id: str = Field(alias="dag_run_id")
map_index: int
logical_date: datetime | None
run_after: datetime
start_date: datetime | None
end_date: datetime | None
duration: float | None
state: TaskInstanceState | None
try_number: int
max_tries: int
task_display_name: str
dag_display_name: str = Field(validation_alias=AliasPath("dag_run", "dag_model", "dag_display_name"))
hostname: str | None
unixname: str | None
pool: str
pool_slots: int
queue: str | None
priority_weight: int | None
operator: str | None
operator_name: str | None
queued_dttm: datetime | None = Field(alias="queued_when")
scheduled_dttm: datetime | None = Field(alias="scheduled_when")
pid: int | None
executor: str | None
executor_config: Annotated[str, BeforeValidator(str)]
note: str | None
rendered_map_index: str | None
rendered_fields: dict = Field(
validation_alias=AliasPath("rendered_task_instance_fields", "rendered_fields"),
default_factory=dict,
)
trigger: TriggerResponse | None
queued_by_job: JobResponse | None = Field(alias="triggerer_job")
dag_version: DagVersionResponse | None
| TaskInstanceResponse |
python | PrefectHQ__prefect | tests/blocks/test_core.py | {
"start": 98898,
"end": 99262
} | class ____(Block):
winner: str = "kendrick"
secret_str: SecretStr
secret_str_manual: PydanticSecret[str]
secret_bytes: SecretBytes
secret_bytes_manual: PydanticSecret[bytes]
secret_int: PydanticSecret[int]
nested_model: NestedFunModel
normal_dictionary: Dict[str, Union[str, Dict[str, Any]]]
secret_dict: SecretDict
| FunSecretModel |
python | sphinx-doc__sphinx | sphinx/builders/latex/transforms.py | {
"start": 6693,
"end": 11361
} | class ____(SphinxPostTransform):
"""Convert footnote definitions and references to appropriate form to LaTeX.
* Replace footnotes on restricted zone (e.g. headings) by footnotemark node.
In addition, append a footnotetext node after the zone.
Before::
<section>
<title>
headings having footnotes
<footnote_reference>
1
<footnote ids="id1">
<label>
1
<paragraph>
footnote body
After::
<section>
<title>
headings having footnotes
<footnotemark refid="id1">
1
<footnotetext ids="id1">
<label>
1
<paragraph>
footnote body
* Integrate footnote definitions and footnote references to single footnote node
Before::
blah blah blah
<footnote_reference refid="id1">
1
blah blah blah ...
<footnote ids="id1">
<label>
1
<paragraph>
footnote body
After::
blah blah blah
<footnote ids="id1">
<label>
1
<paragraph>
footnote body
blah blah blah ...
* Replace second and subsequent footnote references which refers same footnote definition
by footnotemark node. Additionally, the footnote definition node is marked as
"referred".
Before::
blah blah blah
<footnote_reference refid="id1">
1
blah blah blah
<footnote_reference refid="id1">
1
blah blah blah ...
<footnote ids="id1">
<label>
1
<paragraph>
footnote body
After::
blah blah blah
<footnote ids="id1" referred=True>
<label>
1
<paragraph>
footnote body
blah blah blah
<footnotemark refid="id1">
1
blah blah blah ...
* Remove unreferenced footnotes
Before::
<footnote ids="id1">
<label>
1
<paragraph>
Unreferenced footnote!
After::
<!-- nothing! -->
* Move footnotes in a title of table or thead to head of tbody
Before::
<table>
<title>
title having footnote_reference
<footnote_reference refid="id1">
1
<tgroup>
<thead>
<row>
<entry>
header having footnote_reference
<footnote_reference refid="id2">
2
<tbody>
<row>
...
<footnote ids="id1">
<label>
1
<paragraph>
footnote body
<footnote ids="id2">
<label>
2
<paragraph>
footnote body
After::
<table>
<title>
title having footnote_reference
<footnotemark refid="id1">
1
<tgroup>
<thead>
<row>
<entry>
header having footnote_reference
<footnotemark refid="id2">
2
<tbody>
<footnotetext ids="id1">
<label>
1
<paragraph>
footnote body
<footnotetext ids="id2">
<label>
2
<paragraph>
footnote body
<row>
...
"""
default_priority = 600
formats = ('latex',)
def run(self, **kwargs: Any) -> None:
footnotes = list(self.document.findall(nodes.footnote))
for node in footnotes:
node.parent.remove(node)
visitor = LaTeXFootnoteVisitor(self.document, footnotes)
self.document.walkabout(visitor)
| LaTeXFootnoteTransform |
python | google__jax | jax/_src/core.py | {
"start": 71600,
"end": 78075
} | class ____(Exception):
pass
# TODO(dougalm): Cast scalar, numpy arrays, etc to jax arrays so that values
# passed to primitives are always have avals, etc i.e. they are canonical.
def canonicalize_value(val):
try:
aval = get_aval(val)
except TypeError:
return val
if not isinstance(aval, ShapedArray):
return val
if aval.sharding.mesh.empty:
return val
cur_mesh = mesh_lib.get_abstract_mesh()
if cur_mesh == aval.sharding.mesh:
return val
# TODO(yashkatariya): Casting to Explicit is not yet allowed. Maybe we need
# cast_and_slice_p for it since shape might change?
# Atleast 1 mesh axis should be Manual and all other axes should be
# Manual or Auto to allow casting.
if cur_mesh._any_axis_manual and cur_mesh._are_all_axes_auto_or_manual:
if aval.sharding.mesh.are_all_axes_auto:
from jax._src.pjit import reshard # pytype: disable=import-error
return reshard(val, NamedSharding(cur_mesh, P(*[None] * aval.ndim)))
elif aval.sharding.mesh._any_axis_explicit:
raise NotImplementedError(
"Closing over inputs to shard_map where the input is sharded on"
" `Explicit` axes is not implemented. As a workaround, please pass"
" those inputs as an argument to shard_map. Got input with shape"
f" {aval.str_short(True, True)}")
return val
def get_cur_mesh_sharding(spec=None):
spec = P() if spec is None else spec
return NamedSharding(mesh_lib.get_abstract_mesh(), spec)
def _make_lengths_same(sharding, ndim):
pspec = sharding.spec
if ndim > len(pspec):
return sharding.update(spec=pspec._normalized_spec_for_aval(ndim))
if ndim < len(pspec):
assert all(s is None for s in pspec[ndim:]), (ndim, pspec)
return sharding.update(spec=P(*pspec[:ndim], unreduced=pspec.unreduced,
reduced=pspec.reduced))
assert False, "unreachable"
def modify_spec_for_auto_manual(spec, mesh) -> P:
new_spec = [] # type: ignore
# PartitionSpec can only mention mesh axes that are Explicit.
for s in spec:
if s is None:
new_spec.append(s) # type: ignore
elif isinstance(s, tuple):
new_spec.append(tuple(
p for p in s if mesh._name_to_type[p] == AxisType.Explicit))
else:
new_spec.append(s if mesh._name_to_type[s] == AxisType.Explicit else None) # type: ignore
# Unreduced and reduced can mention mesh axes that are Explicit and Manual.
new_unreduced = {u for u in spec.unreduced
if mesh._name_to_type[u] != AxisType.Auto}
new_reduced = {u for u in spec.reduced
if mesh._name_to_type[u] != AxisType.Auto}
return P(*new_spec, unreduced=new_unreduced, reduced=new_reduced)
def remove_size_one_mesh_axis(spec, mesh) -> P:
new_spec = [] # type: ignore
for s in spec:
if s is None:
new_spec.append(s) # type: ignore
elif isinstance(s, tuple):
new_spec.append(tuple(i for i in s if mesh.shape[i] != 1))
else:
new_spec.append(None if mesh.shape[s] == 1 else s) # type: ignore
return P(*new_spec, unreduced=spec.unreduced, reduced=spec.reduced)
def _maybe_modify_sharding(sharding, ndim):
if len(sharding.spec) == 0 or all(s is None for s in sharding.spec):
out = sharding
elif sharding.mesh.are_all_axes_explicit:
out = sharding
else:
out = sharding.update(spec=modify_spec_for_auto_manual(
sharding.spec, sharding.mesh))
if config.remove_size_one_mesh_axis_from_type.value:
out = out.update(spec=remove_size_one_mesh_axis(out.spec, out.mesh))
if len(out.spec) != ndim:
out = _make_lengths_same(out, ndim)
return out
def _check_divisibility(sharding, shape):
mesh = sharding.mesh
for dim, (spec, sh) in enumerate(zip(sharding.spec, shape)):
if spec is None:
continue
spec = spec if isinstance(spec, tuple) else (spec,)
size = math.prod(mesh.shape[s] for s in spec)
_, remainder = divmod(sh, size)
if remainder != 0:
raise ValueError(
f"Sharding spec {spec} implies that array axis {dim} is partitioned"
f" {size} times, but does not evenly divide the dimension size {sh}."
f" Got shape: {shape} and sharding {sharding}")
@cache(max_size=4096,
trace_context_in_key=lambda: config.remove_size_one_mesh_axis_from_type.value)
def get_sharding(sharding, shape):
"""Modifies and checks the sharding.
Some modifications/checks include:
* Making the length of specs the same as ndim
* If a mesh axis is mentioned in pspec is Auto/Manual, replace it with None
* Checking for len(spec)-ndim match
* Checking if the mesh is an AbstractMesh.
"""
ndim = len(shape)
if sharding is None:
return NamedSharding(mesh_lib.empty_abstract_mesh, P(*[None] * ndim))
out_s = _maybe_modify_sharding(sharding, ndim)
if len(out_s.spec) != ndim:
raise ValueError(
"Length of sharding.spec must be equal to aval's ndim. Got"
f" sharding.spec {out_s.spec}, aval.ndim {ndim} and sharding {out_s}")
if not isinstance(out_s.mesh, mesh_lib.AbstractMesh):
raise ValueError("Mesh of an aval must be an AbstractMesh. "
f"Got {out_s.mesh} of type {type(out_s.mesh)}")
_check_divisibility(out_s, shape)
assert out_s.memory_kind is None
return out_s
@cache(max_size=4096,
trace_context_in_key=lambda: config.remove_size_one_mesh_axis_from_type.value)
def get_vma(vma, sharding):
mesh = sharding.mesh
spec = sharding.spec
if mesh.empty:
assert not vma, vma
return vma
axis_env = get_axis_env()
for i in vma:
if axis_env.axis_exists(i) and i not in mesh._name_to_type:
continue
if mesh._name_to_type[i] != AxisType.Manual:
raise ValueError(
"Axes mentioned in `vma` field of ShapedArray should"
f" be of type `Manual`. Got axis: {i} of type {mesh._name_to_type[i]}")
if config.remove_size_one_mesh_axis_from_type.value:
vma = frozenset(i for i in vma if mesh.shape[i] != 1)
if vma & spec.unreduced:
raise ValueError(
f"vma and unreduced cannot have common mesh axes. Got {vma=} and"
f" unreduced={spec.unreduced}")
if vma & spec.reduced:
raise ValueError(
f"vma and reduced cannot have common mesh axes. Got {vma=} and"
f" reduced={spec.reduced}")
assert isinstance(vma, frozenset)
return vma
def get_memory_space(memory_space):
assert isinstance(memory_space, MemorySpace)
return memory_space
| ShardingTypeError |
python | mkdocs__mkdocs | mkdocs/livereload/__init__.py | {
"start": 1706,
"end": 2257
} | class ____(logging.LoggerAdapter):
def process(self, msg: str, kwargs: dict) -> tuple[str, dict]: # type: ignore[override]
return time.strftime("[%H:%M:%S] ") + msg, kwargs
log = _LoggerAdapter(logging.getLogger(__name__), {})
def _normalize_mount_path(mount_path: str) -> str:
"""Ensure the mount path starts and ends with a slash."""
return ("/" + mount_path.lstrip("/")).rstrip("/") + "/"
def _serve_url(host: str, port: int, path: str) -> str:
return f"http://{host}:{port}{_normalize_mount_path(path)}"
| _LoggerAdapter |
python | getsentry__sentry | tests/sentry/api/endpoints/issues/test_organization_derive_code_mappings.py | {
"start": 641,
"end": 11731
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.organization = self.create_organization("federal-bureau-of-control")
self.organization.flags.allow_joinleave = False
self.organization.save()
self.team = self.create_team(organization=self.organization, name="night-springs")
self.create_team_membership(team=self.team, user=self.user)
self.project = self.create_project(organization=self.organization, teams=[self.team])
self.url = reverse(
"sentry-api-0-organization-derive-code-mappings",
args=[self.organization.slug],
)
self.repo = self.create_repo(
name="getsentry/sentry",
provider="integrations:github",
integration_id=self.integration.id,
project=self.project,
)
@patch("sentry.integrations.github.integration.GitHubIntegration.get_trees_for_org")
def test_get_single_match(self, mock_get_trees_for_org: Any) -> None:
config_data = {
"stacktraceFilename": "stack/root/file.py",
}
expected_matches = [
{
"filename": "stack/root/file.py",
"repo_name": "getsentry/codemap",
"repo_branch": "master",
"stacktrace_root": "",
"source_path": "",
}
]
mock_get_trees_for_org.return_value = {
"getsentry/codemap": RepoTree(
RepoAndBranch(
name="getsentry/codemap",
branch="master",
),
files=["stack/root/file.py"],
)
}
response = self.client.get(self.url, data=config_data, format="json")
assert mock_get_trees_for_org.call_count == 1
assert response.status_code == 200, response.content
assert response.data == expected_matches
@patch("sentry.integrations.github.integration.GitHubIntegration.get_trees_for_org")
def test_get_frame_with_module(self, mock_get_trees_for_org: Any) -> None:
config_data = {
"absPath": "Billing.kt",
"module": "com.waffleware.billing.Billing$1",
"platform": "java",
"stacktraceFilename": "Billing.kt",
}
expected_matches = [
{
"filename": "app/src/main/java/com/waffleware/billing/Billing.kt",
"repo_name": "getsentry/codemap",
"repo_branch": "master",
"stacktrace_root": "com/waffleware/billing/",
"source_path": "app/src/main/java/com/waffleware/billing/",
}
]
mock_get_trees_for_org.return_value = {
"getsentry/codemap": RepoTree(
RepoAndBranch(
name="getsentry/codemap",
branch="master",
),
files=["app/src/main/java/com/waffleware/billing/Billing.kt"],
)
}
response = self.client.get(self.url, data=config_data, format="json")
assert response.status_code == 200, response.content
assert response.data == expected_matches
@patch("sentry.integrations.github.integration.GitHubIntegration.get_trees_for_org")
def test_get_start_with_backslash(self, mock_get_trees_for_org: Any) -> None:
file = "stack/root/file.py"
config_data = {"stacktraceFilename": f"/{file}"}
expected_matches = [
{
"filename": file,
"repo_name": "getsentry/codemap",
"repo_branch": "master",
"stacktrace_root": "/",
"source_path": "",
}
]
mock_get_trees_for_org.return_value = {
"getsentry/codemap": RepoTree(
RepoAndBranch(
name="getsentry/codemap",
branch="master",
),
files=["stack/root/file.py"],
)
}
response = self.client.get(self.url, data=config_data, format="json")
assert mock_get_trees_for_org.call_count == 1
assert response.status_code == 200, response.content
assert response.data == expected_matches
@patch("sentry.integrations.github.integration.GitHubIntegration.get_trees_for_org")
def test_get_multiple_matches(self, mock_get_trees_for_org: Any) -> None:
config_data = {
"stacktraceFilename": "stack/root/file.py",
}
expected_matches = [
{
"filename": "stack/root/file.py",
"repo_name": "getsentry/codemap",
"repo_branch": "master",
"stacktrace_root": "",
"source_path": "",
},
{
"filename": "stack/root/file.py",
"repo_name": "getsentry/foobar",
"repo_branch": "master",
"stacktrace_root": "",
"source_path": "",
},
]
mock_get_trees_for_org.return_value = {
"getsentry/codemap": RepoTree(
RepoAndBranch(
name="getsentry/codemap",
branch="master",
),
files=["stack/root/file.py"],
),
"getsentry/foobar": RepoTree(
RepoAndBranch(
name="getsentry/foobar",
branch="master",
),
files=["stack/root/file.py"],
),
}
response = self.client.get(self.url, data=config_data, format="json")
assert mock_get_trees_for_org.call_count == 1
assert response.status_code == 200, response.content
assert response.data == expected_matches
def test_get_no_installation(self) -> None:
config_data = {
"projectId": self.project.id,
"stacktraceFilename": "stack/root/file.py",
}
with (
assume_test_silo_mode(SiloMode.CONTROL),
unguarded_write(using=router.db_for_write(Integration)),
):
Integration.objects.all().delete()
response = self.client.get(self.url, data=config_data, format="json")
assert response.status_code == 404, response.content
@patch("sentry.integrations.github.integration.GitHubIntegration.get_trees_for_org")
def test_get_unsupported_frame_info(self, mock_get_trees_for_org: Any) -> None:
config_data = {
"stacktraceFilename": "top_level_file.py",
}
mock_get_trees_for_org.return_value = {
"getsentry/codemap": RepoTree(
RepoAndBranch(
name="getsentry/codemap",
branch="master",
),
files=["top_level_file.py"],
)
}
response = self.client.get(self.url, data=config_data, format="json")
assert response.status_code == 400, response.content
def test_non_project_member_permissions(self) -> None:
config_data = {
"projectId": self.project.id,
"stackRoot": "/stack/root",
"sourceRoot": "/source/root",
"defaultBranch": "master",
"repoName": "getsentry/codemap",
}
non_member = self.create_user()
non_member_om = self.create_member(organization=self.organization, user=non_member)
self.login_as(user=non_member)
response = self.client.post(self.url, data=config_data, format="json")
assert response.status_code == status.HTTP_403_FORBIDDEN
self.create_team_membership(team=self.team, member=non_member_om)
response = self.client.post(self.url, data=config_data, format="json")
assert response.status_code == status.HTTP_201_CREATED
def test_post_simple(self) -> None:
config_data = {
"projectId": self.project.id,
"stackRoot": "/stack/root",
"sourceRoot": "/source/root",
"defaultBranch": "master",
"repoName": "getsentry/codemap",
}
response = self.client.post(self.url, data=config_data, format="json")
repo = Repository.objects.get(name="getsentry/codemap")
assert response.status_code == 201, response.content
assert response.data == {
"automaticallyGenerated": False,
"id": str(response.data["id"]),
"projectId": str(self.project.id),
"projectSlug": self.project.slug,
"repoId": str(repo.id),
"repoName": "getsentry/codemap",
"provider": {
"aspects": {},
"features": [
"codeowners",
"commits",
"issue-basic",
"issue-sync",
"stacktrace-link",
],
"name": "GitHub",
"canDisable": False,
"key": "github",
"slug": "github",
"canAdd": True,
},
"integrationId": str(self.integration.id),
"stackRoot": "/stack/root",
"sourceRoot": "/source/root",
"defaultBranch": "master",
}
def test_post_no_installation(self) -> None:
config_data = {
"projectId": self.project.id,
"stackRoot": "/stack/root",
"sourceRoot": "/source/root",
"defaultBranch": "master",
"repoName": "name",
}
with (
assume_test_silo_mode(SiloMode.CONTROL),
unguarded_write(using=router.db_for_write(Integration)),
):
Integration.objects.all().delete()
response = self.client.post(self.url, data=config_data, format="json")
assert response.status_code == 404, response.content
def test_post_existing_code_mapping(self) -> None:
RepositoryProjectPathConfig.objects.create(
project=self.project,
stack_root="/stack/root",
source_root="/source/root/wrong",
default_branch="master",
repository=self.repo,
organization_integration_id=self.organization_integration.id,
organization_id=self.organization_integration.organization_id,
integration_id=self.organization_integration.integration_id,
)
config_data = {
"projectId": self.project.id,
"stackRoot": "/stack/root",
"sourceRoot": "/source/root",
"defaultBranch": "master",
"repoName": "name",
}
response = self.client.post(self.url, data=config_data, format="json")
assert response.status_code == 201, response.content
new_code_mapping = RepositoryProjectPathConfig.objects.get(
project=self.project, stack_root="/stack/root"
)
assert new_code_mapping.source_root == "/source/root"
| OrganizationDeriveCodeMappingsTest |
python | ipython__ipython | IPython/extensions/autoreload.py | {
"start": 20981,
"end": 31724
} | class ____(Magics):
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
self._reloader = ModuleReloader(self.shell)
self._reloader.check_all = False
self._reloader.autoload_obj = False
self.loaded_modules = set(sys.modules)
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
"mode",
type=str,
default="now",
nargs="?",
help="""blank or 'now' - Reload all modules (except those excluded by %%aimport)
automatically now.
'0' or 'off' - Disable automatic reloading.
'1' or 'explicit' - Reload only modules imported with %%aimport every
time before executing the Python code typed.
'2' or 'all' - Reload all modules (except those excluded by %%aimport)
every time before executing the Python code typed.
'3' or 'complete' - Same as 2/all, but also adds any new
objects in the module.
By default, a newer autoreload algorithm that diffs the module's source code
with the previous version and only reloads changed parts is applied for modes
2 and below. To use the original algorithm, add the `-` suffix to the mode,
e.g. '%autoreload 2-', or pass in --full.
""",
)
@magic_arguments.argument(
"-p",
"--print",
action="store_true",
default=False,
help="Show autoreload activity using `print` statements",
)
@magic_arguments.argument(
"-l",
"--log",
action="store_true",
default=False,
help="Show autoreload activity using the logger",
)
@magic_arguments.argument(
"--hide-errors",
action="store_true",
default=False,
help="Hide autoreload errors",
)
@magic_arguments.argument(
"--full",
action="store_true",
default=False,
help="Don't ever use new diffing algorithm",
)
def autoreload(self, line=""):
r"""%autoreload => Reload modules automatically
%autoreload or %autoreload now
Reload all modules (except those excluded by %aimport) automatically
now.
%autoreload 0 or %autoreload off
Disable automatic reloading.
%autoreload 1 or %autoreload explicit
Reload only modules imported with %aimport every time before executing
the Python code typed.
%autoreload 2 or %autoreload all
Reload all modules (except those excluded by %aimport) every time
before executing the Python code typed.
%autoreload 3 or %autoreload complete
Same as 2/all, but also but also adds any new objects in the module. See
unit test at IPython/extensions/tests/test_autoreload.py::test_autoload_newly_added_objects
The optional arguments --print and --log control display of autoreload activity. The default
is to act silently; --print (or -p) will print out the names of modules that are being
reloaded, and --log (or -l) outputs them to the log at INFO level.
The optional argument --hide-errors hides any errors that can happen when trying to
reload code.
Reloading Python modules in a reliable way is in general
difficult, and unexpected things may occur. %autoreload tries to
work around common pitfalls by replacing function code objects and
parts of classes previously in the module with new versions. This
makes the following things to work:
- Functions and classes imported via 'from xxx import foo' are upgraded
to new versions when 'xxx' is reloaded.
- Methods and properties of classes are upgraded on reload, so that
calling 'c.foo()' on an object 'c' created before the reload causes
the new code for 'foo' to be executed.
Some of the known remaining caveats are:
- Replacing code objects does not always succeed: changing a @property
in a class to an ordinary method or a method to a member variable
can cause problems (but in old objects only).
- Functions that are removed (eg. via monkey-patching) from a module
before it is reloaded are not upgraded.
- C extension modules cannot be reloaded, and so cannot be
autoreloaded.
"""
args = magic_arguments.parse_argstring(self.autoreload, line)
mode = args.mode.lower()
enable_deduperreload = not args.full
if mode.endswith("-"):
enable_deduperreload = False
mode = mode[:-1]
self._reloader.deduper_reloader.enabled = enable_deduperreload
p = print
logger = logging.getLogger("autoreload")
l = logger.info
def pl(msg):
p(msg)
l(msg)
if args.print is False and args.log is False:
self._reloader._report = lambda msg: None
elif args.print is True:
if args.log is True:
self._reloader._report = pl
else:
self._reloader._report = p
elif args.log is True:
self._reloader._report = l
self._reloader.hide_errors = args.hide_errors
if mode == "" or mode == "now":
self._reloader.check(True)
elif mode == "0" or mode == "off":
self._reloader.enabled = False
elif mode == "1" or mode == "explicit":
self._reloader.enabled = True
self._reloader.check_all = False
self._reloader.autoload_obj = False
elif mode == "2" or mode == "all":
self._reloader.enabled = True
self._reloader.check_all = True
self._reloader.autoload_obj = False
elif mode == "3" or mode == "complete":
self._reloader.enabled = True
self._reloader.check_all = True
self._reloader.autoload_obj = True
else:
raise ValueError(f'Unrecognized autoreload mode "{mode}".')
@line_magic
def aimport(self, parameter_s="", stream=None):
"""%aimport => Import modules for automatic reloading.
%aimport
List modules to automatically import and not to import.
%aimport foo
Import module 'foo' and mark it to be autoreloaded for %autoreload explicit
%aimport foo, bar
Import modules 'foo', 'bar' and mark them to be autoreloaded for %autoreload explicit
%aimport -foo, bar
Mark module 'foo' to not be autoreloaded for %autoreload explicit, all, or complete, and 'bar'
to be autoreloaded for mode explicit.
"""
modname = parameter_s
if not modname:
to_reload = sorted(self._reloader.modules.keys())
to_skip = sorted(self._reloader.skip_modules.keys())
if stream is None:
stream = sys.stdout
if self._reloader.check_all:
stream.write("Modules to reload:\nall-except-skipped\n")
else:
stream.write("Modules to reload:\n%s\n" % " ".join(to_reload))
stream.write("\nModules to skip:\n%s\n" % " ".join(to_skip))
else:
for _module in [_.strip() for _ in modname.split(",")]:
if _module.startswith("-"):
_module = _module[1:].strip()
self._reloader.mark_module_skipped(_module)
else:
top_module, top_name = self._reloader.aimport_module(_module)
# Inject module to user namespace
self.shell.push({top_name: top_module})
def pre_run_cell(self, info):
# Store the execution info for later use in post_execute_hook
self._last_execution_info = info
if self._reloader.enabled:
try:
self._reloader.check()
except:
pass
def post_execute_hook(self):
"""Cache the modification times of any modules imported in this execution and track imports"""
# Track imports from the recently executed code if autoreload 3 is enabled
if self._reloader.enabled and self._reloader.autoload_obj:
# Use the stored execution info
if (
hasattr(self, "_last_execution_info")
and self._last_execution_info
and self._last_execution_info.transformed_cell
):
self._track_imports_from_code(
self._last_execution_info.transformed_cell
)
newly_loaded_modules = set(sys.modules) - self.loaded_modules
for modname in newly_loaded_modules:
_, pymtime = self._reloader.filename_and_mtime(sys.modules[modname])
if pymtime is not None:
self._reloader.modules_mtimes[modname] = pymtime
self.loaded_modules.update(newly_loaded_modules)
def _track_imports_from_code(self, code: str) -> None:
"""Track import statements from executed code"""
try:
tree = ast.parse(code)
for node in ast.walk(tree):
# Handle "from X import Y" style imports
if isinstance(node, ast.ImportFrom):
mod = node.module
# Skip relative imports that don't have a module name
if mod is None:
continue
for name in node.names:
# name.name is going to be actual name that we want to import from module
# name.asname is Z in the case of from X import Y as Z
# we should update Z in the shell in this situation, so track it too.
original_name = name.name
resolved_name = name.asname if name.asname else name.name
# Since the code executed successfully, we know this import is valid
self._reloader.import_from_tracker.add_import(
mod, original_name, resolved_name
)
except (SyntaxError, ValueError):
# If there's a syntax error, skip import tracking
# (though this shouldn't happen since the code already executed successfully)
pass
def load_ipython_extension(ip):
"""Load the extension in IPython."""
auto_reload = AutoreloadMagics(ip)
ip.register_magics(auto_reload)
ip.events.register("pre_run_cell", auto_reload.pre_run_cell)
ip.events.register("post_execute", auto_reload.post_execute_hook)
| AutoreloadMagics |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 1693,
"end": 1725
} | class ____[*Ts = P1]: ...
| ClassTs7 |
python | falconry__falcon | falcon/routing/compiled.py | {
"start": 44725,
"end": 45130
} | class ____(_CxChild):
def __init__(self, param_name: str, field_value_name: str) -> None:
self._param_name = param_name
self._field_value_name = field_value_name
def src(self, indentation: int) -> str:
return "{0}params['{1}'] = {2}".format(
_TAB_STR * indentation,
self._param_name,
self._field_value_name,
)
| _CxSetParamFromValue |
python | cherrypy__cherrypy | cherrypy/lib/sessions.py | {
"start": 14449,
"end": 16221
} | class ____(Session):
"""A memory-baked session store implementation."""
# Class-level objects. Don't rebind these!
cache = {}
locks = {}
def clean_up(self):
"""Clean up expired sessions."""
now = self.now()
for _id, (data, expiration_time) in self.cache.copy().items():
if expiration_time <= now:
try:
del self.cache[_id]
except KeyError:
pass
try:
if self.locks[_id].acquire(blocking=False):
lock = self.locks.pop(_id)
lock.release()
except KeyError:
pass
# added to remove obsolete lock objects
for _id in list(self.locks):
locked = _id not in self.cache and self.locks[_id].acquire(
blocking=False,
)
if locked:
lock = self.locks.pop(_id)
lock.release()
def _exists(self):
return self.id in self.cache
def _load(self):
return self.cache.get(self.id)
def _save(self, expiration_time):
self.cache[self.id] = (self._data, expiration_time)
def _delete(self):
self.cache.pop(self.id, None)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
return len(self.cache)
| RamSession |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/test_utils.py | {
"start": 225,
"end": 471
} | class ____:
"""
Fixture for testing list_to_scope()/scope_to_list() with objects other
than regular strings.
"""
def __init__(self, scope):
self.scope = scope
def __str__(self):
return self.scope
| ScopeObject |
python | ansible__ansible | lib/ansible/galaxy/dependency_resolution/reporters.py | {
"start": 1347,
"end": 3389
} | class ____(BaseReporter):
"""A dependency reporter for Ansible Collections.
This is a proxy class allowing us to abstract away importing resolvelib
outside of the `ansible.galaxy.dependency_resolution` Python package.
"""
def __init__(self) -> None:
"""Initialize the collection rejection counter."""
super().__init__()
self.reject_count_by_fqcn: defaultdict[str, int] = defaultdict(int)
def _maybe_log_rejection_message(self, candidate: Candidate) -> bool:
"""Print out rejection messages on pre-defined limit hits."""
# Inspired by https://github.com/pypa/pip/commit/9731131
self.reject_count_by_fqcn[candidate.fqcn] += 1
collection_rejections_count = self.reject_count_by_fqcn[candidate.fqcn]
if collection_rejections_count not in _MESSAGES_AT_REJECT_COUNT:
return False
collection_rejection_message = _MESSAGES_AT_REJECT_COUNT[
collection_rejections_count
]
display.display(collection_rejection_message.format(fqcn=candidate.fqcn))
return True
def rejecting_candidate( # resolvelib >= 0.9.0
self,
criterion: Criterion[Candidate, Requirement],
candidate: Candidate,
) -> None:
"""Augment rejection messages with conflict details."""
if not self._maybe_log_rejection_message(candidate):
return
msg = 'Will try a different candidate, due to conflict:'
for req_info in criterion.information:
req, parent = req_info.requirement, req_info.parent
msg += '\n '
if parent:
msg += f'{parent !s} depends on '
else:
msg += 'The user requested '
msg += str(req)
display.v(msg)
def backtracking(self, candidate: Candidate) -> None: # resolvelib < 0.9.0
"""Print out rejection messages on pre-defined limit hits."""
self._maybe_log_rejection_message(candidate)
| CollectionDependencyReporter |
python | PrefectHQ__prefect | tests/test_context.py | {
"start": 5603,
"end": 12292
} | class ____:
@pytest.fixture(autouse=True)
def temporary_profiles_path(self, tmp_path):
path = tmp_path / "profiles.toml"
with temporary_settings(
updates={PREFECT_HOME: tmp_path, PREFECT_PROFILES_PATH: path}
):
yield path
def test_settings_context_variable(self):
with SettingsContext(
profile=Profile(name="test", settings={}),
settings=prefect.settings.get_current_settings(),
) as context:
assert get_settings_context() is context
assert context.profile == Profile(name="test", settings={})
assert context.settings == prefect.settings.get_current_settings()
def test_get_settings_context_missing(self, monkeypatch):
# It's kind of hard to actually exit the default profile, so we patch `get`
monkeypatch.setattr(
"prefect.context.SettingsContext.get", MagicMock(return_value=None)
)
with pytest.raises(MissingContextError, match="No settings context found"):
get_settings_context()
def test_settings_context_uses_settings(self, temporary_profiles_path):
temporary_profiles_path.write_text(
textwrap.dedent(
"""
[profiles.foo]
PREFECT_API_URL="test"
"""
)
)
with use_profile("foo") as ctx:
assert prefect.settings.PREFECT_API_URL.value() == "test"
assert ctx.settings == prefect.settings.get_current_settings()
assert ctx.profile == Profile(
name="foo",
settings={PREFECT_API_URL: "test"},
source=temporary_profiles_path,
)
def test_root_settings_context_creates_home(self, tmpdir, monkeypatch):
monkeypatch.setenv("PREFECT_HOME", str(tmpdir / "testing"))
with root_settings_context() as ctx:
assert ctx.settings.home == tmpdir / "testing"
assert ctx.settings.home.exists()
def test_settings_context_does_not_setup_logging(self, monkeypatch):
setup_logging = MagicMock()
monkeypatch.setattr(
"prefect.logging.configuration.setup_logging", setup_logging
)
with use_profile("ephemeral"):
setup_logging.assert_not_called()
def test_settings_context_nesting(self, temporary_profiles_path):
temporary_profiles_path.write_text(
textwrap.dedent(
"""
[profiles.foo]
PREFECT_API_URL="foo"
[profiles.bar]
PREFECT_API_URL="bar"
"""
)
)
with use_profile("foo") as foo_context:
with use_profile("bar") as bar_context:
assert bar_context.settings == prefect.settings.get_current_settings()
assert (
prefect.settings.PREFECT_API_URL.value_from(bar_context.settings)
== "bar"
)
assert bar_context.profile == Profile(
name="bar",
settings={PREFECT_API_URL: "bar"},
source=temporary_profiles_path,
)
assert foo_context.settings == prefect.settings.get_current_settings()
assert (
prefect.settings.PREFECT_API_URL.value_from(foo_context.settings)
== "foo"
)
assert foo_context.profile == Profile(
name="foo",
settings={PREFECT_API_URL: "foo"},
source=temporary_profiles_path,
)
@pytest.fixture
def foo_profile(self, temporary_profiles_path):
profile = Profile(
name="foo",
settings={PREFECT_API_KEY: "xxx"},
source=temporary_profiles_path,
)
save_profiles(ProfilesCollection(profiles=[profile]))
return profile
def test_root_settings_context_default(self):
result = root_settings_context()
assert result is not None
assert isinstance(result, SettingsContext)
@pytest.mark.parametrize(
"cli_command",
[
# No profile name provided
["prefect", "--profile"],
# Not called via `prefect` CLI
["foobar", "--profile", "test"],
],
)
def test_root_settings_context_default_if_cli_args_do_not_match_format(
self, monkeypatch, cli_command
):
monkeypatch.setattr("sys.argv", cli_command)
result = root_settings_context()
assert result is not None
def test_root_settings_context_respects_cli(self, monkeypatch, foo_profile):
use_profile = MagicMock()
monkeypatch.setattr("prefect.context.use_profile", use_profile)
monkeypatch.setattr("sys.argv", ["/prefect", "--profile", "foo"])
result = root_settings_context()
assert result is not None
def test_root_settings_context_respects_environment_variable(
self, temporary_profiles_path, monkeypatch
):
temporary_profiles_path.write_text(
textwrap.dedent(
"""
[profiles.foo]
PREFECT_API_URL="foo"
"""
)
)
monkeypatch.setenv("PREFECT_PROFILE", "foo")
settings_context = root_settings_context()
assert settings_context.profile.name == "foo"
def test_root_settings_context_missing_environment_variables(
self, monkeypatch, capsys
):
use_profile = MagicMock()
monkeypatch.setattr("prefect.context.use_profile", use_profile)
monkeypatch.setenv("PREFECT_PROFILE", "bar")
root_settings_context()
_, err = capsys.readouterr()
assert (
"profile 'bar' set by environment variable not found. The default profile"
" will be used instead." in err
)
@pytest.mark.usefixtures("remove_existing_settings_context")
def test_root_settings_context_accessible_in_new_thread(self):
from concurrent.futures.thread import ThreadPoolExecutor
with ThreadPoolExecutor() as executor:
result = executor.submit(get_settings_context).result()
assert result == GLOBAL_SETTINGS_CONTEXT
@pytest.mark.usefixtures("remove_existing_settings_context")
def test_root_settings_context_accessible_in_new_loop(self):
from anyio.from_thread import start_blocking_portal
with start_blocking_portal() as portal:
result = portal.call(get_settings_context)
assert result == GLOBAL_SETTINGS_CONTEXT
| TestSettingsContext |
python | mlflow__mlflow | dev/clint/src/clint/rules/empty_notebook_cell.py | {
"start": 36,
"end": 167
} | class ____(Rule):
def _message(self) -> str:
return "Empty notebook cell. Remove it or add some content."
| EmptyNotebookCell |
python | pytorch__pytorch | torch/utils/_sympy/functions.py | {
"start": 20921,
"end": 21140
} | class ____(sympy.Function):
is_integer = True
@classmethod
def eval(cls, base, shift):
if shift < 0:
raise ValueError("negative shift count")
return FloorDiv(base, 2**shift)
| RShift |
python | fluentpython__example-code | attic/attributes/hasattr.py | {
"start": 295,
"end": 869
} | class ____:
def __init__(self):
self.gadget = True
gizmo = Gizmo()
test_keys = 'hasattr', 'getattr', 'tryget'
def test():
for test_key in test_keys:
test_name = 'test_' + test_key
test = globals()[test_name]
setup = 'from __main__ import gizmo'
t_present = min(timeit.repeat(test, setup=setup))
del gizmo.gadget
t_absent = min(timeit.repeat(test, setup=setup))
gizmo.gadget = True
print('{:7} {:.3f} {:.3f}'.format(test_key, t_present, t_absent))
if __name__ == '__main__':
test()
| Gizmo |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-llm-rails/llama_index/embeddings/llm_rails/base.py | {
"start": 214,
"end": 3755
} | class ____(BaseEmbedding):
"""
LLMRails embedding models.
This class provides an interface to generate embeddings using a model deployed
in an LLMRails cluster. It requires a model_id of the model deployed in the cluster and api key you can obtain
from https://console.llmrails.com/api-keys.
"""
model_id: str
api_key: str
session: requests.Session
@classmethod
def class_name(self) -> str:
return "LLMRailsEmbedding"
def __init__(
self,
api_key: str,
model_id: str = "embedding-english-v1", # or embedding-multi-v1
**kwargs: Any,
):
retry = Retry(
total=3,
connect=3,
read=2,
allowed_methods=["POST"],
backoff_factor=2,
status_forcelist=[502, 503, 504],
)
session = requests.Session()
session.mount("https://api.llmrails.com", HTTPAdapter(max_retries=retry))
session.headers = {"X-API-KEY": api_key}
super().__init__(model_id=model_id, api_key=api_key, session=session, **kwargs)
def _get_embedding(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
try:
response = self.session.post(
"https://api.llmrails.com/v1/embeddings",
json={"input": [text], "model": self.model_id},
)
response.raise_for_status()
return response.json()["data"][0]["embedding"]
except requests.exceptions.HTTPError as e:
logger.error(f"Error while embedding text {e}.")
raise ValueError(f"Unable to embed given text {e}")
async def _aget_embedding(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
try:
import httpx
except ImportError:
raise ImportError(
"The httpx library is required to use the async version of "
"this function. Install it with `pip install httpx`."
)
try:
async with httpx.AsyncClient() as client:
response = await client.post(
"https://api.llmrails.com/v1/embeddings",
headers={"X-API-KEY": self.api_key},
json={"input": [text], "model": self.model_id},
)
response.raise_for_status()
return response.json()["data"][0]["embedding"]
except httpx._exceptions.HTTPError as e:
logger.error(f"Error while embedding text {e}.")
raise ValueError(f"Unable to embed given text {e}")
def _get_text_embedding(self, text: str) -> List[float]:
return self._get_embedding(text)
def _get_query_embedding(self, query: str) -> List[float]:
return self._get_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
return await self._aget_embedding(query)
async def _aget_text_embedding(self, query: str) -> List[float]:
return await self._aget_embedding(query)
LLMRailsEmbeddings = LLMRailsEmbedding
| LLMRailsEmbedding |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 75057,
"end": 80852
} | class ____(Structure):
pass # opaque handle
c_nvmlEventSet_t = POINTER(struct_c_nvmlEventSet_t)
nvmlEventTypeSingleBitEccError = 0x0000000000000001
nvmlEventTypeDoubleBitEccError = 0x0000000000000002
nvmlEventTypePState = 0x0000000000000004
nvmlEventTypeXidCriticalError = 0x0000000000000008
nvmlEventTypeClock = 0x0000000000000010
nvmlEventTypePowerSourceChange = 0x0000000000000080
nvmlEventMigConfigChange = 0x0000000000000100
nvmlEventTypeSingleBitEccErrorStorm = 0x0000000000000200
nvmlEventTypeDramRetirementEvent = 0x0000000000000400
nvmlEventTypeDramRetirementFailure = 0x0000000000000800
nvmlEventTypeNonFatalPoisonError = 0x0000000000001000
nvmlEventTypeFatalPoisonError = 0x0000000000002000
nvmlEventTypeGpuUnavailableError = 0x0000000000004000
nvmlEventTypeGpuRecoveryAction = 0x0000000000008000
nvmlEventTypeNone = 0x0000000000000000
nvmlEventTypeAll = (
nvmlEventTypeNone
| nvmlEventTypeSingleBitEccError
| nvmlEventTypeDoubleBitEccError
| nvmlEventTypePState
| nvmlEventTypeClock
| nvmlEventTypePowerSourceChange
| nvmlEventTypeXidCriticalError
| nvmlEventMigConfigChange
| nvmlEventTypeSingleBitEccErrorStorm
| nvmlEventTypeDramRetirementEvent
| nvmlEventTypeDramRetirementFailure
| nvmlEventTypeNonFatalPoisonError
| nvmlEventTypeFatalPoisonError
| nvmlEventTypeGpuUnavailableError
| nvmlEventTypeGpuRecoveryAction
)
## Clock Event Reasons defines
nvmlClocksEventReasonGpuIdle = 0x0000000000000001
nvmlClocksEventReasonApplicationsClocksSetting = 0x0000000000000002
nvmlClocksEventReasonUserDefinedClocks = nvmlClocksEventReasonApplicationsClocksSetting # deprecated, use nvmlClocksEventReasonApplicationsClocksSetting
nvmlClocksEventReasonSwPowerCap = 0x0000000000000004
nvmlClocksEventReasonHwSlowdown = 0x0000000000000008
nvmlClocksEventReasonSyncBoost = 0x0000000000000010
nvmlClocksEventReasonSwThermalSlowdown = 0x0000000000000020
nvmlClocksEventReasonHwThermalSlowdown = 0x0000000000000040
nvmlClocksEventReasonHwPowerBrakeSlowdown = 0x0000000000000080
nvmlClocksEventReasonDisplayClockSetting = 0x0000000000000100
nvmlClocksEventReasonNone = 0x0000000000000000
nvmlClocksEventReasonAll = (
nvmlClocksEventReasonNone |
nvmlClocksEventReasonGpuIdle |
nvmlClocksEventReasonApplicationsClocksSetting |
nvmlClocksEventReasonSwPowerCap |
nvmlClocksEventReasonHwSlowdown |
nvmlClocksEventReasonSyncBoost |
nvmlClocksEventReasonSwThermalSlowdown |
nvmlClocksEventReasonHwThermalSlowdown |
nvmlClocksEventReasonHwPowerBrakeSlowdown |
nvmlClocksEventReasonDisplayClockSetting
)
## Following have been deprecated
nvmlClocksThrottleReasonGpuIdle = 0x0000000000000001
nvmlClocksThrottleReasonApplicationsClocksSetting = 0x0000000000000002
nvmlClocksThrottleReasonUserDefinedClocks = nvmlClocksThrottleReasonApplicationsClocksSetting # deprecated, use nvmlClocksThrottleReasonApplicationsClocksSetting
nvmlClocksThrottleReasonSwPowerCap = 0x0000000000000004
nvmlClocksThrottleReasonHwSlowdown = 0x0000000000000008
nvmlClocksThrottleReasonSyncBoost = 0x0000000000000010
nvmlClocksThrottleReasonSwThermalSlowdown = 0x0000000000000020
nvmlClocksThrottleReasonHwThermalSlowdown = 0x0000000000000040
nvmlClocksThrottleReasonHwPowerBrakeSlowdown = 0x0000000000000080
nvmlClocksThrottleReasonDisplayClockSetting = 0x0000000000000100
nvmlClocksThrottleReasonNone = 0x0000000000000000
nvmlClocksThrottleReasonAll = (
nvmlClocksThrottleReasonNone |
nvmlClocksThrottleReasonGpuIdle |
nvmlClocksThrottleReasonApplicationsClocksSetting |
nvmlClocksThrottleReasonSwPowerCap |
nvmlClocksThrottleReasonHwSlowdown |
nvmlClocksThrottleReasonSyncBoost |
nvmlClocksThrottleReasonSwThermalSlowdown |
nvmlClocksThrottleReasonHwThermalSlowdown |
nvmlClocksThrottleReasonHwPowerBrakeSlowdown |
nvmlClocksThrottleReasonDisplayClockSetting
)
| struct_c_nvmlEventSet_t |
python | readthedocs__readthedocs.org | readthedocs/search/api/v3/queryparser.py | {
"start": 74,
"end": 220
} | class ____:
def __init__(self, *, name, value, type):
self.name = name
self.value = value
self.type = type
| ArgumentToken |
python | numpy__numpy | numpy/ma/tests/test_subclassing.py | {
"start": 4548,
"end": 5453
} | class ____(NDArrayOperatorsMixin):
"""
Wrapping a MaskedArray rather than subclassing to test that
ufunc deferrals are commutative.
See: https://github.com/numpy/numpy/issues/15200)
"""
__slots__ = ('_array', 'attrs')
__array_priority__ = 20
def __init__(self, array, **attrs):
self._array = array
self.attrs = attrs
def __repr__(self):
return f"{self.__class__.__name__}(\n{self._array}\n{self.attrs}\n)"
def __array__(self, dtype=None, copy=None):
return np.asarray(self._array)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if method == '__call__':
inputs = [arg._array if isinstance(arg, self.__class__) else arg
for arg in inputs]
return self.__class__(ufunc(*inputs, **kwargs), **self.attrs)
else:
return NotImplemented
| WrappedArray |
python | modin-project__modin | modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py | {
"start": 1263,
"end": 9267
} | class ____(PandasDataframeAxisPartition):
"""
The class implements the interface in ``PandasDataframeAxisPartition``.
Parameters
----------
list_of_partitions : Union[list, PandasOnRayDataframePartition]
List of ``PandasOnRayDataframePartition`` and
``PandasOnRayDataframeVirtualPartition`` objects, or a single
``PandasOnRayDataframePartition``.
get_ip : bool, default: False
Whether to get node IP addresses to conforming partitions or not.
full_axis : bool, default: True
Whether or not the virtual partition encompasses the whole axis.
call_queue : list, optional
A list of tuples (callable, args, kwargs) that contains deferred calls.
length : ray.ObjectRef or int, optional
Length, or reference to length, of wrapped ``pandas.DataFrame``.
width : ray.ObjectRef or int, optional
Width, or reference to width, of wrapped ``pandas.DataFrame``.
"""
_PARTITIONS_METADATA_LEN = 3 # (length, width, ip)
partition_type = PandasOnRayDataframePartition
axis = None
# these variables are intentionally initialized at runtime (see #6023)
_DEPLOY_AXIS_FUNC = None
_DEPLOY_SPLIT_FUNC = None
_DRAIN_FUNC = None
@classmethod
def _get_deploy_axis_func(cls): # noqa: GL08
if cls._DEPLOY_AXIS_FUNC is None:
cls._DEPLOY_AXIS_FUNC = RayWrapper.put(
PandasDataframeAxisPartition.deploy_axis_func
)
return cls._DEPLOY_AXIS_FUNC
@classmethod
def _get_deploy_split_func(cls): # noqa: GL08
if cls._DEPLOY_SPLIT_FUNC is None:
cls._DEPLOY_SPLIT_FUNC = RayWrapper.put(
PandasDataframeAxisPartition.deploy_splitting_func
)
return cls._DEPLOY_SPLIT_FUNC
@classmethod
def _get_drain_func(cls): # noqa: GL08
if cls._DRAIN_FUNC is None:
cls._DRAIN_FUNC = RayWrapper.put(PandasDataframeAxisPartition.drain)
return cls._DRAIN_FUNC
@property
def list_of_ips(self):
"""
Get the IPs holding the physical objects composing this partition.
Returns
-------
List
A list of IPs as ``ray.ObjectRef`` or str.
"""
# Defer draining call queue until we get the ip address
result = [None] * len(self.list_of_block_partitions)
for idx, partition in enumerate(self.list_of_block_partitions):
partition.drain_call_queue()
result[idx] = partition.ip(materialize=False)
return result
@classmethod
@_inherit_docstrings(PandasDataframeAxisPartition.deploy_splitting_func)
def deploy_splitting_func(
cls,
axis,
func,
f_args,
f_kwargs,
num_splits,
*partitions,
extract_metadata=False,
):
return _deploy_ray_func.options(
num_returns=(
num_splits * (1 + cls._PARTITIONS_METADATA_LEN)
if extract_metadata
else num_splits
),
resources=RayTaskCustomResources.get(),
).remote(
cls._get_deploy_split_func(),
*f_args,
num_splits,
*partitions,
axis=axis,
f_to_deploy=func,
f_len_args=len(f_args),
f_kwargs=f_kwargs,
extract_metadata=extract_metadata,
)
@classmethod
def deploy_axis_func(
cls,
axis,
func,
f_args,
f_kwargs,
num_splits,
maintain_partitioning,
*partitions,
min_block_size,
lengths=None,
manual_partition=False,
max_retries=None,
):
"""
Deploy a function along a full axis.
Parameters
----------
axis : {0, 1}
The axis to perform the function along.
func : callable
The function to perform.
f_args : list or tuple
Positional arguments to pass to ``func``.
f_kwargs : dict
Keyword arguments to pass to ``func``.
num_splits : int
The number of splits to return (see ``split_result_of_axis_func_pandas``).
maintain_partitioning : bool
If True, keep the old partitioning if possible.
If False, create a new partition layout.
*partitions : iterable
All partitions that make up the full axis (row or column).
min_block_size : int
Minimum number of rows/columns in a single split.
lengths : list, optional
The list of lengths to shuffle the object.
manual_partition : bool, default: False
If True, partition the result with `lengths`.
max_retries : int, default: None
The max number of times to retry the func.
Returns
-------
list
A list of ``ray.ObjectRef``-s.
"""
return _deploy_ray_func.options(
num_returns=(num_splits if lengths is None else len(lengths))
* (1 + cls._PARTITIONS_METADATA_LEN),
**({"max_retries": max_retries} if max_retries is not None else {}),
resources=RayTaskCustomResources.get(),
).remote(
cls._get_deploy_axis_func(),
*f_args,
num_splits,
maintain_partitioning,
*partitions,
axis=axis,
f_to_deploy=func,
f_len_args=len(f_args),
f_kwargs=f_kwargs,
manual_partition=manual_partition,
min_block_size=min_block_size,
lengths=lengths,
return_generator=True,
)
@classmethod
def deploy_func_between_two_axis_partitions(
cls,
axis,
func,
f_args,
f_kwargs,
num_splits,
len_of_left,
other_shape,
*partitions,
min_block_size,
):
"""
Deploy a function along a full axis between two data sets.
Parameters
----------
axis : {0, 1}
The axis to perform the function along.
func : callable
The function to perform.
f_args : list or tuple
Positional arguments to pass to ``func``.
f_kwargs : dict
Keyword arguments to pass to ``func``.
num_splits : int
The number of splits to return (see ``split_result_of_axis_func_pandas``).
len_of_left : int
The number of values in `partitions` that belong to the left data set.
other_shape : np.ndarray
The shape of right frame in terms of partitions, i.e.
(other_shape[i-1], other_shape[i]) will indicate slice to restore i-1 axis partition.
*partitions : iterable
All partitions that make up the full axis (row or column) for both data sets.
min_block_size : int
Minimum number of rows/columns in a single split.
Returns
-------
list
A list of ``ray.ObjectRef``-s.
"""
return _deploy_ray_func.options(
num_returns=num_splits * (1 + cls._PARTITIONS_METADATA_LEN),
resources=RayTaskCustomResources.get(),
).remote(
PandasDataframeAxisPartition.deploy_func_between_two_axis_partitions,
*f_args,
num_splits,
len_of_left,
other_shape,
*partitions,
axis=axis,
f_to_deploy=func,
f_len_args=len(f_args),
f_kwargs=f_kwargs,
min_block_size=min_block_size,
return_generator=True,
)
def wait(self):
"""Wait completing computations on the object wrapped by the partition."""
self.drain_call_queue()
futures = self.list_of_blocks
RayWrapper.wait(futures)
@_inherit_docstrings(PandasOnRayDataframeVirtualPartition)
| PandasOnRayDataframeVirtualPartition |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 7782,
"end": 8189
} | class ____(IntegrationBase, unittest.TestCase):
package = 'tests.pkgs.static_assetspec_nulbyte'
def test_nulbyte_chroot(self):
super_w_null = '..\x00/'
self.testapp.get(f'/{super_w_null}', status=404)
def test_nulbyte_chroot_assetspec_override(self):
super_w_null = '..\x00/'
self.testapp.get(f'/sub/{super_w_null}', status=404)
| TestStaticAppUsingAssetSpecNulByte |
python | getsentry__sentry | tests/sentry/issues/test_status_change_consumer.py | {
"start": 9262,
"end": 15897
} | class ____(IssueOccurrenceTestBase):
@django_db_all
def setUp(self) -> None:
super().setUp()
message = get_test_message(self.project.id)
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence = result[0]
assert occurrence is not None
self.occurrence = occurrence
self.group = Group.objects.get(grouphash__hash=self.occurrence.fingerprint[0])
self.fingerprint = ["touch-id"]
def test_bulk_get_single_project(self) -> None:
groups_by_fingerprint = bulk_get_groups_from_fingerprints(
[(self.project.id, self.occurrence.fingerprint)]
)
assert len(groups_by_fingerprint) == 1
group = groups_by_fingerprint[(self.project.id, tuple(self.occurrence.fingerprint))]
assert group.id == self.group.id
def test_bulk_get_multiple_projects(self) -> None:
# set up second project and occurrence
project2 = self.create_project(organization=self.organization)
message = get_test_message(project2.id, fingerprint="new-fingerprint")
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence2 = result[0]
assert occurrence2 is not None
group2 = Group.objects.get(grouphash__hash=occurrence2.fingerprint[0])
# get groups by fingerprint
groups_by_fingerprint = bulk_get_groups_from_fingerprints(
[
(self.project.id, self.occurrence.fingerprint),
(project2.id, occurrence2.fingerprint),
]
)
assert len(groups_by_fingerprint) == 2
group1 = groups_by_fingerprint[(self.project.id, tuple(self.occurrence.fingerprint))]
assert group1.id == self.group.id
group2 = groups_by_fingerprint[(project2.id, tuple(occurrence2.fingerprint))]
assert group2.id == group2.id
@patch("sentry.issues.status_change_consumer.metrics.incr")
def test_bulk_get_missing_hash(self, mock_metrics_incr: MagicMock) -> None:
# set up second project and occurrence
project2 = self.create_project(organization=self.organization)
message = get_test_message(project2.id, fingerprint="new-fingerprint")
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence2 = result[0]
assert occurrence2 is not None
assert Group.objects.filter(grouphash__hash=occurrence2.fingerprint[0]).exists()
# get groups by fingerprint
groups_by_fingerprint = bulk_get_groups_from_fingerprints(
[
(self.project.id, self.occurrence.fingerprint),
(project2.id, self.occurrence.fingerprint), # this one is missing
]
)
assert len(groups_by_fingerprint) == 1
group = groups_by_fingerprint[(self.project.id, tuple(self.occurrence.fingerprint))]
assert group.id == self.group.id
mock_metrics_incr.assert_called_with("occurrence_ingest.grouphash.not_found", amount=1)
def test_bulk_get_same_fingerprint(self) -> None:
# Set up second project and occurrence with the same
# fingerprint as the occurrence from the first project.
project2 = self.create_project(organization=self.organization)
message = get_test_message(project2.id)
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
occurrence2 = result[0]
assert occurrence2 is not None
group2 = Group.objects.get(grouphash__hash=occurrence2.fingerprint[0], project=project2)
assert occurrence2.fingerprint[0] == self.occurrence.fingerprint[0]
# get groups by fingerprint
groups_by_fingerprint = bulk_get_groups_from_fingerprints(
[
(self.project.id, self.occurrence.fingerprint),
(project2.id, self.occurrence.fingerprint),
]
)
assert len(groups_by_fingerprint) == 2
group1 = groups_by_fingerprint[(self.project.id, tuple(self.occurrence.fingerprint))]
assert group1.id == self.group.id
group2 = groups_by_fingerprint[(project2.id, tuple(self.occurrence.fingerprint))]
assert group2.id == group2.id
assert group1.id != group2.id
def test_bulk_get_single_project_multiple_hash(self) -> None:
message = get_test_message(self.project.id, fingerprint=["new-fingerprint"])
with self.feature("organizations:profile-file-io-main-thread-ingest"):
result = _process_message(message)
assert result is not None
other_occurrence = result[0]
assert other_occurrence is not None
other_group = Group.objects.get(grouphash__hash=other_occurrence.fingerprint[0])
groups_by_fingerprint = bulk_get_groups_from_fingerprints(
[(self.project.id, self.occurrence.fingerprint)]
)
assert groups_by_fingerprint == {
(self.project.id, tuple(self.occurrence.fingerprint)): self.group
}
groups_by_fingerprint = bulk_get_groups_from_fingerprints(
[(self.project.id, other_occurrence.fingerprint)]
)
assert groups_by_fingerprint == {
(self.project.id, tuple(other_occurrence.fingerprint)): other_group
}
groups_by_fingerprint = bulk_get_groups_from_fingerprints(
[
(
self.project.id,
tuple([*self.occurrence.fingerprint, *other_occurrence.fingerprint]),
)
]
)
assert groups_by_fingerprint == {
(
self.project.id,
tuple([*self.occurrence.fingerprint, *other_occurrence.fingerprint]),
): self.group
}
groups_by_fingerprint = bulk_get_groups_from_fingerprints(
[
(
self.project.id,
tuple([*other_occurrence.fingerprint, *self.occurrence.fingerprint]),
)
]
)
assert groups_by_fingerprint == {
(
self.project.id,
tuple([*other_occurrence.fingerprint, *self.occurrence.fingerprint]),
): other_group
}
| StatusChangeBulkGetGroupsFromFingerprintsTest |
python | wandb__wandb | wandb/vendor/pygments/lexers/html.py | {
"start": 12179,
"end": 15766
} | class ____(ExtendedRegexLexer):
"""
For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
.. versionadded:: 1.4
"""
name = 'Scaml'
aliases = ['scaml']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
# Scaml does not yet support the " |\n" notation to
# wrap long lines. Once it does, use the custom faux
# dot instead.
# _dot = r'(?: \|\n(?=.* \|)|.)'
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[\w:-]+', Name.Class, 'tag'),
(r'\#[\w:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
default('plain'),
],
'content': [
include('css'),
(r'%[\w:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[\w:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'\w+', Name.Variable, '#pop'),
(r'@\w+', Name.Variable.Instance, '#pop'),
(r'\$\w+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
| ScamlLexer |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 152211,
"end": 153280
} | class ____(Response):
"""
Response of frames.get_count endpoint.
:param total: Total count of frames for the entire query.
:type total: int
"""
_service = "frames"
_action = "get_count"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"total": {
"description": "Total count of frames for the entire query.",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, total=None, **kwargs):
super(GetCountResponse, self).__init__(**kwargs)
self.total = total
@schema_property("total")
def total(self):
return self._property_total
@total.setter
def total(self, value):
if value is None:
self._property_total = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "total", six.integer_types)
self._property_total = value
| GetCountResponse |
python | python-pillow__Pillow | Tests/test_image_access.py | {
"start": 8452,
"end": 10426
} | class ____:
@pytest.mark.xfail(not (sys.version_info >= (3, 13)), reason="failing test")
@pytest.mark.skipif(not is_win32(), reason="requires Windows")
def test_embeddable(self) -> None:
import ctypes
from setuptools.command import build_ext
compiler = getattr(build_ext, "new_compiler")()
compiler.add_include_dir(sysconfig.get_config_var("INCLUDEPY"))
libdir = sysconfig.get_config_var("LIBDIR") or sysconfig.get_config_var(
"INCLUDEPY"
).replace("include", "libs")
compiler.add_library_dir(libdir)
try:
compiler.initialize()
except Exception:
pytest.skip("Compiler could not be initialized")
with open("embed_pil.c", "w", encoding="utf-8") as fh:
home = sys.prefix.replace("\\", "\\\\")
fh.write(
f"""
#include "Python.h"
int main(int argc, char* argv[])
{{
char *home = "{home}";
wchar_t *whome = Py_DecodeLocale(home, NULL);
Py_SetPythonHome(whome);
Py_InitializeEx(0);
Py_DECREF(PyImport_ImportModule("PIL.Image"));
Py_Finalize();
Py_InitializeEx(0);
Py_DECREF(PyImport_ImportModule("PIL.Image"));
Py_Finalize();
PyMem_RawFree(whome);
return 0;
}}
"""
)
objects = compiler.compile(["embed_pil.c"])
compiler.link_executable(objects, "embed_pil")
env = os.environ.copy()
env["PATH"] = sys.prefix + ";" + env["PATH"]
# Do not display the Windows Error Reporting dialog
getattr(ctypes, "windll").kernel32.SetErrorMode(0x0002)
process = subprocess.Popen(["embed_pil.exe"], env=env)
process.communicate()
assert process.returncode == 0
def teardown_method(self) -> None:
try:
os.remove("embed_pil.c")
except FileNotFoundError:
# If the test was skipped or failed, the file won't exist
pass
| TestEmbeddable |
python | django__django | tests/many_to_one/tests.py | {
"start": 644,
"end": 39008
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
# Create a few Reporters.
cls.r = Reporter(first_name="John", last_name="Smith", email="john@example.com")
cls.r.save()
cls.r2 = Reporter(
first_name="Paul", last_name="Jones", email="paul@example.com"
)
cls.r2.save()
# Create an Article.
cls.a = Article(
headline="This is a test",
pub_date=datetime.date(2005, 7, 27),
reporter=cls.r,
)
cls.a.save()
def test_get(self):
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
self.assertEqual((r.first_name, self.r.last_name), ("John", "Smith"))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(
headline="Third article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=str(self.r.id),
)
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(
headline="John's second story", pub_date=datetime.date(2005, 7, 29)
)
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(
headline="Paul's story", pub_date=datetime.date(2006, 1, 17)
)
msg = (
"<Article: Paul's story> instance isn't saved. Use bulk=False or save the "
"object first."
)
with self.assertRaisesMessage(ValueError, msg):
self.r.article_set.add(new_article2)
self.r.article_set.add(new_article2, bulk=False)
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, new_article2, self.a],
)
# Add the same article to a different article set - check that it
# moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertSequenceEqual(self.r2.article_set.all(), [new_article2])
# Adding an object of the wrong type raises TypeError.
with transaction.atomic():
with self.assertRaisesMessage(
TypeError, "'Article' instance expected, got <Reporter:"
):
self.r.article_set.add(self.r2)
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, self.a],
)
def test_set(self):
new_article = self.r.article_set.create(
headline="John's second story", pub_date=datetime.date(2005, 7, 29)
)
new_article2 = self.r2.article_set.create(
headline="Paul's story", pub_date=datetime.date(2006, 1, 17)
)
# Assign the article to the reporter.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, new_article2, self.a],
)
self.assertSequenceEqual(self.r2.article_set.all(), [])
# Set the article back again.
self.r2.article_set.set([new_article, new_article2])
self.assertSequenceEqual(self.r.article_set.all(), [self.a])
self.assertSequenceEqual(
self.r2.article_set.all(),
[new_article, new_article2],
)
# Funny case - because the ForeignKey cannot be null,
# existing members of the set must remain.
self.r.article_set.set([new_article])
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, self.a],
)
self.assertSequenceEqual(self.r2.article_set.all(), [new_article2])
def test_reverse_assignment_deprecation(self):
msg = (
"Direct assignment to the reverse side of a related set is "
"prohibited. Use article_set.set() instead."
)
with self.assertRaisesMessage(TypeError, msg):
self.r2.article_set = []
def test_assign(self):
new_article = self.r.article_set.create(
headline="John's second story", pub_date=datetime.date(2005, 7, 29)
)
new_article2 = self.r2.article_set.create(
headline="Paul's story", pub_date=datetime.date(2006, 1, 17)
)
# Assign the article to the reporter directly using the descriptor.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, new_article2, self.a],
)
self.assertSequenceEqual(self.r2.article_set.all(), [])
# Set the article back again using set() method.
self.r2.article_set.set([new_article, new_article2])
self.assertSequenceEqual(self.r.article_set.all(), [self.a])
self.assertSequenceEqual(
self.r2.article_set.all(),
[new_article, new_article2],
)
# Because the ForeignKey cannot be null, existing members of the set
# must remain.
self.r.article_set.set([new_article])
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article, self.a],
)
self.assertSequenceEqual(self.r2.article_set.all(), [new_article2])
# Reporter cannot be null - there should not be a clear or remove
# method
self.assertFalse(hasattr(self.r2.article_set, "remove"))
self.assertFalse(hasattr(self.r2.article_set, "clear"))
def test_assign_fk_id_value(self):
parent = Parent.objects.create(name="jeff")
child1 = Child.objects.create(name="frank", parent=parent)
child2 = Child.objects.create(name="randy", parent=parent)
parent.bestchild = child1
parent.save()
parent.bestchild_id = child2.pk
parent.save()
self.assertEqual(parent.bestchild_id, child2.pk)
self.assertFalse(Parent.bestchild.is_cached(parent))
self.assertEqual(parent.bestchild, child2)
self.assertTrue(Parent.bestchild.is_cached(parent))
# Reassigning the same value doesn't clear cached instance.
parent.bestchild_id = child2.pk
self.assertTrue(Parent.bestchild.is_cached(parent))
def test_assign_fk_id_none(self):
parent = Parent.objects.create(name="jeff")
child = Child.objects.create(name="frank", parent=parent)
parent.bestchild = child
parent.save()
parent.bestchild_id = None
parent.save()
self.assertIsNone(parent.bestchild_id)
self.assertFalse(Parent.bestchild.is_cached(parent))
self.assertIsNone(parent.bestchild)
self.assertTrue(Parent.bestchild.is_cached(parent))
def test_selects(self):
new_article1 = self.r.article_set.create(
headline="John's second story",
pub_date=datetime.date(2005, 7, 29),
)
new_article2 = self.r2.article_set.create(
headline="Paul's story",
pub_date=datetime.date(2006, 1, 17),
)
# Reporter objects have access to their related Article objects.
self.assertSequenceEqual(
self.r.article_set.all(),
[new_article1, self.a],
)
self.assertSequenceEqual(
self.r.article_set.filter(headline__startswith="This"), [self.a]
)
self.assertEqual(self.r.article_set.count(), 2)
self.assertEqual(self.r2.article_set.count(), 1)
# Get articles by id
self.assertSequenceEqual(Article.objects.filter(id__exact=self.a.id), [self.a])
self.assertSequenceEqual(Article.objects.filter(pk=self.a.id), [self.a])
# Query on an article property
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="This"), [self.a]
)
# The API automatically follows relationships as far as you need.
# Use double underscores to separate relationships.
# This works as many levels deep as you want. There's no limit.
# Find all Articles for any Reporter whose first name is "John".
self.assertSequenceEqual(
Article.objects.filter(reporter__first_name__exact="John"),
[new_article1, self.a],
)
# Implied __exact also works
self.assertSequenceEqual(
Article.objects.filter(reporter__first_name="John"),
[new_article1, self.a],
)
# Query twice over the related field.
self.assertSequenceEqual(
Article.objects.filter(
reporter__first_name__exact="John", reporter__last_name__exact="Smith"
),
[new_article1, self.a],
)
# The underlying query only makes one join when a related table is
# referenced twice.
queryset = Article.objects.filter(
reporter__first_name__exact="John", reporter__last_name__exact="Smith"
)
self.assertNumQueries(1, list, queryset)
self.assertEqual(
queryset.query.get_compiler(queryset.db).as_sql()[0].count("INNER JOIN"), 1
)
# The automatically joined table has a predictable name.
self.assertSequenceEqual(
Article.objects.filter(reporter__first_name__exact="John").extra(
where=["many_to_one_reporter.last_name='Smith'"]
),
[new_article1, self.a],
)
# ... and should work fine with the string that comes out of
# forms.Form.cleaned_data.
self.assertQuerySetEqual(
(
Article.objects.filter(reporter__first_name__exact="John").extra(
where=["many_to_one_reporter.last_name='%s'" % "Smith"]
)
),
[new_article1, self.a],
)
# Find all Articles for a Reporter.
# Use direct ID check, pk check, and object comparison
self.assertSequenceEqual(
Article.objects.filter(reporter__id__exact=self.r.id),
[new_article1, self.a],
)
self.assertSequenceEqual(
Article.objects.filter(reporter__pk=self.r.id),
[new_article1, self.a],
)
self.assertSequenceEqual(
Article.objects.filter(reporter=self.r.id),
[new_article1, self.a],
)
self.assertSequenceEqual(
Article.objects.filter(reporter=self.r),
[new_article1, self.a],
)
self.assertSequenceEqual(
Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
[new_article1, new_article2, self.a],
)
self.assertSequenceEqual(
Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
[new_article1, new_article2, self.a],
)
# You can also use a queryset instead of a literal list of instances.
# The queryset must be reduced to a list of values using values(),
# then converted into a query
self.assertSequenceEqual(
Article.objects.filter(
reporter__in=Reporter.objects.filter(first_name="John")
.values("pk")
.query
).distinct(),
[new_article1, self.a],
)
def test_reverse_selects(self):
a3 = Article.objects.create(
headline="Third article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
john_smith = [self.r]
# Reporters can be queried
self.assertSequenceEqual(
Reporter.objects.filter(id__exact=self.r.id), john_smith
)
self.assertSequenceEqual(Reporter.objects.filter(pk=self.r.id), john_smith)
self.assertSequenceEqual(
Reporter.objects.filter(first_name__startswith="John"), john_smith
)
# Reporters can query in opposite direction of ForeignKey definition
self.assertSequenceEqual(
Reporter.objects.filter(article__id__exact=self.a.id), john_smith
)
self.assertSequenceEqual(
Reporter.objects.filter(article__pk=self.a.id), john_smith
)
self.assertSequenceEqual(Reporter.objects.filter(article=self.a.id), john_smith)
self.assertSequenceEqual(Reporter.objects.filter(article=self.a), john_smith)
self.assertSequenceEqual(
Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(),
john_smith,
)
self.assertSequenceEqual(
Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(), john_smith
)
self.assertSequenceEqual(
Reporter.objects.filter(article__in=[self.a, a3]).distinct(), john_smith
)
self.assertCountEqual(
Reporter.objects.filter(article__headline__startswith="T"),
[self.r, self.r],
)
self.assertSequenceEqual(
Reporter.objects.filter(article__headline__startswith="T").distinct(),
john_smith,
)
# Counting in the opposite direction works in conjunction with
# distinct()
self.assertEqual(
Reporter.objects.filter(article__headline__startswith="T").count(), 2
)
self.assertEqual(
Reporter.objects.filter(article__headline__startswith="T")
.distinct()
.count(),
1,
)
# Queries can go round in circles.
self.assertCountEqual(
Reporter.objects.filter(article__reporter__first_name__startswith="John"),
[self.r, self.r, self.r],
)
self.assertSequenceEqual(
Reporter.objects.filter(
article__reporter__first_name__startswith="John"
).distinct(),
john_smith,
)
self.assertSequenceEqual(
Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
john_smith,
)
# Implied __exact also works.
self.assertSequenceEqual(
Reporter.objects.filter(article__reporter=self.r).distinct(), john_smith
)
# It's possible to use values() calls across many-to-one relations.
# (Note, too, that we clear the ordering here so as not to drag the
# 'headline' field into the columns being used to determine uniqueness)
d = {"reporter__first_name": "John", "reporter__last_name": "Smith"}
qs = (
Article.objects.filter(
reporter=self.r,
)
.distinct()
.order_by()
.values("reporter__first_name", "reporter__last_name")
)
self.assertEqual([d], list(qs))
def test_select_related(self):
# Article.objects.select_related().dates() works properly when there
# are multiple Articles with the same date but different foreign-key
# objects (Reporters).
r1 = Reporter.objects.create(
first_name="Mike", last_name="Royko", email="royko@suntimes.com"
)
r2 = Reporter.objects.create(
first_name="John", last_name="Kass", email="jkass@tribune.com"
)
Article.objects.create(
headline="First", pub_date=datetime.date(1980, 4, 23), reporter=r1
)
Article.objects.create(
headline="Second", pub_date=datetime.date(1980, 4, 23), reporter=r2
)
self.assertEqual(
list(Article.objects.select_related().dates("pub_date", "day")),
[datetime.date(1980, 4, 23), datetime.date(2005, 7, 27)],
)
self.assertEqual(
list(Article.objects.select_related().dates("pub_date", "month")),
[datetime.date(1980, 4, 1), datetime.date(2005, 7, 1)],
)
self.assertEqual(
list(Article.objects.select_related().dates("pub_date", "year")),
[datetime.date(1980, 1, 1), datetime.date(2005, 1, 1)],
)
def test_delete(self):
new_article1 = self.r.article_set.create(
headline="John's second story",
pub_date=datetime.date(2005, 7, 29),
)
new_article2 = self.r2.article_set.create(
headline="Paul's story",
pub_date=datetime.date(2006, 1, 17),
)
new_article3 = Article.objects.create(
headline="Third article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
new_article4 = Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=str(self.r.id),
)
# If you delete a reporter, their articles will be deleted.
self.assertSequenceEqual(
Article.objects.all(),
[new_article4, new_article1, new_article2, new_article3, self.a],
)
self.assertSequenceEqual(
Reporter.objects.order_by("first_name"),
[self.r, self.r2],
)
self.r2.delete()
self.assertSequenceEqual(
Article.objects.all(),
[new_article4, new_article1, new_article3, self.a],
)
self.assertSequenceEqual(Reporter.objects.order_by("first_name"), [self.r])
# You can delete using a JOIN in the query.
Reporter.objects.filter(article__headline__startswith="This").delete()
self.assertSequenceEqual(Reporter.objects.all(), [])
self.assertSequenceEqual(Article.objects.all(), [])
def test_explicit_fk(self):
# Create a new Article with get_or_create using an explicit value
# for a ForeignKey.
a2, created = Article.objects.get_or_create(
headline="John's second test",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r.id,
)
self.assertTrue(created)
self.assertEqual(a2.reporter.id, self.r.id)
# You can specify filters containing the explicit FK value.
self.assertSequenceEqual(
Article.objects.filter(reporter_id__exact=self.r.id),
[a2, self.a],
)
# Create an Article by Paul for the same date.
a3 = Article.objects.create(
headline="Paul's commentary",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r2.id,
)
self.assertEqual(a3.reporter.id, self.r2.id)
# Get should respect explicit foreign keys as well.
msg = "get() returned more than one Article -- it returned 2!"
with self.assertRaisesMessage(MultipleObjectsReturned, msg):
Article.objects.get(reporter_id=self.r.id)
self.assertEqual(
repr(a3),
repr(
Article.objects.get(
reporter_id=self.r2.id, pub_date=datetime.date(2011, 5, 7)
)
),
)
def test_deepcopy_and_circular_references(self):
# Regression for #12876 -- Model methods that include queries that
# recursive don't cause recursion depth problems under deepcopy.
self.r.cached_query = Article.objects.filter(reporter=self.r)
self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
def test_manager_class_caching(self):
r1 = Reporter.objects.create(first_name="Mike")
r2 = Reporter.objects.create(first_name="John")
# Same twice
self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
# Same as each other
self.assertIs(r1.article_set.__class__, r2.article_set.__class__)
def test_create_relation_with_gettext_lazy(self):
reporter = Reporter.objects.create(
first_name="John", last_name="Smith", email="john.smith@example.com"
)
lazy = gettext_lazy("test")
reporter.article_set.create(headline=lazy, pub_date=datetime.date(2011, 6, 10))
notlazy = str(lazy)
article = reporter.article_set.get()
self.assertEqual(article.headline, notlazy)
def test_values_list_exception(self):
expected_message = (
"Cannot resolve keyword 'notafield' into field. Choices are: %s"
)
reporter_fields = ", ".join(sorted(f.name for f in Reporter._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % reporter_fields):
Article.objects.values_list("reporter__notafield")
article_fields = ", ".join(
["EXTRA"] + sorted(f.name for f in Article._meta.get_fields())
)
with self.assertRaisesMessage(FieldError, expected_message % article_fields):
Article.objects.extra(select={"EXTRA": "EXTRA_SELECT"}).values_list(
"notafield"
)
def test_fk_assignment_and_related_object_cache(self):
# Tests of ForeignKey assignment and the related-object cache (see
# #6886).
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
# Look up the object again so that we get a "fresh" object.
c = Child.objects.get(name="Child")
p = c.parent
# Accessing the related object again returns the exactly same object.
self.assertIs(c.parent, p)
# But if we kill the cache, we get a new object.
del c._state.fields_cache["parent"]
self.assertIsNot(c.parent, p)
# Assigning a new object results in that object getting cached
# immediately.
p2 = Parent.objects.create(name="Parent 2")
c.parent = p2
self.assertIs(c.parent, p2)
# Assigning None succeeds if field is null=True.
p.bestchild = None
self.assertIsNone(p.bestchild)
# bestchild should still be None after saving.
p.save()
self.assertIsNone(p.bestchild)
# bestchild should still be None after fetching the object again.
p = Parent.objects.get(name="Parent")
self.assertIsNone(p.bestchild)
# Assigning None will not fail: Child.parent is null=False.
setattr(c, "parent", None)
# You also can't assign an object of the wrong type here
msg = (
'Cannot assign "<First: First object (1)>": "Child.parent" must '
'be a "Parent" instance.'
)
with self.assertRaisesMessage(ValueError, msg):
setattr(c, "parent", First(id=1, second=1))
# You can assign None to Child.parent during object creation.
Child(name="xyzzy", parent=None)
# But when trying to save a Child with parent=None, the database will
# raise IntegrityError.
with self.assertRaises(IntegrityError), transaction.atomic():
Child.objects.create(name="xyzzy", parent=None)
# Creation using keyword argument should cache the related object.
p = Parent.objects.get(name="Parent")
c = Child(parent=p)
self.assertIs(c.parent, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Parent()
msg = (
"save() prohibited to prevent data loss due to unsaved related object "
"'parent'."
)
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(parent=p)
with self.assertRaisesMessage(ValueError, msg):
ToFieldChild.objects.create(parent=p)
# Creation using attname keyword argument and an id will cause the
# related object to be fetched.
p = Parent.objects.get(name="Parent")
c = Child(parent_id=p.id)
self.assertIsNot(c.parent, p)
self.assertEqual(c.parent, p)
def test_save_parent_after_assign(self):
category = Category(name="cats")
record = Record(category=category)
category.save()
record.save()
category.name = "dogs"
with self.assertNumQueries(0):
self.assertEqual(category.id, record.category_id)
self.assertEqual(category.name, record.category.name)
def test_save_nullable_fk_after_parent(self):
parent = Parent()
child = ChildNullableParent(parent=parent)
parent.save()
child.save()
child.refresh_from_db()
self.assertEqual(child.parent, parent)
def test_save_nullable_fk_after_parent_with_to_field(self):
parent = Parent(name="jeff")
child = ToFieldChild(parent=parent)
parent.save()
child.save()
child.refresh_from_db()
self.assertEqual(child.parent, parent)
self.assertEqual(child.parent_id, parent.name)
def test_save_fk_after_parent_with_non_numeric_pk_set_on_child(self):
parent = ParentStringPrimaryKey()
child = ChildStringPrimaryKeyParent(parent=parent)
child.parent.name = "jeff"
parent.save()
child.save()
child.refresh_from_db()
self.assertEqual(child.parent, parent)
self.assertEqual(child.parent_id, parent.name)
def test_fk_to_bigautofield(self):
ch = City.objects.create(name="Chicago")
District.objects.create(city=ch, name="Far South")
District.objects.create(city=ch, name="North")
ny = City.objects.create(name="New York", id=2**33)
District.objects.create(city=ny, name="Brooklyn")
District.objects.create(city=ny, name="Manhattan")
def test_fk_to_smallautofield(self):
us = Country.objects.create(name="United States")
City.objects.create(country=us, name="Chicago")
City.objects.create(country=us, name="New York")
uk = Country.objects.create(name="United Kingdom", id=2**11)
City.objects.create(country=uk, name="London")
City.objects.create(country=uk, name="Edinburgh")
def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name="First")
c2 = Category.objects.create(name="Second")
c3 = Category.objects.create(name="Third")
r1 = Record.objects.create(category=c1)
r2 = Record.objects.create(category=c1)
r3 = Record.objects.create(category=c2)
r4 = Record.objects.create(category=c2)
r5 = Record.objects.create(category=c3)
Relation.objects.create(left=r1, right=r2)
Relation.objects.create(left=r3, right=r4)
rel = Relation.objects.create(left=r1, right=r3)
Relation.objects.create(left=r5, right=r2)
Relation.objects.create(left=r3, right=r2)
q1 = Relation.objects.filter(
left__category__name__in=["First"], right__category__name__in=["Second"]
)
self.assertSequenceEqual(q1, [rel])
q2 = Category.objects.filter(
record__left_set__right__category__name="Second"
).order_by("name")
self.assertSequenceEqual(q2, [c1, c2])
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
msg = 'Cannot assign "%r": "Child.parent" must be a "Parent" instance.' % c
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(name="Grandchild", parent=c)
def test_fk_instantiation_outside_model(self):
# Regression for #12190 -- Should be able to instantiate a FK outside
# of a model, and interrogate its related field.
cat = models.ForeignKey(Category, models.CASCADE)
self.assertEqual("id", cat.remote_field.get_related_field().name)
def test_relation_unsaved(self):
Third.objects.create(name="Third 1")
Third.objects.create(name="Third 2")
th = Third(name="testing")
# The object isn't saved and the relation cannot be used.
msg = (
"'Third' instance needs to have a primary key value before this "
"relationship can be used."
)
with self.assertRaisesMessage(ValueError, msg):
th.child_set.count()
# The reverse foreign key manager can be created.
self.assertEqual(th.child_set.model, Third)
th.save()
# Now the model is saved, so we will need to execute a query.
with self.assertNumQueries(1):
self.assertEqual(th.child_set.count(), 0)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_student = Student.objects.create(school=public_school)
private_school = School.objects.create(is_public=False)
private_student = Student.objects.create(school=private_school)
# Only one school is available via all() due to the custom default
# manager.
self.assertSequenceEqual(School.objects.all(), [public_school])
self.assertEqual(public_student.school, public_school)
# Make sure the base manager is used so that a student can still access
# its related school even if the default manager doesn't normally
# allow it.
self.assertEqual(private_student.school, private_school)
School._meta.base_manager_name = "objects"
School._meta._expire_cache()
try:
private_student = Student.objects.get(pk=private_student.pk)
with self.assertRaises(School.DoesNotExist):
private_student.school
finally:
School._meta.base_manager_name = None
School._meta._expire_cache()
def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(Article(), "reporter"))
def test_create_after_prefetch(self):
c = City.objects.create(name="Musical City")
d1 = District.objects.create(name="Ladida", city=c)
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertSequenceEqual(city.districts.all(), [d1])
d2 = city.districts.create(name="Goa")
self.assertSequenceEqual(city.districts.all(), [d1, d2])
def test_clear_after_prefetch(self):
c = City.objects.create(name="Musical City")
d = District.objects.create(name="Ladida", city=c)
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertSequenceEqual(city.districts.all(), [d])
city.districts.clear()
self.assertSequenceEqual(city.districts.all(), [])
def test_remove_after_prefetch(self):
c = City.objects.create(name="Musical City")
d = District.objects.create(name="Ladida", city=c)
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertSequenceEqual(city.districts.all(), [d])
city.districts.remove(d)
self.assertSequenceEqual(city.districts.all(), [])
def test_add_after_prefetch(self):
c = City.objects.create(name="Musical City")
District.objects.create(name="Ladida", city=c)
d2 = District.objects.create(name="Ladidu")
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.add(d2)
self.assertEqual(city.districts.count(), 2)
def test_set_after_prefetch(self):
c = City.objects.create(name="Musical City")
District.objects.create(name="Ladida", city=c)
d2 = District.objects.create(name="Ladidu")
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.set([d2])
self.assertSequenceEqual(city.districts.all(), [d2])
def test_add_then_remove_after_prefetch(self):
c = City.objects.create(name="Musical City")
District.objects.create(name="Ladida", city=c)
d2 = District.objects.create(name="Ladidu")
city = City.objects.prefetch_related("districts").get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.add(d2)
self.assertEqual(city.districts.count(), 2)
city.districts.remove(d2)
self.assertEqual(city.districts.count(), 1)
def test_cached_relation_invalidated_on_save(self):
"""
Model.save() invalidates stale ForeignKey relations after a primary key
assignment.
"""
self.assertEqual(self.a.reporter, self.r) # caches a.reporter
self.a.reporter_id = self.r2.pk
self.a.save()
self.assertEqual(self.a.reporter, self.r2)
def test_cached_foreign_key_with_to_field_not_cleared_by_save(self):
parent = Parent.objects.create(name="a")
child = ToFieldChild.objects.create(parent=parent)
with self.assertNumQueries(0):
self.assertIs(child.parent, parent)
def test_reverse_foreign_key_instance_to_field_caching(self):
parent = Parent.objects.create(name="a")
ToFieldChild.objects.create(parent=parent)
child = parent.to_field_children.get()
with self.assertNumQueries(0):
self.assertIs(child.parent, parent)
def test_add_remove_set_by_pk_raises(self):
usa = Country.objects.create(name="United States")
chicago = City.objects.create(name="Chicago")
msg = "'City' instance expected, got %s" % chicago.pk
with self.assertRaisesMessage(TypeError, msg):
usa.cities.add(chicago.pk)
with self.assertRaisesMessage(TypeError, msg):
usa.cities.remove(chicago.pk)
with self.assertRaisesMessage(TypeError, msg):
usa.cities.set([chicago.pk])
def test_get_prefetch_querysets_invalid_querysets_length(self):
City.objects.create(name="Chicago")
cities = City.objects.all()
msg = (
"querysets argument of get_prefetch_querysets() should have a length of 1."
)
with self.assertRaisesMessage(ValueError, msg):
City.country.get_prefetch_querysets(
instances=cities,
querysets=[Country.objects.all(), Country.objects.all()],
)
def test_get_prefetch_querysets_reverse_invalid_querysets_length(self):
usa = Country.objects.create(name="United States")
City.objects.create(name="Chicago")
countries = Country.objects.all()
msg = (
"querysets argument of get_prefetch_querysets() should have a length of 1."
)
with self.assertRaisesMessage(ValueError, msg):
usa.cities.get_prefetch_querysets(
instances=countries,
querysets=[City.objects.all(), City.objects.all()],
)
def test_fetch_mode_fetch_peers_forward(self):
Article.objects.create(
headline="This is another test",
pub_date=datetime.date(2005, 7, 27),
reporter=self.r2,
)
a1, a2 = Article.objects.fetch_mode(FETCH_PEERS)
with self.assertNumQueries(1):
a1.reporter
with self.assertNumQueries(0):
a2.reporter
def test_fetch_mode_raise_forward(self):
a = Article.objects.fetch_mode(RAISE).get(pk=self.a.pk)
msg = "Fetching of Article.reporter blocked."
with self.assertRaisesMessage(FieldFetchBlocked, msg) as cm:
a.reporter
self.assertIsNone(cm.exception.__cause__)
self.assertTrue(cm.exception.__suppress_context__)
def test_fetch_mode_copied_forward_fetching_one(self):
a1 = Article.objects.fetch_mode(FETCH_PEERS).get()
self.assertEqual(a1._state.fetch_mode, FETCH_PEERS)
self.assertEqual(
a1.reporter._state.fetch_mode,
FETCH_PEERS,
)
def test_fetch_mode_copied_forward_fetching_many(self):
Article.objects.create(
headline="This is another test",
pub_date=datetime.date(2005, 7, 27),
reporter=self.r2,
)
a1, a2 = Article.objects.fetch_mode(FETCH_PEERS)
self.assertEqual(a1._state.fetch_mode, FETCH_PEERS)
self.assertEqual(
a1.reporter._state.fetch_mode,
FETCH_PEERS,
)
def test_fetch_mode_copied_reverse_fetching_one(self):
r1 = Reporter.objects.fetch_mode(FETCH_PEERS).get(pk=self.r.pk)
self.assertEqual(r1._state.fetch_mode, FETCH_PEERS)
article = r1.article_set.get()
self.assertEqual(
article._state.fetch_mode,
FETCH_PEERS,
)
def test_fetch_mode_copied_reverse_fetching_many(self):
Article.objects.create(
headline="This is another test",
pub_date=datetime.date(2005, 7, 27),
reporter=self.r2,
)
r1, r2 = Reporter.objects.fetch_mode(FETCH_PEERS)
self.assertEqual(r1._state.fetch_mode, FETCH_PEERS)
a1 = r1.article_set.get()
self.assertEqual(
a1._state.fetch_mode,
FETCH_PEERS,
)
a2 = r2.article_set.get()
self.assertEqual(
a2._state.fetch_mode,
FETCH_PEERS,
)
| ManyToOneTests |
python | wandb__wandb | wandb/sdk/artifacts/artifact_file_cache.py | {
"start": 527,
"end": 1169
} | class ____(Protocol):
def __call__(self, mode: str = ...) -> ContextManager[IO]: ...
def artifacts_cache_dir() -> Path:
"""Get the artifacts cache directory."""
return env.get_cache_dir() / "artifacts"
def _get_sys_umask_threadsafe() -> int:
# Workaround to get the current system umask, since
# - `os.umask()` isn't thread-safe
# - we don't want to inadvertently change the umask of the current process
# See: https://stackoverflow.com/questions/53227072/reading-umask-thread-safe
umask_cmd = (sys.executable, "-c", "import os; print(os.umask(22))")
return int(subprocess.check_output(umask_cmd))
| Opener |
python | getsentry__sentry | src/sentry/search/events/builder/errors.py | {
"start": 5386,
"end": 6254
} | class ____(ErrorsQueryBuilderMixin, TimeseriesQueryBuilder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def time_column(self) -> SelectType:
return Column("time", entity=Entity(self.dataset.value, alias=self.dataset.value))
def get_snql_query(self) -> Request:
return Request(
dataset=self.dataset.value,
app_id="errors",
query=Query(
match=self.match,
select=self.select,
where=self.where,
having=self.having,
groupby=self.groupby,
orderby=[OrderBy(self.time_column, Direction.ASC)],
granularity=self.granularity,
limit=self.limit,
),
tenant_ids=self.tenant_ids,
)
| ErrorsTimeseriesQueryBuilder |
python | scrapy__scrapy | tests/CrawlerProcess/caching_hostname_resolver_ipv6.py | {
"start": 58,
"end": 548
} | class ____(scrapy.Spider):
"""
Finishes without a twisted.internet.error.DNSLookupError exception
"""
name = "caching_hostname_resolver_spider"
start_urls = ["http://[::1]"]
if __name__ == "__main__":
process = CrawlerProcess(
settings={
"RETRY_ENABLED": False,
"DNS_RESOLVER": "scrapy.resolver.CachingHostnameResolver",
}
)
process.crawl(CachingHostnameResolverSpider)
process.start()
| CachingHostnameResolverSpider |
python | openai__openai-python | src/openai/types/evals/run_cancel_response.py | {
"start": 12623,
"end": 12962
} | class ____(BaseModel):
errored: int
"""Number of output items that resulted in an error."""
failed: int
"""Number of output items that failed to pass the evaluation."""
passed: int
"""Number of output items that passed the evaluation."""
total: int
"""Total number of executed output items."""
| ResultCounts |
python | chroma-core__chroma | chromadb/segment/impl/vector/hnsw_params.py | {
"start": 1533,
"end": 2485
} | class ____(Params):
space: str
construction_ef: int
search_ef: int
M: int
num_threads: int
resize_factor: float
def __init__(self, metadata: Metadata):
metadata = metadata or {}
self.space = str(metadata.get("hnsw:space", "l2"))
self.construction_ef = int(metadata.get("hnsw:construction_ef", 100))
self.search_ef = int(metadata.get("hnsw:search_ef", 100))
self.M = int(metadata.get("hnsw:M", 16))
self.num_threads = int(
metadata.get("hnsw:num_threads", multiprocessing.cpu_count())
)
self.resize_factor = float(metadata.get("hnsw:resize_factor", 1.2))
@staticmethod
def extract(metadata: Metadata) -> Metadata:
"""Validate and return only the relevant hnsw params"""
segment_metadata = HnswParams._select(metadata)
HnswParams._validate(segment_metadata, param_validators)
return segment_metadata
| HnswParams |
python | catalyst-team__catalyst | catalyst/callbacks/sklearn_model.py | {
"start": 282,
"end": 5294
} | class ____(Callback):
"""Callback to train a classifier on the train loader and
to give predictions on the valid loader.
Args:
feature_key: keys of tensors that should be used as features
for the classifier fit
target_key: keys of tensors that should be used as targets
for the classifier fit
train_loader: train loader name
valid_loaders: valid loaders where model should be predicted
model_fn: fabric to produce objects with .fit and predict method
predict_method: predict method name for the classifier
predict_key: key to store computed classifier predicts in ``runner.batch``
model_kwargs: additional parameters for ``model_fn``
.. note::
catalyst[ml] required for this callback
"""
def __init__(
self,
feature_key: str,
target_key: Union[str, None],
train_loader: str,
valid_loaders: Union[str, List[str]],
model_fn: Union[Callable, str],
predict_method: str = "predict",
predict_key: str = "sklearn_predict",
**model_kwargs,
) -> None:
super().__init__(order=CallbackOrder.Metric)
if isinstance(model_fn, str):
model_fn = REGISTRY.get(model_fn)
assert hasattr(
model_fn(), predict_method
), "The classifier must have the predict method!"
self._train_loader = train_loader
if isinstance(valid_loaders, str):
self._valid_loaders = [valid_loaders]
else:
self._valid_loaders = valid_loaders
self.model_fabric_fn = partial(model_fn, **model_kwargs)
self.feature_key = feature_key
self.target_key = target_key
self.predict_method = predict_method
self.predict_key = predict_key
self.model = None
if self.target_key:
self.storage = AccumulativeMetric(keys=[feature_key, target_key])
if self.target_key is None:
self.storage = AccumulativeMetric(keys=[feature_key])
def on_loader_start(self, runner: "IRunner") -> None:
"""
Loader start hook: initiliaze storages for the loaders.
Args:
runner: current runner
"""
super().on_loader_start(runner)
if runner.loader_key == self._train_loader:
self.storage.reset(
num_samples=runner.loader_sample_len, num_batches=runner.loader_batch_len
)
if runner.loader_key in self._valid_loaders:
assert self.model is not None, "The train loader has to be processed first!"
def on_batch_end(self, runner: "IRunner") -> None:
"""On batch end action: get data from runner's batch
and update a loader storage with it
Args:
runner: runner for the experiment.
"""
assert (
torch.isnan(runner.batch[self.feature_key]).sum() == 0
), "SklearnModelCallback can't process Tensors with NaN!"
if runner.loader_key == self._train_loader:
self.storage.update(**runner.batch)
if runner.loader_key in self._valid_loaders:
features = runner.batch[self.feature_key].detach().cpu().numpy()
# classifier predict
classifier_predict = getattr(self.model, self.predict_method)
predictions = classifier_predict(features)
runner.batch[self.predict_key] = torch.tensor(
predictions, device=runner.engine.device
)
def on_loader_end(self, runner: "IRunner") -> None:
"""Loader end hook: for the train loader train classifier,
for the test check the quality.
Args:
runner: current runner
"""
if runner.loader_key == self._train_loader:
data = self.storage.compute_key_value()
collected_size = self.storage.collected_samples
loader_len = runner.loader_sample_len
assert (
collected_size == loader_len
), f"collected samples - {collected_size} != loader len - {loader_len}!"
assert (
torch.isnan(data[self.feature_key]).sum() == 0
), "SklearnModelCallback - NaN after Accumulation!"
self.model = self.model_fabric_fn()
if self.target_key is None:
features = data[self.feature_key].detach().cpu().numpy()
self.model.fit(features)
else:
features = data[self.feature_key].detach().cpu().numpy()
targets = data[self.target_key].detach().cpu().numpy()
self.model.fit(features, targets)
def on_epoch_end(self, runner: "IRunner") -> None:
"""
Epoch end hook: the callback delete the model.
Args:
runner: current runner
"""
# We need this for the control of a loader order.
self.model = None
__all__ = ["SklearnModelCallback"]
| SklearnModelCallback |
python | numpy__numpy | numpy/distutils/tests/test_misc_util.py | {
"start": 1459,
"end": 2060
} | class ____:
def test_1(self):
n = lambda path: path.replace('/', sep)
assert_equal(minrelpath(n('aa/bb')), n('aa/bb'))
assert_equal(minrelpath('..'), '..')
assert_equal(minrelpath(n('aa/..')), '')
assert_equal(minrelpath(n('aa/../bb')), 'bb')
assert_equal(minrelpath(n('aa/bb/..')), 'aa')
assert_equal(minrelpath(n('aa/bb/../..')), '')
assert_equal(minrelpath(n('aa/bb/../cc/../dd')), n('aa/dd'))
assert_equal(minrelpath(n('.././..')), n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
| TestMinrelpath |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/transport/tcp/producer.py | {
"start": 938,
"end": 3330
} | class ____(LanguageServerClient):
"""Implementation of a v3.0 compilant language server TCP client."""
MAX_TIMEOUT_TIME = 20000
def __init__(self, host='127.0.0.1', port=2087, zmq_in_port=7000,
zmq_out_port=7001):
LanguageServerClient.__init__(self, zmq_in_port, zmq_out_port)
self.req_status = {}
self.host = host
self.port = port
self.socket = None
# self.request_seq = 1
logger.info('Connecting to language server at {0}:{1}'.format(
self.host, self.port))
super().finalize_initialization()
self.socket.setblocking(True)
self.reading_thread = TCPIncomingMessageThread()
self.reading_thread.initialize(self.socket, self.zmq_out_socket,
self.req_status)
def start(self):
self.reading_thread.start()
logger.info('Ready to receive/attend requests and responses!')
def stop(self):
logger.info('Closing TCP socket...')
self.socket.close()
logger.info('Closing consumer thread...')
self.reading_thread.stop()
logger.debug('Exit routine should be complete')
def transport_send(self, content_length, body):
logger.debug('Sending message via TCP')
try:
self.socket.send(content_length)
self.socket.send(body)
except (BrokenPipeError, ConnectionError) as e:
# This avoids a total freeze at startup
# when we're trying to connect to a TCP
# socket that rejects our connection
logger.error(e)
def is_server_alive(self):
connected = False
initial_time = time.time()
connection_error = None
while not connected:
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.host, int(self.port)))
connected = True
except Exception as e:
connection_error = e
if time.time() - initial_time > self.MAX_TIMEOUT_TIME:
break
if self.socket.getsockname() == self.socket.getpeername():
connection_error = Exception("Failed to connect to server: Self-connected socket")
connected = False
return connected, connection_error, None
| TCPLanguageServerClient |
python | pytorch__pytorch | test/jit/test_save_load_for_op_version.py | {
"start": 579,
"end": 23550
} | class ____(JitTestCase):
# Helper that returns the module after saving and loading
def _save_load_module(self, m):
scripted_module = torch.jit.script(m())
buffer = io.BytesIO()
torch.jit.save(scripted_module, buffer)
buffer.seek(0)
return torch.jit.load(buffer)
def _save_load_mobile_module(self, m):
scripted_module = torch.jit.script(m())
buffer = io.BytesIO(scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
return _load_for_lite_interpreter(buffer)
# Helper which returns the result of a function or the exception the
# function threw.
def _try_fn(self, fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
return e
def _verify_no(self, kind, m):
self._verify_count(kind, m, 0)
def _verify_count(self, kind, m, count):
node_count = sum(str(n).count(kind) for n in m.graph.nodes())
self.assertEqual(node_count, count)
"""
Tests that verify Torchscript remaps aten::div(_) from versions 0-3
to call either aten::true_divide(_), if an input is a float type,
or truncated aten::divide(_) otherwise.
NOTE: currently compares against current div behavior, too, since
div behavior has not yet been updated.
"""
@settings(
max_examples=10, deadline=200000
) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(
st.integers(min_value=5, max_value=199),
st.floats(min_value=5.0, max_value=199.0),
)
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor(self, sample_input):
def historic_div(self, other):
if self.is_floating_point() or other.is_floating_point():
return self.true_divide(other)
return self.divide(other, rounding_mode="trunc")
# Tensor x Tensor
class MyModule(torch.nn.Module):
def forward(self, a, b):
result_0 = a / b
result_1 = torch.div(a, b)
result_2 = a.div(b)
return result_0, result_1, result_2
# Loads historic module
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir
+ "/cpp/jit/upgrader_models/test_versioned_div_tensor_v2.ptl"
)
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = torch.tensor((val_b,))
def _helper(m, fn):
m_results = self._try_fn(m, a, b)
fn_result = self._try_fn(fn, a, b)
if isinstance(m_results, Exception):
self.assertTrue(isinstance(fn_result, Exception))
else:
for result in m_results:
self.assertEqual(result, fn_result)
_helper(v3_mobile_module, historic_div)
_helper(current_mobile_module, torch.div)
@settings(
max_examples=10, deadline=200000
) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(
st.integers(min_value=5, max_value=199),
st.floats(min_value=5.0, max_value=199.0),
)
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor_inplace(self, sample_input):
def historic_div_(self, other):
if self.is_floating_point() or other.is_floating_point():
return self.true_divide_(other)
return self.divide_(other, rounding_mode="trunc")
class MyModule(torch.nn.Module):
def forward(self, a, b):
a /= b
return a
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir
+ "/cpp/jit/upgrader_models/test_versioned_div_tensor_inplace_v2.ptl"
)
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = torch.tensor((val_b,))
def _helper(m, fn):
fn_result = self._try_fn(fn, a.clone(), b)
m_result = self._try_fn(m, a, b)
if isinstance(m_result, Exception):
self.assertTrue(fn_result, Exception)
else:
self.assertEqual(m_result, fn_result)
self.assertEqual(m_result, a)
_helper(v3_mobile_module, historic_div_)
# Recreates a since it was modified in place
a = torch.tensor((val_a,))
_helper(current_mobile_module, torch.Tensor.div_)
@settings(
max_examples=10, deadline=200000
) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(
st.integers(min_value=5, max_value=199),
st.floats(min_value=5.0, max_value=199.0),
)
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor_out(self, sample_input):
def historic_div_out(self, other, out):
if (
self.is_floating_point()
or other.is_floating_point()
or out.is_floating_point()
):
return torch.true_divide(self, other, out=out)
return torch.divide(self, other, out=out, rounding_mode="trunc")
class MyModule(torch.nn.Module):
def forward(self, a, b, out):
return a.div(b, out=out)
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir
+ "/cpp/jit/upgrader_models/test_versioned_div_tensor_out_v2.ptl"
)
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = torch.tensor((val_b,))
for out in (torch.empty((1,)), torch.empty((1,), dtype=torch.long)):
def _helper(m, fn):
fn_result = None
if fn is torch.div:
fn_result = self._try_fn(fn, a, b, out=out.clone())
else:
fn_result = self._try_fn(fn, a, b, out.clone())
m_result = self._try_fn(m, a, b, out)
if isinstance(m_result, Exception):
self.assertTrue(fn_result, Exception)
else:
self.assertEqual(m_result, fn_result)
self.assertEqual(m_result, out)
_helper(v3_mobile_module, historic_div_out)
_helper(current_mobile_module, torch.div)
@settings(
max_examples=10, deadline=200000
) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(
st.integers(min_value=5, max_value=199),
st.floats(min_value=5.0, max_value=199.0),
)
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar(self, sample_input):
def historic_div_scalar_float(self, other: float):
return torch.true_divide(self, other)
def historic_div_scalar_int(self, other: int):
if self.is_floating_point():
return torch.true_divide(self, other)
return torch.divide(self, other, rounding_mode="trunc")
class MyModuleFloat(torch.nn.Module):
def forward(self, a, b: float):
return a / b
class MyModuleInt(torch.nn.Module):
def forward(self, a, b: int):
return a / b
try:
v3_mobile_module_float = _load_for_lite_interpreter(
pytorch_test_dir
+ "/jit/fixtures/test_versioned_div_scalar_float_v2.ptl"
)
v3_mobile_module_int = _load_for_lite_interpreter(
pytorch_test_dir
+ "/cpp/jit/upgrader_models/test_versioned_div_scalar_int_v2.ptl"
)
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module_float = self._save_load_mobile_module(MyModuleFloat)
current_mobile_module_int = self._save_load_mobile_module(MyModuleInt)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = val_b
def _helper(m, fn):
m_result = self._try_fn(m, a, b)
fn_result = self._try_fn(fn, a, b)
if isinstance(m_result, Exception):
self.assertTrue(fn_result, Exception)
else:
self.assertEqual(m_result, fn_result)
if isinstance(b, float):
_helper(v3_mobile_module_float, current_mobile_module_float)
_helper(current_mobile_module_float, torch.div)
else:
_helper(v3_mobile_module_int, historic_div_scalar_int)
_helper(current_mobile_module_int, torch.div)
@settings(
max_examples=10, deadline=200000
) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(
st.integers(min_value=5, max_value=199),
st.floats(min_value=5.0, max_value=199.0),
)
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar_reciprocal(self, sample_input):
def historic_div_scalar_float_reciprocal(self, other: float):
return other / self
def historic_div_scalar_int_reciprocal(self, other: int):
if self.is_floating_point():
return other / self
return torch.divide(other, self, rounding_mode="trunc")
class MyModuleFloat(torch.nn.Module):
def forward(self, a, b: float):
return b / a
class MyModuleInt(torch.nn.Module):
def forward(self, a, b: int):
return b / a
try:
v3_mobile_module_float = _load_for_lite_interpreter(
pytorch_test_dir
+ "/cpp/jit/upgrader_models/test_versioned_div_scalar_reciprocal_float_v2.ptl"
)
v3_mobile_module_int = _load_for_lite_interpreter(
pytorch_test_dir
+ "/cpp/jit/upgrader_models/test_versioned_div_scalar_reciprocal_int_v2.ptl"
)
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module_float = self._save_load_mobile_module(MyModuleFloat)
current_mobile_module_int = self._save_load_mobile_module(MyModuleInt)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = val_b
def _helper(m, fn):
m_result = self._try_fn(m, a, b)
fn_result = None
# Reverses argument order for torch.div
if fn is torch.div:
fn_result = self._try_fn(torch.div, b, a)
else:
fn_result = self._try_fn(fn, a, b)
if isinstance(m_result, Exception):
self.assertTrue(isinstance(fn_result, Exception))
elif fn is torch.div or a.is_floating_point():
self.assertEqual(m_result, fn_result)
else:
# Skip when fn is not torch.div and a is integral because
# historic_div_scalar_int performs floored division
pass
if isinstance(b, float):
_helper(v3_mobile_module_float, current_mobile_module_float)
_helper(current_mobile_module_float, torch.div)
else:
_helper(v3_mobile_module_int, current_mobile_module_int)
_helper(current_mobile_module_int, torch.div)
@settings(
max_examples=10, deadline=200000
) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(
st.integers(min_value=5, max_value=199),
st.floats(min_value=5.0, max_value=199.0),
)
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar_inplace(self, sample_input):
def historic_div_scalar_float_inplace(self, other: float):
return self.true_divide_(other)
def historic_div_scalar_int_inplace(self, other: int):
if self.is_floating_point():
return self.true_divide_(other)
return self.divide_(other, rounding_mode="trunc")
class MyModuleFloat(torch.nn.Module):
def forward(self, a, b: float):
a /= b
return a
class MyModuleInt(torch.nn.Module):
def forward(self, a, b: int):
a /= b
return a
try:
v3_mobile_module_float = _load_for_lite_interpreter(
pytorch_test_dir
+ "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_float_v2.ptl"
)
v3_mobile_module_int = _load_for_lite_interpreter(
pytorch_test_dir
+ "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_int_v2.ptl"
)
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module_float = self._save_load_module(MyModuleFloat)
current_mobile_module_int = self._save_load_module(MyModuleInt)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = val_b
def _helper(m, fn):
m_result = self._try_fn(m, a, b)
fn_result = self._try_fn(fn, a, b)
if isinstance(m_result, Exception):
self.assertTrue(fn_result, Exception)
else:
self.assertEqual(m_result, fn_result)
if isinstance(b, float):
_helper(current_mobile_module_float, torch.Tensor.div_)
else:
_helper(current_mobile_module_int, torch.Tensor.div_)
# NOTE: Scalar division was already true division in op version 3,
# so this test verifies the behavior is unchanged.
def test_versioned_div_scalar_scalar(self):
class MyModule(torch.nn.Module):
def forward(self, a: float, b: int, c: float, d: int):
result_0 = a / b
result_1 = a / c
result_2 = b / c
result_3 = b / d
return (result_0, result_1, result_2, result_3)
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir
+ "/cpp/jit/upgrader_models/test_versioned_div_scalar_scalar_v2.ptl"
)
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
def _helper(m, fn):
vals = (5.0, 3, 2.0, 7)
m_result = m(*vals)
fn_result = fn(*vals)
for mr, hr in zip(m_result, fn_result):
self.assertEqual(mr, hr)
_helper(v3_mobile_module, current_mobile_module)
def test_versioned_linspace(self):
class Module(torch.nn.Module):
def forward(
self, a: Union[int, float, complex], b: Union[int, float, complex]
):
c = torch.linspace(a, b, steps=5)
d = torch.linspace(a, b, steps=100)
return c, d
scripted_module = torch.jit.load(
pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_v7.ptl"
)
buffer = io.BytesIO(scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
v7_mobile_module = _load_for_lite_interpreter(buffer)
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
for a, b in sample_inputs:
(output_with_step, output_without_step) = v7_mobile_module(a, b)
(current_with_step, current_without_step) = current_mobile_module(a, b)
# when no step is given, should have used 100
self.assertTrue(output_without_step.size(dim=0) == 100)
self.assertTrue(output_with_step.size(dim=0) == 5)
# outputs should be equal to the newest version
self.assertEqual(output_with_step, current_with_step)
self.assertEqual(output_without_step, current_without_step)
def test_versioned_linspace_out(self):
class Module(torch.nn.Module):
def forward(
self,
a: Union[int, float, complex],
b: Union[int, float, complex],
out: torch.Tensor,
):
return torch.linspace(a, b, steps=100, out=out)
model_path = (
pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_out_v7.ptl"
)
loaded_model = torch.jit.load(model_path)
buffer = io.BytesIO(loaded_model._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
v7_mobile_module = _load_for_lite_interpreter(buffer)
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = (
(
3,
10,
torch.empty((100,), dtype=torch.int64),
torch.empty((100,), dtype=torch.int64),
),
(
-10,
10,
torch.empty((100,), dtype=torch.int64),
torch.empty((100,), dtype=torch.int64),
),
(
4.0,
6.0,
torch.empty((100,), dtype=torch.float64),
torch.empty((100,), dtype=torch.float64),
),
(
3 + 4j,
4 + 5j,
torch.empty((100,), dtype=torch.complex64),
torch.empty((100,), dtype=torch.complex64),
),
)
for start, end, out_for_old, out_for_new in sample_inputs:
output = v7_mobile_module(start, end, out_for_old)
output_current = current_mobile_module(start, end, out_for_new)
# when no step is given, should have used 100
self.assertTrue(output.size(dim=0) == 100)
# "Upgraded" model should match the new version output
self.assertEqual(output, output_current)
def test_versioned_logspace(self):
class Module(torch.nn.Module):
def forward(
self, a: Union[int, float, complex], b: Union[int, float, complex]
):
c = torch.logspace(a, b, steps=5)
d = torch.logspace(a, b, steps=100)
return c, d
scripted_module = torch.jit.load(
pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_v8.ptl"
)
buffer = io.BytesIO(scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
v8_mobile_module = _load_for_lite_interpreter(buffer)
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
for a, b in sample_inputs:
(output_with_step, output_without_step) = v8_mobile_module(a, b)
(current_with_step, current_without_step) = current_mobile_module(a, b)
# when no step is given, should have used 100
self.assertTrue(output_without_step.size(dim=0) == 100)
self.assertTrue(output_with_step.size(dim=0) == 5)
# outputs should be equal to the newest version
self.assertEqual(output_with_step, current_with_step)
self.assertEqual(output_without_step, current_without_step)
def test_versioned_logspace_out(self):
class Module(torch.nn.Module):
def forward(
self,
a: Union[int, float, complex],
b: Union[int, float, complex],
out: torch.Tensor,
):
return torch.logspace(a, b, steps=100, out=out)
model_path = (
pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_out_v8.ptl"
)
loaded_model = torch.jit.load(model_path)
buffer = io.BytesIO(loaded_model._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
v8_mobile_module = _load_for_lite_interpreter(buffer)
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = (
(
3,
10,
torch.empty((100,), dtype=torch.int64),
torch.empty((100,), dtype=torch.int64),
),
(
-10,
10,
torch.empty((100,), dtype=torch.int64),
torch.empty((100,), dtype=torch.int64),
),
(
4.0,
6.0,
torch.empty((100,), dtype=torch.float64),
torch.empty((100,), dtype=torch.float64),
),
(
3 + 4j,
4 + 5j,
torch.empty((100,), dtype=torch.complex64),
torch.empty((100,), dtype=torch.complex64),
),
)
for start, end, out_for_old, out_for_new in sample_inputs:
output = v8_mobile_module(start, end, out_for_old)
output_current = current_mobile_module(start, end, out_for_new)
# when no step is given, should have used 100
self.assertTrue(output.size(dim=0) == 100)
# "Upgraded" model should match the new version output
self.assertEqual(output, output_current)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestSaveLoadForOpVersion |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 125765,
"end": 125955
} | class ____:
xlUpdateLinksAlways = 3 # from enum XlUpdateLinks
xlUpdateLinksNever = 2 # from enum XlUpdateLinks
xlUpdateLinksUserSetting = 1 # from enum XlUpdateLinks
| UpdateLinks |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.