language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/auth/services/orgauthtoken/service.py | {
"start": 458,
"end": 1636
} | class ____(RpcService):
key = "orgauthtoken"
local_mode = SiloMode.CONTROL
@classmethod
def get_local_implementation(cls) -> RpcService:
return impl_by_db()
@rpc_method
@abstractmethod
def update_orgauthtoken(
self,
*,
organization_id: int,
org_auth_token_id: int,
date_last_used: datetime | None = None,
project_last_used_id: int | None = None,
) -> None:
pass
def impl_by_db() -> OrgAuthTokenService:
from .impl import DatabaseBackedOrgAuthTokenService
return DatabaseBackedOrgAuthTokenService()
def impl_by_outbox() -> OrgAuthTokenService:
from .impl import OutboxBackedOrgAuthTokenService
return OutboxBackedOrgAuthTokenService()
# An asynchronous service which can delegate to an outbox implementation, essentially enqueueing
# updates of tokens for future processing.
orgauthtoken_service: OrgAuthTokenService = silo_mode_delegation(
{
SiloMode.REGION: impl_by_outbox,
SiloMode.CONTROL: impl_by_db,
SiloMode.MONOLITH: impl_by_db,
}
)
orgauthtoken_rpc_service = OrgAuthTokenService.create_delegation()
| OrgAuthTokenService |
python | PrefectHQ__prefect | tests/server/models/test_workers.py | {
"start": 7016,
"end": 7444
} | class ____:
async def test_read_work_pool(
self, session: AsyncSession, work_pool: schemas.core.WorkPool
):
result = await models.workers.read_work_pool(
session=session, work_pool_id=work_pool.id
)
assert result.name == work_pool.name
assert result.is_paused is work_pool.is_paused
assert result.concurrency_limit == work_pool.concurrency_limit
| TestReadWorkPool |
python | readthedocs__readthedocs.org | readthedocs/search/migrations/0001_initial.py | {
"start": 218,
"end": 2202
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
initial = True
dependencies = [
("builds", "0009_added_external_version_type"),
("projects", "0044_auto_20190703_1300"),
]
operations = [
migrations.CreateModel(
name="SearchQuery",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
("query", models.CharField(max_length=4092, verbose_name="Query")),
(
"project",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="search_queries",
to="projects.Project",
),
),
(
"version",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="search_queries",
to="builds.Version",
verbose_name="Version",
),
),
],
options={
"verbose_name": "Search query",
"verbose_name_plural": "Search queries",
},
),
]
| Migration |
python | pytest-dev__pluggy | src/pluggy/_manager.py | {
"start": 1721,
"end": 2226
} | class ____:
"""Emulate a pkg_resources Distribution"""
def __init__(self, dist: importlib.metadata.Distribution) -> None:
self._dist = dist
@property
def project_name(self) -> str:
name: str = self.metadata["name"]
return name
def __getattr__(self, attr: str, default: Any | None = None) -> Any:
return getattr(self._dist, attr, default)
def __dir__(self) -> list[str]:
return sorted(dir(self._dist) + ["_dist", "project_name"])
| DistFacade |
python | apache__airflow | providers/google/src/airflow/providers/google/firebase/hooks/firestore.py | {
"start": 1227,
"end": 5677
} | class ____(GoogleBaseHook):
"""
Hook for the Google Firestore APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param api_version: API version used (for example v1 or v1beta1).
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
_conn: build | None = None
def __init__(
self,
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
def get_conn(self):
"""
Retrieve the connection to Cloud Firestore.
:return: Google Cloud Firestore services object.
"""
if not self._conn:
http_authorized = self._authorize()
# We cannot use an Authorized Client to retrieve discovery document due to an error in the API.
# When the authorized customer will send a request to the address below
# https://www.googleapis.com/discovery/v1/apis/firestore/v1/rest
# then it will get the message below:
# > Request contains an invalid argument.
# At the same time, the Non-Authorized Client has no problems.
non_authorized_conn = build("firestore", self.api_version, cache_discovery=False)
self._conn = build_from_document(non_authorized_conn._rootDesc, http=http_authorized)
return self._conn
@GoogleBaseHook.fallback_to_default_project_id
def export_documents(
self, body: dict, database_id: str = "(default)", project_id: str = PROVIDE_PROJECT_ID
) -> None:
"""
Start a export with the specified configuration.
:param database_id: The Database ID.
:param body: The request body.
See:
https://firebase.google.com/docs/firestore/reference/rest/v1beta1/projects.databases/exportDocuments
:param project_id: Optional, Google Cloud Project project_id where the database belongs.
If set to None or missing, the default project_id from the Google Cloud connection is used.
"""
service = self.get_conn()
name = f"projects/{project_id}/databases/{database_id}"
operation = (
service.projects()
.databases()
.exportDocuments(name=name, body=body)
.execute(num_retries=self.num_retries)
)
self._wait_for_operation_to_complete(operation["name"])
def _wait_for_operation_to_complete(self, operation_name: str) -> None:
"""
Wait for the named operation to complete - checks status of the asynchronous call.
:param operation_name: The name of the operation.
:return: The response returned by the operation.
:exception: AirflowException in case error is returned.
"""
service = self.get_conn()
while True:
operation_response = (
service.projects()
.databases()
.operations()
.get(name=operation_name)
.execute(num_retries=self.num_retries)
)
if operation_response.get("done"):
response = operation_response.get("response")
error = operation_response.get("error")
# Note, according to documentation always either response or error is
# set when "done" == True
if error:
raise AirflowException(str(error))
return response
time.sleep(TIME_TO_SLEEP_IN_SECONDS)
| CloudFirestoreHook |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_sampling_project_rates.py | {
"start": 111,
"end": 2877
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-sampling-project-rates"
def setUp(self) -> None:
super().setUp()
self.features = {
"organizations:dynamic-sampling": True,
"organizations:dynamic-sampling-custom": True,
}
self.organization.update_option("sentry:sampling_mode", DynamicSamplingMode.PROJECT)
self.login_as(user=self.user)
def test_without_ds(self) -> None:
self.get_error_response(self.organization.slug, status_code=404)
def test_get(self) -> None:
project1 = self.create_project(teams=[self.team])
project2 = self.create_project(teams=[self.team])
project2.update_option("sentry:target_sample_rate", 0.2)
with self.feature(self.features):
response = self.get_success_response(self.organization.slug)
assert response.data == [
{"id": project1.id, "sampleRate": 1.0},
{"id": project2.id, "sampleRate": 0.2},
]
def test_put(self) -> None:
project1 = self.create_project(teams=[self.team])
project1.update_option("sentry:target_sample_rate", 0.2)
project2 = self.create_project(teams=[self.team])
project2.update_option("sentry:target_sample_rate", 0.2)
project3 = self.create_project(teams=[self.team])
project3.update_option("sentry:target_sample_rate", 0.2)
data = [
# we leave project 1 unchanged
{"id": project2.id, "sampleRate": 0.5},
{"id": project3.id, "sampleRate": 0.123456789},
]
with self.feature(self.features):
response = self.get_success_response(
self.organization.slug, method="put", raw_data=data
)
assert response.data == [
{"id": project2.id, "sampleRate": 0.5},
{"id": project3.id, "sampleRate": 0.1235},
]
assert project1.get_option("sentry:target_sample_rate") == 0.2
assert project2.get_option("sentry:target_sample_rate") == 0.5
assert project3.get_option("sentry:target_sample_rate") == 0.1235
def test_put_automatic_mode(self) -> None:
self.organization.update_option(
"sentry:sampling_mode", DynamicSamplingMode.ORGANIZATION.value
)
data = [{"id": self.project.id, "sampleRate": 0.5}]
with self.feature(self.features):
self.get_error_response(self.organization.slug, method="put", raw_data=data)
def test_put_invalid_body(self) -> None:
with self.feature(self.features):
self.get_error_response(
self.organization.slug, method="put", raw_data={}, status_code=400
)
| OrganizationSamplingProjectRatesTest |
python | huggingface__transformers | src/transformers/models/layoutlm/modeling_layoutlm.py | {
"start": 10477,
"end": 11187
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.align.modeling_align.AlignTextLayer with AlignText->LayoutLM
| LayoutLMOutput |
python | huggingface__transformers | tests/pipelines/test_pipelines_token_classification.py | {
"start": 1444,
"end": 41825
} | class ____(unittest.TestCase):
model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
if not hasattr(model_mapping, "is_dummy"):
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
token_classifier = TokenClassificationPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
)
return token_classifier, ["A simple string", "A simple string that is quite a bit longer"]
def run_pipeline_test(self, token_classifier, _):
model = token_classifier.model
tokenizer = token_classifier.tokenizer
if not tokenizer.is_fast:
return # Slow tokenizers do not return offsets mappings, so this test will fail
outputs = token_classifier("A simple string")
self.assertIsInstance(outputs, list)
n = len(outputs)
self.assertEqual(
nested_simplify(outputs),
[
{
"entity": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"index": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
)
outputs = token_classifier(["list of strings", "A simple string that is quite a bit longer"])
self.assertIsInstance(outputs, list)
self.assertEqual(len(outputs), 2)
n = len(outputs[0])
m = len(outputs[1])
self.assertEqual(
nested_simplify(outputs),
[
[
{
"entity": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"index": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
[
{
"entity": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"index": ANY(int),
"word": ANY(str),
}
for i in range(m)
],
],
)
self.run_aggregation_strategy(model, tokenizer)
def run_aggregation_strategy(self, model, tokenizer):
token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="simple")
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE)
outputs = token_classifier("A simple string")
self.assertIsInstance(outputs, list)
n = len(outputs)
self.assertEqual(
nested_simplify(outputs),
[
{
"entity_group": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
)
token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="first")
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST)
outputs = token_classifier("A simple string")
self.assertIsInstance(outputs, list)
n = len(outputs)
self.assertEqual(
nested_simplify(outputs),
[
{
"entity_group": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
)
token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="max")
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.MAX)
outputs = token_classifier("A simple string")
self.assertIsInstance(outputs, list)
n = len(outputs)
self.assertEqual(
nested_simplify(outputs),
[
{
"entity_group": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
)
token_classifier = TokenClassificationPipeline(
model=model, tokenizer=tokenizer, aggregation_strategy="average"
)
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.AVERAGE)
outputs = token_classifier("A simple string")
self.assertIsInstance(outputs, list)
n = len(outputs)
self.assertEqual(
nested_simplify(outputs),
[
{
"entity_group": ANY(str),
"score": ANY(float),
"start": ANY(int),
"end": ANY(int),
"word": ANY(str),
}
for i in range(n)
],
)
with self.assertWarns(UserWarning):
token_classifier = pipeline(task="ner", model=model, tokenizer=tokenizer, grouped_entities=True)
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE)
with self.assertWarns(UserWarning):
token_classifier = pipeline(
task="ner", model=model, tokenizer=tokenizer, grouped_entities=True, ignore_subwords=True
)
self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST)
@slow
@require_torch
def test_chunking(self):
NER_MODEL = "elastic/distilbert-base-uncased-finetuned-conll03-english"
model = AutoModelForTokenClassification.from_pretrained(NER_MODEL)
tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True)
tokenizer.model_max_length = 10
stride = 5
sentence = (
"Hugging Face, Inc. is a French company that develops tools for building applications using machine learning. "
"The company, based in New York City was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf."
)
token_classifier = TokenClassificationPipeline(
model=model, tokenizer=tokenizer, aggregation_strategy="simple", stride=stride
)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30},
{"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174},
{"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205},
{"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222},
{"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239},
],
)
token_classifier = TokenClassificationPipeline(
model=model, tokenizer=tokenizer, aggregation_strategy="first", stride=stride
)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30},
{"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174},
{"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205},
{"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222},
{"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239},
],
)
token_classifier = TokenClassificationPipeline(
model=model, tokenizer=tokenizer, aggregation_strategy="max", stride=stride
)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30},
{"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174},
{"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205},
{"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222},
{"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239},
],
)
token_classifier = TokenClassificationPipeline(
model=model, tokenizer=tokenizer, aggregation_strategy="average", stride=stride
)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30},
{"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144},
{"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174},
{"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205},
{"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222},
{"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239},
],
)
@require_torch
@slow
def test_is_split_into_words(self):
"""
Tests the pipeline with pre-tokenized inputs (is_split_into_words=True)
and validates that the character offsets are correct.
"""
token_classifier = pipeline(task="ner", model="dslim/bert-base-NER", aggregation_strategy="simple")
# Input is a list of words
words = ["Hello", "Sarah", "lives", "in", "New", "York"]
# The reconstructed sentence will be "Hello Sarah lives in New York"
# - "Sarah": starts at index 6, ends at 11
# - "New York": starts at index 21, ends at 29
output = token_classifier(words, is_split_into_words=True)
self.assertEqual(
nested_simplify(output),
[
[
{"entity_group": "PER", "score": ANY(float), "word": "Sarah", "start": 6, "end": 11},
{"entity_group": "LOC", "score": ANY(float), "word": "New York", "start": 21, "end": 29},
]
],
)
# Also test batching with pre-tokenized inputs
words2 = ["My", "name", "is", "Wolfgang", "and", "I", "live", "in", "Berlin"]
batch_output = token_classifier([words, words2], is_split_into_words=True)
# Expected for second sentence ("My name is Wolfgang and I live in Berlin")
# - "Wolfgang": starts at 12, ends at 20
# - "Berlin": starts at 36, ends at 42
self.assertEqual(
nested_simplify(batch_output),
[
[
{"entity_group": "PER", "score": ANY(float), "word": "Sarah", "start": 6, "end": 11},
{"entity_group": "LOC", "score": ANY(float), "word": "New York", "start": 21, "end": 29},
],
[
{"entity_group": "PER", "score": ANY(float), "word": "Wolfgang", "start": 11, "end": 19},
{"entity_group": "LOC", "score": ANY(float), "word": "Berlin", "start": 34, "end": 40},
],
],
)
@require_torch
def test_chunking_fast(self):
# Note: We cannot run the test on "conflicts" on the chunking.
# The problem is that the model is random, and thus the results do heavily
# depend on the chunking, so we cannot expect "abcd" and "bcd" to find
# the same entities. We defer to slow tests for this.
pipe = pipeline(model="hf-internal-testing/tiny-bert-for-token-classification")
sentence = "The company, based in New York City was founded in 2016 by French entrepreneurs"
results = pipe(sentence, aggregation_strategy="first")
# This is what this random model gives on the full sentence
self.assertEqual(
nested_simplify(results),
[
# This is 2 actual tokens
{"end": 39, "entity_group": "MISC", "score": 0.115, "start": 31, "word": "city was"},
{"end": 79, "entity_group": "MISC", "score": 0.115, "start": 66, "word": "entrepreneurs"},
],
)
# This will force the tokenizer to split after "city was".
pipe.tokenizer.model_max_length = 12
self.assertEqual(
pipe.tokenizer.decode(pipe.tokenizer.encode(sentence, truncation=True)),
"[CLS] the company, based in new york city was [SEP]",
)
stride = 4
results = pipe(sentence, aggregation_strategy="first", stride=stride)
self.assertEqual(
nested_simplify(results),
[
{"end": 39, "entity_group": "MISC", "score": 0.115, "start": 31, "word": "city was"},
# This is an extra entity found by this random model, but at least both original
# entities are there
{"end": 58, "entity_group": "MISC", "score": 0.115, "start": 56, "word": "by"},
{"end": 79, "entity_group": "MISC", "score": 0.115, "start": 66, "word": "entrepreneurs"},
],
)
@require_torch
@slow
def test_spanish_bert(self):
# https://github.com/huggingface/transformers/pull/4987
NER_MODEL = "mrm8488/bert-spanish-cased-finetuned-ner"
model = AutoModelForTokenClassification.from_pretrained(NER_MODEL)
tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True)
sentence = """Consuelo Araújo Noguera, ministra de cultura del presidente Andrés Pastrana (1998.2002) fue asesinada por las Farc luego de haber permanecido secuestrada por algunos meses."""
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity": "B-PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4, "index": 1},
{"entity": "B-PER", "score": 0.803, "word": "##uelo", "start": 4, "end": 8, "index": 2},
{"entity": "I-PER", "score": 0.999, "word": "Ara", "start": 9, "end": 12, "index": 3},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4},
{"entity_group": "PER", "score": 0.966, "word": "##uelo Araújo Noguera", "start": 4, "end": 23},
{"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23},
{"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
{"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23},
{"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
{"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.966, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23},
{"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75},
{"entity_group": "ORG", "score": 0.542, "word": "Farc", "start": 110, "end": 114},
],
)
@require_torch_accelerator
@slow
def test_accelerator(self):
sentence = "This is dummy sentence"
ner = pipeline(
"token-classification",
device=torch_device,
aggregation_strategy=AggregationStrategy.SIMPLE,
)
output = ner(sentence)
self.assertEqual(nested_simplify(output), [])
@require_torch
@slow
def test_dbmdz_english(self):
# Other sentence
NER_MODEL = "dbmdz/bert-large-cased-finetuned-conll03-english"
model = AutoModelForTokenClassification.from_pretrained(NER_MODEL)
tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True)
sentence = """Enzo works at the UN"""
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer)
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity": "I-PER", "score": 0.998, "word": "En", "start": 0, "end": 2, "index": 1},
{"entity": "I-PER", "score": 0.997, "word": "##zo", "start": 2, "end": 4, "index": 2},
{"entity": "I-ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20, "index": 6},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output[:3]),
[
{"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20},
],
)
token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average")
output = token_classifier(sentence)
self.assertEqual(
nested_simplify(output),
[
{"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20},
],
)
@require_torch
@slow
def test_aggregation_strategy_byte_level_tokenizer(self):
sentence = "Groenlinks praat over Schiphol."
ner = pipeline("ner", model="FacebookAI/xlm-roberta-large-finetuned-conll02-dutch", aggregation_strategy="max")
self.assertEqual(
nested_simplify(ner(sentence)),
[
{"end": 10, "entity_group": "ORG", "score": 0.994, "start": 0, "word": "Groenlinks"},
{"entity_group": "LOC", "score": 1.0, "word": "Schiphol.", "start": 22, "end": 31},
],
)
@require_torch
def test_aggregation_strategy_no_b_i_prefix(self):
model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer)
# Just to understand scores indexes in this test
token_classifier.model.config.id2label = {0: "O", 1: "MISC", 2: "PER", 3: "ORG", 4: "LOC"}
example = [
{
"scores": np.array([0, 0, 0, 0, 0.9968166351318359]), # fmt : skip
"index": 1,
"is_subword": False,
"word": "En",
"start": 0,
"end": 2,
},
{
"scores": np.array([0, 0, 0, 0, 0.9957635998725891]), # fmt : skip
"index": 2,
"is_subword": True,
"word": "##zo",
"start": 2,
"end": 4,
},
{
"scores": np.array([0, 0, 0, 0.9986497163772583, 0]), # fmt : skip
"index": 7,
"word": "UN",
"is_subword": False,
"start": 11,
"end": 13,
},
]
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)),
[
{"end": 2, "entity": "LOC", "score": 0.997, "start": 0, "word": "En", "index": 1},
{"end": 4, "entity": "LOC", "score": 0.996, "start": 2, "word": "##zo", "index": 2},
{"end": 13, "entity": "ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7},
],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)),
[
{"entity_group": "LOC", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
],
)
@require_torch
def test_aggregation_strategy(self):
model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer)
# Just to understand scores indexes in this test
self.assertEqual(
token_classifier.model.config.id2label,
{0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"},
)
example = [
{
"scores": np.array([0, 0, 0, 0, 0.9968166351318359, 0, 0, 0]), # fmt : skip
"index": 1,
"is_subword": False,
"word": "En",
"start": 0,
"end": 2,
},
{
"scores": np.array([0, 0, 0, 0, 0.9957635998725891, 0, 0, 0]), # fmt : skip
"index": 2,
"is_subword": True,
"word": "##zo",
"start": 2,
"end": 4,
},
{
"scores": np.array([0, 0, 0, 0, 0, 0.9986497163772583, 0, 0]), # fmt : skip
"index": 7,
"word": "UN",
"is_subword": False,
"start": 11,
"end": 13,
},
]
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)),
[
{"end": 2, "entity": "I-PER", "score": 0.997, "start": 0, "word": "En", "index": 1},
{"end": 4, "entity": "I-PER", "score": 0.996, "start": 2, "word": "##zo", "index": 2},
{"end": 13, "entity": "B-ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7},
],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)),
[
{"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.FIRST)),
[
{"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.MAX)),
[
{"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)),
[
{"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4},
{"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13},
],
)
@require_torch
def test_aggregation_strategy_example2(self):
model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer)
# Just to understand scores indexes in this test
self.assertEqual(
token_classifier.model.config.id2label,
{0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"},
)
example = [
{
# Necessary for AVERAGE
"scores": np.array([0, 0.55, 0, 0.45, 0, 0, 0, 0, 0, 0]),
"is_subword": False,
"index": 1,
"word": "Ra",
"start": 0,
"end": 2,
},
{
"scores": np.array([0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0]),
"is_subword": True,
"word": "##ma",
"start": 2,
"end": 4,
"index": 2,
},
{
# 4th score will have the higher average
# 4th score is B-PER for this model
# It's does not correspond to any of the subtokens.
"scores": np.array([0, 0, 0, 0.4, 0, 0, 0.6, 0, 0, 0]),
"is_subword": True,
"word": "##zotti",
"start": 11,
"end": 13,
"index": 3,
},
]
self.assertEqual(
token_classifier.aggregate(example, AggregationStrategy.NONE),
[
{"end": 2, "entity": "B-MISC", "score": 0.55, "start": 0, "word": "Ra", "index": 1},
{"end": 4, "entity": "B-LOC", "score": 0.8, "start": 2, "word": "##ma", "index": 2},
{"end": 13, "entity": "I-ORG", "score": 0.6, "start": 11, "word": "##zotti", "index": 3},
],
)
self.assertEqual(
token_classifier.aggregate(example, AggregationStrategy.FIRST),
[{"entity_group": "MISC", "score": 0.55, "word": "Ramazotti", "start": 0, "end": 13}],
)
self.assertEqual(
token_classifier.aggregate(example, AggregationStrategy.MAX),
[{"entity_group": "LOC", "score": 0.8, "word": "Ramazotti", "start": 0, "end": 13}],
)
self.assertEqual(
nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)),
[{"entity_group": "PER", "score": 0.35, "word": "Ramazotti", "start": 0, "end": 13}],
)
@require_torch
@slow
def test_aggregation_strategy_offsets_with_leading_space(self):
sentence = "We're from New York"
model_name = "brandon25/deberta-base-finetuned-ner"
ner = pipeline("ner", model=model_name, ignore_labels=[], aggregation_strategy="max")
self.assertEqual(
nested_simplify(ner(sentence)),
[
{"entity_group": "O", "score": 1.0, "word": " We're from", "start": 0, "end": 10},
{"entity_group": "LOC", "score": 1.0, "word": " New York", "start": 10, "end": 19},
],
)
@require_torch
def test_gather_pre_entities(self):
model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer)
sentence = "Hello there"
tokens = tokenizer(
sentence,
return_attention_mask=False,
return_tensors="pt",
truncation=True,
return_special_tokens_mask=True,
return_offsets_mapping=True,
)
offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0]
special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0]
input_ids = tokens["input_ids"].numpy()[0]
# First element in [CLS]
scores = np.array([[1, 0, 0], [0.1, 0.3, 0.6], [0.8, 0.1, 0.1]])
pre_entities = token_classifier.gather_pre_entities(
sentence,
input_ids,
scores,
offset_mapping,
special_tokens_mask,
aggregation_strategy=AggregationStrategy.NONE,
)
self.assertEqual(
nested_simplify(pre_entities),
[
{"word": "Hello", "scores": [0.1, 0.3, 0.6], "start": 0, "end": 5, "is_subword": False, "index": 1},
{
"word": "there",
"scores": [0.8, 0.1, 0.1],
"index": 2,
"start": 6,
"end": 11,
"is_subword": False,
},
],
)
@require_torch
def test_word_heuristic_leading_space(self):
model_name = "hf-internal-testing/tiny-random-deberta-v2"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer)
sentence = "I play the theremin"
tokens = tokenizer(
sentence,
return_attention_mask=False,
return_tensors="pt",
return_special_tokens_mask=True,
return_offsets_mapping=True,
)
offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0]
special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0]
input_ids = tokens["input_ids"].numpy()[0]
scores = np.array([[1, 0] for _ in input_ids]) # values irrelevant for heuristic
pre_entities = token_classifier.gather_pre_entities(
sentence,
input_ids,
scores,
offset_mapping,
special_tokens_mask,
aggregation_strategy=AggregationStrategy.FIRST,
)
# ensure expected tokenization and correct is_subword values
self.assertEqual(
[(entity["word"], entity["is_subword"]) for entity in pre_entities],
[("▁I", False), ("▁play", False), ("▁the", False), ("▁there", False), ("min", True)],
)
@require_torch
def test_no_offset_tokenizer(self):
model_name = "hf-internal-testing/tiny-bert-for-token-classification"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
token_classifier = pipeline(task="token-classification", model=model_name, tokenizer=tokenizer)
outputs = token_classifier("This is a test !")
self.assertEqual(
nested_simplify(outputs),
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": None, "end": None},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": None, "end": None},
],
)
@require_torch
def test_small_model_pt(self):
model_name = "hf-internal-testing/tiny-bert-for-token-classification"
token_classifier = pipeline(task="token-classification", model=model_name)
outputs = token_classifier("This is a test !")
self.assertEqual(
nested_simplify(outputs),
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
],
)
token_classifier = pipeline(task="token-classification", model=model_name, ignore_labels=["O", "I-MISC"])
outputs = token_classifier("This is a test !")
self.assertEqual(
nested_simplify(outputs),
[],
)
token_classifier = pipeline(task="token-classification", model=model_name)
# Overload offset_mapping
outputs = token_classifier(
"This is a test !", offset_mapping=[(0, 0), (0, 1), (0, 2), (0, 0), (0, 0), (0, 0), (0, 0)]
)
self.assertEqual(
nested_simplify(outputs),
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 1},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 0, "end": 2},
],
)
# Batch size does not affect outputs (attention_mask are required)
sentences = ["This is a test !", "Another test this is with longer sentence"]
outputs = token_classifier(sentences)
outputs_batched = token_classifier(sentences, batch_size=2)
# Batching does not make a difference in predictions
self.assertEqual(nested_simplify(outputs_batched), nested_simplify(outputs))
self.assertEqual(
nested_simplify(outputs_batched),
[
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
],
[],
],
)
@require_torch
def test_small_model_pt_fp16(self):
model_name = "hf-internal-testing/tiny-bert-for-token-classification"
token_classifier = pipeline(task="token-classification", model=model_name, dtype=torch.float16)
outputs = token_classifier("This is a test !")
self.assertEqual(
nested_simplify(outputs),
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
],
)
@require_torch
def test_small_model_pt_bf16(self):
model_name = "hf-internal-testing/tiny-bert-for-token-classification"
token_classifier = pipeline(task="token-classification", model=model_name, dtype=torch.bfloat16)
outputs = token_classifier("This is a test !")
self.assertEqual(
nested_simplify(outputs),
[
{"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4},
{"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7},
],
)
@require_torch
def test_pt_ignore_subwords_slow_tokenizer_raises(self):
model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
with self.assertRaises(ValueError):
pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.FIRST)
with self.assertRaises(ValueError):
pipeline(
task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.AVERAGE
)
with self.assertRaises(ValueError):
pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.MAX)
@slow
@require_torch
def test_simple(self):
token_classifier = pipeline(task="ner", model="dslim/bert-base-NER", grouped_entities=True)
sentence = "Hello Sarah Jessica Parker who Jessica lives in New York"
sentence2 = "This is a simple test"
output = token_classifier(sentence)
output_ = nested_simplify(output)
self.assertEqual(
output_,
[
{
"entity_group": "PER",
"score": 0.996,
"word": "Sarah Jessica Parker",
"start": 6,
"end": 26,
},
{"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38},
{"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56},
],
)
output = token_classifier([sentence, sentence2])
output_ = nested_simplify(output)
self.assertEqual(
output_,
[
[
{"entity_group": "PER", "score": 0.996, "word": "Sarah Jessica Parker", "start": 6, "end": 26},
{"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38},
{"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56},
],
[],
],
)
| TokenClassificationPipelineTests |
python | conda__conda | conda/gateways/repodata/__init__.py | {
"start": 3256,
"end": 11234
} | class ____(RepoInterface):
"""Provides an interface for retrieving repodata data from channels."""
#: Channel URL
_url: str
#: Filename of the repodata file; defaults to value of conda.base.constants.REPODATA_FN
_repodata_fn: str
def __init__(self, url: str, repodata_fn: str | None, **kwargs) -> None:
log.debug("Using CondaRepoInterface")
self._url = url
self._repodata_fn = repodata_fn or REPODATA_FN
def repodata(self, state: RepodataState) -> str | None:
if not context.ssl_verify:
warnings.simplefilter("ignore", InsecureRequestWarning)
session = get_session(self._url)
headers = {}
etag = state.etag
last_modified = state.mod
if etag:
headers["If-None-Match"] = str(etag)
if last_modified:
headers["If-Modified-Since"] = str(last_modified)
filename = self._repodata_fn
url = join_url(self._url, filename)
with conda_http_errors(self._url, filename):
timeout = (
context.remote_connect_timeout_secs,
context.remote_read_timeout_secs,
)
response: Response = session.get(
url, headers=headers, proxies=session.proxies, timeout=timeout
)
if log.isEnabledFor(logging.DEBUG):
log.debug(stringify(response, content_max_len=256))
response.raise_for_status()
if response.status_code == 304:
# should we save cache-control to state here to put another n
# seconds on the "make a remote request" clock and/or touch cache
# mtime
raise Response304ContentUnchanged()
json_str = response.text
# We no longer add these tags to the large `resp.content` json
saved_fields = {"_url": self._url}
_add_http_value_to_dict(response, "Etag", saved_fields, "_etag")
_add_http_value_to_dict(response, "Last-Modified", saved_fields, "_mod")
_add_http_value_to_dict(
response, "Cache-Control", saved_fields, "_cache_control"
)
state.clear()
state.update(saved_fields)
return json_str
def _add_http_value_to_dict(resp, http_key, d, dict_key):
value = resp.headers.get(http_key)
if value:
d[dict_key] = value
@contextmanager
def conda_http_errors(url, repodata_fn):
"""Use in a with: statement to translate requests exceptions to conda ones."""
try:
yield
except RequestsProxyError:
raise ProxyError() # see #3962
except InvalidSchema as e:
if "SOCKS" in str(e):
message = """\
Requests has identified that your current working environment is configured
to use a SOCKS proxy, but pysocks is not installed. To proceed, remove your
proxy configuration, run `conda install pysocks`, and then you can re-enable
your proxy configuration.
"""
raise CondaDependencyError(message)
else:
raise
except SSLError as e:
# SSLError: either an invalid certificate or OpenSSL is unavailable
try:
import ssl # noqa: F401
except ImportError:
raise CondaSSLError(
f"""\
OpenSSL appears to be unavailable on this machine. OpenSSL is required to
download and install packages.
Exception: {e}
"""
)
else:
raise CondaSSLError(
f"""\
Encountered an SSL error. Most likely a certificate verification issue.
Exception: {e}
"""
)
except (ConnectionError, HTTPError, ChunkedEncodingError) as e:
status_code = getattr(e.response, "status_code", None)
if status_code in (403, 404):
if not url.endswith("/noarch"):
log.info(
"Unable to retrieve repodata (response: %d) for %s",
status_code,
url + "/" + repodata_fn,
)
raise RepodataIsEmpty(
Channel(dirname(url)),
status_code,
response=e.response,
)
else:
if context.allow_non_channel_urls:
stderrlog.warning(
"Unable to retrieve repodata (response: %d) for %s",
status_code,
url + "/" + repodata_fn,
)
raise RepodataIsEmpty(
Channel(dirname(url)),
status_code,
response=e.response,
)
else:
raise UnavailableInvalidChannel(
Channel(dirname(url)),
status_code,
response=e.response,
)
elif status_code == 401:
channel = Channel(url)
if channel.token:
help_message = """\
The token '{}' given for the URL is invalid.
If this token was pulled from anaconda-client, you will need to use
anaconda-client to reauthenticate.
If you supplied this token to conda directly, you will need to adjust your
conda configuration to proceed.
Use `conda config --show` to view your configuration's current state.
Further configuration help can be found at <{}>.
""".format(
channel.token,
join_url(CONDA_HOMEPAGE_URL, "docs/config.html"),
)
elif context.channel_alias.location in url:
# Note, this will not trigger if the binstar configured url does
# not match the conda configured one.
help_message = """\
The remote server has indicated you are using invalid credentials for this channel.
If the remote site is anaconda.org or follows the Anaconda Server API, you
will need to
(a) remove the invalid token from your system with `anaconda logout`, optionally
followed by collecting a new token with `anaconda login`, or
(b) provide conda with a valid token directly.
Further configuration help can be found at <{}>.
""".format(join_url(CONDA_HOMEPAGE_URL, "docs/config.html"))
else:
help_message = """\
The credentials you have provided for this URL are invalid.
You will need to modify your conda configuration to proceed.
Use `conda config --show` to view your configuration's current state.
Further configuration help can be found at <{}>.
""".format(join_url(CONDA_HOMEPAGE_URL, "docs/config.html"))
elif status_code is not None and 500 <= status_code < 600:
help_message = """\
A remote server error occurred when trying to retrieve this URL.
A 500-type error (e.g. 500, 501, 502, 503, etc.) indicates the server failed to
fulfill a valid request. The problem may be spurious, and will resolve itself if you
try your request again. If the problem persists, consider notifying the maintainer
of the remote server.
"""
else:
if url.startswith("https://repo.anaconda.com/"):
help_message = f"""\
An HTTP error occurred when trying to retrieve this URL.
HTTP errors are often intermittent, and a simple retry will get you on your way.
If your current network has https://repo.anaconda.com blocked, please file
a support request with your network engineering team.
{maybe_unquote(repr(url))}
"""
else:
help_message = f"""\
An HTTP error occurred when trying to retrieve this URL.
HTTP errors are often intermittent, and a simple retry will get you on your way.
{maybe_unquote(repr(url))}
"""
raise CondaHTTPError(
help_message,
join_url(url, repodata_fn),
status_code,
getattr(e.response, "reason", None),
getattr(e.response, "elapsed", None),
e.response,
caused_by=e,
)
| CondaRepoInterface |
python | kamyu104__LeetCode-Solutions | Python/allocate-mailboxes.py | {
"start": 35,
"end": 725
} | class ____(object):
def minDistance(self, houses, k):
"""
:type houses: List[int]
:type k: int
:rtype: int
"""
def cost(prefix, i, j):
return (prefix[j+1]-prefix[(i+j+1)//2])-(prefix[(i+j)//2+1]-prefix[i])
houses.sort()
prefix = [0]*(len(houses)+1)
for i, h in enumerate(houses):
prefix[i+1] = prefix[i]+h
dp = [cost(prefix, 0, j) for j in xrange(len(houses))]
for m in xrange(1, k):
for j in reversed(xrange(m, len(houses))):
for i in xrange(m, j+1):
dp[j] = min(dp[j], dp[i-1]+cost(prefix, i, j))
return dp[-1]
| Solution |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 18191,
"end": 18248
} | class ____(BaseInheritTracking3):
pass
| InheritTracking3 |
python | mlflow__mlflow | tests/pyfunc/test_scoring_server.py | {
"start": 3745,
"end": 40157
} | class ____(PythonModel):
# Example model that takes "input" as model input
def predict(self, context, model_input, params=None):
if isinstance(model_input, pd.DataFrame):
model_input = model_input.to_dict(orient="records")[0]
return {
"data": [
{
"index": 0,
"embedding": [0.1, 0.2, 0.3],
}
],
# Echo model input and params for testing purposes
"model_input": model_input,
"params": params,
}
@pytest.fixture
def pandas_df_with_all_types():
pdf = pd.DataFrame(
{
"boolean": [True, False, True],
"integer": np.array([1, 2, 3], np.int32),
"long": np.array([1, 2, 3], np.int64),
"float": np.array([math.pi, 2 * math.pi, 3 * math.pi], np.float32),
"double": [math.pi, 2 * math.pi, 3 * math.pi],
"binary": [bytearray([1, 2, 3]), bytearray([4, 5, 6]), bytearray([7, 8, 9])],
"datetime": [
np.datetime64("2021-01-01 00:00:00"),
np.datetime64("2021-02-02 00:00:00"),
np.datetime64("2021-03-03 12:00:00"),
],
}
)
pdf["string"] = pd.Series(["a", "b", "c"], dtype=DataType.string.to_pandas())
return pdf
@pytest.fixture
def pandas_df_with_csv_types():
pdf = pd.DataFrame(
{
"boolean": [True, False, True],
"integer": np.array([1, 2, 3], np.int32),
"long": np.array([1, 2, 3], np.int64),
"float": np.array([math.pi, 2 * math.pi, 3 * math.pi], np.float32),
"double": [math.pi, 2 * math.pi, 3 * math.pi],
}
)
pdf["string"] = pd.Series(["a", "b", "c"], dtype=DataType.string.to_pandas())
return pdf
@pytest.fixture(scope="module")
def sklearn_model():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
knn_model = knn.KNeighborsClassifier()
knn_model.fit(X, y)
return ModelWithData(model=knn_model, inference_data=X)
@pytest.fixture(scope="module")
def keras_model():
iris = datasets.load_iris()
data = pd.DataFrame(
data=np.c_[iris["data"], iris["target"]], columns=iris["feature_names"] + ["target"]
)
y = data["target"]
X = data.drop("target", axis=1).values
input_a = Input(shape=(2,), name="a")
input_b = Input(shape=(2,), name="b")
output = Dense(1)(Dense(3, input_dim=4)(Concatenate()([input_a, input_b])))
model = Model(inputs=[input_a, input_b], outputs=output)
model.compile(loss="mean_squared_error", optimizer=SGD())
model.fit([X[:, :2], X[:, -2:]], y)
return ModelWithData(model=model, inference_data=X)
@pytest.fixture
def model_path(tmp_path):
return os.path.join(tmp_path, "model")
def test_scoring_server_responds_to_malformed_json_input_with_error_code_and_message(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
malformed_json_content = "this is,,,, not valid json"
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=malformed_json_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
response_json = json.loads(response.content)
assert response_json.get("error_code") == ErrorCode.Name(BAD_REQUEST)
message = response_json.get("message")
expected_message = "Invalid input. Ensure that input is a valid JSON formatted string."
assert expected_message in message
def test_scoring_server_responds_to_invalid_json_format_with_error_code_and_message(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
for not_a_dict_content in [1, "1", [1]]:
incorrect_json_content = json.dumps(not_a_dict_content)
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=incorrect_json_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
response_json = json.loads(response.content)
assert response_json.get("error_code") == ErrorCode.Name(BAD_REQUEST)
assert "message" in response_json
message = response_json.get("message")
assert "The input must be a JSON dictionary with exactly one of the input fields" in message
for incorrect_format in [
{"not": "a serialized dataframe"},
{"dataframe_records": [], "dataframe_split": {"data": []}},
]:
incorrect_json_content = json.dumps(incorrect_format)
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=incorrect_json_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
response_json = json.loads(response.content)
assert response_json.get("error_code") == ErrorCode.Name(BAD_REQUEST)
message = response_json.get("message")
assert "The input must be a JSON dictionary with exactly one of the input fields" in message
def test_scoring_server_responds_to_invalid_pandas_input_format_with_stacktrace_and_error_code(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
pdf = pd.DataFrame(sklearn_model.inference_data)
wrong_records_content = json.dumps({"dataframe_records": pdf.to_dict(orient="split")})
wrong_split_content = json.dumps({"dataframe_split": pdf.to_dict(orient="records")})
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=wrong_split_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
response_json = json.loads(response.content)
assert response_json.get("error_code") == ErrorCode.Name(BAD_REQUEST)
message = response_json.get("message")
assert "Dataframe split format must be a dictionary. Got list" in message
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=wrong_records_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
response_json = json.loads(response.content)
assert response_json.get("error_code") == ErrorCode.Name(BAD_REQUEST)
message = response_json.get("message")
assert "Dataframe records format must be a list of records. Got dictionary." in message
def test_scoring_server_responds_to_invalid_dataframe_with_stacktrace_and_error_code(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
invalid_dataframe_content = json.dumps(
{"dataframe_split": {"index": [1, 2], "data": [[1], [2], [3]]}}
)
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=invalid_dataframe_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
response_json = json.loads(response.content)
assert response_json.get("error_code") == ErrorCode.Name(BAD_REQUEST)
message = response_json.get("message")
assert "Provided dataframe_split field is not a valid dataframe representation" in message
def test_scoring_server_responds_to_incompatible_inference_dataframe_with_stacktrace_and_error_code(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
incompatible_df = pd.DataFrame(np.array(range(10)))
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=incompatible_df,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
response_json = json.loads(response.content)
assert "error_code" in response_json
assert response_json["error_code"] == ErrorCode.Name(BAD_REQUEST)
assert "message" in response_json
assert "stack_trace" in response_json
def test_scoring_server_responds_to_invalid_csv_input_with_stacktrace_and_error_code(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
# Any empty string is not valid pandas CSV
incorrect_csv_content = ""
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=incorrect_csv_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_CSV,
)
response_json = json.loads(response.content)
assert "error_code" in response_json
assert response_json["error_code"] == ErrorCode.Name(BAD_REQUEST)
assert "message" in response_json
assert "stack_trace" in response_json
def test_scoring_server_successfully_evaluates_correct_dataframes_with_pandas_records_orientation(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
pandas_record_content = json.dumps(
{"dataframe_records": pd.DataFrame(sklearn_model.inference_data).to_dict(orient="records")}
)
response_records_content_type = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=pandas_record_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
expect_status_code(response_records_content_type, 200)
# Testing the charset parameter
response_records_content_type = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=pandas_record_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON + "; charset=UTF-8",
)
expect_status_code(response_records_content_type, 200)
def test_scoring_server_successfully_evaluates_correct_dataframes_with_pandas_split_orientation(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
pandas_split_content = json.dumps(
{"dataframe_split": pd.DataFrame(sklearn_model.inference_data).to_dict(orient="split")}
)
# Testing the charset parameter
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=pandas_split_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON + "; charset=UTF-8",
)
expect_status_code(response, 200)
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=pandas_split_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
expect_status_code(response, 200)
def test_scoring_server_responds_to_invalid_content_type_request_with_unsupported_content_type_code(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
pandas_split_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="split")
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=pandas_split_content,
content_type="not_a_supported_content_type",
)
expect_status_code(response, 415)
def test_scoring_server_responds_to_invalid_content_type_request_with_unrecognized_content_param(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
pandas_split_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="split")
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=pandas_split_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON + "; something=something",
)
expect_status_code(response, 415)
def test_scoring_server_successfully_evaluates_correct_tf_serving_sklearn(
sklearn_model, model_path
):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
inp_dict = {"instances": sklearn_model.inference_data.tolist()}
response_records_content_type = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=json.dumps(inp_dict),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
expect_status_code(response_records_content_type, 200)
def test_scoring_server_successfully_evaluates_correct_tf_serving_keras_instances(
keras_model, model_path
):
mlflow.tensorflow.save_model(keras_model.model, path=model_path)
inp_dict = {
"instances": [
{"a": a.tolist(), "b": b.tolist()}
for (a, b) in zip(keras_model.inference_data[:, :2], keras_model.inference_data[:, -2:])
]
}
response_records_content_type = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=json.dumps(inp_dict),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
expect_status_code(response_records_content_type, 200)
def test_scoring_server_successfully_evaluates_correct_tf_serving_keras_inputs(
keras_model, model_path
):
mlflow.tensorflow.save_model(keras_model.model, path=model_path)
inp_dict = {
"inputs": {
"a": keras_model.inference_data[:, :2].tolist(),
"b": keras_model.inference_data[:, -2:].tolist(),
}
}
response_records_content_type = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=json.dumps(inp_dict),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
expect_status_code(response_records_content_type, 200)
def test_parse_json_input_records_oriented():
size = 2
data = {
"col_m": [random_int(0, 1000) for _ in range(size)],
"col_z": [random_str() for _ in range(size)],
"col_a": [random_int() for _ in range(size)],
}
p1 = pd.DataFrame.from_dict(data)
records_content = json.dumps({"dataframe_records": p1.to_dict(orient="records")})
records_content, _ = pyfunc_scoring_server._split_data_and_params(records_content)
p2 = pyfunc_scoring_server.infer_and_parse_data(records_content)
# "records" orient may shuffle column ordering. Hence comparing each column Series
for col in data:
assert all(p1[col] == p2[col])
def test_parse_json_input_split_oriented():
size = 200
data = {
"col_m": [random_int(0, 1000) for _ in range(size)],
"col_z": [random_str() for _ in range(size)],
"col_a": [random_int() for _ in range(size)],
}
p1 = pd.DataFrame.from_dict(data)
split_content = json.dumps({"dataframe_split": p1.to_dict(orient="split")})
split_content, _ = pyfunc_scoring_server._split_data_and_params(split_content)
p2 = pyfunc_scoring_server.infer_and_parse_data(split_content)
assert all(p1 == p2)
def test_records_oriented_json_to_df():
# test that datatype for "zip" column is not converted to "int64"
jstr = """
{
"dataframe_records": [
{"zip":"95120","cost":10.45,"score":8},
{"zip":"95128","cost":23.0,"score":0},
{"zip":"95128","cost":12.1,"score":10}
]
}
"""
jstr, _ = pyfunc_scoring_server._split_data_and_params(jstr)
df = pyfunc_scoring_server.infer_and_parse_data(jstr)
assert set(df.columns) == {"zip", "cost", "score"}
assert {str(dt) for dt in df.dtypes} == {"object", "float64", "int64"}
def _shuffle_pdf(pdf):
cols = list(pdf.columns)
random.shuffle(cols)
return pdf[cols]
def test_split_oriented_json_to_df():
# test that datatype for "zip" column is not converted to "int64"
jstr = """
{
"dataframe_split": {
"columns":["zip","cost","count"],
"index":[0,1,2],
"data":[["95120",10.45,-8],["95128",23.0,-1],["95128",12.1,1000]]
}
}
"""
jstr, _ = pyfunc_scoring_server._split_data_and_params(jstr)
df = pyfunc_scoring_server.infer_and_parse_data(jstr)
assert set(df.columns) == {"zip", "cost", "count"}
assert {str(dt) for dt in df.dtypes} == {"object", "float64", "int64"}
def test_parse_with_schema_csv(pandas_df_with_csv_types):
schema = Schema([ColSpec(c, c) for c in pandas_df_with_csv_types.columns])
df = _shuffle_pdf(pandas_df_with_csv_types)
csv_str = df.to_csv(index=False)
df = pyfunc_scoring_server.parse_csv_input(StringIO(csv_str), schema=schema)
assert schema == infer_signature(df[schema.input_names()]).inputs
def test_parse_with_schema(pandas_df_with_all_types):
schema = Schema([ColSpec(c, c) for c in pandas_df_with_all_types.columns])
df = _shuffle_pdf(pandas_df_with_all_types)
json_str = json.dumps({"dataframe_split": df.to_dict(orient="split")}, cls=NumpyEncoder)
json_str, _ = pyfunc_scoring_server._split_data_and_params(json_str)
df = pyfunc_scoring_server.infer_and_parse_data(json_str, schema=schema)
json_str = json.dumps({"dataframe_records": df.to_dict(orient="records")}, cls=NumpyEncoder)
json_str, _ = pyfunc_scoring_server._split_data_and_params(json_str)
df = pyfunc_scoring_server.infer_and_parse_data(json_str, schema=schema)
assert schema == infer_signature(df[schema.input_names()]).inputs
# The current behavior with pandas json parse with type hints is weird. In some cases, the
# types are forced ignoring overflow and loss of precision:
bad_df = """
{
"dataframe_split": {
"columns":["bad_integer", "bad_float", "bad_string", "bad_boolean"],
"data":[
[9007199254740991.0, 1.1, 1, 1.5],
[9007199254740992.0, 9007199254740992.0, 2, 0],
[9007199254740994.0, 3.3, 3, "some arbitrary string"]
]
}
}
"""
schema = Schema(
[
ColSpec("integer", "bad_integer"),
ColSpec("float", "bad_float"),
ColSpec("string", "bad_string"),
ColSpec("boolean", "bad_boolean"),
]
)
bad_df, _ = pyfunc_scoring_server._split_data_and_params(bad_df)
df = pyfunc_scoring_server.infer_and_parse_data(bad_df, schema=schema)
# Unfortunately, the current behavior of pandas parse is to force numbers to int32 even if
# they don't fit:
assert df["bad_integer"].dtype == np.int32
assert all(df["bad_integer"] == [-2147483648, -2147483648, -2147483648])
# The same goes for floats:
assert df["bad_float"].dtype == np.float32
assert all(df["bad_float"] == np.array([1.1, 9007199254740992, 3.3], dtype=np.float32))
# However bad string is recognized as int64:
assert all(df["bad_string"] == np.array([1, 2, 3], dtype=object))
# Boolean is forced - zero and empty string is false, everything else is true:
assert df["bad_boolean"].dtype == bool
assert all(df["bad_boolean"] == [True, False, True])
def test_serving_model_with_schema(pandas_df_with_all_types):
class TestModel(PythonModel):
def predict(self, context, model_input, params=None):
return [[k, str(v)] for k, v in model_input.dtypes.items()]
schema = Schema([ColSpec(c, c) for c in pandas_df_with_all_types.columns])
df = _shuffle_pdf(pandas_df_with_all_types)
with TempDir(chdr=True):
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model", python_model=TestModel(), signature=ModelSignature(schema)
)
response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=json.dumps({"dataframe_split": df.to_dict(orient="split")}, cls=NumpyEncoder),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
response_json = json.loads(response.content)["predictions"]
# objects are not converted to pandas Strings at the moment
expected_types = {**pandas_df_with_all_types.dtypes, "string": np.dtype(object)}
assert response_json == [[k, str(v)] for k, v in expected_types.items()]
response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=json.dumps(
{"dataframe_records": pandas_df_with_all_types.to_dict(orient="records")},
cls=NumpyEncoder,
),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
response_json = json.loads(response.content)["predictions"]
assert response_json == [[k, str(v)] for k, v in expected_types.items()]
# Test 'inputs' format
response = pyfunc_serve_and_score_model(
model_uri=model_info.model_uri,
data=json.dumps({"inputs": df.to_dict(orient="list")}, cls=NumpyEncoder),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
response_json = json.loads(response.content)["predictions"]
assert response_json == [[k, str(v)] for k, v in expected_types.items()]
def test_serving_model_with_param_schema(sklearn_model, model_path):
dataframe = {
"dataframe_split": pd.DataFrame(sklearn_model.inference_data).to_dict(orient="split")
}
signature = infer_signature(sklearn_model.inference_data)
param_schema = ParamSchema(
[ParamSpec("param1", DataType.datetime, np.datetime64("2023-07-01"))]
)
signature.params = param_schema
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path, signature=signature)
# Success if passing no parameters
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=json.dumps(dataframe),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON + "; charset=UTF-8",
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
# Raise error if invalid value is passed
payload = dataframe.copy()
payload.update({"params": {"param1": "invalid_value1"}})
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=json.dumps(payload),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON + "; charset=UTF-8",
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 400)
assert (
" Failed to convert value `invalid_value1` from type `<class 'str'>` "
"to `DataType.datetime`" in json.loads(response.content.decode("utf-8"))["message"]
)
# Ignore parameters specified in payload if it is not defined in ParamSchema
payload = dataframe.copy()
payload.update({"params": {"invalid_param": "value"}})
response = pyfunc_serve_and_score_model(
model_uri=os.path.abspath(model_path),
data=json.dumps(payload),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON + "; charset=UTF-8",
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
def test_get_jsonnable_obj():
py_ary = [["a", "b", "c"], ["e", "f", "g"]]
np_ary = _get_jsonable_obj(np.array(py_ary))
assert json.dumps(py_ary, cls=NumpyEncoder) == json.dumps(np_ary, cls=NumpyEncoder)
np_ary = _get_jsonable_obj(np.array(py_ary, dtype=type(str)))
assert json.dumps(py_ary, cls=NumpyEncoder) == json.dumps(np_ary, cls=NumpyEncoder)
def test_numpy_encoder_for_pydantic():
class Message(pydantic.BaseModel):
role: str
content: str
class Messages(pydantic.BaseModel):
messages: list[Message]
messages = Messages(
messages=[Message(role="user", content="hello!"), Message(role="assistant", content="hi!")]
)
msg_dict = messages.model_dump()
assert json.dumps(_get_jsonable_obj(messages), cls=NumpyEncoder) == json.dumps(
msg_dict, cls=NumpyEncoder
)
def test_parse_json_input_including_path():
class TestModel(PythonModel):
def predict(self, context, model_input, params=None):
return 1
with mlflow.start_run() as run:
mlflow.pyfunc.log_model(name="model", python_model=TestModel())
pandas_split_content = pd.DataFrame(
{
"url": ["http://foo.com", "https://bar.com"],
"bad_protocol": ["aaa://bbb", "address:/path"],
}
)
response_records_content_type = pyfunc_serve_and_score_model(
model_uri=f"runs:/{run.info.run_id}/model",
data=pandas_split_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
)
expect_status_code(response_records_content_type, 200)
@pytest.mark.parametrize(
("args", "expected", "timeout"),
[
(
{"port": 5000, "host": "0.0.0.0", "nworkers": 4, "timeout": 60},
"--host 0.0.0.0 --port 5000 --workers 4",
"60",
),
(
{"host": "0.0.0.0", "nworkers": 4, "timeout": 60},
"--host 0.0.0.0 --workers 4",
"60",
),
(
{"port": 5000, "nworkers": 4, "timeout": 60},
"--port 5000 --workers 4",
"60",
),
({"nworkers": 4, "timeout": 60}, "--workers 4", "60"),
({"timeout": 30}, "", "30"),
],
)
def test_get_cmd(args: dict[str, Any], expected: str, timeout: str):
cmd, env = get_cmd(model_uri="foo", **args)
assert cmd == (f"uvicorn {expected} mlflow.pyfunc.scoring_server.app:app")
assert env[MLFLOW_SCORING_SERVER_REQUEST_TIMEOUT.name] == timeout
def test_scoring_server_client(sklearn_model, model_path):
from mlflow.models.flavor_backend_registry import get_flavor_backend
from mlflow.pyfunc.scoring_server.client import ScoringServerClient
from mlflow.utils import find_free_port
mlflow.sklearn.save_model(
sk_model=sklearn_model.model, path=model_path, metadata={"metadata_key": "value"}
)
expected_result = sklearn_model.model.predict(sklearn_model.inference_data)
port = find_free_port()
timeout = 60
server_proc = None
try:
server_proc = get_flavor_backend(
model_path, env_manager=_EnvManager.CONDA, workers=1, install_mlflow=False
).serve(
model_uri=model_path,
port=port,
host="127.0.0.1",
timeout=timeout,
enable_mlserver=False,
synchronous=False,
)
client = ScoringServerClient(host="127.0.0.1", port=port)
client.wait_server_ready()
data = pd.DataFrame(sklearn_model.inference_data)
result = client.invoke(data).get_predictions().to_numpy()[:, 0]
np.testing.assert_allclose(result, expected_result, rtol=1e-5)
version = client.get_version()
assert version == VERSION
finally:
if server_proc is not None:
os.kill(server_proc.pid, signal.SIGTERM)
_LLM_CHAT_INPUT_SCHEMA = Schema(
[
ColSpec(
Array(
Object(
[
Property("role", DataType.string),
Property("content", DataType.string),
]
),
),
name="messages",
)
]
)
@pytest.mark.parametrize(
("signature", "expected_model_input", "expected_params"),
[
# Test case: no signature, everything should go to data
(
None,
{
"messages": [{"role": "user", "content": "hello!"}],
"max_tokens": 20,
"temperature": 0.5,
},
{},
),
# Test case: signature with params, split params and data
(
ModelSignature(
inputs=_LLM_CHAT_INPUT_SCHEMA,
params=ParamSchema(
[
ParamSpec("temperature", DataType.double, default=0.5),
ParamSpec("max_tokens", DataType.integer, default=20),
ParamSpec("top_p", DataType.double, default=0.9),
]
),
),
{
"messages": [{"role": "user", "content": "hello!"}],
},
{
"temperature": 0.5,
"max_tokens": 20,
"top_p": 0.9, # filled with the default value
},
),
# Test case: if some params are not defined in either input and params schema,
# they will be dropped
(
ModelSignature(
inputs=_LLM_CHAT_INPUT_SCHEMA,
params=ParamSchema(
[
ParamSpec("temperature", DataType.double, default=0.5),
]
),
),
{
"messages": [{"role": "user", "content": "hello!"}],
},
{
# only params defined in the schema are passed
"temperature": 0.5,
},
),
# Test case: params can be defined in the input schema
(
ModelSignature(
inputs=Schema(
[
*_LLM_CHAT_INPUT_SCHEMA.inputs,
ColSpec(DataType.long, "max_tokens", required=False),
ColSpec(DataType.double, "temperature", required=False),
]
),
),
{
"messages": [{"role": "user", "content": "hello!"}],
"temperature": 0.5,
"max_tokens": 20,
},
{},
),
],
)
def test_scoring_server_allows_payloads_with_llm_chat_keys_for_pyfunc(
model_path, signature, expected_model_input, expected_params
):
mlflow.pyfunc.save_model(model_path, python_model=MyChatLLM(), signature=signature)
payload = json.dumps(
{
"messages": [{"role": "user", "content": "hello!"}],
"temperature": 0.5,
"max_tokens": 20,
}
)
response = pyfunc_serve_and_score_model(
model_uri=model_path,
data=payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
assert json.loads(response.content)["choices"][0]["message"]["content"] == "hello!"
assert json.loads(response.content)["model_input"] == expected_model_input
assert json.loads(response.content)["params"] == expected_params
_LLM_COMPLETIONS_INPUT_SCHEMA = Schema(
[
ColSpec(
DataType.string,
name="prompt",
)
]
)
@pytest.mark.parametrize(
("signature", "expected_model_input", "expected_params"),
[
# Test case: no signature, everything should go to data
(
None,
{
"prompt": "hello!",
"max_tokens": 20,
"temperature": 0.5,
},
{},
),
# Test case: signature with params, split params and data
(
ModelSignature(
inputs=_LLM_COMPLETIONS_INPUT_SCHEMA,
params=ParamSchema(
[
ParamSpec("temperature", DataType.double, default=0.5),
ParamSpec("max_tokens", DataType.integer, default=20),
ParamSpec("top_p", DataType.double, default=0.9),
]
),
),
{
"prompt": "hello!",
},
{
"temperature": 0.5,
"max_tokens": 20,
"top_p": 0.9, # filled with the default value
},
),
],
)
def test_scoring_server_allows_payloads_with_llm_completions_keys_for_pyfunc(
model_path, signature, expected_model_input, expected_params
):
mlflow.pyfunc.save_model(model_path, python_model=MyCompletionsLLM(), signature=signature)
payload = json.dumps(
{
"prompt": "hello!",
"temperature": 0.5,
"max_tokens": 20,
}
)
response = pyfunc_serve_and_score_model(
model_uri=model_path,
data=payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
assert json.loads(response.content)["choices"][0]["text"] == "hello!"
assert json.loads(response.content)["model_input"] == expected_model_input
assert json.loads(response.content)["params"] == expected_params
_LLM_EMBEDDINGS_INPUT_SCHEMA = Schema(
[
ColSpec(
DataType.string,
name="input",
)
]
)
@pytest.mark.parametrize(
("signature", "expected_model_input", "expected_params"),
[
# Test case: no signature, everything should go to data
(
None,
{
"input": "hello!",
"random": "test",
},
{},
),
# Test case: signature with no params accepted, ignores params
(
ModelSignature(
inputs=_LLM_EMBEDDINGS_INPUT_SCHEMA,
),
{
"input": "hello!",
},
{},
),
],
)
def test_scoring_server_allows_payloads_with_llm_embeddings_keys_for_pyfunc(
model_path, signature, expected_model_input, expected_params
):
mlflow.pyfunc.save_model(model_path, python_model=MyEmbeddingsLLM(), signature=signature)
payload = json.dumps(
{
"input": "hello!",
"random": "test",
}
)
response = pyfunc_serve_and_score_model(
model_uri=model_path,
data=payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
assert json.loads(response.content)["data"][0]["embedding"] == [0.1, 0.2, 0.3]
assert json.loads(response.content)["model_input"] == expected_model_input
assert json.loads(response.content)["params"] == expected_params
def test_scoring_server_allows_payloads_with_messages_for_pyfunc_wrapped(model_path):
sklearn_path = model_path + "-sklearn"
build_and_save_sklearn_model(sklearn_path)
# wrapped pyfuncs count as pyfuncs (sklearn is not present in model.metadata.flavors)
class WrappedSklearn(PythonModel):
def load_context(self, context):
self.model = mlflow.pyfunc.load_model(context.artifacts["model_path"])
# note: model_input is the value of "messages", not a dict
def predict(self, context, model_input):
weird_but_valid_parse = [json.loads(model_input["messages"][0]["content"])]
return self.model.predict(weird_but_valid_parse)
mlflow.pyfunc.save_model(
model_path, python_model=WrappedSklearn(), artifacts={"model_path": sklearn_path}
)
payload = json.dumps(
{
"messages": [{"role": "user", "content": "[2,2,2,2]"}],
"max_tokens": 20,
}
)
response = pyfunc_serve_and_score_model(
model_uri=model_path,
data=payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
expect_status_code(response, 200)
@pytest.mark.parametrize(
("dict_input", "param_schema", "expected"),
[
(
# no param signature, everything should go
# to data no params should get dropped
{"messages": ["test"], "max_tokens": 20, "random": "test"},
None,
({"messages": ["test"], "max_tokens": 20, "random": "test"}, {}),
),
(
# params defined in the param schema should go to params
# rest should go to data
{"messages": ["test"], "max_tokens": 20, "random": "test"},
ParamSchema(
[
ParamSpec("max_tokens", DataType.integer, default=20),
]
),
({"messages": ["test"], "random": "test"}, {"max_tokens": 20}),
),
],
)
def test_split_data_and_params_for_llm_input(dict_input, param_schema, expected):
data, params = pyfunc_scoring_server._split_data_and_params_for_llm_input(
dict_input, param_schema
)
expected_data, expected_params = expected
assert data == expected_data
assert params == expected_params
| MyEmbeddingsLLM |
python | openai__openai-python | src/openai/types/upload_complete_params.py | {
"start": 248,
"end": 527
} | class ____(TypedDict, total=False):
part_ids: Required[SequenceNotStr[str]]
"""The ordered list of Part IDs."""
md5: str
"""
The optional md5 checksum for the file contents to verify if the bytes uploaded
matches what you expect.
"""
| UploadCompleteParams |
python | aimacode__aima-python | search.py | {
"start": 34951,
"end": 42212
} | class ____:
"""A graph connects nodes (vertices) by edges (links). Each edge can also
have a length associated with it. The constructor call is something like:
g = Graph({'A': {'B': 1, 'C': 2})
this makes a graph with 3 nodes, A, B, and C, with an edge of length 1 from
A to B, and an edge of length 2 from A to C. You can also do:
g = Graph({'A': {'B': 1, 'C': 2}, directed=False)
This makes an undirected graph, so inverse links are also added. The graph
stays undirected; if you add more links with g.connect('B', 'C', 3), then
inverse link is also added. You can use g.nodes() to get a list of nodes,
g.get('A') to get a dict of links out of A, and g.get('A', 'B') to get the
length of the link from A to B. 'Lengths' can actually be any object at
all, and nodes can be any hashable object."""
def __init__(self, graph_dict=None, directed=True):
self.graph_dict = graph_dict or {}
self.directed = directed
if not directed:
self.make_undirected()
def make_undirected(self):
"""Make a digraph into an undirected graph by adding symmetric edges."""
for a in list(self.graph_dict.keys()):
for (b, dist) in self.graph_dict[a].items():
self.connect1(b, a, dist)
def connect(self, A, B, distance=1):
"""Add a link from A and B of given distance, and also add the inverse
link if the graph is undirected."""
self.connect1(A, B, distance)
if not self.directed:
self.connect1(B, A, distance)
def connect1(self, A, B, distance):
"""Add a link from A to B of given distance, in one direction only."""
self.graph_dict.setdefault(A, {})[B] = distance
def get(self, a, b=None):
"""Return a link distance or a dict of {node: distance} entries.
.get(a,b) returns the distance or None;
.get(a) returns a dict of {node: distance} entries, possibly {}."""
links = self.graph_dict.setdefault(a, {})
if b is None:
return links
else:
return links.get(b)
def nodes(self):
"""Return a list of nodes in the graph."""
s1 = set([k for k in self.graph_dict.keys()])
s2 = set([k2 for v in self.graph_dict.values() for k2, v2 in v.items()])
nodes = s1.union(s2)
return list(nodes)
def UndirectedGraph(graph_dict=None):
"""Build a Graph where every edge (including future ones) goes both ways."""
return Graph(graph_dict=graph_dict, directed=False)
def RandomGraph(nodes=list(range(10)), min_links=2, width=400, height=300,
curvature=lambda: random.uniform(1.1, 1.5)):
"""Construct a random graph, with the specified nodes, and random links.
The nodes are laid out randomly on a (width x height) rectangle.
Then each node is connected to the min_links nearest neighbors.
Because inverse links are added, some nodes will have more connections.
The distance between nodes is the hypotenuse times curvature(),
where curvature() defaults to a random number between 1.1 and 1.5."""
g = UndirectedGraph()
g.locations = {}
# Build the cities
for node in nodes:
g.locations[node] = (random.randrange(width), random.randrange(height))
# Build roads from each city to at least min_links nearest neighbors.
for i in range(min_links):
for node in nodes:
if len(g.get(node)) < min_links:
here = g.locations[node]
def distance_to_node(n):
if n is node or g.get(node, n):
return np.inf
return distance(g.locations[n], here)
neighbor = min(nodes, key=distance_to_node)
d = distance(g.locations[neighbor], here) * curvature()
g.connect(node, neighbor, int(d))
return g
""" [Figure 3.2]
Simplified road map of Romania
"""
romania_map = UndirectedGraph(dict(
Arad=dict(Zerind=75, Sibiu=140, Timisoara=118),
Bucharest=dict(Urziceni=85, Pitesti=101, Giurgiu=90, Fagaras=211),
Craiova=dict(Drobeta=120, Rimnicu=146, Pitesti=138),
Drobeta=dict(Mehadia=75),
Eforie=dict(Hirsova=86),
Fagaras=dict(Sibiu=99),
Hirsova=dict(Urziceni=98),
Iasi=dict(Vaslui=92, Neamt=87),
Lugoj=dict(Timisoara=111, Mehadia=70),
Oradea=dict(Zerind=71, Sibiu=151),
Pitesti=dict(Rimnicu=97),
Rimnicu=dict(Sibiu=80),
Urziceni=dict(Vaslui=142)))
romania_map.locations = dict(
Arad=(91, 492), Bucharest=(400, 327), Craiova=(253, 288),
Drobeta=(165, 299), Eforie=(562, 293), Fagaras=(305, 449),
Giurgiu=(375, 270), Hirsova=(534, 350), Iasi=(473, 506),
Lugoj=(165, 379), Mehadia=(168, 339), Neamt=(406, 537),
Oradea=(131, 571), Pitesti=(320, 368), Rimnicu=(233, 410),
Sibiu=(207, 457), Timisoara=(94, 410), Urziceni=(456, 350),
Vaslui=(509, 444), Zerind=(108, 531))
""" [Figure 4.9]
Eight possible states of the vacumm world
Each state is represented as
* "State of the left room" "State of the right room" "Room in which the agent
is present"
1 - DDL Dirty Dirty Left
2 - DDR Dirty Dirty Right
3 - DCL Dirty Clean Left
4 - DCR Dirty Clean Right
5 - CDL Clean Dirty Left
6 - CDR Clean Dirty Right
7 - CCL Clean Clean Left
8 - CCR Clean Clean Right
"""
vacuum_world = Graph(dict(
State_1=dict(Suck=['State_7', 'State_5'], Right=['State_2']),
State_2=dict(Suck=['State_8', 'State_4'], Left=['State_2']),
State_3=dict(Suck=['State_7'], Right=['State_4']),
State_4=dict(Suck=['State_4', 'State_2'], Left=['State_3']),
State_5=dict(Suck=['State_5', 'State_1'], Right=['State_6']),
State_6=dict(Suck=['State_8'], Left=['State_5']),
State_7=dict(Suck=['State_7', 'State_3'], Right=['State_8']),
State_8=dict(Suck=['State_8', 'State_6'], Left=['State_7'])
))
""" [Figure 4.23]
One-dimensional state space Graph
"""
one_dim_state_space = Graph(dict(
State_1=dict(Right='State_2'),
State_2=dict(Right='State_3', Left='State_1'),
State_3=dict(Right='State_4', Left='State_2'),
State_4=dict(Right='State_5', Left='State_3'),
State_5=dict(Right='State_6', Left='State_4'),
State_6=dict(Left='State_5')
))
one_dim_state_space.least_costs = dict(
State_1=8,
State_2=9,
State_3=2,
State_4=2,
State_5=4,
State_6=3)
""" [Figure 6.1]
Principal states and territories of Australia
"""
australia_map = UndirectedGraph(dict(
T=dict(),
SA=dict(WA=1, NT=1, Q=1, NSW=1, V=1),
NT=dict(WA=1, Q=1),
NSW=dict(Q=1, V=1)))
australia_map.locations = dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
Q=(145, 20), NSW=(145, 32), T=(145, 42),
V=(145, 37))
| Graph |
python | ansible__ansible | test/units/module_utils/facts/test_facts.py | {
"start": 4752,
"end": 4945
} | class ____(BaseTestFactsPlatform):
platform_id = 'Generic_BSD_Ifconfig'
fact_class = network.generic_bsd.GenericBsdIfconfigNetwork
collector_class = None
| TestGenericBsdIfconfigNetwork |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/core/basic_auth.py | {
"start": 132,
"end": 1418
} | class ____(AirflowAuthBackend):
"""A :py:class:`dagster_airlift.core.AirflowAuthBackend` that authenticates using basic auth.
Args:
webserver_url (str): The URL of the webserver.
username (str): The username to authenticate with.
password (str): The password to authenticate with.
Examples:
Creating a :py:class:`AirflowInstance` using this backend.
.. code-block:: python
from dagster_airlift.core import AirflowInstance, AirflowBasicAuthBackend
af_instance = AirflowInstance(
name="my-instance",
auth_backend=AirflowBasicAuthBackend(
webserver_url="https://my-webserver-hostname",
username="my-username",
password="my-password"
)
)
"""
def __init__(self, webserver_url: str, username: str, password: str):
self._webserver_url = webserver_url
self.username = username
self.password = password
def get_session(self) -> requests.Session:
session = requests.Session()
session.auth = (self.username, self.password)
return session
def get_webserver_url(self) -> str:
return self._webserver_url
| AirflowBasicAuthBackend |
python | pennersr__django-allauth | allauth/mfa/webauthn/internal/auth.py | {
"start": 5767,
"end": 6837
} | class ____:
def __init__(self, instance):
self.instance = instance
@classmethod
def add(cls, user, name: str, credential: dict) -> "WebAuthn":
instance = Authenticator(
user=user,
type=Authenticator.Type.WEBAUTHN,
data={
"name": name,
"credential": credential,
},
)
instance.save()
return cls(instance)
@property
def name(self) -> str:
return self.instance.data["name"]
@name.setter
def name(self, name: str):
self.instance.data["name"] = name
@property
def authenticator_data(self) -> AuthenticatorData:
return parse_registration_response(
self.instance.data["credential"]
).response.attestation_object.auth_data
@property
def is_passwordless(self) -> Optional[bool]:
return (
self.instance.data.get("credential", {})
.get("clientExtensionResults", {})
.get("credProps", {})
.get("rk")
)
| WebAuthn |
python | facelessuser__soupsieve | tests/test_level4/test_host_context.py | {
"start": 57,
"end": 426
} | class ____(util.TestCase):
"""Test host context selectors."""
def test_host_context(self):
"""Test host context (not supported)."""
markup = """<h1>header</h1><div><p>some text</p></div>"""
self.assert_selector(
markup,
":host-context(h1, h2)",
[],
flags=util.HTML
)
| TestHostContext |
python | viewflow__viewflow | viewflow/workflow/migrations/0005_rename_flowcls.py | {
"start": 108,
"end": 397
} | class ____(migrations.Migration):
dependencies = [
("viewflow", "0004_extend_fields_length"),
]
operations = [
migrations.RenameField(
model_name="process",
old_name="flow_cls",
new_name="flow_class",
),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/training/optimizer.py | {
"start": 5727,
"end": 7651
} | class ____(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
# pylint: disable=protected-access
if isinstance(g, indexed_slices.IndexedSlices):
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return optimizer._resource_apply_sparse_duplicate_indices(
g.values, self._v, g.indices)
if context.xla_sharding_for_resource_variables_enabled():
# For each slot variable that is annotated with an XLA sharding, we read
# the variable and assign the value to itself. This is done to trigger the
# creation of an XlaShardingOp when a ReadVariableOp is created upon the
# call to `slot_var.read_value()`. This is needed to ensure that slot
# variables with XLA sharding are sharded correctly. Please see
# b/307541427 for more details.
assign_ops = []
for variable_dict in optimizer._slots.values():
for slot_var in variable_dict.values():
if (
isinstance(slot_var, resource_variable_ops.BaseResourceVariable)
and slot_var._get_xla_sharding() is not None
):
assign_ops.append(slot_var.assign(slot_var.read_value()))
# The assign_ops created above are added as a control dependency for the
# update op to make sure these appear before the update_op.
with ops.control_dependencies(assign_ops):
update_op = optimizer._resource_apply_dense(g, self._v)
else:
update_op = optimizer._resource_apply_dense(g, self._v)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
| _DenseResourceVariableProcessor |
python | encode__django-rest-framework | tests/test_validation.py | {
"start": 616,
"end": 803
} | class ____(generics.RetrieveUpdateDestroyAPIView):
queryset = ValidationModel.objects.all()
serializer_class = ValidationModelSerializer
# Regression for #653
| UpdateValidationModel |
python | django-haystack__django-haystack | test_haystack/mocks.py | {
"start": 4666,
"end": 5266
} | class ____(BaseSearchQuery):
def build_query(self):
return ""
def clean(self, query_fragment):
return query_fragment
# def run_mlt(self):
# # To simulate the chunking behavior of a regular search, return a slice
# # of our results using start/end offset.
# final_query = self.build_query()
# results = self.backend.more_like_this(self._mlt_instance, final_query)
# import pdb; pdb.set_trace()
# self._results = results['results'][self.start_offset:self.end_offset]
# self._hit_count = results['hits']
| MockSearchQuery |
python | kamyu104__LeetCode-Solutions | Python/range-sum-query-mutable.py | {
"start": 89,
"end": 1453
} | class ____(object):
def __init__(self, nums):
"""
initialize your data structure here.
:type nums: List[int]
"""
if not nums:
return
self.__nums = nums
self.__bit = [0] * (len(self.__nums) + 1)
for i in xrange(1, len(self.__bit)):
self.__bit[i] = nums[i-1] + self.__bit[i-1]
for i in reversed(xrange(1, len(self.__bit))):
last_i = i - (i & -i)
self.__bit[i] -= self.__bit[last_i]
def update(self, i, val):
"""
:type i: int
:type val: int
:rtype: int
"""
if val - self.__nums[i]:
self.__add(i, val - self.__nums[i])
self.__nums[i] = val
def sumRange(self, i, j):
"""
sum of elements nums[i..j], inclusive.
:type i: int
:type j: int
:rtype: int
"""
return self.__sum(j) - self.__sum(i-1)
def __sum(self, i):
i += 1
ret = 0
while i > 0:
ret += self.__bit[i]
i -= (i & -i)
return ret
def __add(self, i, val):
i += 1
while i <= len(self.__nums):
self.__bit[i] += val
i += (i & -i)
# Time: ctor: O(n),
# update: O(logn),
# query: O(logn)
# Space: O(n)
# Segment Tree solution.
| NumArray |
python | run-llama__llama_index | llama-index-core/llama_index/core/llama_dataset/evaluator_evaluation.py | {
"start": 500,
"end": 1493
} | class ____(BaseLlamaExamplePrediction):
"""
Evaluation example prediction class.
Args:
feedback (Optional[str]): The evaluator's feedback.
score (Optional[float]): The evaluator's score.
"""
feedback: str = Field(
default_factory=str,
description="The generated (predicted) response that can be compared to a reference (ground-truth) answer.",
)
score: Optional[float] = Field(
default=None,
description="The generated (predicted) response that can be compared to a reference (ground-truth) answer.",
)
invalid_prediction: bool = Field(
default=False, description="Whether or not the prediction is a valid one."
)
invalid_reason: Optional[str] = Field(
default=None, description="Reason as to why prediction is invalid."
)
@property
def class_name(self) -> str:
"""Data example class name."""
return "EvaluatorExamplePrediction"
| EvaluatorExamplePrediction |
python | scrapy__scrapy | tests/test_spidermiddleware_httperror.py | {
"start": 3212,
"end": 4654
} | class ____:
"""Similar test, but with settings"""
@pytest.fixture
def mw(self) -> HttpErrorMiddleware:
crawler = get_crawler(DefaultSpider, {"HTTPERROR_ALLOWED_CODES": (402,)})
crawler.spider = crawler._create_spider()
return HttpErrorMiddleware.from_crawler(crawler)
def test_process_spider_input(
self,
mw: HttpErrorMiddleware,
res200: Response,
res402: Response,
res404: Response,
) -> None:
mw.process_spider_input(res200)
with pytest.raises(HttpError):
mw.process_spider_input(res404)
mw.process_spider_input(res402)
def test_meta_overrides_settings(self, mw: HttpErrorMiddleware) -> None:
request = Request(
"http://scrapytest.org", meta={"handle_httpstatus_list": [404]}
)
res404 = _response(request, 404)
res402 = _response(request, 402)
mw.process_spider_input(res404)
with pytest.raises(HttpError):
mw.process_spider_input(res402)
def test_spider_override_settings(
self, mw: HttpErrorMiddleware, res402: Response, res404: Response
) -> None:
assert mw.crawler.spider
mw.crawler.spider.handle_httpstatus_list = [404] # type: ignore[attr-defined]
mw.process_spider_input(res404)
with pytest.raises(HttpError):
mw.process_spider_input(res402)
| TestHttpErrorMiddlewareSettings |
python | langchain-ai__langchain | libs/core/langchain_core/messages/tool.py | {
"start": 482,
"end": 792
} | class ____:
"""Mixin for objects that tools can return directly.
If a custom BaseTool is invoked with a `ToolCall` and the output of custom code is
not an instance of `ToolOutputMixin`, the output will automatically be coerced to
a string and wrapped in a `ToolMessage`.
"""
| ToolOutputMixin |
python | cython__cython | Cython/Debugger/Tests/test_libcython_in_gdb.py | {
"start": 11364,
"end": 12345
} | class ____(DebugTestCase):
def test_print(self):
self.break_and_run('c = 2')
result = gdb.execute('cy print b', to_string=True)
self.assertEqual('b = (int) 1\n', result)
result = gdb.execute('cy print python_var', to_string=True)
self.assertEqual('python_var = 13\n', result)
result = gdb.execute('cy print c_var', to_string=True)
self.assertEqual('c_var = (int) 12\n', result)
correct_result_test_list_inside_func = '''\
14 int b, c
15
16 b = c = d = 0
17
18 b = 1
> 19 c = 2
20 int(10)
21 puts("spam")
22 os.path.join("foo", "bar")
23 some_c_function()
'''
correct_result_test_list_outside_func = '''\
5 void some_c_function()
6
7 import os
8
9 cdef int c_var = 12
> 10 python_var = 13
11
12 def spam(a=0):
13 cdef:
14 int b, c
'''
| TestPrint |
python | huggingface__transformers | src/transformers/models/wavlm/modular_wavlm.py | {
"start": 17457,
"end": 20579
} | class ____(nn.Module):
"""
Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH
GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information.
"""
def __init__(self, config):
super().__init__()
self.num_groups = config.num_codevector_groups
self.num_vars = config.num_codevectors_per_group
if config.codevector_dim % self.num_groups != 0:
raise ValueError(
f"`config.codevector_dim {config.codevector_dim} must be divisible"
f" by `config.num_codevector_groups` {self.num_groups} "
"for concatenation."
)
# storage for codebook variables (codewords)
self.codevectors = nn.Parameter(
torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
)
self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)
# can be decayed for training
self.temperature = 2
@staticmethod
def _compute_perplexity(probs):
marginal_probs = probs.mean(dim=0)
perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
return perplexity
def forward(self, hidden_states):
batch_size, sequence_length, hidden_size = hidden_states.shape
# project to codevector dim
hidden_states = self.weight_proj(hidden_states)
hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
if self.training:
# sample code vector probs via gumbel in differentiateable way
codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True)
codevector_probs = codevector_probs.type_as(hidden_states)
# compute perplexity
codevector_soft_dist = torch.softmax(
hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
)
perplexity = self._compute_perplexity(codevector_soft_dist)
else:
# take argmax in non-differentiable way
# comptute hard codevector distribution (one hot)
codevector_idx = hidden_states.argmax(dim=-1)
codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
-1, codevector_idx.view(-1, 1), 1.0
)
codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
perplexity = self._compute_perplexity(codevector_probs)
codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
# use probs to retrieve codevectors
codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
return codevectors, perplexity
| WavLMGumbelVectorQuantizer |
python | modin-project__modin | modin/tests/pandas/test_io.py | {
"start": 90247,
"end": 96708
} | class ____:
@pytest.mark.parametrize("read_sql_engine", ["Pandas", "Connectorx"])
def test_read_sql(self, tmp_path, make_sql_connection, read_sql_engine):
filename = get_unique_filename(".db")
table = "test_read_sql"
conn = make_sql_connection(tmp_path / filename, table)
query = f"select * from {table}"
eval_io(
fn_name="read_sql",
# read_sql kwargs
sql=query,
con=conn,
)
eval_io(
fn_name="read_sql",
# read_sql kwargs
sql=query,
con=conn,
index_col="index",
)
with warns_that_defaulting_to_pandas_if(not current_execution_is_native()):
pd.read_sql_query(query, conn)
with warns_that_defaulting_to_pandas_if(not current_execution_is_native()):
pd.read_sql_table(table, conn)
# Test SQLAlchemy engine
sqlalchemy_engine = sa.create_engine(conn)
eval_io(
fn_name="read_sql",
# read_sql kwargs
sql=query,
con=sqlalchemy_engine,
)
# Test SQLAlchemy Connection
sqlalchemy_connection = sqlalchemy_engine.connect()
eval_io(
fn_name="read_sql",
# read_sql kwargs
sql=query,
con=sqlalchemy_connection,
)
old_sql_engine = ReadSqlEngine.get()
ReadSqlEngine.put(read_sql_engine)
if ReadSqlEngine.get() == "Connectorx":
modin_df = pd.read_sql(sql=query, con=conn)
else:
modin_df = pd.read_sql(
sql=query, con=ModinDatabaseConnection("sqlalchemy", conn)
)
ReadSqlEngine.put(old_sql_engine)
pandas_df = pandas.read_sql(sql=query, con=sqlalchemy_connection)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize(
"dtype_backend", [lib.no_default, "numpy_nullable", "pyarrow"]
)
def test_read_sql_dtype_backend(self, tmp_path, make_sql_connection, dtype_backend):
filename = get_unique_filename(extension="db")
table = "test_read_sql_dtype_backend"
conn = make_sql_connection(tmp_path / filename, table)
query = f"select * from {table}"
def comparator(df1, df2):
df_equals(df1, df2)
df_equals(df1.dtypes, df2.dtypes)
eval_io(
fn_name="read_sql",
# read_sql kwargs
sql=query,
con=conn,
dtype_backend=dtype_backend,
comparator=comparator,
)
@pytest.mark.skipif(
not TestReadFromSqlServer.get(),
reason="Skip the test when the test SQL server is not set up.",
)
def test_read_sql_from_sql_server(self):
table_name = "test_1000x256"
query = f"SELECT * FROM {table_name}"
sqlalchemy_connection_string = (
"mssql+pymssql://sa:Strong.Pwd-123@0.0.0.0:1433/master"
)
pandas_df_to_read = pandas.DataFrame(
np.arange(
1000 * 256,
).reshape(1000, 256)
).add_prefix("col")
pandas_df_to_read.to_sql(
table_name, sqlalchemy_connection_string, if_exists="replace"
)
modin_df = pd.read_sql(
query,
ModinDatabaseConnection("sqlalchemy", sqlalchemy_connection_string),
)
pandas_df = pandas.read_sql(query, sqlalchemy_connection_string)
df_equals(modin_df, pandas_df)
@pytest.mark.skipif(
not TestReadFromPostgres.get(),
reason="Skip the test when the postgres server is not set up.",
)
def test_read_sql_from_postgres(self):
table_name = "test_1000x256"
query = f"SELECT * FROM {table_name}"
connection = "postgresql://sa:Strong.Pwd-123@localhost:2345/postgres"
pandas_df_to_read = pandas.DataFrame(
np.arange(
1000 * 256,
).reshape(1000, 256)
).add_prefix("col")
pandas_df_to_read.to_sql(table_name, connection, if_exists="replace")
modin_df = pd.read_sql(
query,
ModinDatabaseConnection("psycopg2", connection),
)
pandas_df = pandas.read_sql(query, connection)
df_equals(modin_df, pandas_df)
def test_invalid_modin_database_connections(self):
with pytest.raises(UnsupportedDatabaseException):
ModinDatabaseConnection("unsupported_database")
def test_read_sql_with_chunksize(self, make_sql_connection):
filename = get_unique_filename(extension="db")
table = "test_read_sql_with_chunksize"
conn = make_sql_connection(filename, table)
query = f"select * from {table}"
pandas_gen = pandas.read_sql(query, conn, chunksize=10)
modin_gen = pd.read_sql(query, conn, chunksize=10)
for modin_df, pandas_df in zip(modin_gen, pandas_gen):
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("conn_type", ["str", "sqlalchemy", "sqlalchemy+connect"])
def test_to_sql(self, tmp_path, make_sql_connection, index, conn_type):
table_name = f"test_to_sql_{str(index)}"
modin_df, pandas_df = create_test_dfs(TEST_DATA)
# We do not pass the table name so the fixture won't generate a table
conn = make_sql_connection(tmp_path / f"{table_name}_modin.db")
if conn_type.startswith("sqlalchemy"):
conn = sa.create_engine(conn)
if conn_type == "sqlalchemy+connect":
conn = conn.connect()
modin_df.to_sql(table_name, conn, index=index)
df_modin_sql = pandas.read_sql(
table_name, con=conn, index_col="index" if index else None
)
# We do not pass the table name so the fixture won't generate a table
conn = make_sql_connection(tmp_path / f"{table_name}_pandas.db")
if conn_type.startswith("sqlalchemy"):
conn = sa.create_engine(conn)
if conn_type == "sqlalchemy+connect":
conn = conn.connect()
pandas_df.to_sql(table_name, conn, index=index)
df_pandas_sql = pandas.read_sql(
table_name, con=conn, index_col="index" if index else None
)
assert df_modin_sql.sort_index().equals(df_pandas_sql.sort_index())
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
| TestSql |
python | django-crispy-forms__django-crispy-forms | tests/forms.py | {
"start": 2966,
"end": 3409
} | class ____(BaseModelForm):
class Meta:
"""
before Django1.6, one cannot use __all__ shortcut for fields
without getting the following error:
django.core.exceptions.FieldError: Unknown field(s) (a, l, _) specified for CrispyTestModel
because obviously it casts the string to a set
"""
model = CrispyTestModel
fields = "__all__" # eliminate RemovedInDjango18Warning
| SampleForm4 |
python | realpython__materials | inheritance-and-composition/inheritance/employees.py | {
"start": 138,
"end": 236
} | class ____:
def __init__(self, id, name):
self.id = id
self.name = name
| Employee |
python | run-llama__llama_index | llama-index-integrations/graph_stores/llama-index-graph-stores-falkordb/tests/test_graph_stores_falkordb.py | {
"start": 172,
"end": 2024
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup method called once for the entire test class."""
# Start FalkorDB container
try:
cls.container = docker_client.containers.run(
"falkordb/falkordb:latest",
detach=True,
name="falkordb_test_instance",
ports={"6379/tcp": 6379},
)
time.sleep(2) # Allow time for the container to initialize
except Exception as e:
print(f"Error starting FalkorDB container: {e}")
raise
# Set up the FalkorDB Graph store
cls.graph_store = FalkorDBGraphStore(url="redis://localhost:6379")
@classmethod
def tearDownClass(cls):
"""Teardown method called once after all tests are done."""
try:
cls.container.stop()
cls.container.remove()
except Exception as e:
print(f"Error stopping/removing container: {e}")
def test_base_graph(self):
self.graph_store.upsert_triplet("node1", "related_to", "node2")
# Check if the data has been inserted correctly
result = self.graph_store.get("node1")
expected_result = [
"RELATED_TO",
"node2",
] # Expected data
self.assertIn(expected_result, result)
result = self.graph_store.get_rel_map(["node1"], 1)
self.assertIn(expected_result, result["node1"])
self.graph_store.delete("node1", "related_to", "node2")
result = self.graph_store.get("node1")
expected_result = [] # Expected data
self.assertEqual(expected_result, result)
self.graph_store.switch_graph("new_graph")
self.graph_store.refresh_schema()
if __name__ == "__main__":
unittest.main()
| TestFalkorDBGraphStore |
python | kamyu104__LeetCode-Solutions | Python/kth-smallest-path-xor-sum.py | {
"start": 147,
"end": 1939
} | class ____(object):
def kthSmallest(self, par, vals, queries):
"""
:type par: List[int]
:type vals: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
def small_to_large_merge(sl1, sl2): # Total Time: O(n * (logn)^2)
if len(sl1) < len(sl2):
sl1, sl2 = sl2, sl1
for x in sl2: # each node is merged at most O(logn) times
if x not in sl1:
sl1.add(x) # each add costs O(logn)
return sl1
def iter_dfs():
sl = [SortedList() for _ in xrange(len(adj))]
result = [-1]*len(queries)
stk = [(1, (0, 0))]
while stk:
step, (u, curr) = stk.pop()
if step == 1:
curr ^= vals[u]
sl[u].add(curr)
stk.append((2, (u, curr)))
for v in reversed(adj[u]):
stk.append((1, (v, curr)))
elif step == 2:
for v in adj[u]:
sl[u] = small_to_large_merge(sl[u], sl[v])
for i in lookup[u]: # Total Time: O(qlogn)
if queries[i][1]-1 < len(sl[u]):
result[i] = sl[u][queries[i][1]-1]
return result
adj = [[] for _ in xrange(len(par))]
for u, p in enumerate(par):
if p != -1:
adj[p].append(u)
lookup = [[] for _ in xrange(len(adj))]
for i, (u, _) in enumerate(queries):
lookup[u].append(i)
return iter_dfs()
# Time: O(n * (logn)^2 + qlogn)
# Space: O(n + q)
from sortedcontainers import SortedList
# dfs, small-to-large merging, sorted list
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/backend_tools.py | {
"start": 606,
"end": 2126
} | class ____(enum.IntEnum): # Must subclass int for the macOS backend.
"""Backend-independent cursor types."""
POINTER = enum.auto()
HAND = enum.auto()
SELECT_REGION = enum.auto()
MOVE = enum.auto()
WAIT = enum.auto()
RESIZE_HORIZONTAL = enum.auto()
RESIZE_VERTICAL = enum.auto()
cursors = Cursors # Backcompat.
# _tool_registry, _register_tool_class, and _find_tool_class implement a
# mechanism through which ToolManager.add_tool can determine whether a subclass
# of the requested tool class has been registered (either for the current
# canvas class or for a parent class), in which case that tool subclass will be
# instantiated instead. This is the mechanism used e.g. to allow different
# GUI backends to implement different specializations for ConfigureSubplots.
_tool_registry = set()
def _register_tool_class(canvas_cls, tool_cls=None):
"""Decorator registering *tool_cls* as a tool class for *canvas_cls*."""
if tool_cls is None:
return functools.partial(_register_tool_class, canvas_cls)
_tool_registry.add((canvas_cls, tool_cls))
return tool_cls
def _find_tool_class(canvas_cls, tool_cls):
"""Find a subclass of *tool_cls* registered for *canvas_cls*."""
for canvas_parent in canvas_cls.__mro__:
for tool_child in _api.recursive_subclasses(tool_cls):
if (canvas_parent, tool_child) in _tool_registry:
return tool_child
return tool_cls
# Views positions tool
_views_positions = 'viewpos'
| Cursors |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 45792,
"end": 61149
} | class ____(unittest.TestCase):
def test_sub_factory(self):
class TestModel2(FakeModel):
pass
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 3
class TestModel2Factory(FakeModelFactory):
class Meta:
model = TestModel2
two = factory.SubFactory(TestModelFactory, one=1)
test_model = TestModel2Factory(two__one=4)
self.assertEqual(4, test_model.two.one)
self.assertEqual(1, test_model.id)
self.assertEqual(1, test_model.two.id)
def test_sub_factory_with_lazy_fields(self):
class TestModel2(FakeModel):
pass
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
class TestModel2Factory(FakeModelFactory):
class Meta:
model = TestModel2
two = factory.SubFactory(
TestModelFactory,
one=factory.Sequence(lambda n: 'x%dx' % n),
two=factory.LazyAttribute(lambda o: f'{o.one}{o.one}'),
)
test_model = TestModel2Factory(one=42)
self.assertEqual('x0x', test_model.two.one)
self.assertEqual('x0xx0x', test_model.two.two)
def test_sub_factory_with_lazy_fields_access_factory_parent(self):
class TestModel2(FakeModel):
pass
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 3
class TestModel2Factory(FakeModelFactory):
class Meta:
model = TestModel2
one = 'parent'
child = factory.SubFactory(
TestModelFactory,
one=factory.LazyAttribute(lambda o: '%s child' % o.factory_parent.one),
)
test_model = TestModel2Factory()
self.assertEqual('parent child', test_model.child.one)
def test_sub_factory_and_sequence(self):
class TestObject:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.Sequence(lambda n: int(n))
class WrappingTestObjectFactory(factory.Factory):
class Meta:
model = TestObject
wrapped = factory.SubFactory(TestObjectFactory)
wrapping = WrappingTestObjectFactory.build()
self.assertEqual(0, wrapping.wrapped.one)
wrapping = WrappingTestObjectFactory.build()
self.assertEqual(1, wrapping.wrapped.one)
def test_sub_factory_overriding(self):
class TestObject:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
class OtherTestObject:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class WrappingTestObjectFactory(factory.Factory):
class Meta:
model = OtherTestObject
wrapped = factory.SubFactory(TestObjectFactory, two=2, four=4)
wrapped__two = 4
wrapped__three = 3
wrapping = WrappingTestObjectFactory.build()
self.assertEqual(wrapping.wrapped.two, 4)
self.assertEqual(wrapping.wrapped.three, 3)
self.assertEqual(wrapping.wrapped.four, 4)
def test_sub_factory_deep_overrides(self):
Author = collections.namedtuple('Author', ['name', 'country'])
Book = collections.namedtuple('Book', ['title', 'author'])
Chapter = collections.namedtuple('Chapter', ['book', 'number'])
class AuthorFactory(factory.Factory):
class Meta:
model = Author
name = "John"
country = 'XX'
class BookFactory(factory.Factory):
class Meta:
model = Book
title = "The mighty adventures of nobody."
author = factory.SubFactory(AuthorFactory)
class ChapterFactory(factory.Factory):
class Meta:
model = Chapter
book = factory.SubFactory(BookFactory)
number = factory.Sequence(lambda n: n)
book__author__country = factory.LazyAttribute(lambda o: 'FR')
chapter = ChapterFactory()
self.assertEqual('FR', chapter.book.author.country)
def test_nested_sub_factory(self):
"""Test nested sub-factories."""
class TestObject:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
class WrappingTestObjectFactory(factory.Factory):
class Meta:
model = TestObject
wrapped = factory.SubFactory(TestObjectFactory)
wrapped_bis = factory.SubFactory(TestObjectFactory, one=1)
class OuterWrappingTestObjectFactory(factory.Factory):
class Meta:
model = TestObject
wrap = factory.SubFactory(WrappingTestObjectFactory, wrapped__two=2)
outer = OuterWrappingTestObjectFactory.build()
self.assertEqual(outer.wrap.wrapped.two, 2)
self.assertEqual(outer.wrap.wrapped_bis.one, 1)
def test_nested_sub_factory_with_overridden_sub_factories(self):
"""Test nested sub-factories, with attributes overridden with subfactories."""
class TestObject:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
two = 'two'
class WrappingTestObjectFactory(factory.Factory):
class Meta:
model = TestObject
wrapped = factory.SubFactory(TestObjectFactory)
friend = factory.LazyAttribute(lambda o: o.wrapped.two.four + 1)
class OuterWrappingTestObjectFactory(factory.Factory):
class Meta:
model = TestObject
wrap = factory.SubFactory(
WrappingTestObjectFactory,
wrapped__two=factory.SubFactory(TestObjectFactory, four=4),
)
outer = OuterWrappingTestObjectFactory.build()
self.assertEqual(outer.wrap.wrapped.two.four, 4)
self.assertEqual(outer.wrap.friend, 5)
def test_nested_subfactory_with_override(self):
"""Tests replacing a SubFactory field with an actual value."""
# The test class
class TestObject:
def __init__(self, two='one', wrapped=None):
self.two = two
self.wrapped = wrapped
# Innermost factory
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
two = 'two'
# Intermediary factory
class WrappingTestObjectFactory(factory.Factory):
class Meta:
model = TestObject
wrapped = factory.SubFactory(TestObjectFactory)
wrapped__two = 'three'
obj = TestObject(two='four')
outer = WrappingTestObjectFactory(wrapped=obj)
self.assertEqual(obj, outer.wrapped)
self.assertEqual('four', outer.wrapped.two)
def test_deep_nested_subfactory(self):
counter = iter(range(100))
class Node:
def __init__(self, label, child=None):
self.id = next(counter)
self.label = label
self.child = child
class LeafFactory(factory.Factory):
class Meta:
model = Node
label = 'leaf'
class BranchFactory(factory.Factory):
class Meta:
model = Node
label = 'branch'
child = factory.SubFactory(LeafFactory)
class TreeFactory(factory.Factory):
class Meta:
model = Node
label = 'tree'
child = factory.SubFactory(BranchFactory)
child__child__label = 'magic-leaf'
leaf = LeafFactory()
# Magic corruption did happen here once:
# forcing child__child=X while another part already set another value
# on child__child__label meant that the value passed for child__child
# was merged into the factory's inner declaration dict.
mtree_1 = TreeFactory(child__child=leaf)
mtree_2 = TreeFactory()
self.assertEqual(0, mtree_1.child.child.id)
self.assertEqual('leaf', mtree_1.child.child.label)
self.assertEqual(1, mtree_1.child.id)
self.assertEqual('branch', mtree_1.child.label)
self.assertEqual(2, mtree_1.id)
self.assertEqual('tree', mtree_1.label)
self.assertEqual(3, mtree_2.child.child.id)
self.assertEqual('magic-leaf', mtree_2.child.child.label)
self.assertEqual(4, mtree_2.child.id)
self.assertEqual('branch', mtree_2.child.label)
self.assertEqual(5, mtree_2.id)
self.assertEqual('tree', mtree_2.label)
def test_sub_factory_and_inheritance(self):
"""Test inheriting from a factory with subfactories, overriding."""
class TestObject:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
two = 'two'
class WrappingTestObjectFactory(factory.Factory):
class Meta:
model = TestObject
wrapped = factory.SubFactory(TestObjectFactory)
friend = factory.LazyAttribute(lambda o: o.wrapped.two + 1)
class ExtendedWrappingTestObjectFactory(WrappingTestObjectFactory):
wrapped__two = 4
wrapping = ExtendedWrappingTestObjectFactory.build()
self.assertEqual(wrapping.wrapped.two, 4)
self.assertEqual(wrapping.friend, 5)
def test_diamond_sub_factory(self):
"""Tests the case where an object has two fields with a common field."""
class InnerMost:
def __init__(self, a, b):
self.a = a
self.b = b
class SideA:
def __init__(self, inner_from_a):
self.inner_from_a = inner_from_a
class SideB:
def __init__(self, inner_from_b):
self.inner_from_b = inner_from_b
class OuterMost:
def __init__(self, foo, side_a, side_b):
self.foo = foo
self.side_a = side_a
self.side_b = side_b
class InnerMostFactory(factory.Factory):
class Meta:
model = InnerMost
a = 15
b = 20
class SideAFactory(factory.Factory):
class Meta:
model = SideA
inner_from_a = factory.SubFactory(InnerMostFactory, a=20)
class SideBFactory(factory.Factory):
class Meta:
model = SideB
inner_from_b = factory.SubFactory(InnerMostFactory, b=15)
class OuterMostFactory(factory.Factory):
class Meta:
model = OuterMost
foo = 30
side_a = factory.SubFactory(
SideAFactory,
inner_from_a__a=factory.ContainerAttribute(
lambda obj, containers: containers[1].foo * 2,
)
)
side_b = factory.SubFactory(
SideBFactory,
inner_from_b=factory.ContainerAttribute(
lambda obj, containers: containers[0].side_a.inner_from_a,
)
)
outer = OuterMostFactory.build()
self.assertEqual(outer.foo, 30)
self.assertEqual(outer.side_a.inner_from_a, outer.side_b.inner_from_b)
self.assertEqual(outer.side_a.inner_from_a.a, outer.foo * 2)
self.assertEqual(outer.side_a.inner_from_a.b, 20)
outer = OuterMostFactory.build(side_a__inner_from_a__b=4)
self.assertEqual(outer.foo, 30)
self.assertEqual(outer.side_a.inner_from_a, outer.side_b.inner_from_b)
self.assertEqual(outer.side_a.inner_from_a.a, outer.foo * 2)
self.assertEqual(outer.side_a.inner_from_a.b, 4)
def test_nonstrict_container_attribute(self):
class TestModel2(FakeModel):
pass
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 3
two = factory.ContainerAttribute(lambda obj, containers: len(containers or []), strict=False)
class TestModel2Factory(FakeModelFactory):
class Meta:
model = TestModel2
one = 1
two = factory.SubFactory(TestModelFactory, one=1)
obj = TestModel2Factory.build()
self.assertEqual(1, obj.one)
self.assertEqual(1, obj.two.one)
self.assertEqual(1, obj.two.two)
obj = TestModelFactory()
self.assertEqual(3, obj.one)
self.assertEqual(0, obj.two)
def test_strict_container_attribute(self):
class TestModel2(FakeModel):
pass
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
sample_int = 3
container_len = factory.ContainerAttribute(lambda obj, containers: len(containers or []), strict=True)
class TestModel2Factory(FakeModelFactory):
class Meta:
model = TestModel2
sample_int = 1
descendant = factory.SubFactory(TestModelFactory, sample_int=1)
obj = TestModel2Factory.build()
self.assertEqual(1, obj.sample_int)
self.assertEqual(1, obj.descendant.sample_int)
self.assertEqual(1, obj.descendant.container_len)
with self.assertRaises(TypeError):
TestModelFactory.build()
def test_function_container_attribute(self):
class TestModel2(FakeModel):
pass
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
sample_int = 3
@factory.container_attribute
def container_len(self, containers):
if containers:
return len(containers)
return 42
class TestModel2Factory(FakeModelFactory):
class Meta:
model = TestModel2
sample_int = 1
descendant = factory.SubFactory(TestModelFactory, sample_int=1)
obj = TestModel2Factory.build()
self.assertEqual(1, obj.sample_int)
self.assertEqual(1, obj.descendant.sample_int)
self.assertEqual(1, obj.descendant.container_len)
obj = TestModelFactory()
self.assertEqual(3, obj.sample_int)
self.assertEqual(42, obj.container_len)
| SubFactoryTestCase |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 6543,
"end": 6745
} | class ____(Message):
"""
Conditional test is a non-empty tuple literal, which are always True.
"""
message = '\'if tuple literal\' is always true, perhaps remove accidental comma?'
| IfTuple |
python | huggingface__transformers | src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py | {
"start": 20667,
"end": 24268
} | class ____(PreTrainedModel):
config: RTDetrV2Config
base_model_prefix = "rt_detr_v2"
main_input_name = "pixel_values"
input_modalities = ("image",)
_no_split_modules = [r"RTDetrV2HybridEncoder", r"RTDetrV2DecoderLayer"]
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, RTDetrV2ForObjectDetection):
if module.model.decoder.class_embed is not None:
for layer in module.model.decoder.class_embed:
prior_prob = self.config.initializer_bias_prior_prob or 1 / (self.config.num_labels + 1)
bias = float(-math.log((1 - prior_prob) / prior_prob))
init.xavier_uniform_(layer.weight)
init.constant_(layer.bias, bias)
if module.model.decoder.bbox_embed is not None:
for layer in module.model.decoder.bbox_embed:
init.constant_(layer.layers[-1].weight, 0)
init.constant_(layer.layers[-1].bias, 0)
elif isinstance(module, RTDetrV2MultiscaleDeformableAttention):
init.constant_(module.sampling_offsets.weight, 0.0)
default_dtype = torch.get_default_dtype()
thetas = torch.arange(module.n_heads, dtype=torch.int64).to(default_dtype) * (
2.0 * math.pi / module.n_heads
)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (
(grid_init / grid_init.abs().max(-1, keepdim=True)[0])
.view(module.n_heads, 1, 1, 2)
.repeat(1, module.n_levels, module.n_points, 1)
)
for i in range(module.n_points):
grid_init[:, :, i, :] *= i + 1
init.copy_(module.sampling_offsets.bias, grid_init.view(-1))
init.constant_(module.attention_weights.weight, 0.0)
init.constant_(module.attention_weights.bias, 0.0)
init.xavier_uniform_(module.value_proj.weight)
init.constant_(module.value_proj.bias, 0.0)
init.xavier_uniform_(module.output_proj.weight)
init.constant_(module.output_proj.bias, 0.0)
elif isinstance(module, RTDetrV2Model):
prior_prob = self.config.initializer_bias_prior_prob or 1 / (self.config.num_labels + 1)
bias = float(-math.log((1 - prior_prob) / prior_prob))
init.xavier_uniform_(module.enc_score_head.weight)
init.constant_(module.enc_score_head.bias, bias)
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.ones_(module.weight)
init.zeros_(module.bias)
if hasattr(module, "weight_embedding") and self.config.learn_initial_query:
init.xavier_uniform_(module.weight_embedding.weight)
if hasattr(module, "denoising_class_embed") and self.config.num_denoising > 0:
init.xavier_uniform_(module.denoising_class_embed.weight)
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the RTDetrV2Decoder. This class adds two attributes to
BaseModelOutputWithCrossAttentions, namely:
- a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer)
- a stacked tensor of intermediate reference points.
"""
)
| RTDetrV2PreTrainedModel |
python | getsentry__sentry | src/sentry/workflow_engine/models/data_source.py | {
"start": 825,
"end": 909
} | class ____(Generic[T]):
source_id: str
packet: T
@region_silo_model
| DataPacket |
python | pallets__werkzeug | tests/conftest.py | {
"start": 751,
"end": 835
} | class ____(http.client.HTTPResponse):
data: bytes
json: t.Any
| DataHTTPResponse |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/alloy_db.py | {
"start": 1514,
"end": 1708
} | class ____(BaseGoogleLink):
"""Helper class for constructing AlloyDB users Link."""
name = "AlloyDB Users"
key = "alloy_db_users"
format_str = ALLOY_DB_USERS_LINK
| AlloyDBUsersLink |
python | simplejson__simplejson | simplejson/tests/test_recursion.py | {
"start": 91,
"end": 387
} | class ____(json.JSONEncoder):
recurse = False
def default(self, o):
if o is JSONTestObject:
if self.recurse:
return [JSONTestObject]
else:
return 'JSONTestObject'
return json.JSONEncoder.default(o)
| RecursiveJSONEncoder |
python | davidhalter__jedi | jedi/plugins/stdlib.py | {
"start": 13508,
"end": 16157
} | class ____(AttributeOverwrite, ValueWrapper):
api_type = 'property'
def __init__(self, property_obj, function):
super().__init__(property_obj)
self._function = function
def py__get__(self, instance, class_value):
if instance is None:
return ValueSet([self])
return self._function.execute_with_values(instance)
@publish_method('deleter')
@publish_method('getter')
@publish_method('setter')
def _return_self(self, arguments):
return ValueSet({self})
@argument_clinic('func, /', want_callback=True)
def builtins_property(functions, callback):
return ValueSet(
PropertyObject(property_value, function)
for property_value in callback()
for function in functions
)
def collections_namedtuple(value, arguments, callback):
"""
Implementation of the namedtuple function.
This has to be done by processing the namedtuple class template and
inferring the result.
"""
inference_state = value.inference_state
# Process arguments
name = 'jedi_unknown_namedtuple'
for c in _follow_param(inference_state, arguments, 0):
x = get_str_or_none(c)
if x is not None:
name = x
break
# TODO here we only use one of the types, we should use all.
param_values = _follow_param(inference_state, arguments, 1)
if not param_values:
return NO_VALUES
_fields = list(param_values)[0]
string = get_str_or_none(_fields)
if string is not None:
fields = string.replace(',', ' ').split()
elif isinstance(_fields, iterable.Sequence):
fields = [
get_str_or_none(v)
for lazy_value in _fields.py__iter__()
for v in lazy_value.infer()
]
fields = [f for f in fields if f is not None]
else:
return NO_VALUES
# Build source code
code = _NAMEDTUPLE_CLASS_TEMPLATE.format(
typename=name,
field_names=tuple(fields),
num_fields=len(fields),
arg_list=repr(tuple(fields)).replace("'", "")[1:-1],
repr_fmt='',
field_defs='\n'.join(_NAMEDTUPLE_FIELD_TEMPLATE.format(index=index, name=name)
for index, name in enumerate(fields))
)
# Parse source code
module = inference_state.grammar.parse(code)
generated_class = next(module.iter_classdefs())
parent_context = ModuleValue(
inference_state, module,
code_lines=parso.split_lines(code, keepends=True),
).as_context()
return ValueSet([ClassValue(inference_state, parent_context, generated_class)])
| PropertyObject |
python | ray-project__ray | python/ray/exceptions.py | {
"start": 21052,
"end": 22390
} | class ____(RayError):
"""Indicates that the object is lost from distributed memory, due to
node failure or system error.
Args:
object_ref_hex: Hex ID of the object.
"""
def __init__(self, object_ref_hex, owner_address, call_site):
self.object_ref_hex = object_ref_hex
self.owner_address = owner_address
self.call_site = call_site.replace(
ray_constants.CALL_STACK_LINE_DELIMITER, "\n "
)
def _base_str(self):
msg = f"Failed to retrieve object {self.object_ref_hex}. "
if self.call_site:
msg += f"The ObjectRef was created at: {self.call_site}"
else:
msg += (
"To see information about where this ObjectRef was created "
"in Python, set the environment variable "
"RAY_record_ref_creation_sites=1 during `ray start` and "
"`ray.init()`."
)
return msg
def __str__(self):
return (
self._base_str()
+ "\n\n"
+ (
f"All copies of {self.object_ref_hex} have been lost due to node "
"failure. Check cluster logs (`/tmp/ray/session_latest/logs`) for "
"more information about the failure."
)
)
@PublicAPI
| ObjectLostError |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 16458,
"end": 17144
} | class ____(TypedDict, total=False):
type: Required[Literal['to-string']]
when_used: WhenUsed # default: 'json-unless-none'
def to_string_ser_schema(*, when_used: WhenUsed = 'json-unless-none') -> ToStringSerSchema:
"""
Returns a schema for serialization using python's `str()` / `__str__` method.
Args:
when_used: Same meaning as for [general_function_plain_ser_schema], but with a different default
"""
s = dict(type='to-string')
if when_used != 'json-unless-none':
# just to avoid extra elements in schema, and to use the actual default defined in rust
s['when_used'] = when_used
return s # type: ignore
| ToStringSerSchema |
python | ansible__ansible | test/units/_internal/templating/test_access.py | {
"start": 882,
"end": 1124
} | class ____(LoggingTagAccessNotifier):
_type_interest = frozenset([ExampleTagWithContent])
def _notify(self, o: t.Any) -> t.Any:
super()._log(o) # get parent logging behavior
return o
| ExampleTagWithContentAccessNotifier |
python | kamyu104__LeetCode-Solutions | Python/query-kth-smallest-trimmed-number.py | {
"start": 1108,
"end": 2926
} | class ____(object):
def smallestTrimmedNumbers(self, nums, queries):
"""
:type nums: List[str]
:type queries: List[List[int]]
:rtype: List[int]
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
def compare(a, b):
for i in xrange(len(nums[a])-t, len(nums[a])):
if nums[a][i] < nums[b][i]:
return True
if nums[a][i] > nums[b][i]:
return False
return cmp(a, b) < 0
result = []
idxs = range(len(nums))
for k, t in queries:
nth_element(idxs, k-1, compare=compare)
result.append(idxs[k-1])
return result
# Time: O(q + nlogn * t)
# Space: O(t + n + q)
# sort
| Solution2 |
python | wandb__wandb | wandb/sdk/launch/builder/docker_builder.py | {
"start": 923,
"end": 6316
} | class ____(AbstractBuilder):
"""Builds a docker image for a project.
Attributes:
builder_config (Dict[str, Any]): The builder config.
"""
builder_type = "docker"
target_platform = "linux/amd64"
def __init__(
self,
environment: AbstractEnvironment,
registry: AbstractRegistry,
config: Dict[str, Any],
):
"""Initialize a DockerBuilder.
Arguments:
environment (AbstractEnvironment): The environment to use.
registry (AbstractRegistry): The registry to use.
Raises:
LaunchError: If docker is not installed
"""
self.environment = environment # Docker builder doesn't actually use this.
self.registry = registry
self.config = config
@classmethod
def from_config(
cls,
config: Dict[str, Any],
environment: AbstractEnvironment,
registry: AbstractRegistry,
) -> "DockerBuilder":
"""Create a DockerBuilder from a config.
Arguments:
config (Dict[str, Any]): The config.
registry (AbstractRegistry): The registry to use.
verify (bool, optional): Whether to verify the functionality of the builder.
login (bool, optional): Whether to login to the registry.
Returns:
DockerBuilder: The DockerBuilder.
"""
# If the user provided a destination URI in the builder config
# we use that as the registry.
image_uri = config.get("destination")
if image_uri:
if registry is not None:
wandb.termwarn(
f"{LOG_PREFIX}Overriding registry from registry config"
f" with {image_uri} from builder config."
)
registry = registry_from_uri(image_uri)
return cls(environment, registry, config)
async def verify(self) -> None:
"""Verify the builder."""
await validate_docker_installation()
async def login(self) -> None:
"""Login to the registry."""
if isinstance(self.registry, LocalRegistry):
_logger.info(f"{LOG_PREFIX}No registry configured, skipping login.")
elif isinstance(self.registry, AnonynmousRegistry):
_logger.info(f"{LOG_PREFIX}Anonymous registry, skipping login.")
else:
username, password = await self.registry.get_username_password()
login = event_loop_thread_exec(docker.login)
await login(username, password, self.registry.uri)
async def build_image(
self,
launch_project: LaunchProject,
entrypoint: EntryPoint,
job_tracker: Optional[JobAndRunStatusTracker] = None,
) -> str:
"""Build the image for the given project.
Arguments:
launch_project (LaunchProject): The project to build.
entrypoint (EntryPoint): The entrypoint to use.
"""
await self.verify()
await self.login()
build_context_manager = BuildContextManager(launch_project=launch_project)
build_ctx_path, image_tag = build_context_manager.create_build_context("docker")
dockerfile = os.path.join(build_ctx_path, _WANDB_DOCKERFILE_NAME)
repository = None if not self.registry else await self.registry.get_repo_uri()
# if repo is set, use the repo name as the image name
if repository:
image_uri = f"{repository}:{image_tag}"
# otherwise, base the image name off of the source
# which the launch_project checks in image_name
else:
image_uri = f"{launch_project.image_name}:{image_tag}"
if (
not launch_project.build_required()
and await self.registry.check_image_exists(image_uri)
):
return image_uri
_logger.info(
f"image {image_uri} does not already exist in repository, building."
)
try:
output = await event_loop_thread_exec(docker.build)(
tags=[image_uri],
file=dockerfile,
context_path=build_ctx_path,
platform=self.config.get("platform"),
)
warn_failed_packages_from_build_logs(
output, image_uri, launch_project.api, job_tracker
)
except docker.DockerError as e:
if job_tracker:
job_tracker.set_err_stage("build")
raise LaunchDockerError(f"Error communicating with docker client: {e}")
try:
os.remove(build_ctx_path)
except Exception:
_msg = f"{LOG_PREFIX}Temporary docker context file {build_ctx_path} was not deleted."
_logger.info(_msg)
if repository:
reg, tag = image_uri.split(":")
wandb.termlog(f"{LOG_PREFIX}Pushing image {image_uri}")
push_resp = await event_loop_thread_exec(docker.push)(reg, tag)
if push_resp is None:
raise LaunchError("Failed to push image to repository")
elif (
launch_project.resource == "sagemaker"
and f"The push refers to repository [{repository}]" not in push_resp
):
raise LaunchError(f"Unable to push image to ECR, response: {push_resp}")
return image_uri
| DockerBuilder |
python | python-jsonschema__jsonschema | jsonschema/tests/test_exceptions.py | {
"start": 153,
"end": 11006
} | class ____(TestCase):
def best_match_of(self, instance, schema):
errors = list(_LATEST_VERSION(schema).iter_errors(instance))
msg = f"No errors found for {instance} under {schema!r}!"
self.assertTrue(errors, msg=msg)
best = exceptions.best_match(iter(errors))
reversed_best = exceptions.best_match(reversed(errors))
self.assertEqual(
best._contents(),
reversed_best._contents(),
f"No consistent best match!\nGot: {best}\n\nThen: {reversed_best}",
)
return best
def test_shallower_errors_are_better_matches(self):
schema = {
"properties": {
"foo": {
"minProperties": 2,
"properties": {"bar": {"type": "object"}},
},
},
}
best = self.best_match_of(instance={"foo": {"bar": []}}, schema=schema)
self.assertEqual(best.validator, "minProperties")
def test_oneOf_and_anyOf_are_weak_matches(self):
"""
A property you *must* match is probably better than one you have to
match a part of.
"""
schema = {
"minProperties": 2,
"anyOf": [{"type": "string"}, {"type": "number"}],
"oneOf": [{"type": "string"}, {"type": "number"}],
}
best = self.best_match_of(instance={}, schema=schema)
self.assertEqual(best.validator, "minProperties")
def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self):
"""
If the most relevant error is an anyOf, then we traverse its context
and select the otherwise *least* relevant error, since in this case
that means the most specific, deep, error inside the instance.
I.e. since only one of the schemas must match, we look for the most
relevant one.
"""
schema = {
"properties": {
"foo": {
"anyOf": [
{"type": "string"},
{"properties": {"bar": {"type": "array"}}},
],
},
},
}
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
self.assertEqual(best.validator_value, "array")
def test_no_anyOf_traversal_for_equally_relevant_errors(self):
"""
We don't traverse into an anyOf (as above) if all of its context errors
seem to be equally "wrong" against the instance.
"""
schema = {
"anyOf": [
{"type": "string"},
{"type": "integer"},
{"type": "object"},
],
}
best = self.best_match_of(instance=[], schema=schema)
self.assertEqual(best.validator, "anyOf")
def test_anyOf_traversal_for_single_equally_relevant_error(self):
"""
We *do* traverse anyOf with a single nested error, even though it is
vacuously equally relevant to itself.
"""
schema = {
"anyOf": [
{"type": "string"},
],
}
best = self.best_match_of(instance=[], schema=schema)
self.assertEqual(best.validator, "type")
def test_anyOf_traversal_for_single_sibling_errors(self):
"""
We *do* traverse anyOf with a single subschema that fails multiple
times (e.g. on multiple items).
"""
schema = {
"anyOf": [
{"items": {"const": 37}},
],
}
best = self.best_match_of(instance=[12, 12], schema=schema)
self.assertEqual(best.validator, "const")
def test_anyOf_traversal_for_non_type_matching_sibling_errors(self):
"""
We *do* traverse anyOf with multiple subschemas when one does not type
match.
"""
schema = {
"anyOf": [
{"type": "object"},
{"items": {"const": 37}},
],
}
best = self.best_match_of(instance=[12, 12], schema=schema)
self.assertEqual(best.validator, "const")
def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self):
"""
If the most relevant error is an oneOf, then we traverse its context
and select the otherwise *least* relevant error, since in this case
that means the most specific, deep, error inside the instance.
I.e. since only one of the schemas must match, we look for the most
relevant one.
"""
schema = {
"properties": {
"foo": {
"oneOf": [
{"type": "string"},
{"properties": {"bar": {"type": "array"}}},
],
},
},
}
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
self.assertEqual(best.validator_value, "array")
def test_no_oneOf_traversal_for_equally_relevant_errors(self):
"""
We don't traverse into an oneOf (as above) if all of its context errors
seem to be equally "wrong" against the instance.
"""
schema = {
"oneOf": [
{"type": "string"},
{"type": "integer"},
{"type": "object"},
],
}
best = self.best_match_of(instance=[], schema=schema)
self.assertEqual(best.validator, "oneOf")
def test_oneOf_traversal_for_single_equally_relevant_error(self):
"""
We *do* traverse oneOf with a single nested error, even though it is
vacuously equally relevant to itself.
"""
schema = {
"oneOf": [
{"type": "string"},
],
}
best = self.best_match_of(instance=[], schema=schema)
self.assertEqual(best.validator, "type")
def test_oneOf_traversal_for_single_sibling_errors(self):
"""
We *do* traverse oneOf with a single subschema that fails multiple
times (e.g. on multiple items).
"""
schema = {
"oneOf": [
{"items": {"const": 37}},
],
}
best = self.best_match_of(instance=[12, 12], schema=schema)
self.assertEqual(best.validator, "const")
def test_oneOf_traversal_for_non_type_matching_sibling_errors(self):
"""
We *do* traverse oneOf with multiple subschemas when one does not type
match.
"""
schema = {
"oneOf": [
{"type": "object"},
{"items": {"const": 37}},
],
}
best = self.best_match_of(instance=[12, 12], schema=schema)
self.assertEqual(best.validator, "const")
def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self):
"""
Now, if the error is allOf, we traverse but select the *most* relevant
error from the context, because all schemas here must match anyways.
"""
schema = {
"properties": {
"foo": {
"allOf": [
{"type": "string"},
{"properties": {"bar": {"type": "array"}}},
],
},
},
}
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
self.assertEqual(best.validator_value, "string")
def test_nested_context_for_oneOf(self):
"""
We traverse into nested contexts (a oneOf containing an error in a
nested oneOf here).
"""
schema = {
"properties": {
"foo": {
"oneOf": [
{"type": "string"},
{
"oneOf": [
{"type": "string"},
{
"properties": {
"bar": {"type": "array"},
},
},
],
},
],
},
},
}
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
self.assertEqual(best.validator_value, "array")
def test_it_prioritizes_matching_types(self):
schema = {
"properties": {
"foo": {
"anyOf": [
{"type": "array", "minItems": 2},
{"type": "string", "minLength": 10},
],
},
},
}
best = self.best_match_of(instance={"foo": "bar"}, schema=schema)
self.assertEqual(best.validator, "minLength")
reordered = {
"properties": {
"foo": {
"anyOf": [
{"type": "string", "minLength": 10},
{"type": "array", "minItems": 2},
],
},
},
}
best = self.best_match_of(instance={"foo": "bar"}, schema=reordered)
self.assertEqual(best.validator, "minLength")
def test_it_prioritizes_matching_union_types(self):
schema = {
"properties": {
"foo": {
"anyOf": [
{"type": ["array", "object"], "minItems": 2},
{"type": ["integer", "string"], "minLength": 10},
],
},
},
}
best = self.best_match_of(instance={"foo": "bar"}, schema=schema)
self.assertEqual(best.validator, "minLength")
reordered = {
"properties": {
"foo": {
"anyOf": [
{"type": "string", "minLength": 10},
{"type": "array", "minItems": 2},
],
},
},
}
best = self.best_match_of(instance={"foo": "bar"}, schema=reordered)
self.assertEqual(best.validator, "minLength")
def test_boolean_schemas(self):
schema = {"properties": {"foo": False}}
best = self.best_match_of(instance={"foo": "bar"}, schema=schema)
self.assertIsNone(best.validator)
def test_one_error(self):
validator = _LATEST_VERSION({"minProperties": 2})
validator.iter_errors({})
self.assertEqual(
exceptions.best_match(validator.iter_errors({})).validator,
"minProperties",
)
def test_no_errors(self):
validator = _LATEST_VERSION({})
self.assertIsNone(exceptions.best_match(validator.iter_errors({})))
| TestBestMatch |
python | openai__gym | tests/test_core.py | {
"start": 875,
"end": 1692
} | class ____(core.Env):
"""This environment defines its observation & action spaces only
after the first call to reset. Although this pattern is sometimes
necessary when implementing a new environment (e.g. if it depends
on external resources), it is not encouraged.
"""
def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
super().reset(seed=seed)
self.observation_space = spaces.Box(
low=0, high=255, shape=(64, 64, 3), dtype=np.uint8
)
self.action_space = spaces.Discrete(3)
return self.observation_space.sample(), {} # Dummy observation with info
def step(self, action):
observation = self.observation_space.sample() # Dummy observation
return (observation, 0.0, False, {})
| UnknownSpacesEnv |
python | pytorch__pytorch | torch/nn/modules/pooling.py | {
"start": 35677,
"end": 39753
} | class ____(Module):
r"""Applies a 2D fractional max pooling over an input signal composed of several input planes.
Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
step size determined by the target output size.
The number of output features is equal to the number of input planes.
.. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined.
Args:
kernel_size: the size of the window to take a max over.
Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)`
output_size: the target output size of the image of the form `oH x oW`.
Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH`.
Note that we must have :math:`kH + oH - 1 <= H_{in}` and :math:`kW + oW - 1 <= W_{in}`
output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
This has to be a number or tuple in the range (0, 1).
Note that we must have :math:`kH + (output\_ratio\_H * H_{in}) - 1 <= H_{in}`
and :math:`kW + (output\_ratio\_W * W_{in}) - 1 <= W_{in}`
return_indices: if ``True``, will return the indices along with the outputs.
Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False``
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
:math:`(H_{out}, W_{out})=\text{output\_size}` or
:math:`(H_{out}, W_{out})=\text{output\_ratio} \times (H_{in}, W_{in})`.
Examples:
>>> # pool of square window of size=3, and target output size 13x12
>>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
>>> # pool of square window and target output size being half of input image size
>>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
>>> input = torch.randn(20, 16, 50, 32)
>>> output = m(input)
.. _Fractional MaxPooling:
https://arxiv.org/abs/1412.6071
"""
__constants__ = ["kernel_size", "return_indices", "output_size", "output_ratio"]
kernel_size: _size_2_t
return_indices: bool
output_size: _size_2_t
output_ratio: _ratio_2_t
def __init__(
self,
kernel_size: _size_2_t,
output_size: Optional[_size_2_t] = None,
output_ratio: Optional[_ratio_2_t] = None,
return_indices: bool = False,
_random_samples=None,
) -> None:
super().__init__()
self.kernel_size = _pair(kernel_size)
self.return_indices = return_indices
self.register_buffer("_random_samples", _random_samples)
self.output_size = _pair(output_size) if output_size is not None else None
self.output_ratio = _pair(output_ratio) if output_ratio is not None else None
if output_size is None and output_ratio is None:
raise ValueError(
"FractionalMaxPool2d requires specifying either "
"an output size, or a pooling ratio"
)
if output_size is not None and output_ratio is not None:
raise ValueError(
"only one of output_size and output_ratio may be specified"
)
if self.output_ratio is not None:
if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1):
raise ValueError(
f"output_ratio must be between 0 and 1 (got {output_ratio})"
)
def forward(self, input: Tensor):
return F.fractional_max_pool2d(
input,
self.kernel_size,
self.output_size,
self.output_ratio,
self.return_indices,
_random_samples=self._random_samples,
)
| FractionalMaxPool2d |
python | cherrypy__cherrypy | cherrypy/_cpmodpy.py | {
"start": 3917,
"end": 10279
} | class ____:
expose = ('read', 'readline', 'readlines')
def __init__(self, req):
for method in self.expose:
self.__dict__[method] = getattr(req, method)
recursive = False
_isSetUp = False
def handler(req):
"""Invoke the HTTP handler."""
from mod_python import apache
try:
global _isSetUp
if not _isSetUp:
setup(req)
_isSetUp = True
# Obtain a Request object from CherryPy
local = req.connection.local_addr
local = httputil.Host(
local[0],
local[1],
req.connection.local_host or '',
)
remote = req.connection.remote_addr
remote = httputil.Host(
remote[0],
remote[1],
req.connection.remote_host or '',
)
scheme = req.parsed_uri[0] or 'http'
req.get_basic_auth_pw()
try:
# apache.mpm_query only became available in mod_python 3.1
q = apache.mpm_query
threaded = q(apache.AP_MPMQ_IS_THREADED)
forked = q(apache.AP_MPMQ_IS_FORKED)
except AttributeError:
bad_value = (
"You must provide a PythonOption '%s', "
"either 'on' or 'off', when running a version "
'of mod_python < 3.1'
)
options = req.get_options()
threaded = options.get('multithread', '').lower()
if threaded == 'on':
threaded = True
elif threaded == 'off':
threaded = False
else:
raise ValueError(bad_value % 'multithread')
forked = options.get('multiprocess', '').lower()
if forked == 'on':
forked = True
elif forked == 'off':
forked = False
else:
raise ValueError(bad_value % 'multiprocess')
sn = cherrypy.tree.script_name(req.uri or '/')
if sn is None:
send_response(req, '404 Not Found', [], '')
else:
app = cherrypy.tree.apps[sn]
method = req.method
path = req.uri
qs = req.args or ''
reqproto = req.protocol
headers = list(req.headers_in.copy().items())
rfile = _ReadOnlyRequest(req)
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(
local,
remote,
scheme,
'HTTP/1.1',
)
request.login = req.user
request.multithread = bool(threaded)
request.multiprocess = bool(forked)
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the response
try:
request.run(method, path, qs, reqproto, headers, rfile)
break
except cherrypy.InternalRedirect:
ir = sys.exc_info()[1]
app.release_serving()
prev = request
if not recursive:
if ir.path in redirections:
raise RuntimeError(
'InternalRedirector visited the same URL '
'twice: %r' % ir.path,
)
else:
# Add the *previous* path_info + qs to
# redirections.
if qs:
qs = '?' + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = 'GET'
path = ir.path
qs = ir.query_string
rfile = io.BytesIO()
send_response(
req,
response.output_status,
response.header_list,
response.body,
response.stream,
)
finally:
app.release_serving()
except Exception:
tb = format_exc()
cherrypy.log(tb, 'MOD_PYTHON', severity=logging.ERROR)
s, h, b = bare_error()
send_response(req, s, h, b)
return apache.OK
def send_response(req, status, headers, body, stream=False):
"""Send the HTTP response to the client."""
# Set response status
req.status = int(status[:3])
# Set response headers
req.content_type = 'text/plain'
for header, value in headers:
if header.lower() == 'content-type':
req.content_type = value
continue
req.headers_out.add(header, value)
if stream:
# Flush now so the status and headers are sent immediately.
req.flush()
# Set response body
for seg in always_iterable(body):
req.write(seg)
# --------------- Startup tools for CherryPy + mod_python --------------- #
try:
import subprocess
def popen(fullcmd):
"""Invoke a subprocess via :mod:`subprocess`."""
p = subprocess.Popen(
fullcmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
)
return p.stdout
except ImportError:
def popen(fullcmd):
"""Invoke a subprocess via :mod:`os`."""
pipein, pipeout = os.popen4(fullcmd)
return pipeout
def read_process(cmd, args=''):
"""Return a subprocess standard output."""
fullcmd = '%s %s' % (cmd, args)
pipeout = popen(fullcmd)
try:
firstline = pipeout.readline()
cmd_not_found = re.search(
b'(not recognized|No such file|not found)',
firstline,
re.IGNORECASE,
)
if cmd_not_found:
raise IOError('%s must be on your system path.' % cmd)
output = firstline + pipeout.read()
finally:
pipeout.close()
return output
| _ReadOnlyRequest |
python | getsentry__sentry | src/sentry/discover/endpoints/discover_key_transactions.py | {
"start": 6715,
"end": 8089
} | class ____(Serializer):
def __init__(self, projects):
self.project_ids = {project.id for project in projects}
def get_attrs(self, item_list, user, **kwargs):
team_key_transactions = (
TeamKeyTransaction.objects.filter(
project_team__in=ProjectTeam.objects.filter(team__in=item_list),
)
.select_related("project_team__project", "project_team__team")
.order_by("transaction", "project_team__project_id")
)
attrs: dict[Team, dict[str, Any]] = defaultdict(
lambda: {
"count": 0,
"key_transactions": [],
}
)
for kt in team_key_transactions:
team = kt.project_team.team
project = kt.project_team.project
attrs[team]["count"] += 1
if project.id in self.project_ids:
attrs[team]["key_transactions"].append(
{
"project_id": str(project.id),
"transaction": kt.transaction,
}
)
return attrs
def serialize(self, obj, attrs, user, **kwargs):
return {
"team": str(obj.id),
"count": attrs.get("count", 0),
"keyed": attrs.get("key_transactions", []),
}
| KeyTransactionTeamSerializer |
python | google__jax | tests/random_test.py | {
"start": 1427,
"end": 1515
} | class ____(enum.Enum):
ALSO = enum.auto()
SKIP = enum.auto()
ONLY = enum.auto()
| OnX64 |
python | simonw__datasette | datasette/utils/asgi.py | {
"start": 477,
"end": 522
} | class ____(Exception):
status = 400
| Base400 |
python | numba__numba | numba/tests/test_withlifting.py | {
"start": 8021,
"end": 31783
} | class ____(MemoryLeak, TestCase):
def setUp(self):
warnings.simplefilter("error", errors.NumbaWarning)
def tearDown(self):
warnings.resetwarnings()
def assert_equal_return_and_stdout(self, pyfunc, *args):
py_args = copy.deepcopy(args)
c_args = copy.deepcopy(args)
cfunc = njit(pyfunc)
with captured_stdout() as stream:
expect_res = pyfunc(*py_args)
expect_out = stream.getvalue()
# avoid compiling during stdout-capturing for easier print-debugging
cfunc.compile(tuple(map(typeof, c_args)))
with captured_stdout() as stream:
got_res = cfunc(*c_args)
got_out = stream.getvalue()
self.assertEqual(expect_out, got_out)
self.assertPreciseEqual(expect_res, got_res)
def test_lift_objmode_basic(self):
def bar(ival):
print("ival =", {'ival': ival // 2})
def foo(ival):
ival += 1
with objmode_context:
bar(ival)
return ival + 1
def foo_nonglobal(ival):
ival += 1
with numba.objmode:
bar(ival)
return ival + 1
self.assert_equal_return_and_stdout(foo, 123)
self.assert_equal_return_and_stdout(foo_nonglobal, 123)
def test_lift_objmode_array_in(self):
def bar(arr):
print({'arr': arr // 2})
# arr is modified. the effect is visible outside.
arr *= 2
def foo(nelem):
arr = np.arange(nelem).astype(np.int64)
with objmode_context:
# arr is modified inplace inside bar()
bar(arr)
return arr + 1
nelem = 10
self.assert_equal_return_and_stdout(foo, nelem)
def test_lift_objmode_define_new_unused(self):
def bar(y):
print(y)
def foo(x):
with objmode_context():
y = 2 + x # defined but unused outside
a = np.arange(y) # defined but unused outside
bar(a)
return x
arg = 123
self.assert_equal_return_and_stdout(foo, arg)
def test_lift_objmode_return_simple(self):
def inverse(x):
print(x)
return 1 / x
def foo(x):
with objmode_context(y="float64"):
y = inverse(x)
return x, y
def foo_nonglobal(x):
with numba.objmode(y="float64"):
y = inverse(x)
return x, y
arg = 123
self.assert_equal_return_and_stdout(foo, arg)
self.assert_equal_return_and_stdout(foo_nonglobal, arg)
def test_lift_objmode_return_array(self):
def inverse(x):
print(x)
return 1 / x
def foo(x):
with objmode_context(y="float64[:]", z="int64"):
y = inverse(x)
z = int(y[0])
return x, y, z
arg = np.arange(1, 10, dtype=np.float64)
self.assert_equal_return_and_stdout(foo, arg)
@expected_failure_for_list_arg
def test_lift_objmode_using_list(self):
def foo(x):
with objmode_context(y="float64[:]"):
print(x)
x[0] = 4
print(x)
y = [1, 2, 3] + x
y = np.asarray([1 / i for i in y])
return x, y
arg = [1, 2, 3]
self.assert_equal_return_and_stdout(foo, arg)
def test_lift_objmode_var_redef(self):
def foo(x):
for x in range(x):
pass
if x:
x += 1
with objmode_context(x="intp"):
print(x)
x -= 1
print(x)
for i in range(x):
x += i
print(x)
return x
arg = 123
self.assert_equal_return_and_stdout(foo, arg)
@expected_failure_for_list_arg
def test_case01_mutate_list_ahead_of_ctx(self):
def foo(x, z):
x[2] = z
with objmode_context():
# should print [1, 2, 15] but prints [1, 2, 3]
print(x)
with objmode_context():
x[2] = 2 * z
# should print [1, 2, 30] but prints [1, 2, 15]
print(x)
return x
self.assert_equal_return_and_stdout(foo, [1, 2, 3], 15)
def test_case02_mutate_array_ahead_of_ctx(self):
def foo(x, z):
x[2] = z
with objmode_context():
# should print [1, 2, 15]
print(x)
with objmode_context():
x[2] = 2 * z
# should print [1, 2, 30]
print(x)
return x
x = np.array([1, 2, 3])
self.assert_equal_return_and_stdout(foo, x, 15)
@expected_failure_for_list_arg
def test_case03_create_and_mutate(self):
def foo(x):
with objmode_context(y='List(int64)'):
y = [1, 2, 3]
with objmode_context():
y[2] = 10
return y
self.assert_equal_return_and_stdout(foo, 1)
def test_case04_bogus_variable_type_info(self):
def foo(x):
# should specifying nonsense type info be considered valid?
with objmode_context(k="float64[:]"):
print(x)
return x
x = np.array([1, 2, 3])
cfoo = njit(foo)
with self.assertRaises(errors.TypingError) as raises:
cfoo(x)
self.assertIn(
"Invalid type annotation on non-outgoing variables",
str(raises.exception),
)
def test_case05_bogus_type_info(self):
def foo(x):
# should specifying the wrong type info be considered valid?
# z is complex.
# Note: for now, we will coerce for scalar and raise for array
with objmode_context(z="float64[:]"):
z = x + 1.j
return z
x = np.array([1, 2, 3])
cfoo = njit(foo)
with self.assertRaises(TypeError) as raises:
got = cfoo(x)
self.assertIn(
("can't unbox array from PyObject into native value."
" The object maybe of a different type"),
str(raises.exception),
)
def test_case06_double_objmode(self):
def foo(x):
# would nested ctx in the same scope ever make sense? Is this
# pattern useful?
with objmode_context():
#with npmmode_context(): not implemented yet
with objmode_context():
print(x)
return x
with self.assertRaises(errors.TypingError) as raises:
njit(foo)(123)
# Check that an error occurred in with-lifting in objmode
pat = ("During: resolving callee type: "
r"type\(ObjModeLiftedWith\(<.*>\)\)")
self.assertRegex(str(raises.exception), pat)
def test_case07_mystery_key_error(self):
# this raises a key error
def foo(x):
with objmode_context():
t = {'a': x}
u = 3
return x, t, u
x = np.array([1, 2, 3])
cfoo = njit(foo)
with self.assertRaises(errors.TypingError) as raises:
cfoo(x)
exstr = str(raises.exception)
self.assertIn("Missing type annotation on outgoing variable(s): "
"['t', 'u']",
exstr)
self.assertIn("Example code: with objmode"
"(t='<add_type_as_string_here>')",
exstr)
def test_case08_raise_from_external(self):
# this segfaults, expect its because the dict needs to raise as '2' is
# not in the keys until a later loop (looking for `d['0']` works fine).
d = dict()
def foo(x):
for i in range(len(x)):
with objmode_context():
k = str(i)
v = x[i]
d[k] = v
print(d['2'])
return x
x = np.array([1, 2, 3])
cfoo = njit(foo)
with self.assertRaises(KeyError) as raises:
cfoo(x)
self.assertEqual(str(raises.exception), "'2'")
def test_case09_explicit_raise(self):
def foo(x):
with objmode_context():
raise ValueError()
return x
x = np.array([1, 2, 3])
cfoo = njit(foo)
with self.assertRaises(errors.CompilerError) as raises:
cfoo(x)
self.assertIn(
('unsupported control flow due to raise statements inside '
'with block'),
str(raises.exception),
)
@expected_failure_for_list_arg
def test_case10_mutate_across_contexts(self):
# This shouldn't work due to using List as input.
def foo(x):
with objmode_context(y='List(int64)'):
y = [1, 2, 3]
with objmode_context():
y[2] = 10
return y
x = np.array([1, 2, 3])
self.assert_equal_return_and_stdout(foo, x)
def test_case10_mutate_array_across_contexts(self):
# Sub-case of case-10.
def foo(x):
with objmode_context(y='int64[:]'):
y = np.asarray([1, 2, 3], dtype='int64')
with objmode_context():
# Note: `y` is not an output.
y[2] = 10
return y
x = np.array([1, 2, 3])
self.assert_equal_return_and_stdout(foo, x)
def test_case11_define_function_in_context(self):
# should this work? no, global name 'bar' is not defined
def foo(x):
with objmode_context():
def bar(y):
return y + 1
return x
x = np.array([1, 2, 3])
cfoo = njit(foo)
with self.assertRaises(NameError) as raises:
cfoo(x)
self.assertIn(
"global name 'bar' is not defined",
str(raises.exception),
)
def test_case12_njit_inside_a_objmode_ctx(self):
# TODO: is this still the cases?
# this works locally but not inside this test, probably due to the way
# compilation is being done
def bar(y):
return y + 1
def foo(x):
with objmode_context(y='int64[:]'):
y = njit(bar)(x).astype('int64')
return x + y
x = np.array([1, 2, 3])
self.assert_equal_return_and_stdout(foo, x)
def test_case14_return_direct_from_objmode_ctx(self):
def foo(x):
with objmode_context(x='int64[:]'):
x += 1
return x
result = foo(np.array([1, 2, 3]))
np.testing.assert_array_equal(np.array([2, 3, 4]), result)
# No easy way to handle this yet.
@unittest.expectedFailure
def test_case15_close_over_objmode_ctx(self):
# Fails with Unsupported constraint encountered: enter_with $phi8.1
def foo(x):
j = 10
def bar(x):
with objmode_context(x='int64[:]'):
print(x)
return x + j
return bar(x) + 2
x = np.array([1, 2, 3])
self.assert_equal_return_and_stdout(foo, x)
@skip_unless_scipy
def test_case16_scipy_call_in_objmode_ctx(self):
from scipy import sparse as sp
def foo(x):
with objmode_context(k='int64'):
print(x)
spx = sp.csr_matrix(x)
# the np.int64 call is pointless, works around:
# https://github.com/scipy/scipy/issues/10206
# which hit the SciPy 1.3 release.
k = np.int64(spx[0, 0])
return k
x = np.array([1, 2, 3])
self.assert_equal_return_and_stdout(foo, x)
def test_case17_print_own_bytecode(self):
import dis
def foo(x):
with objmode_context():
dis.dis(foo)
x = np.array([1, 2, 3])
self.assert_equal_return_and_stdout(foo, x)
@expected_failure_for_function_arg
def test_case18_njitfunc_passed_to_objmode_ctx(self):
def foo(func, x):
with objmode_context():
func(x[0])
x = np.array([1, 2, 3])
fn = njit(lambda z: z + 5)
self.assert_equal_return_and_stdout(foo, fn, x)
@expected_failure_py311
@expected_failure_py312
@expected_failure_py313
@expected_failure_py314
def test_case19_recursion(self):
def foo(x):
with objmode_context():
if x == 0:
return 7
ret = foo(x - 1)
return ret
with self.assertRaises((errors.TypingError, errors.CompilerError)) as raises:
cfoo = njit(foo)
cfoo(np.array([1, 2, 3]))
msg = "Untyped global name 'foo'"
self.assertIn(msg, str(raises.exception))
@unittest.expectedFailure
def test_case20_rng_works_ok(self):
def foo(x):
np.random.seed(0)
y = np.random.rand()
with objmode_context(z="float64"):
# It's known that the random state does not sync
z = np.random.rand()
return x + z + y
x = np.array([1, 2, 3])
self.assert_equal_return_and_stdout(foo, x)
def test_case21_rng_seed_works_ok(self):
def foo(x):
np.random.seed(0)
y = np.random.rand()
with objmode_context(z="float64"):
# Similar to test_case20_rng_works_ok but call seed
np.random.seed(0)
z = np.random.rand()
return x + z + y
x = np.array([1, 2, 3])
self.assert_equal_return_and_stdout(foo, x)
def test_example01(self):
# Example from _ObjModeContextType.__doc__
def bar(x):
return np.asarray(list(reversed(x.tolist())))
@njit
def foo():
x = np.arange(5)
with objmode(y='intp[:]'): # annotate return type
# this region is executed by object-mode.
y = x + bar(x)
return y
self.assertPreciseEqual(foo(), foo.py_func())
self.assertIs(objmode, objmode_context)
def test_objmode_in_overload(self):
def foo(s):
pass
@overload(foo)
def foo_overload(s):
def impl(s):
with objmode(out='intp'):
out = s + 3
return out
return impl
@numba.njit
def f():
return foo(1)
self.assertEqual(f(), 1 + 3)
def test_objmode_gv_variable(self):
@njit
def global_var():
with objmode(val=gv_type):
val = 12.3
return val
ret = global_var()
# the result is truncated because of the intp return-type
self.assertIsInstance(ret, int)
self.assertEqual(ret, 12)
def test_objmode_gv_variable_error(self):
@njit
def global_var():
with objmode(val=gv_type2):
val = 123
return val
with self.assertRaisesRegex(
errors.CompilerError,
("Error handling objmode argument 'val'. "
r"Global 'gv_type2' is not defined.")
):
global_var()
def test_objmode_gv_mod_attr(self):
@njit
def modattr1():
with objmode(val=types.intp):
val = 12.3
return val
@njit
def modattr2():
with objmode(val=numba.types.intp):
val = 12.3
return val
for fn in (modattr1, modattr2):
with self.subTest(fn=str(fn)):
ret = fn()
# the result is truncated because of the intp return-type
self.assertIsInstance(ret, int)
self.assertEqual(ret, 12)
def test_objmode_gv_mod_attr_error(self):
@njit
def moderror():
with objmode(val=types.THIS_DOES_NOT_EXIST):
val = 12.3
return val
with self.assertRaisesRegex(
errors.CompilerError,
("Error handling objmode argument 'val'. "
"Getattr cannot be resolved at compile-time"),
):
moderror()
def test_objmode_gv_mod_attr_error_multiple(self):
@njit
def moderror():
with objmode(v1=types.intp, v2=types.THIS_DOES_NOT_EXIST,
v3=types.float32):
v1 = 12.3
v2 = 12.3
v3 = 12.3
return val
with self.assertRaisesRegex(
errors.CompilerError,
("Error handling objmode argument 'v2'. "
"Getattr cannot be resolved at compile-time"),
):
moderror()
def test_objmode_closure_type_in_overload(self):
def foo():
pass
@overload(foo)
def foo_overload():
shrubbery = types.float64[:]
def impl():
with objmode(out=shrubbery):
out = np.arange(10).astype(np.float64)
return out
return impl
@njit
def bar():
return foo()
self.assertPreciseEqual(bar(), np.arange(10).astype(np.float64))
def test_objmode_closure_type_in_overload_error(self):
def foo():
pass
@overload(foo)
def foo_overload():
shrubbery = types.float64[:]
def impl():
with objmode(out=shrubbery):
out = np.arange(10).astype(np.float64)
return out
# Remove closure var.
# Otherwise, it will "shrubbery" will be a global
del shrubbery
return impl
@njit
def bar():
return foo()
with self.assertRaisesRegex(
errors.TypingError,
("Error handling objmode argument 'out'. "
"Freevar 'shrubbery' is not defined"),
):
bar()
def test_objmode_invalid_use(self):
@njit
def moderror():
with objmode(bad=1 + 1):
out = 1
return val
with self.assertRaisesRegex(
errors.CompilerError,
("Error handling objmode argument 'bad'. "
"The value must be a compile-time constant either as "
"a non-local variable or a getattr expression that "
"refers to a Numba type."),
):
moderror()
def test_objmode_multi_type_args(self):
array_ty = types.int32[:]
@njit
def foo():
# t1 is a string
# t2 is a global type
# t3 is a non-local/freevar
with objmode(t1="float64", t2=gv_type, t3=array_ty):
t1 = 793856.5
t2 = t1 # to observe truncation
t3 = np.arange(5).astype(np.int32)
return t1, t2, t3
t1, t2, t3 = foo()
self.assertPreciseEqual(t1, 793856.5)
self.assertPreciseEqual(t2, 793856)
self.assertPreciseEqual(t3, np.arange(5).astype(np.int32))
def test_objmode_jitclass(self):
spec = [
('value', types.int32), # a simple scalar field
('array', types.float32[:]), # an array field
]
@jitclass(spec)
class Bag(object):
def __init__(self, value):
self.value = value
self.array = np.zeros(value, dtype=np.float32)
@property
def size(self):
return self.array.size
def increment(self, val):
for i in range(self.size):
self.array[i] += val
return self.array
@staticmethod
def add(x, y):
return x + y
n = 21
mybag = Bag(n)
def foo():
pass
@overload(foo)
def foo_overload():
shrubbery = mybag._numba_type_
def impl():
with objmode(out=shrubbery):
out = Bag(123)
out.increment(3)
return out
return impl
@njit
def bar():
return foo()
z = bar()
self.assertIsInstance(z, Bag)
self.assertEqual(z.add(2, 3), 2 + 3)
exp_array = np.zeros(123, dtype=np.float32) + 3
self.assertPreciseEqual(z.array, exp_array)
@staticmethod
def case_objmode_cache(x):
with objmode(output='float64'):
output = x / 10
return output
def test_objmode_reflected_list(self):
ret_type = typeof([1, 2, 3, 4, 5])
@njit
def test2():
with objmode(out=ret_type):
out = [1, 2, 3, 4, 5]
return out
with self.assertRaises(errors.CompilerError) as raises:
test2()
self.assertRegex(
str(raises.exception),
(r"Objmode context failed. "
r"Argument 'out' is declared as an unsupported type: "
r"reflected list\(int(32|64)\)<iv=None>. "
r"Reflected types are not supported."),
)
def test_objmode_reflected_set(self):
ret_type = typeof({1, 2, 3, 4, 5})
@njit
def test2():
with objmode(result=ret_type):
result = {1, 2, 3, 4, 5}
return result
with self.assertRaises(errors.CompilerError) as raises:
test2()
self.assertRegex(
str(raises.exception),
(r"Objmode context failed. "
r"Argument 'result' is declared as an unsupported type: "
r"reflected set\(int(32|64)\). "
r"Reflected types are not supported."),
)
def test_objmode_typed_dict(self):
ret_type = types.DictType(types.unicode_type, types.int64)
@njit
def test4():
with objmode(res=ret_type):
res = {'A': 1, 'B': 2}
return res
with self.assertRaises(TypeError) as raises:
test4()
self.assertIn(
("can't unbox a <class 'dict'> "
"as a <class 'numba.typed.typeddict.Dict'>"),
str(raises.exception),
)
def test_objmode_typed_list(self):
ret_type = types.ListType(types.int64)
@njit
def test4():
with objmode(res=ret_type):
res = [1, 2]
return res
with self.assertRaises(TypeError) as raises:
test4()
self.assertRegex(
str(raises.exception),
(r"can't unbox a <class 'list'> "
r"as a (<class ')?numba.typed.typedlist.List('>)?"),
)
def test_objmode_use_of_view(self):
# See issue #7158, npm functionality should only be validated if in
# npm.
@njit
def foo(x):
with numba.objmode(y="int64[::1]"):
y = x.view("int64")
return y
a = np.ones(1, np.int64).view('float64')
expected = foo.py_func(a)
got = foo(a)
self.assertPreciseEqual(expected, got)
def case_inner_pyfunc(x):
return x / 10
def case_objmode_cache(x):
with objmode(output='float64'):
output = case_inner_pyfunc(x)
return output
| TestLiftObj |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-s3/source_s3/stream.py | {
"start": 558,
"end": 4178
} | class ____(IncrementalFileStream):
@property
def storagefile_class(self) -> type:
return S3File
def filepath_iterator(self, stream_state: Mapping[str, Any] = None) -> Iterator[FileInfo]:
"""
:yield: url filepath to use in S3File()
"""
stream_state = self._get_converted_stream_state(stream_state)
prefix = self._provider.get("path_prefix")
if prefix is None:
prefix = ""
msg = f"Iterating S3 bucket '{self._provider['bucket']}'"
self.logger.info(msg + f" with prefix: '{prefix}' " if prefix != "" else msg)
provider = self._provider
client_config = None
if S3File.use_aws_account(provider):
session = boto3session.Session(
aws_access_key_id=provider["aws_access_key_id"], aws_secret_access_key=provider["aws_secret_access_key"]
)
else:
session = boto3session.Session()
client_config = Config(signature_version=UNSIGNED)
client = make_s3_client(provider, config=client_config, session=session)
ctoken = None
while True:
# list_objects_v2 doesn't like a None value for ContinuationToken
# so we don't set it if we don't have one.
if ctoken:
kwargs = dict(Bucket=provider["bucket"], Prefix=provider.get("path_prefix", ""), ContinuationToken=ctoken) # type: ignore[unreachable]
else:
kwargs = dict(Bucket=provider["bucket"], Prefix=provider.get("path_prefix", ""))
try:
response = client.list_objects_v2(**kwargs)
content = response["Contents"]
except ClientError as e:
message = e.response.get("Error", {}).get("Message", {})
raise AirbyteTracedException(message, message, failure_type=FailureType.config_error)
except KeyError:
pass
else:
for file in content:
if self.is_not_folder(file) and self._filter_by_last_modified_date(file, stream_state):
yield FileInfo(key=file["Key"], last_modified=file["LastModified"], size=file["Size"])
ctoken = response.get("NextContinuationToken", None)
if not ctoken:
break
@staticmethod
def is_not_folder(file) -> bool:
return not file["Key"].endswith("/")
def _filter_by_last_modified_date(self, file: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None):
cursor_date = pendulum.parse(stream_state.get(self.cursor_field)) if stream_state else self.start_date
file_in_history_and_last_modified_is_earlier_than_cursor_value = (
stream_state is not None
and self.cursor_field in stream_state.keys()
and file.get("LastModified") <= self._get_datetime_from_stream_state(stream_state)
and self.file_in_history(file["Key"], stream_state.get("history", {}))
)
file_is_not_in_history_and_last_modified_plus_buffer_days_is_earlier_than_cursor_value = file.get("LastModified") + timedelta(
days=self.buffer_days
) < self._get_datetime_from_stream_state(stream_state) and not self.file_in_history(file["Key"], stream_state.get("history", {}))
return (
file.get("LastModified") > cursor_date
and not file_in_history_and_last_modified_is_earlier_than_cursor_value
and not file_is_not_in_history_and_last_modified_plus_buffer_days_is_earlier_than_cursor_value
)
| IncrementalFileStreamS3 |
python | Pylons__pyramid | tests/test_i18n.py | {
"start": 8926,
"end": 14571
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.i18n import Translations
return Translations
def _makeOne(self):
messages1 = [('foo', 'Voh'), (('foo1', 1), 'Voh1')]
messages2 = [('foo', 'VohD'), (('foo1', 1), 'VohD1')]
klass = self._getTargetClass()
translations1 = klass(None, domain='messages')
translations1._catalog = dict(messages1)
translations1.plural = lambda *arg: 1
translations2 = klass(None, domain='messages1')
translations2._catalog = dict(messages2)
translations2.plural = lambda *arg: 1
translations = translations1.add(translations2, merge=False)
return translations
def test_load_locales_None(self):
import gettext
klass = self._getTargetClass()
result = klass.load(localedir, None, domain=None)
self.assertEqual(result.__class__, gettext.NullTranslations)
def test_load_domain_None(self):
import gettext
locales = ['de', 'en']
klass = self._getTargetClass()
result = klass.load(localedir, locales, domain=None)
self.assertEqual(result.__class__, gettext.NullTranslations)
def test_load_found_locale_and_domain(self):
locales = ['de', 'en']
klass = self._getTargetClass()
result = klass.load(localedir, locales, domain='deformsite')
self.assertEqual(result.__class__, klass)
def test_load_found_locale_and_domain_locale_is_string(self):
locales = 'de'
klass = self._getTargetClass()
result = klass.load(localedir, locales, domain='deformsite')
self.assertEqual(result.__class__, klass)
def test___repr__(self):
inst = self._makeOne()
result = repr(inst)
self.assertEqual(result, '<Translations: "None">')
def test_merge_not_gnutranslations(self):
inst = self._makeOne()
self.assertEqual(inst.merge(None), inst)
def test_merge_gnutranslations(self):
inst = self._makeOne()
inst2 = self._makeOne()
inst2._catalog['a'] = 'b'
inst.merge(inst2)
self.assertEqual(inst._catalog['a'], 'b')
def test_merge_gnutranslations_not_translations(self):
import gettext
t = gettext.GNUTranslations()
t._catalog = {'a': 'b'}
inst = self._makeOne()
inst.merge(t)
self.assertEqual(inst._catalog['a'], 'b')
def test_add_different_domain_merge_true_notexisting(self):
inst = self._makeOne()
inst2 = self._makeOne()
inst2.domain = 'domain2'
inst.add(inst2)
self.assertEqual(inst._domains['domain2'], inst2)
def test_add_different_domain_merge_true_existing(self):
inst = self._makeOne()
inst2 = self._makeOne()
inst3 = self._makeOne()
inst2.domain = 'domain2'
inst2._catalog['a'] = 'b'
inst3.domain = 'domain2'
inst._domains['domain2'] = inst3
inst.add(inst2)
self.assertEqual(inst._domains['domain2'], inst3)
self.assertEqual(inst3._catalog['a'], 'b')
def test_add_same_domain_merge_true(self):
inst = self._makeOne()
inst2 = self._makeOne()
inst2._catalog['a'] = 'b'
inst.add(inst2)
self.assertEqual(inst._catalog['a'], 'b')
def test_add_default_domain_replaces_plural_first_time(self):
# Create three empty message catalogs in the default domain
inst = self._getTargetClass()(None, domain='messages')
inst2 = self._getTargetClass()(None, domain='messages')
inst3 = self._getTargetClass()(None, domain='messages')
inst._catalog = {}
inst2._catalog = {}
inst3._catalog = {}
# The default plural scheme is the germanic one
self.assertEqual(inst.plural(0), 1)
self.assertEqual(inst.plural(1), 0)
self.assertEqual(inst.plural(2), 1)
# inst2 represents a message file that declares french plurals
inst2.plural = lambda n: n > 1
inst.add(inst2)
# that plural rule should now apply to inst
self.assertEqual(inst.plural(0), 0)
self.assertEqual(inst.plural(1), 0)
self.assertEqual(inst.plural(2), 1)
# We load a second message file with different plural rules
inst3.plural = lambda n: n > 0
inst.add(inst3)
# It doesn't override the previously loaded rule
self.assertEqual(inst.plural(0), 0)
self.assertEqual(inst.plural(1), 0)
self.assertEqual(inst.plural(2), 1)
def test_dgettext(self):
t = self._makeOne()
self.assertEqual(t.dgettext('messages', 'foo'), 'Voh')
self.assertEqual(t.dgettext('messages1', 'foo'), 'VohD')
def test_dugettext(self):
t = self._makeOne()
self.assertEqual(t.dugettext('messages', 'foo'), 'Voh')
self.assertEqual(t.dugettext('messages1', 'foo'), 'VohD')
def test_dngettext(self):
t = self._makeOne()
self.assertEqual(t.dngettext('messages', 'foo1', 'foos1', 1), 'Voh1')
self.assertEqual(t.dngettext('messages1', 'foo1', 'foos1', 1), 'VohD1')
def test_dungettext(self):
t = self._makeOne()
self.assertEqual(t.dungettext('messages', 'foo1', 'foos1', 1), 'Voh1')
self.assertEqual(
t.dungettext('messages1', 'foo1', 'foos1', 1), 'VohD1'
)
def test_default_germanic_pluralization(self):
t = self._getTargetClass()()
t._catalog = {}
result = t.dungettext('messages', 'foo1', 'foos1', 2)
self.assertEqual(result, 'foos1')
| TestTranslations |
python | python__mypy | mypy/checkstrformat.py | {
"start": 10622,
"end": 46007
} | class ____:
"""String interpolation/formatter type checker.
This class works closely together with checker.ExpressionChecker.
"""
# Some services are provided by a TypeChecker instance.
chk: TypeCheckerSharedApi
# This is shared with TypeChecker, but stored also here for convenience.
msg: MessageBuilder
def __init__(self, chk: TypeCheckerSharedApi, msg: MessageBuilder) -> None:
"""Construct an expression type checker."""
self.chk = chk
self.msg = msg
def check_str_format_call(self, call: CallExpr, format_value: str) -> None:
"""Perform more precise checks for str.format() calls when possible.
Currently the checks are performed for:
* Actual string literals
* Literal types with string values
* Final names with string values
The checks that we currently perform:
* Check generic validity (e.g. unmatched { or }, and {} in invalid positions)
* Check consistency of specifiers' auto-numbering
* Verify that replacements can be found for all conversion specifiers,
and all arguments were used
* Non-standard format specs are only allowed for types with custom __format__
* Type check replacements with accessors applied (if any).
* Verify that specifier type is known and matches replacement type
* Perform special checks for some specifier types:
- 'c' requires a single character string
- 's' must not accept bytes
- non-empty flags are only allowed for numeric types
"""
conv_specs = parse_format_value(format_value, call, self.msg)
if conv_specs is None:
return
if not self.auto_generate_keys(conv_specs, call):
return
self.check_specs_in_format_call(call, conv_specs, format_value)
def check_specs_in_format_call(
self, call: CallExpr, specs: list[ConversionSpecifier], format_value: str
) -> None:
"""Perform pairwise checks for conversion specifiers vs their replacements.
The core logic for format checking is implemented in this method.
"""
assert all(s.key for s in specs), "Keys must be auto-generated first!"
replacements = self.find_replacements_in_call(call, [cast(str, s.key) for s in specs])
assert len(replacements) == len(specs)
for spec, repl in zip(specs, replacements):
repl = self.apply_field_accessors(spec, repl, ctx=call)
actual_type = repl.type if isinstance(repl, TempNode) else self.chk.lookup_type(repl)
assert actual_type is not None
# Special case custom formatting.
if (
spec.format_spec
and spec.non_standard_format_spec
and
# Exclude "dynamic" specifiers (i.e. containing nested formatting).
not ("{" in spec.format_spec or "}" in spec.format_spec)
):
if (
not custom_special_method(actual_type, "__format__", check_all=True)
or spec.conversion
):
# TODO: add support for some custom specs like datetime?
self.msg.fail(
f'Unrecognized format specification "{spec.format_spec[1:]}"',
call,
code=codes.STRING_FORMATTING,
)
continue
# Adjust expected and actual types.
if not spec.conv_type:
expected_type: Type | None = AnyType(TypeOfAny.special_form)
else:
assert isinstance(call.callee, MemberExpr)
if isinstance(call.callee.expr, StrExpr):
format_str = call.callee.expr
else:
format_str = StrExpr(format_value)
expected_type = self.conversion_type(
spec.conv_type, call, format_str, format_call=True
)
if spec.conversion is not None:
# If the explicit conversion is given, then explicit conversion is called _first_.
if spec.conversion[1] not in "rsa":
self.msg.fail(
(
f'Invalid conversion type "{spec.conversion[1]}", '
f'must be one of "r", "s" or "a"'
),
call,
code=codes.STRING_FORMATTING,
)
actual_type = self.named_type("builtins.str")
# Perform the checks for given types.
if expected_type is None:
continue
a_type = get_proper_type(actual_type)
actual_items = (
get_proper_types(a_type.items) if isinstance(a_type, UnionType) else [a_type]
)
for a_type in actual_items:
if custom_special_method(a_type, "__format__"):
continue
self.check_placeholder_type(a_type, expected_type, call)
self.perform_special_format_checks(spec, call, repl, a_type, expected_type)
def perform_special_format_checks(
self,
spec: ConversionSpecifier,
call: CallExpr,
repl: Expression,
actual_type: Type,
expected_type: Type,
) -> None:
# TODO: try refactoring to combine this logic with % formatting.
if spec.conv_type == "c":
if isinstance(repl, (StrExpr, BytesExpr)) and len(repl.value) != 1:
self.msg.requires_int_or_char(call, format_call=True)
c_typ = get_proper_type(self.chk.lookup_type(repl))
if isinstance(c_typ, Instance) and c_typ.last_known_value:
c_typ = c_typ.last_known_value
if isinstance(c_typ, LiteralType) and isinstance(c_typ.value, str):
if len(c_typ.value) != 1:
self.msg.requires_int_or_char(call, format_call=True)
if (not spec.conv_type or spec.conv_type == "s") and not spec.conversion:
if has_type_component(actual_type, "builtins.bytes") and not custom_special_method(
actual_type, "__str__"
):
self.msg.fail(
'If x = b\'abc\' then f"{x}" or "{}".format(x) produces "b\'abc\'", '
'not "abc". If this is desired behavior, use f"{x!r}" or "{!r}".format(x). '
"Otherwise, decode the bytes",
call,
code=codes.STR_BYTES_PY3,
)
if spec.flags:
numeric_types = UnionType(
[self.named_type("builtins.int"), self.named_type("builtins.float")]
)
if (
spec.conv_type
and spec.conv_type not in NUMERIC_TYPES_NEW
or not spec.conv_type
and not is_subtype(actual_type, numeric_types)
and not custom_special_method(actual_type, "__format__")
):
self.msg.fail(
"Numeric flags are only allowed for numeric types",
call,
code=codes.STRING_FORMATTING,
)
def find_replacements_in_call(self, call: CallExpr, keys: list[str]) -> list[Expression]:
"""Find replacement expression for every specifier in str.format() call.
In case of an error use TempNode(AnyType).
"""
result: list[Expression] = []
used: set[Expression] = set()
for key in keys:
if key.isdecimal():
expr = self.get_expr_by_position(int(key), call)
if not expr:
self.msg.fail(
f"Cannot find replacement for positional format specifier {key}",
call,
code=codes.STRING_FORMATTING,
)
expr = TempNode(AnyType(TypeOfAny.from_error))
else:
expr = self.get_expr_by_name(key, call)
if not expr:
self.msg.fail(
f'Cannot find replacement for named format specifier "{key}"',
call,
code=codes.STRING_FORMATTING,
)
expr = TempNode(AnyType(TypeOfAny.from_error))
result.append(expr)
if not isinstance(expr, TempNode):
used.add(expr)
# Strictly speaking not using all replacements is not a type error, but most likely
# a typo in user code, so we show an error like we do for % formatting.
total_explicit = len([kind for kind in call.arg_kinds if kind in (ARG_POS, ARG_NAMED)])
if len(used) < total_explicit:
self.msg.too_many_string_formatting_arguments(call)
return result
def get_expr_by_position(self, pos: int, call: CallExpr) -> Expression | None:
"""Get positional replacement expression from '{0}, {1}'.format(x, y, ...) call.
If the type is from *args, return TempNode(<item type>). Return None in case of
an error.
"""
pos_args = [arg for arg, kind in zip(call.args, call.arg_kinds) if kind == ARG_POS]
if pos < len(pos_args):
return pos_args[pos]
star_args = [arg for arg, kind in zip(call.args, call.arg_kinds) if kind == ARG_STAR]
if not star_args:
return None
# Fall back to *args when present in call.
star_arg = star_args[0]
varargs_type = get_proper_type(self.chk.lookup_type(star_arg))
if not isinstance(varargs_type, Instance) or not varargs_type.type.has_base(
"typing.Sequence"
):
# Error should be already reported.
return TempNode(AnyType(TypeOfAny.special_form))
iter_info = self.chk.named_generic_type(
"typing.Sequence", [AnyType(TypeOfAny.special_form)]
).type
return TempNode(map_instance_to_supertype(varargs_type, iter_info).args[0])
def get_expr_by_name(self, key: str, call: CallExpr) -> Expression | None:
"""Get named replacement expression from '{name}'.format(name=...) call.
If the type is from **kwargs, return TempNode(<item type>). Return None in case of
an error.
"""
named_args = [
arg
for arg, kind, name in zip(call.args, call.arg_kinds, call.arg_names)
if kind == ARG_NAMED and name == key
]
if named_args:
return named_args[0]
star_args_2 = [arg for arg, kind in zip(call.args, call.arg_kinds) if kind == ARG_STAR2]
if not star_args_2:
return None
star_arg_2 = star_args_2[0]
kwargs_type = get_proper_type(self.chk.lookup_type(star_arg_2))
if not isinstance(kwargs_type, Instance) or not kwargs_type.type.has_base(
"typing.Mapping"
):
# Error should be already reported.
return TempNode(AnyType(TypeOfAny.special_form))
any_type = AnyType(TypeOfAny.special_form)
mapping_info = self.chk.named_generic_type("typing.Mapping", [any_type, any_type]).type
return TempNode(map_instance_to_supertype(kwargs_type, mapping_info).args[1])
def auto_generate_keys(self, all_specs: list[ConversionSpecifier], ctx: Context) -> bool:
"""Translate '{} {name} {}' to '{0} {name} {1}'.
Return True if generation was successful, otherwise report an error and return false.
"""
some_defined = any(s.key and s.key.isdecimal() for s in all_specs)
all_defined = all(bool(s.key) for s in all_specs)
if some_defined and not all_defined:
self.msg.fail(
"Cannot combine automatic field numbering and manual field specification",
ctx,
code=codes.STRING_FORMATTING,
)
return False
if all_defined:
return True
next_index = 0
for spec in all_specs:
if not spec.key:
str_index = str(next_index)
spec.key = str_index
# Update also the full field (i.e. turn {.x} into {0.x}).
if not spec.field:
spec.field = str_index
else:
spec.field = str_index + spec.field
next_index += 1
return True
def apply_field_accessors(
self, spec: ConversionSpecifier, repl: Expression, ctx: Context
) -> Expression:
"""Transform and validate expr in '{.attr[item]}'.format(expr) into expr.attr['item'].
If validation fails, return TempNode(AnyType).
"""
assert spec.key, "Keys must be auto-generated first!"
if spec.field == spec.key:
return repl
assert spec.field
temp_errors = Errors(self.chk.options)
dummy = DUMMY_FIELD_NAME + spec.field[len(spec.key) :]
temp_ast: Node = parse(
dummy, fnam="<format>", module=None, options=self.chk.options, errors=temp_errors
)
if temp_errors.is_errors():
self.msg.fail(
f'Syntax error in format specifier "{spec.field}"',
ctx,
code=codes.STRING_FORMATTING,
)
return TempNode(AnyType(TypeOfAny.from_error))
# These asserts are guaranteed by the original regexp.
assert isinstance(temp_ast, MypyFile)
temp_ast = temp_ast.defs[0]
assert isinstance(temp_ast, ExpressionStmt)
temp_ast = temp_ast.expr
if not self.validate_and_transform_accessors(temp_ast, repl, spec, ctx=ctx):
return TempNode(AnyType(TypeOfAny.from_error))
# Check if there are any other errors (like missing members).
# TODO: fix column to point to actual start of the format specifier _within_ string.
temp_ast.line = ctx.line
temp_ast.column = ctx.column
self.chk.expr_checker.accept(temp_ast)
return temp_ast
def validate_and_transform_accessors(
self,
temp_ast: Expression,
original_repl: Expression,
spec: ConversionSpecifier,
ctx: Context,
) -> bool:
"""Validate and transform (in-place) format field accessors.
On error, report it and return False. The transformations include replacing the dummy
variable with actual replacement expression and translating any name expressions in an
index into strings, so that this will work:
class User(TypedDict):
name: str
id: int
u: User
'{[id]:d} -> {[name]}'.format(u)
"""
if not isinstance(temp_ast, (MemberExpr, IndexExpr)):
self.msg.fail(
"Only index and member expressions are allowed in"
' format field accessors; got "{}"'.format(spec.field),
ctx,
code=codes.STRING_FORMATTING,
)
return False
if isinstance(temp_ast, MemberExpr):
node = temp_ast.expr
else:
node = temp_ast.base
if not isinstance(temp_ast.index, (NameExpr, IntExpr)):
assert spec.key, "Call this method only after auto-generating keys!"
assert spec.field
self.msg.fail(
'Invalid index expression in format field accessor "{}"'.format(
spec.field[len(spec.key) :]
),
ctx,
code=codes.STRING_FORMATTING,
)
return False
if isinstance(temp_ast.index, NameExpr):
temp_ast.index = StrExpr(temp_ast.index.name)
if isinstance(node, NameExpr) and node.name == DUMMY_FIELD_NAME:
# Replace it with the actual replacement expression.
assert isinstance(temp_ast, (IndexExpr, MemberExpr)) # XXX: this is redundant
if isinstance(temp_ast, IndexExpr):
temp_ast.base = original_repl
else:
temp_ast.expr = original_repl
return True
node.line = ctx.line
node.column = ctx.column
return self.validate_and_transform_accessors(
node, original_repl=original_repl, spec=spec, ctx=ctx
)
# TODO: In Python 3, the bytes formatting has a more restricted set of options
# compared to string formatting.
def check_str_interpolation(self, expr: FormatStringExpr, replacements: Expression) -> Type:
"""Check the types of the 'replacements' in a string interpolation
expression: str % replacements.
"""
self.chk.expr_checker.accept(expr)
specifiers = parse_conversion_specifiers(expr.value)
has_mapping_keys = self.analyze_conversion_specifiers(specifiers, expr)
if has_mapping_keys is None:
pass # Error was reported
elif has_mapping_keys:
self.check_mapping_str_interpolation(specifiers, replacements, expr)
else:
self.check_simple_str_interpolation(specifiers, replacements, expr)
if isinstance(expr, BytesExpr):
return self.named_type("builtins.bytes")
elif isinstance(expr, StrExpr):
return self.named_type("builtins.str")
else:
assert False
def analyze_conversion_specifiers(
self, specifiers: list[ConversionSpecifier], context: Context
) -> bool | None:
has_star = any(specifier.has_star() for specifier in specifiers)
has_key = any(specifier.has_key() for specifier in specifiers)
all_have_keys = all(
specifier.has_key() or specifier.conv_type == "%" for specifier in specifiers
)
if has_key and has_star:
self.msg.string_interpolation_with_star_and_key(context)
return None
if has_key and not all_have_keys:
self.msg.string_interpolation_mixing_key_and_non_keys(context)
return None
return has_key
def check_simple_str_interpolation(
self,
specifiers: list[ConversionSpecifier],
replacements: Expression,
expr: FormatStringExpr,
) -> None:
"""Check % string interpolation with positional specifiers '%s, %d' % ('yes, 42')."""
checkers = self.build_replacement_checkers(specifiers, replacements, expr)
if checkers is None:
return
rhs_type = get_proper_type(self.accept(replacements))
rep_types: list[Type] = []
if isinstance(rhs_type, TupleType):
rep_types = rhs_type.items
unpack_index = find_unpack_in_list(rep_types)
if unpack_index is not None:
# TODO: we should probably warn about potentially short tuple.
# However, without special-casing for tuple(f(i) for in other_tuple)
# this causes false positive on mypy self-check in report.py.
extras = max(0, len(checkers) - len(rep_types) + 1)
unpacked = rep_types[unpack_index]
assert isinstance(unpacked, UnpackType)
unpacked = get_proper_type(unpacked.type)
if isinstance(unpacked, TypeVarTupleType):
unpacked = get_proper_type(unpacked.upper_bound)
assert (
isinstance(unpacked, Instance) and unpacked.type.fullname == "builtins.tuple"
)
unpack_items = [unpacked.args[0]] * extras
rep_types = rep_types[:unpack_index] + unpack_items + rep_types[unpack_index + 1 :]
elif isinstance(rhs_type, AnyType):
return
elif isinstance(rhs_type, Instance) and rhs_type.type.fullname == "builtins.tuple":
# Assume that an arbitrary-length tuple has the right number of items.
rep_types = [rhs_type.args[0]] * len(checkers)
elif isinstance(rhs_type, UnionType):
for typ in rhs_type.relevant_items():
temp_node = TempNode(typ)
temp_node.line = replacements.line
self.check_simple_str_interpolation(specifiers, temp_node, expr)
return
else:
rep_types = [rhs_type]
if len(checkers) > len(rep_types):
# Only check the fix-length Tuple type. Other Iterable types would skip.
if is_subtype(rhs_type, self.chk.named_type("typing.Iterable")) and not isinstance(
rhs_type, TupleType
):
return
else:
self.msg.too_few_string_formatting_arguments(replacements)
elif len(checkers) < len(rep_types):
self.msg.too_many_string_formatting_arguments(replacements)
else:
if len(checkers) == 1:
check_node, check_type = checkers[0]
if isinstance(rhs_type, TupleType) and len(rhs_type.items) == 1:
check_type(rhs_type.items[0])
else:
check_node(replacements)
elif isinstance(replacements, TupleExpr) and not any(
isinstance(item, StarExpr) for item in replacements.items
):
for checks, rep_node in zip(checkers, replacements.items):
check_node, check_type = checks
check_node(rep_node)
else:
for checks, rep_type in zip(checkers, rep_types):
check_node, check_type = checks
check_type(rep_type)
def check_mapping_str_interpolation(
self,
specifiers: list[ConversionSpecifier],
replacements: Expression,
expr: FormatStringExpr,
) -> None:
"""Check % string interpolation with names specifiers '%(name)s' % {'name': 'John'}."""
if isinstance(replacements, DictExpr) and all(
isinstance(k, (StrExpr, BytesExpr)) for k, v in replacements.items
):
mapping: dict[str, Type] = {}
for k, v in replacements.items:
if isinstance(expr, BytesExpr):
# Special case: for bytes formatting keys must be bytes.
if not isinstance(k, BytesExpr):
self.msg.fail(
"Dictionary keys in bytes formatting must be bytes, not strings",
expr,
code=codes.STRING_FORMATTING,
)
key_str = cast(FormatStringExpr, k).value
mapping[key_str] = self.accept(v)
for specifier in specifiers:
if specifier.conv_type == "%":
# %% is allowed in mappings, no checking is required
continue
assert specifier.key is not None
if specifier.key not in mapping:
self.msg.key_not_in_mapping(specifier.key, replacements)
return
rep_type = mapping[specifier.key]
assert specifier.conv_type is not None
expected_type = self.conversion_type(specifier.conv_type, replacements, expr)
if expected_type is None:
return
self.chk.check_subtype(
rep_type,
expected_type,
replacements,
message_registry.INCOMPATIBLE_TYPES_IN_STR_INTERPOLATION,
"expression has type",
f"placeholder with key '{specifier.key}' has type",
code=codes.STRING_FORMATTING,
)
if specifier.conv_type == "s":
self.check_s_special_cases(expr, rep_type, expr)
else:
rep_type = self.accept(replacements)
dict_type = self.build_dict_type(expr)
self.chk.check_subtype(
rep_type,
dict_type,
replacements,
message_registry.FORMAT_REQUIRES_MAPPING,
"expression has type",
"expected type for mapping is",
code=codes.STRING_FORMATTING,
)
def build_dict_type(self, expr: FormatStringExpr) -> Type:
"""Build expected mapping type for right operand in % formatting."""
any_type = AnyType(TypeOfAny.special_form)
if isinstance(expr, BytesExpr):
bytes_type = self.chk.named_generic_type("builtins.bytes", [])
return self.chk.named_generic_type(
"_typeshed.SupportsKeysAndGetItem", [bytes_type, any_type]
)
elif isinstance(expr, StrExpr):
str_type = self.chk.named_generic_type("builtins.str", [])
return self.chk.named_generic_type(
"_typeshed.SupportsKeysAndGetItem", [str_type, any_type]
)
else:
assert False, "Unreachable"
def build_replacement_checkers(
self, specifiers: list[ConversionSpecifier], context: Context, expr: FormatStringExpr
) -> list[Checkers] | None:
checkers: list[Checkers] = []
for specifier in specifiers:
checker = self.replacement_checkers(specifier, context, expr)
if checker is None:
return None
checkers.extend(checker)
return checkers
def replacement_checkers(
self, specifier: ConversionSpecifier, context: Context, expr: FormatStringExpr
) -> list[Checkers] | None:
"""Returns a list of tuples of two functions that check whether a replacement is
of the right type for the specifier. The first function takes a node and checks
its type in the right type context. The second function just checks a type.
"""
checkers: list[Checkers] = []
if specifier.width == "*":
checkers.append(self.checkers_for_star(context))
if specifier.precision == "*":
checkers.append(self.checkers_for_star(context))
if specifier.conv_type == "c":
c = self.checkers_for_c_type(specifier.conv_type, context, expr)
if c is None:
return None
checkers.append(c)
elif specifier.conv_type is not None and specifier.conv_type != "%":
c = self.checkers_for_regular_type(specifier.conv_type, context, expr)
if c is None:
return None
checkers.append(c)
return checkers
def checkers_for_star(self, context: Context) -> Checkers:
"""Returns a tuple of check functions that check whether, respectively,
a node or a type is compatible with a star in a conversion specifier.
"""
expected = self.named_type("builtins.int")
def check_type(type: Type) -> bool:
expected = self.named_type("builtins.int")
return self.chk.check_subtype(
type, expected, context, "* wants int", code=codes.STRING_FORMATTING
)
def check_expr(expr: Expression) -> None:
type = self.accept(expr, expected)
check_type(type)
return check_expr, check_type
def check_placeholder_type(self, typ: Type, expected_type: Type, context: Context) -> bool:
return self.chk.check_subtype(
typ,
expected_type,
context,
message_registry.INCOMPATIBLE_TYPES_IN_STR_INTERPOLATION,
"expression has type",
"placeholder has type",
code=codes.STRING_FORMATTING,
)
def checkers_for_regular_type(
self, conv_type: str, context: Context, expr: FormatStringExpr
) -> Checkers | None:
"""Returns a tuple of check functions that check whether, respectively,
a node or a type is compatible with 'type'. Return None in case of an error.
"""
expected_type = self.conversion_type(conv_type, context, expr)
if expected_type is None:
return None
def check_type(typ: Type) -> bool:
assert expected_type is not None
ret = self.check_placeholder_type(typ, expected_type, context)
if ret and conv_type == "s":
ret = self.check_s_special_cases(expr, typ, context)
return ret
def check_expr(expr: Expression) -> None:
type = self.accept(expr, expected_type)
check_type(type)
return check_expr, check_type
def check_s_special_cases(self, expr: FormatStringExpr, typ: Type, context: Context) -> bool:
"""Additional special cases for %s in bytes vs string context."""
if isinstance(expr, StrExpr):
# Couple special cases for string formatting.
if has_type_component(typ, "builtins.bytes"):
self.msg.fail(
'If x = b\'abc\' then "%s" % x produces "b\'abc\'", not "abc". '
'If this is desired behavior use "%r" % x. Otherwise, decode the bytes',
context,
code=codes.STR_BYTES_PY3,
)
return False
if isinstance(expr, BytesExpr):
# A special case for bytes formatting: b'%s' actually requires bytes on Python 3.
if has_type_component(typ, "builtins.str"):
self.msg.fail(
"On Python 3 b'%s' requires bytes, not string",
context,
code=codes.STRING_FORMATTING,
)
return False
return True
def checkers_for_c_type(
self, type: str, context: Context, format_expr: FormatStringExpr
) -> Checkers | None:
"""Returns a tuple of check functions that check whether, respectively,
a node or a type is compatible with 'type' that is a character type.
"""
expected_type = self.conversion_type(type, context, format_expr)
if expected_type is None:
return None
def check_type(type: Type) -> bool:
assert expected_type is not None
if isinstance(format_expr, BytesExpr):
err_msg = '"%c" requires an integer in range(256) or a single byte'
else:
err_msg = '"%c" requires int or char'
return self.chk.check_subtype(
type,
expected_type,
context,
err_msg,
"expression has type",
code=codes.STRING_FORMATTING,
)
def check_expr(expr: Expression) -> None:
"""int, or str with length 1"""
type = self.accept(expr, expected_type)
# We need further check with expr to make sure that
# it has exact one char or one single byte.
if check_type(type):
# Python 3 doesn't support b'%c' % str
if (
isinstance(format_expr, BytesExpr)
and isinstance(expr, BytesExpr)
and len(expr.value) != 1
):
self.msg.requires_int_or_single_byte(context)
elif isinstance(expr, (StrExpr, BytesExpr)) and len(expr.value) != 1:
self.msg.requires_int_or_char(context)
return check_expr, check_type
def conversion_type(
self, p: str, context: Context, expr: FormatStringExpr, format_call: bool = False
) -> Type | None:
"""Return the type that is accepted for a string interpolation conversion specifier type.
Note that both Python's float (e.g. %f) and integer (e.g. %d)
specifier types accept both float and integers.
The 'format_call' argument indicates whether this type came from % interpolation or from
a str.format() call, the meaning of few formatting types are different.
"""
NUMERIC_TYPES = NUMERIC_TYPES_NEW if format_call else NUMERIC_TYPES_OLD
INT_TYPES = REQUIRE_INT_NEW if format_call else REQUIRE_INT_OLD
if p == "b" and not format_call:
if not isinstance(expr, BytesExpr):
self.msg.fail(
'Format character "b" is only supported on bytes patterns',
context,
code=codes.STRING_FORMATTING,
)
return None
return self.named_type("builtins.bytes")
elif p == "a":
# TODO: return type object?
return AnyType(TypeOfAny.special_form)
elif p in ["s", "r"]:
return AnyType(TypeOfAny.special_form)
elif p in NUMERIC_TYPES:
if p in INT_TYPES:
numeric_types = [self.named_type("builtins.int")]
else:
numeric_types = [
self.named_type("builtins.int"),
self.named_type("builtins.float"),
]
if not format_call:
if p in FLOAT_TYPES:
numeric_types.append(self.named_type("typing.SupportsFloat"))
else:
numeric_types.append(self.named_type("typing.SupportsInt"))
return UnionType.make_union(numeric_types)
elif p in ["c"]:
if isinstance(expr, BytesExpr):
return UnionType(
[self.named_type("builtins.int"), self.named_type("builtins.bytes")]
)
else:
return UnionType(
[self.named_type("builtins.int"), self.named_type("builtins.str")]
)
else:
self.msg.unsupported_placeholder(p, context)
return None
#
# Helpers
#
def named_type(self, name: str) -> Instance:
"""Return an instance type with type given by the name and no type
arguments. Alias for TypeChecker.named_type.
"""
return self.chk.named_type(name)
def accept(self, expr: Expression, context: Type | None = None) -> Type:
"""Type check a node. Alias for TypeChecker.accept."""
return self.chk.expr_checker.accept(expr, context)
def has_type_component(typ: Type, fullname: str) -> bool:
"""Is this a specific instance type, or a union that contains it?
We use this ad-hoc function instead of a proper visitor or subtype check
because some str vs bytes errors are strictly speaking not runtime errors,
but rather highly counter-intuitive behavior. This is similar to what is used for
--strict-equality.
"""
typ = get_proper_type(typ)
if isinstance(typ, Instance):
return typ.type.has_base(fullname)
elif isinstance(typ, TypeVarType):
return has_type_component(typ.upper_bound, fullname) or any(
has_type_component(v, fullname) for v in typ.values
)
elif isinstance(typ, UnionType):
return any(has_type_component(t, fullname) for t in typ.relevant_items())
return False
| StringFormatterChecker |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_engine.py | {
"start": 13854,
"end": 19798
} | class ____(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
__requires__ = ("pyodbc_fast_executemany",)
def test_flag_on(self, metadata):
t = Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
t.create(testing.db)
eng = engines.testing_engine(
options={"fast_executemany": True, "use_insertmanyvalues": False}
)
@event.listens_for(eng, "after_cursor_execute")
def after_cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
if executemany:
assert cursor.fast_executemany
with eng.begin() as conn:
conn.execute(
t.insert(),
[{"id": i, "data": "data_%d" % i} for i in range(100)],
)
conn.execute(t.insert(), {"id": 200, "data": "data_200"})
@testing.variation("add_event", [True, False])
@testing.variation("setinputsizes", [True, False])
@testing.variation("fastexecutemany", [True, False])
@testing.variation("insertmanyvalues", [False]) # disabled due to #9603
@testing.variation("broken_types", [True, False])
def test_insert_typing(
self,
metadata,
testing_engine,
add_event,
fastexecutemany,
setinputsizes,
insertmanyvalues,
broken_types,
):
"""tests for executemany + datatypes that are sensitive to
"setinputsizes"
Issues tested here include:
#6058 - turn off setinputsizes by default, since it breaks with
fast_executemany (version 1.4)
#8177 - turn setinputsizes back **on** by default, just skip it only
for cursor.executemany() calls when fast_executemany is set;
otherwise use it. (version 2.0)
#8917 - oops, we added "insertmanyvalues" but forgot to adjust the
check in #8177 above to accommodate for this, so
setinputsizes was getting turned off for "insertmanyvalues"
if fast_executemany was still set
"""
# changes for issue #8177 have eliminated all current expected
# failures, but we'll leave this here in case we need it again
# (... four months pass ...)
# surprise! we need it again. woop! for #8917
expect_failure = (
broken_types
and not setinputsizes
and insertmanyvalues
and not fastexecutemany
)
engine = testing_engine(
options={
"fast_executemany": fastexecutemany,
"use_setinputsizes": setinputsizes,
"use_insertmanyvalues": insertmanyvalues,
}
)
observations = Table(
"Observations",
metadata,
Column("id", Integer, nullable=False, primary_key=True),
Column("obs1", Numeric(19, 15), nullable=True),
Column("obs2", Numeric(19, 15), nullable=True),
Column("obs3", String(10)),
schema="test_schema",
)
with engine.begin() as conn:
metadata.create_all(conn)
records = [
{
"id": 1,
"obs1": Decimal("60.1722066045792"),
"obs2": Decimal("24.929289808227466"),
"obs3": "obs3",
},
{
"id": 2,
"obs1": Decimal("60.16325715615476"),
"obs2": Decimal("24.93886459535008"),
"obs3": 5 if broken_types else "obs3",
},
{
"id": 3,
"obs1": Decimal("60.16445165123469"),
"obs2": Decimal("24.949856300109516"),
"obs3": 7 if broken_types else "obs3",
},
]
assert_records = [
{
"id": rec["id"],
"obs1": rec["obs1"],
"obs2": rec["obs2"],
"obs3": str(rec["obs3"]),
}
for rec in records
]
if add_event:
canary = mock.Mock()
@event.listens_for(engine, "do_setinputsizes")
def do_setinputsizes(
inputsizes, cursor, statement, parameters, context
):
canary(list(inputsizes.values()))
for key in inputsizes:
if isinstance(key.type, Numeric):
inputsizes[key] = (
engine.dialect.dbapi.SQL_DECIMAL,
19,
15,
)
with engine.begin() as conn:
if expect_failure:
with expect_raises(DBAPIError):
conn.execute(observations.insert(), records)
else:
conn.execute(observations.insert(), records)
eq_(
conn.execute(
select(observations).order_by(observations.c.id)
)
.mappings()
.all(),
assert_records,
)
if add_event:
if setinputsizes:
eq_(
canary.mock_calls,
[
# float for int? this seems wrong
mock.call(
[
float,
float,
float,
engine.dialect.dbapi.SQL_VARCHAR,
]
),
mock.call([]),
],
)
else:
eq_(canary.mock_calls, [])
| FastExecutemanyTest |
python | ray-project__ray | python/ray/data/preprocessors/vectorizer.py | {
"start": 372,
"end": 7094
} | class ____(Preprocessor):
"""Count the frequency of tokens using the
`hashing trick <https://en.wikipedia.org/wiki/Feature_hashing>`_.
This preprocessors creates a list column for each input column. For each row,
the list contains the frequency counts of tokens (for CountVectorizer) or hash values
(for HashingVectorizer). For HashingVectorizer, the list will have length
``num_features``. If ``num_features`` is large enough relative to the size of your
vocabulary, then each index approximately corresponds to the frequency of a unique
token.
:class:`HashingVectorizer` is memory efficient and quick to pickle. However, given a
transformed column, you can't know which tokens correspond to it. This might make it
hard to determine which tokens are important to your model.
.. note::
This preprocessor transforms each input column to a
`document-term matrix <https://en.wikipedia.org/wiki/Document-term_matrix>`_.
A document-term matrix is a table that describes the frequency of tokens in a
collection of documents. For example, the strings `"I like Python"` and `"I
dislike Python"` might have the document-term matrix below:
.. code-block::
corpus_I corpus_Python corpus_dislike corpus_like
0 1 1 1 0
1 1 1 0 1
To generate the matrix, you typically map each token to a unique index. For
example:
.. code-block::
token index
0 I 0
1 Python 1
2 dislike 2
3 like 3
The problem with this approach is that memory use scales linearly with the size
of your vocabulary. :class:`HashingVectorizer` circumvents this problem by
computing indices with a hash function:
:math:`\\texttt{index} = hash(\\texttt{token})`.
.. warning::
Sparse matrices aren't currently supported. If you use a large ``num_features``,
this preprocessor might behave poorly.
Examples:
>>> import pandas as pd
>>> import ray
>>> from ray.data.preprocessors import HashingVectorizer
>>>
>>> df = pd.DataFrame({
... "corpus": [
... "Jimmy likes volleyball",
... "Bob likes volleyball too",
... "Bob also likes fruit jerky"
... ]
... })
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>>
>>> vectorizer = HashingVectorizer(["corpus"], num_features=8)
>>> vectorizer.fit_transform(ds).to_pandas() # doctest: +SKIP
corpus
0 [1, 0, 1, 0, 0, 0, 0, 1]
1 [1, 0, 1, 0, 0, 0, 1, 1]
2 [0, 0, 1, 1, 0, 2, 1, 0]
:class:`HashingVectorizer` can also be used in append mode by providing the
name of the output_columns that should hold the encoded values.
>>> vectorizer = HashingVectorizer(["corpus"], num_features=8, output_columns=["corpus_hashed"])
>>> vectorizer.fit_transform(ds).to_pandas() # doctest: +SKIP
corpus corpus_hashed
0 Jimmy likes volleyball [1, 0, 1, 0, 0, 0, 0, 1]
1 Bob likes volleyball too [1, 0, 1, 0, 0, 0, 1, 1]
2 Bob also likes fruit jerky [0, 0, 1, 1, 0, 2, 1, 0]
Args:
columns: The columns to separately tokenize and count.
num_features: The number of features used to represent the vocabulary. You
should choose a value large enough to prevent hash collisions between
distinct tokens.
tokenization_fn: The function used to generate tokens. This function
should accept a string as input and return a list of tokens as
output. If unspecified, the tokenizer uses a function equivalent to
``lambda s: s.split(" ")``.
output_columns: The names of the transformed columns. If None, the transformed
columns will be the same as the input columns. If not None, the length of
``output_columns`` must match the length of ``columns``, othwerwise an error
will be raised.
.. seealso::
:class:`CountVectorizer`
Another method for counting token frequencies. Unlike :class:`HashingVectorizer`,
:class:`CountVectorizer` creates a feature for each unique token. This
enables you to compute the inverse transformation.
:class:`FeatureHasher`
This preprocessor is similar to :class:`HashingVectorizer`, except it expects
a table describing token frequencies. In contrast,
:class:`FeatureHasher` expects a column containing documents.
""" # noqa: E501
_is_fittable = False
def __init__(
self,
columns: List[str],
num_features: int,
tokenization_fn: Optional[Callable[[str], List[str]]] = None,
*,
output_columns: Optional[List[str]] = None,
):
super().__init__()
self.columns = columns
self.num_features = num_features
self.tokenization_fn = tokenization_fn or simple_split_tokenizer
self.output_columns = Preprocessor._derive_and_validate_output_columns(
columns, output_columns
)
def _transform_pandas(self, df: pd.DataFrame):
def hash_count(tokens: List[str]) -> Counter:
hashed_tokens = [simple_hash(token, self.num_features) for token in tokens]
return Counter(hashed_tokens)
for col, output_col in zip(self.columns, self.output_columns):
tokenized = df[col].map(self.tokenization_fn)
hashed = tokenized.map(hash_count)
# Create a list to store the hash columns
hash_columns = []
for i in range(self.num_features):
series = hashed.map(lambda counts: counts[i])
series.name = f"hash_{i}"
hash_columns.append(series)
# Concatenate all hash columns into a single list column
df[output_col] = pd.concat(hash_columns, axis=1).values.tolist()
return df
def __repr__(self):
fn_name = getattr(self.tokenization_fn, "__name__", self.tokenization_fn)
return (
f"{self.__class__.__name__}(columns={self.columns!r}, "
f"num_features={self.num_features!r}, tokenization_fn={fn_name}, "
f"output_columns={self.output_columns!r})"
)
@PublicAPI(stability="alpha")
| HashingVectorizer |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_trace.py | {
"start": 8373,
"end": 31402
} | class ____(
OrganizationEventsTraceEndpointBase, UptimeResultEAPTestCase
):
url_name = "sentry-api-0-organization-trace"
FEATURES = ["organizations:trace-spans-format"]
def assert_event(self, result, event_data, message):
assert result["transaction"] == event_data.transaction, message
assert result["event_id"] == event_data.data["contexts"]["trace"]["span_id"], message
assert result["start_timestamp"] == event_data.data["start_timestamp"], message
assert result["project_slug"] == event_data.project.slug, message
assert result["sdk_name"] == event_data.data["sdk"]["name"], message
assert result["transaction_id"] == event_data.event_id, message
def get_transaction_children(self, event):
"""Assumes that the test setup only gives each event 1 txn child"""
children = []
for child in event["children"]:
if child["is_transaction"]:
children.append(child)
elif child["event_type"] == "span":
children.extend(child["children"])
return sorted(children, key=lambda event: event["description"])
def assert_trace_data(self, root, gen2_no_children=True):
"""see the setUp docstring for an idea of what the response structure looks like"""
self.assert_event(root, self.root_event, "root")
assert root["parent_span_id"] is None
assert root["duration"] == 3000
# 3 transactions, 2 child spans
assert len(root["children"]) == 5
transaction_children = self.get_transaction_children(root)
assert len(transaction_children) == 3
assert (
root["measurements"]["measurements.lcp"]
== self.root_event.data["measurements"]["lcp"]["value"]
)
assert (
root["measurements"]["measurements.fcp"]
== self.root_event.data["measurements"]["fcp"]["value"]
)
self.assert_performance_issues(root)
for i, gen1 in enumerate(transaction_children):
self.assert_event(gen1, self.gen1_events[i], f"gen1_{i}")
assert gen1["parent_span_id"] == self.root_span_ids[i]
assert gen1["duration"] == 2000
assert len(gen1["children"]) == 1
gen2 = self.get_transaction_children(gen1)[0]
self.assert_event(gen2, self.gen2_events[i], f"gen2_{i}")
assert gen2["parent_span_id"] == self.gen1_span_ids[i]
assert gen2["duration"] == 1000
# Only the first gen2 descendent has a child
if i == 0:
assert len(gen2["children"]) == 4
gen3 = self.get_transaction_children(gen2)[0]
self.assert_event(gen3, self.gen3_event, f"gen3_{i}")
assert gen3["parent_span_id"] == self.gen2_span_id
assert gen3["duration"] == 500
assert len(gen3["children"]) == 0
elif gen2_no_children:
assert len(gen2["children"]) == 0
def assert_performance_issues(self, root):
"""Broken in the non-spans endpoint, but we're not maintaining that anymore"""
def client_get(self, data, url=None):
if url is None:
url = self.url
return self.client.get(
url,
data,
format="json",
)
def test_no_projects(self) -> None:
user = self.create_user()
org = self.create_organization(owner=user)
self.login_as(user=user)
url = reverse(
self.url_name,
kwargs={"organization_id_or_slug": org.slug, "trace_id": uuid4().hex},
)
with self.feature(self.FEATURES):
response = self.client.get(
url,
format="json",
)
assert response.status_code == 404, response.content
def test_simple(self) -> None:
self.load_trace(is_eap=True)
with self.feature(self.FEATURES):
response = self.client_get(
data={"timestamp": self.day_ago},
)
assert response.status_code == 200, response.content
data = response.data
assert len(data) == 1
self.assert_trace_data(data[0])
def test_ignore_project_param(self) -> None:
self.load_trace(is_eap=True)
with self.feature(self.FEATURES):
# The trace endpoint should ignore the project param
response = self.client_get(
data={"project": self.project.id, "timestamp": self.day_ago},
)
assert response.status_code == 200, response.content
data = response.data
assert len(data) == 1
self.assert_trace_data(data[0])
def test_with_errors_data(self) -> None:
self.load_trace(is_eap=True)
_, start = self.get_start_end_from_day_ago(123)
error_data = load_data(
"javascript",
timestamp=start,
)
error_data["contexts"]["trace"] = {
"type": "trace",
"trace_id": self.trace_id,
"span_id": self.root_event.data["contexts"]["trace"]["span_id"],
}
error_data["tags"] = [["transaction", "/transaction/gen1-0"]]
error = self.store_event(error_data, project_id=self.gen1_project.id)
with self.feature(self.FEATURES):
response = self.client_get(
data={"timestamp": self.day_ago},
)
assert response.status_code == 200, response.content
data = response.data
assert len(data) == 1
self.assert_trace_data(data[0])
assert len(data[0]["errors"]) == 1
error_event = data[0]["errors"][0]
assert error_event is not None
assert error_event["event_id"] == error.data["event_id"]
assert error_event["project_slug"] == self.gen1_project.slug
assert error_event["level"] == "error"
assert error_event["issue_id"] == error.group_id
assert error_event["start_timestamp"] == error_data["timestamp"]
def test_with_errors_data_with_overlapping_span_id(self) -> None:
self.load_trace(is_eap=True)
_, start = self.get_start_end_from_day_ago(123)
error_data = load_data(
"javascript",
timestamp=start,
)
error_data["contexts"]["trace"] = {
"type": "trace",
"trace_id": self.trace_id,
"span_id": self.root_event.data["contexts"]["trace"]["span_id"],
}
error_data["tags"] = [["transaction", "/transaction/gen1-0"]]
error = self.store_event(error_data, project_id=self.gen1_project.id)
error_2 = self.store_event(error_data, project_id=self.gen1_project.id)
with self.feature(self.FEATURES):
response = self.client_get(
data={"timestamp": self.day_ago},
)
assert response.status_code == 200, response.content
data = response.data
assert len(data) == 1
self.assert_trace_data(data[0])
assert len(data[0]["errors"]) == 2
error_event_1 = data[0]["errors"][0]
error_event_2 = data[0]["errors"][1]
assert error_event_1["event_id"] in [error.event_id, error_2.event_id]
assert error_event_2["event_id"] in [error.event_id, error_2.event_id]
assert error_event_1["event_id"] != error_event_2["event_id"]
def test_with_performance_issues(self) -> None:
self.load_trace(is_eap=True)
with self.feature(self.FEATURES):
response = self.client_get(
data={"timestamp": self.day_ago},
)
assert response.status_code == 200, response.content
data = response.data
assert len(data) == 1
self.assert_trace_data(data[0])
for child in data[0]["children"]:
if child["event_id"] == "0012001200120012":
break
assert len(child["occurrences"]) == 1
error_event = child["occurrences"][0]
assert error_event is not None
assert error_event["event_id"] == self.root_event.event_id
assert error_event["description"] == "File IO on Main Thread"
assert error_event["project_slug"] == self.project.slug
assert error_event["level"] == "info"
def test_with_only_errors(self) -> None:
start, _ = self.get_start_end_from_day_ago(1000)
error_data = load_data(
"javascript",
timestamp=start,
)
error_data["contexts"]["trace"] = {
"type": "trace",
"trace_id": self.trace_id,
"span_id": "a" * 16,
}
error_data["tags"] = [["transaction", "/transaction/gen1-0"]]
error = self.store_event(error_data, project_id=self.project.id)
with self.feature(self.FEATURES):
response = self.client_get(
data={"timestamp": self.day_ago},
)
assert response.status_code == 200, response.content
data = response.data
assert len(data) == 1
assert data[0]["event_id"] == error.event_id
def test_with_additional_attributes(self) -> None:
self.load_trace(is_eap=True)
with self.feature(self.FEATURES):
response = self.client_get(
data={
"timestamp": self.day_ago,
"additional_attributes": [
"gen_ai.request.model",
"gen_ai.usage.total_tokens",
],
},
)
assert response.status_code == 200, response.content
data = response.data
assert len(data) == 1
# The root span doesn't have any of the additional attributes and returns defaults
assert data[0]["additional_attributes"]["gen_ai.request.model"] == ""
assert data[0]["additional_attributes"]["gen_ai.usage.total_tokens"] == 0
assert data[0]["children"][0]["additional_attributes"]["gen_ai.request.model"] == "gpt-4o"
assert data[0]["children"][0]["additional_attributes"]["gen_ai.usage.total_tokens"] == 100
def test_with_target_error(self) -> None:
start, _ = self.get_start_end_from_day_ago(1000)
error_data = load_data(
"javascript",
timestamp=start,
)
error_data["contexts"]["trace"] = {
"type": "trace",
"trace_id": self.trace_id,
"span_id": "a" * 16,
}
error_data["tags"] = [["transaction", "/transaction/gen1-0"]]
error = self.store_event(error_data, project_id=self.project.id)
for _ in range(5):
self.store_event(error_data, project_id=self.project.id)
with mock.patch("sentry.snuba.trace.ERROR_LIMIT", 1):
with self.feature(self.FEATURES):
response = self.client_get(
data={"timestamp": self.day_ago, "errorId": error.event_id},
)
assert response.status_code == 200, response.content
data = response.data
assert len(data) == 1
assert data[0]["event_id"] == error.event_id
def test_with_invalid_error_id(self) -> None:
with self.feature(self.FEATURES):
response = self.client_get(
data={"timestamp": self.day_ago, "errorId": ",blah blah,"},
)
assert response.status_code == 400, response.content
def test_with_date_outside_retention(self) -> None:
with self.options({"system.event-retention-days": 10}):
with self.feature(self.FEATURES):
response = self.client_get(
data={"timestamp": before_now(days=120)},
)
assert response.status_code == 400, response.content
def test_orphan_trace(self) -> None:
self.load_trace(is_eap=True)
orphan_event = self.create_event(
trace_id=self.trace_id,
transaction="/transaction/orphan",
spans=[],
project_id=self.project.id,
# Random span id so there's no parent
parent_span_id=uuid4().hex[:16],
milliseconds=500,
is_eap=True,
)
with self.feature(self.FEATURES):
response = self.client_get(
data={"timestamp": self.day_ago},
)
assert response.status_code == 200, response.content
data = response.data
assert len(data) == 2
if len(data[0]["children"]) == 0:
orphan = data[0]
else:
orphan = data[1]
self.assert_event(orphan, orphan_event, "orphan")
def _find_uptime_checks(self, data):
"""Helper to find all uptime checks in the response data"""
uptime_checks = []
for item in data:
if item.get("event_type") == "uptime_check":
uptime_checks.append(item)
return uptime_checks
def _create_uptime_result_with_original_url(self, original_url=None, **kwargs):
"""Helper to create uptime result with original_url attribute"""
if original_url is None:
original_url = kwargs.get("request_url", "https://example.com")
kwargs["original_url"] = original_url
kwargs.setdefault("request_body_size_bytes", None)
kwargs.setdefault("response_body_size_bytes", None)
return self.create_eap_uptime_result(**kwargs)
def assert_expected_results(self, response_data, input_trace_items, expected_children_ids=None):
"""Assert that API response matches expected results from input trace items."""
uptime_checks = [item for item in response_data if item.get("event_type") == "uptime_check"]
def sort_key(item):
guid = (
item.attributes.get("guid", ProtoAttributeValue(val_str="")).string_value
if hasattr(item, "attributes")
else item.get("additional_attributes", {}).get("guid", "")
)
seq = (
item.attributes.get("request_sequence", ProtoAttributeValue(val_int=0)).int_value
if hasattr(item, "attributes")
else item.get("additional_attributes", {}).get("request_sequence", 0)
)
return guid, seq
sorted_items = sorted(input_trace_items, key=sort_key)
uptime_checks.sort(key=lambda s: sort_key(s))
for i, (actual, expected_item) in enumerate(zip(uptime_checks, sorted_items)):
expected = self._trace_item_to_api_span(expected_item)
actual_without_children = {k: v for k, v in actual.items() if k != "children"}
expected_without_children = {k: v for k, v in expected.items() if k != "children"}
assert (
actual_without_children == expected_without_children
), f"Span {i} differs (excluding children)"
if expected_children_ids:
final_span = max(
uptime_checks,
key=lambda s: s.get("additional_attributes", {}).get("request_sequence", -1),
)
actual_children = final_span.get("children", [])
assert len(actual_children) == len(
expected_children_ids
), f"Expected {len(expected_children_ids)} children, got {len(actual_children)}"
actual_child_txns = {child.get("transaction") for child in actual_children}
for expected_id in expected_children_ids:
assert (
expected_id in actual_child_txns
), f"Expected '{expected_id}' transaction in children"
def _trace_item_to_api_span(self, trace_item: TraceItem, children=None) -> dict:
"""Convert a TraceItem to the exact format returned by the API."""
attrs = trace_item.attributes
row_dict = {}
for attr_name, attr_value in attrs.items():
if attr_value.HasField("string_value"):
row_dict[attr_name] = ProtoAttributeValue(val_str=attr_value.string_value)
elif attr_value.HasField("int_value"):
row_dict[attr_name] = ProtoAttributeValue(val_int=attr_value.int_value)
elif attr_value.HasField("double_value"):
row_dict[attr_name] = ProtoAttributeValue(val_double=attr_value.double_value)
elif attr_value.HasField("bool_value"):
row_dict[attr_name] = ProtoAttributeValue(val_bool=attr_value.bool_value)
row_dict["sentry.item_id"] = ProtoAttributeValue(val_str=trace_item.item_id.hex())
row_dict["sentry.project_id"] = ProtoAttributeValue(val_int=trace_item.project_id)
row_dict["sentry.organization_id"] = ProtoAttributeValue(val_int=trace_item.organization_id)
row_dict["sentry.trace_id"] = ProtoAttributeValue(val_str=trace_item.trace_id)
row_dict["sentry.timestamp"] = ProtoAttributeValue(
val_double=trace_item.timestamp.ToSeconds()
)
row_dict["sentry.item_type"] = ProtoAttributeValue(val_int=trace_item.item_type)
project_slugs = {trace_item.project_id: self.project.slug}
span = _serialize_columnar_uptime_item(row_dict, project_slugs)
if children:
span["children"] = children
return span
def test_with_uptime_results(self):
"""Test that uptime results are included when include_uptime=1"""
self.load_trace(is_eap=True)
features = self.FEATURES
redirect_result = self._create_uptime_result_with_original_url(
organization=self.organization,
project=self.project,
trace_id=self.trace_id,
guid="check-123",
subscription_id="sub-456",
check_status="success",
http_status_code=301,
request_sequence=0,
request_url="https://example.com",
scheduled_check_time=self.day_ago,
check_duration_us=300000,
)
final_result = self._create_uptime_result_with_original_url(
organization=self.organization,
project=self.project,
trace_id=self.trace_id,
guid="check-123",
subscription_id="sub-456",
check_status="success",
http_status_code=200,
request_sequence=1,
request_url="https://www.example.com",
original_url="https://example.com",
scheduled_check_time=self.day_ago,
check_duration_us=500000,
)
self.store_uptime_results([redirect_result, final_result])
with self.feature(features):
response = self.client_get(
data={"timestamp": self.day_ago, "include_uptime": "1"},
)
assert response.status_code == 200, response.content
data = response.data
self.assert_expected_results(
data, [redirect_result, final_result], expected_children_ids=["root"]
)
def test_without_uptime_results(self):
"""Test that uptime results are not queried when include_uptime is not set"""
self.load_trace(is_eap=True)
uptime_result = self._create_uptime_result_with_original_url(
organization=self.organization,
project=self.project,
trace_id=self.trace_id,
guid="check-456",
subscription_id="sub-789",
check_status="success",
http_status_code=200,
request_sequence=0,
request_url="https://test.com",
scheduled_check_time=self.day_ago,
)
self.store_uptime_results([uptime_result])
with self.feature(self.FEATURES):
response = self.client_get(
data={"timestamp": self.day_ago},
)
assert response.status_code == 200, response.content
data = response.data
assert len(data) == 1
self.assert_trace_data(data[0])
uptime_checks = self._find_uptime_checks(data)
assert len(uptime_checks) == 0
def test_uptime_root_tree_with_orphaned_spans(self):
"""Test that orphaned spans are parented to the final uptime request"""
self.load_trace(is_eap=True)
self.create_event(
trace_id=self.trace_id,
transaction="/transaction/orphan",
spans=[],
project_id=self.project.id,
parent_span_id=uuid4().hex[:16],
milliseconds=500,
is_eap=True,
)
redirect_result = self._create_uptime_result_with_original_url(
organization=self.organization,
project=self.project,
trace_id=self.trace_id,
guid="check-123",
check_status="success",
http_status_code=301,
request_sequence=0,
request_url="https://example.com",
scheduled_check_time=self.day_ago,
check_duration_us=300000,
)
final_result = self._create_uptime_result_with_original_url(
organization=self.organization,
project=self.project,
trace_id=self.trace_id,
guid="check-123",
check_status="success",
http_status_code=200,
request_sequence=1,
request_url="https://www.example.com",
scheduled_check_time=self.day_ago,
)
features = self.FEATURES
self.store_uptime_results([redirect_result, final_result])
with self.feature(features):
response = self.client_get(
data={"timestamp": self.day_ago, "include_uptime": "1"},
)
assert response.status_code == 200, response.content
data = response.data
self.assert_expected_results(
data,
[redirect_result, final_result],
expected_children_ids=["root", "/transaction/orphan"],
)
def test_uptime_root_tree_without_orphans(self):
"""Test uptime results when there are no orphaned spans"""
self.load_trace(is_eap=True)
uptime_result = self._create_uptime_result_with_original_url(
organization=self.organization,
project=self.project,
trace_id=self.trace_id,
guid="check-456",
check_status="success",
http_status_code=200,
request_sequence=0,
request_url="https://test.com",
scheduled_check_time=self.day_ago,
check_duration_us=200000,
)
features = self.FEATURES
self.store_uptime_results([uptime_result])
with self.feature(features):
response = self.client_get(
data={"timestamp": self.day_ago, "include_uptime": "1"},
)
assert response.status_code == 200, response.content
data = response.data
self.assert_expected_results(data, [uptime_result], expected_children_ids=["root"])
| OrganizationEventsTraceEndpointTest |
python | scrapy__scrapy | tests/test_utils_log.py | {
"start": 594,
"end": 928
} | class ____:
def test_failure(self):
try:
0 / 0
except ZeroDivisionError:
exc_info = sys.exc_info()
failure = Failure()
assert exc_info == failure_to_exc_info(failure)
def test_non_failure(self):
assert failure_to_exc_info("test") is None
| TestFailureToExcInfo |
python | huggingface__transformers | tests/models/auto/test_tokenization_auto.py | {
"start": 1966,
"end": 23327
} | class ____(unittest.TestCase):
def setUp(self):
transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def test_tokenizer_from_pretrained(self):
for model_name in ("google-bert/bert-base-uncased", "google-bert/bert-base-cased"):
tokenizer = AutoTokenizer.from_pretrained(model_name)
self.assertIsNotNone(tokenizer)
self.assertIsInstance(tokenizer, (BertTokenizer))
self.assertGreater(len(tokenizer), 0)
for model_name in ["openai-community/gpt2", "openai-community/gpt2-medium"]:
tokenizer = AutoTokenizer.from_pretrained(model_name)
self.assertIsNotNone(tokenizer)
self.assertIsInstance(tokenizer, (GPT2Tokenizer))
self.assertGreater(len(tokenizer), 0)
def test_tokenizer_from_pretrained_identifier(self):
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(tokenizer, (BertTokenizer))
self.assertEqual(tokenizer.vocab_size, 12)
def test_tokenizer_from_model_type(self):
tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
self.assertIsInstance(tokenizer, (RobertaTokenizer))
self.assertEqual(tokenizer.vocab_size, 20)
def test_tokenizer_from_tokenizer_class(self):
config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
self.assertIsInstance(config, RobertaConfig)
# Check that tokenizer_type ≠ model_type
tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
self.assertIsInstance(tokenizer, (BertTokenizer))
self.assertEqual(tokenizer.vocab_size, 12)
def test_tokenizer_from_type(self):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
self.assertIsInstance(tokenizer, BertTokenizer)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
self.assertIsInstance(tokenizer, GPT2Tokenizer)
@require_tokenizers
def test_tokenizer_from_type_fast(self):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
self.assertIsInstance(tokenizer, PreTrainedTokenizerFast)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
self.assertIsInstance(tokenizer, PreTrainedTokenizerFast)
def test_tokenizer_from_type_incorrect_name(self):
with pytest.raises(ValueError):
AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
@require_tokenizers
def test_tokenizer_identifier_with_correct_config(self):
for tokenizer_class in [BertTokenizer, AutoTokenizer]:
tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
self.assertIsInstance(tokenizer, (BertTokenizer))
self.assertEqual(tokenizer.do_lower_case, False)
self.assertEqual(tokenizer.model_max_length, 512)
@require_tokenizers
@is_flaky() # This one is flaky even with the new retry logic because it raises an unusual error
def test_tokenizer_identifier_non_existent(self):
for tokenizer_class in [BertTokenizer, AutoTokenizer]:
with self.assertRaisesRegex(
EnvironmentError,
"julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
):
_ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
def test_model_name_edge_cases_in_mappings(self):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
tokenizers = TOKENIZER_MAPPING.values()
tokenizer_names = []
for tokenizer_entry in tokenizers:
candidates = tokenizer_entry if isinstance(tokenizer_entry, tuple) else (tokenizer_entry,)
for tokenizer_cls in candidates:
if tokenizer_cls is not None:
tokenizer_names.append(tokenizer_cls.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(tokenizer_name)
@require_tokenizers
def test_from_pretrained_use_fast_toggle(self):
self.assertIsInstance(
AutoTokenizer.from_pretrained("google-bert/bert-base-cased", use_fast=False), BertTokenizer
)
self.assertIsInstance(AutoTokenizer.from_pretrained("google-bert/bert-base-cased"), BertTokenizerFast)
@require_tokenizers
def test_voxtral_tokenizer_converts_from_tekken(self):
repo_id = "mistralai/Voxtral-Mini-3B-2507"
tokenization_auto = transformers.models.auto.tokenization_auto
with (
mock.patch("transformers.utils.import_utils.is_mistral_common_available", return_value=False),
mock.patch("transformers.models.auto.tokenization_auto.is_mistral_common_available", return_value=False),
):
tokenization_auto = importlib.reload(tokenization_auto)
tokenizer = tokenization_auto.AutoTokenizer.from_pretrained(repo_id) # should not raise
self.assertIsInstance(tokenizer, PreTrainedTokenizerFast)
self.assertTrue(tokenizer.is_fast)
self.assertGreater(len(tokenizer("Voxtral")["input_ids"]), 0)
@require_tokenizers
def test_do_lower_case(self):
tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased", do_lower_case=False)
sample = "Hello, world. How are you?"
tokens = tokenizer.tokenize(sample)
self.assertEqual("[UNK]", tokens[0])
tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
tokens = tokenizer.tokenize(sample)
self.assertEqual("[UNK]", tokens[0])
@require_tokenizers
def test_PreTrainedTokenizerFast_from_pretrained(self):
tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
self.assertEqual(tokenizer.model_max_length, 512)
self.assertEqual(tokenizer.vocab_size, 30000)
self.assertEqual(tokenizer.unk_token, "[UNK]")
self.assertEqual(tokenizer.padding_side, "right")
self.assertEqual(tokenizer.truncation_side, "right")
def test_auto_tokenizer_from_local_folder(self):
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(tokenizer, (BertTokenizer))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir)
tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)
self.assertIsInstance(tokenizer2, tokenizer.__class__)
self.assertEqual(tokenizer2.vocab_size, 12)
def test_auto_tokenizer_from_local_folder_mistral_detection(self):
"""See #42374 for reference, ensuring proper mistral detection on local tokenizers"""
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-235B-A22B-Thinking-2507")
config = Qwen3MoeConfig.from_pretrained("Qwen/Qwen3-235B-A22B-Thinking-2507")
self.assertIsInstance(tokenizer, (Qwen2Tokenizer, Qwen2TokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir)
# Case 1: Tokenizer with no config associated
logger = logging.get_logger("transformers.tokenization_utils_base")
with CaptureLogger(logger) as cl:
AutoTokenizer.from_pretrained(tmp_dir)
self.assertNotIn(
"with an incorrect regex pattern: https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Instruct-2503/discussions/84#69121093e8b480e709447d5e",
cl.out,
)
# Case 2: Tokenizer with config associated
# Needed to be saved along the tokenizer to detect (non)mistral
# for a version where the regex bug occurs
config_dict = config.to_diff_dict()
config_dict["transformers_version"] = "4.57.2"
# Manually saving to avoid versioning clashes
config_path = os.path.join(tmp_dir, "config.json")
with open(config_path, "w", encoding="utf-8") as f:
json.dump(config_dict, f, indent=2, sort_keys=True)
tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)
self.assertIsInstance(tokenizer2, tokenizer.__class__)
self.assertTrue(tokenizer2.vocab_size > 100_000)
def test_auto_tokenizer_fast_no_slow(self):
tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(tokenizer, CTRLTokenizer)
def test_get_tokenizer_config(self):
# Check we can load the tokenizer config of an online model.
config = get_tokenizer_config("google-bert/bert-base-cased")
_ = config.pop("_commit_hash", None)
# If we ever update google-bert/bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(config, {"do_lower_case": False, "model_max_length": 512})
# This model does not have a tokenizer_config so we get back an empty dict.
config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
self.assertDictEqual(config, {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir)
config = get_tokenizer_config(tmp_dir)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"], "BertTokenizer")
def test_new_tokenizer_registration(self):
try:
AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError):
AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)
tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir)
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
self.assertIsInstance(new_tokenizer, TokenizersBackend)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def test_new_tokenizer_fast_registration(self):
try:
AutoConfig.register("custom", CustomConfig)
# Can register in two steps (fast takes precedence)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig], CustomTokenizer)
AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig], CustomTokenizerFast)
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig], CustomTokenizerFast)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(ValueError):
AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
bert_tokenizer.save_pretrained(tmp_dir)
tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir)
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
self.assertIsInstance(new_tokenizer, CustomTokenizerFast)
new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
self.assertIsInstance(new_tokenizer, CustomTokenizerFast)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def test_from_pretrained_dynamic_tokenizer(self):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(ValueError):
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError):
tokenizer = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
self.assertTrue(tokenizer.special_attribute_present)
# Test the dynamic module is loaded only once.
reloaded_tokenizer = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
)
self.assertIs(tokenizer.__class__, reloaded_tokenizer.__class__)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir)
reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")
# Test we can also load the slow version
tokenizer = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir)
reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
self.assertTrue(
os.path.exists(os.path.join(tmp_dir, "tokenization.py"))
) # Assert we saved tokenizer code
self.assertEqual(reloaded_tokenizer._auto_class, "AutoTokenizer")
with open(os.path.join(tmp_dir, "tokenizer_config.json"), "r") as f:
tokenizer_config = json.load(f)
# Assert we're pointing at local code and not another remote repo
self.assertEqual(
tokenizer_config["auto_map"]["AutoTokenizer"],
["tokenization.NewTokenizer", "tokenization_fast.NewTokenizerFast"],
)
self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
# Test the dynamic module is reloaded if we force it.
reloaded_tokenizer = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, force_download=True
)
self.assertIsNot(tokenizer.__class__, reloaded_tokenizer.__class__)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
@require_tokenizers
def test_from_pretrained_dynamic_tokenizer_conflict(self):
class NewTokenizer(BertTokenizer):
special_attribute_present = False
try:
AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
# If remote code is not set, the default is to use local
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
self.assertFalse(tokenizer.special_attribute_present)
tokenizer = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
)
self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
self.assertFalse(tokenizer.special_attribute_present)
tokenizer = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
)
self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
tokenizer = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
# Test we can also load the slow version
tokenizer = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
else:
self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
def test_repo_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
):
_ = AutoTokenizer.from_pretrained("bert-base")
def test_revision_not_found(self):
with self.assertRaisesRegex(
EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
):
_ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
@unittest.skip("This test is failing on main") # TODO Matt/ydshieh, fix this test!
def test_cached_tokenizer_has_minimum_calls_to_head(self):
# Make sure we have cached the tokenizer.
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter["GET"], 0)
self.assertEqual(counter["HEAD"], 1)
self.assertEqual(counter.total_calls, 1)
def test_init_tokenizer_with_trust(self):
nop_tokenizer_code = """
import transformers
| AutoTokenizerTest |
python | PrefectHQ__prefect | tests/utilities/schema_tools/test_hydration.py | {
"start": 2174,
"end": 4063
} | class ____:
@pytest.mark.parametrize(
"input_object, expected_output",
[
# __prefect_kind set to None, should be a simple pass-through
({"param": {"__prefect_kind": "none", "value": None}}, {"param": None}),
({"param": {"__prefect_kind": "none", "value": 10}}, {"param": 10}),
(
{"param": {"__prefect_kind": "none", "value": "hello"}},
{"param": "hello"},
),
(
{"param": {"__prefect_kind": "none", "value": [1, 2, 3]}},
{"param": [1, 2, 3]},
),
(
{"param": {"__prefect_kind": "none", "value": {"key": "value"}}},
{"param": {"key": "value"}},
),
# Complex objects with __prefect_kind set to "none"
(
{
"param": {
"__prefect_kind": "none",
"value": {"nested": {"another_key": "another_value"}},
}
},
{"param": {"nested": {"another_key": "another_value"}}},
),
# Nested "none" __prefect_kinds
(
{
"param": {
"__prefect_kind": "none",
"value": {
"hello": "world",
"goodbye": {"__prefect_kind": "none", "value": "moon"},
},
}
},
{"param": {"hello": "world", "goodbye": "moon"}},
),
({"param": {"__prefect_kind": "none"}}, {"param": ValueNotFound()}),
],
)
def test_hydrate_with_null_prefect_kind(self, input_object, expected_output):
assert hydrate(input_object) == expected_output
| TestHydrateWithNonePrefectKind |
python | imageio__imageio | imageio/plugins/freeimage.py | {
"start": 936,
"end": 4858
} | class ____(Format):
"""See :mod:`imageio.plugins.freeimage`"""
_modes = "i"
def __init__(self, name, description, extensions=None, modes=None, *, fif=None):
super().__init__(name, description, extensions=extensions, modes=modes)
self._fif = fif
@property
def fif(self):
return self._fif # Set when format is created
def _can_read(self, request):
# Ask freeimage if it can read it, maybe ext missing
if fi.has_lib():
if not hasattr(request, "_fif"):
try:
request._fif = fi.getFIF(request.filename, "r", request.firstbytes)
except Exception: # pragma: no cover
request._fif = -1
if request._fif == self.fif:
return True
elif request._fif == 7 and self.fif == 14:
# PPM gets identified as PBM and PPM can read PBM
# see: https://github.com/imageio/imageio/issues/677
return True
def _can_write(self, request):
# Ask freeimage, because we are not aware of all formats
if fi.has_lib():
if not hasattr(request, "_fif"):
try:
request._fif = fi.getFIF(request.filename, "w")
except ValueError: # pragma: no cover
if request.raw_uri == RETURN_BYTES:
request._fif = self.fif
else:
request._fif = -1
if request._fif is self.fif:
return True
# --
class Reader(Format.Reader):
def _get_length(self):
return 1
def _open(self, flags=0):
self._bm = fi.create_bitmap(self.request.filename, self.format.fif, flags)
self._bm.load_from_filename(self.request.get_local_filename())
def _close(self):
self._bm.close()
def _get_data(self, index):
if index != 0:
raise IndexError("This format only supports singleton images.")
return self._bm.get_image_data(), self._bm.get_meta_data()
def _get_meta_data(self, index):
if not (index is None or index == 0):
raise IndexError()
return self._bm.get_meta_data()
# --
class Writer(Format.Writer):
def _open(self, flags=0):
self._flags = flags # Store flags for later use
self._bm = None
self._is_set = False # To prevent appending more than one image
self._meta = {}
def _close(self):
# Set global meta data
self._bm.set_meta_data(self._meta)
# Write and close
self._bm.save_to_filename(self.request.get_local_filename())
self._bm.close()
def _append_data(self, im, meta):
# Check if set
if not self._is_set:
self._is_set = True
else:
raise RuntimeError(
"Singleton image; " "can only append image data once."
)
# Pop unit dimension for grayscale images
if im.ndim == 3 and im.shape[-1] == 1:
im = im[:, :, 0]
# Lazy instantaion of the bitmap, we need image data
if self._bm is None:
self._bm = fi.create_bitmap(
self.request.filename, self.format.fif, self._flags
)
self._bm.allocate(im)
# Set data
self._bm.set_image_data(im)
# There is no distinction between global and per-image meta data
# for singleton images
self._meta = meta
def _set_meta_data(self, meta):
self._meta = meta
# Special plugins
# todo: there is also FIF_LOAD_NOPIXELS,
# but perhaps that should be used with get_meta_data.
| FreeimageFormat |
python | google__jax | tests/multiprocess/pjit_test.py | {
"start": 3409,
"end": 16455
} | class ____(jt_multiprocess.MultiProcessTest):
def test_pjit_array_single_output(self):
global_mesh = jtu.create_mesh((4, 2), ("x", "y"))
global_input_shape = (8, 2)
mesh_axes = jax.sharding.PartitionSpec("x", "y")
global_input_data = np.arange(math.prod(global_input_shape)).reshape(
global_input_shape
)
s = jax.sharding.NamedSharding(global_mesh, mesh_axes)
arr = array.make_array_from_callback(
global_input_shape, s, lambda idx: global_input_data[idx]
)
@functools.partial(pjit.pjit, out_shardings=s)
def f(x):
return x @ x.T
expected_matrix_mul = global_input_data @ global_input_data.T
out = f(arr)
self.assertIsInstance(out, array.ArrayImpl)
self.assertEqual(out.shape, (8, 8))
self.assertEqual(out.addressable_shards[0].data.shape, (2, 4))
for s in out.addressable_shards:
np.testing.assert_array_equal(
np.asarray(s.data), expected_matrix_mul[s.index]
)
# Test does not work with non-contiguous device IDs.
@jtu.skip_on_devices("cpu")
def test_pjit_array_non_contiguous_mesh_2d(self):
global_mesh = create_2d_non_contiguous_mesh()
global_input_shape = (8, 2)
pspec = jax.sharding.PartitionSpec("x", "y")
input_data = np.arange(math.prod(global_input_shape)).reshape(
global_input_shape
)
in_sharding = jax.sharding.NamedSharding(global_mesh, pspec)
out_sharding = jax.sharding.NamedSharding(global_mesh, pspec)
a1 = array.make_array_from_callback(
global_input_shape, in_sharding, lambda idx: input_data[idx]
)
# device_id -> (index, replica_id)
expected_idx_rid = {
0: ((slice(0, 2), slice(0, 1)), 0),
1: ((slice(2, 4), slice(1, 2)), 0),
2: ((slice(0, 2), slice(1, 2)), 0),
3: ((slice(2, 4), slice(0, 1)), 0),
4: ((slice(4, 6), slice(0, 1)), 0),
5: ((slice(6, 8), slice(1, 2)), 0),
6: ((slice(4, 6), slice(1, 2)), 0),
7: ((slice(6, 8), slice(0, 1)), 0),
}
with global_mesh:
f = pjit.pjit(lambda x: x, out_shardings=out_sharding)
out = f(a1)
for s in out.addressable_shards:
device_id = s.device.id
expected_index = expected_idx_rid[device_id][0]
expected_replica_id = expected_idx_rid[device_id][1]
self.assertEqual(s.index, expected_index)
self.assertEqual(s.replica_id, expected_replica_id)
self.assertEqual(s.data.shape, (2, 1))
np.testing.assert_array_equal(s.data._value, input_data[expected_index])
with global_mesh:
f = pjit.pjit(lambda x: x)
out = f(a1)
for s in out.addressable_shards:
device_id = s.device.id
expected_index = expected_idx_rid[device_id][0]
expected_replica_id = expected_idx_rid[device_id][1]
self.assertEqual(s.index, expected_index)
self.assertEqual(s.replica_id, expected_replica_id)
self.assertEqual(s.data.shape, (2, 1))
np.testing.assert_array_equal(s.data._value, input_data[expected_index])
none_sharding = jax.sharding.NamedSharding(
global_mesh, jax.sharding.PartitionSpec(None)
)
with global_mesh:
f = pjit.pjit(
lambda x: x, in_shardings=none_sharding, out_shardings=out_sharding
)
# Fully replicated values allows a non-contiguous mesh.
out = f(input_data)
self.assertIsInstance(out, array.ArrayImpl)
a2 = array.make_array_from_callback(
global_input_shape, none_sharding, lambda idx: input_data[idx]
)
with global_mesh:
f = pjit.pjit(
lambda x, y: (x, y),
in_shardings=(none_sharding, none_sharding),
out_shardings=(out_sharding, out_sharding),
)
# Fully replicated values + Array allows a non-contiguous mesh.
out1, out2 = f(input_data, a2)
self.assertIsInstance(out1, array.ArrayImpl)
self.assertIsInstance(out2, array.ArrayImpl)
def test_sharded_add(self):
global_mesh = create_2d_non_contiguous_mesh()
input_shape = (8, 2)
input_data = np.arange(math.prod(input_shape)).reshape(input_shape)
a_s = jax.sharding.NamedSharding(global_mesh, P("x", "y"))
b_s = jax.sharding.NamedSharding(global_mesh, P("x"))
a = array.make_array_from_callback(
input_shape, a_s, lambda idx: input_data[idx]
)
b = array.make_array_from_callback(
input_shape, b_s, lambda idx: input_data[idx]
)
out = a + b
for s in out.addressable_shards:
np.testing.assert_array_equal(
s.data, (input_data + input_data)[s.index]
)
def test_sharded_jit_add(self):
global_mesh = create_2d_non_contiguous_mesh()
input_shape = (8, 2)
input_data = np.arange(math.prod(input_shape)).reshape(input_shape)
a_s = jax.sharding.NamedSharding(global_mesh, P("x", "y"))
b_s = jax.sharding.NamedSharding(global_mesh, P("x"))
a = array.make_array_from_callback(
input_shape, a_s, lambda idx: input_data[idx]
)
b = array.make_array_from_callback(
input_shape, b_s, lambda idx: input_data[idx]
)
out = jax.jit(lambda x, y: x + y)(a, b)
for s in out.addressable_shards:
np.testing.assert_array_equal(s.data, (input_data + input_data)[s.index])
def test_sharded_copy(self):
global_mesh = create_2d_non_contiguous_mesh()
input_shape = (8, 2)
input_data = np.arange(math.prod(input_shape)).reshape(input_shape)
s = jax.sharding.NamedSharding(global_mesh, P("x", "y"))
arr = array.make_array_from_callback(
input_shape, s, lambda idx: input_data[idx]
)
# Copy the array sharded over multiple devices across multiple processes.
copy_arr = jnp.copy(arr)
for c, a in zip(copy_arr.addressable_shards, arr.addressable_shards):
self.assertNotEqual(
c.data.unsafe_buffer_pointer(), a.data.unsafe_buffer_pointer()
)
self.assertEqual(c.index, a.index)
self.assertEqual(c.replica_id, a.replica_id)
self.assertEqual(c.device, a.device)
np.testing.assert_array_equal(c.data, a.data)
def test_sharded_mul(self):
global_mesh = create_2d_non_contiguous_mesh()
input_shape = (8, 2)
input_data = np.arange(math.prod(input_shape)).reshape(input_shape)
a_s = jax.sharding.NamedSharding(global_mesh, P("x", "y"))
a = array.make_array_from_callback(
input_shape, a_s, lambda idx: input_data[idx]
)
out = a @ a.T
for s in out.addressable_shards:
np.testing.assert_array_equal(
s.data, (input_data @ input_data.T)[s.index]
)
def test_pjit_array_eval_shape(self):
with jtu.create_mesh((8,), "x"):
@functools.partial(
pjit.pjit,
in_shardings=jax.sharding.PartitionSpec(None),
out_shardings=jax.sharding.PartitionSpec("x"),
)
def f():
return jnp.zeros([32, 10])
self.assertEqual(f().shape, (32, 10))
self.assertEqual(jax.eval_shape(f).shape, (32, 10))
def test_trace_with_global_avals(self):
devices = sorted_devices()
mesh_devices = np.array(devices[::2] + devices[1::2])
# The device order in the below mesh is:
# (0, 2, 4, 6, 1, 3, 5, 7)
# each having the following process index:
# (0, 1, 2, 3, 0, 1, 2, 3)
# self.assertListEqual([d.process_index for d in mesh_devices],
# [0, 1, 2, 3, 0, 1, 2, 3])
global_mesh = jax.sharding.Mesh(mesh_devices, ("x",))
x = jnp.arange(16)
def check_shape(x):
self.assertEqual(x.shape, (16,))
return x
with global_mesh:
f = pjit.pjit(
check_shape,
in_shardings=jax.sharding.PartitionSpec("x"),
out_shardings=None,
)
np.testing.assert_array_equal(f(x), jnp.arange(16))
@use_default_mesh()
def test_pjit_in_pjit(self):
# The global size of x is 16. The shape should remain constant i.e. (16,)
# within all `pjit`'s since with Array, pjit only accepts global shaped
# inputs and doesn't lift the shape.
x = jnp.arange(16)
def pjit_all(f):
return pjit.pjit(
f,
in_shardings=jax.sharding.PartitionSpec(ALL_AXES),
out_shardings=jax.sharding.PartitionSpec(ALL_AXES),
)
def check_shape(x):
assert x.shape == (16,)
return x
pjit_all(check_shape)(x)
pjit_all(pjit_all(check_shape))(x)
pjit_all(pjit_all(pjit_all(check_shape)))(x)
def test_compile_parallel(self):
x = jnp.arange(16)
global_mesh = jtu.create_mesh((4, 2), ("x", "y"))
def _lower_compile(inp):
with global_mesh:
f = pjit.pjit(
lambda x: x.sum(),
in_shardings=jax.sharding.PartitionSpec("x"),
out_shardings=None,
)
exe = f.lower(inp).compile()
return exe
with futures.ThreadPoolExecutor(max_workers=5) as executor:
result = executor.map(_lower_compile, [x] * 5)
expected_out = np.arange(16).sum()
for out in list(result):
np.testing.assert_array_equal(out(x), expected_out)
def test_fully_sharded_on_all_devices(self):
if jax.local_device_count() > 1:
self.skipTest("This test only works with 1 process per device.")
num_devices = jax.device_count()
x = jnp.arange(num_devices)
global_mesh = jtu.create_mesh((num_devices,), "x")
with global_mesh:
f = pjit.pjit(
lambda x: x,
in_shardings=jax.sharding.PartitionSpec("x"),
out_shardings=jax.sharding.PartitionSpec("x"),
)
out = f(x)
expected_out = np.arange(num_devices)
for s in out.addressable_shards:
np.testing.assert_array_equal(s.data, expected_out[s.index])
def test_on_device_size_in_bytes(self):
global_mesh = create_2d_non_contiguous_mesh()
input_shape = (8, 2)
input_data = np.arange(math.prod(input_shape)).reshape(input_shape)
a_s = jax.sharding.NamedSharding(global_mesh, P("x", "y"))
a = array.make_array_from_callback(
input_shape, a_s, lambda idx: input_data[idx]
)
shard_size = a.addressable_shards[0].data.on_device_size_in_bytes()
self.assertGreaterEqual(shard_size, 4 * 2)
self.assertEqual(
shard_size * len(a.global_shards), a.on_device_size_in_bytes()
)
def test_numpy_input_error_with_non_trivial_sharding(self):
global_mesh = jtu.create_mesh((8,), "x")
inp = np.arange(8)
with global_mesh:
f = pjit.pjit(
lambda x: x,
in_shardings=jax.sharding.PartitionSpec(None),
out_shardings=jax.sharding.PartitionSpec(None),
)
out = f(inp)
np.testing.assert_array_equal(out, inp)
# If no in_axis_resources are specified, then pjit assumes that the
# numpy input is fully replicated.
f = pjit.pjit(lambda x: x, out_shardings=jax.sharding.PartitionSpec(None))
out = f(inp)
np.testing.assert_array_equal(out, inp)
f = pjit.pjit(
lambda x: x,
in_shardings=jax.sharding.PartitionSpec("x"),
out_shardings=jax.sharding.PartitionSpec("x"),
)
with self.assertRaisesRegex(
ValueError,
"Passing non-trivial shardings for numpy inputs is not allowed",
):
f(inp)
def test_non_contiguous_mesh_fetch_to_host(self):
if jax.local_device_count() != 2:
raise unittest.SkipTest("Test assumes 2 devices per process")
global_mesh = create_2d_non_contiguous_mesh()
input_shape = (8, 2)
input_data = np.arange(math.prod(input_shape)).reshape(input_shape)
a_s = jax.sharding.NamedSharding(global_mesh, P(None, "y"))
a = array.make_array_from_callback(
input_shape, a_s, lambda idx: input_data[idx]
)
np.testing.assert_array_equal(a, input_data)
def test_non_contiguous_mesh_fetch_to_host2(self):
global_mesh = create_2d_non_contiguous_mesh2()
input_shape = (8, 2)
input_data = np.arange(math.prod(input_shape)).reshape(input_shape)
a_s = jax.sharding.NamedSharding(global_mesh, P(None, "y"))
a = array.make_array_from_callback(
input_shape, a_s, lambda idx: input_data[idx]
)
with self.assertRaisesRegex(
RuntimeError,
r"Fetching value for `jax.Array` that spans non-addressable \(non"
r" process local\) devices is not possible",
):
_ = a._value
def test_no_python_shard_arg_fallback(self):
global_mesh = jtu.create_mesh((4, 2), ("x", "y"))
input_shape = (8, 2)
input_data = np.arange(math.prod(input_shape)).reshape(input_shape)
a_s = jax.sharding.NamedSharding(global_mesh, P("x", "y"))
arr = array.make_array_from_callback(
input_shape, a_s, lambda idx: input_data[idx])
@jax.jit
def f(x):
return x * 2
with jtu.count_jax_array_shard_arg_calls() as count:
f(arr)
f(arr)
self.assertEqual(count(), 1)
@contextlib.contextmanager
def capture_stdout():
with unittest.mock.patch("sys.stdout", new_callable=io.StringIO) as fp:
def _read() -> str:
return fp.getvalue()
yield _read
| ArrayPjitMultiHost |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 150450,
"end": 152341
} | class ____(ASTBase):
def __init__(
self, templates: list[ASTTemplateParams | ASTTemplateIntroduction] | None
) -> None:
# templates is None means it's an explicit instantiation of a variable
self.templates = templates
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTTemplateDeclarationPrefix):
return NotImplemented
return self.templates == other.templates
def __hash__(self) -> int:
return hash(self.templates)
def get_requires_clause_in_last(self) -> ASTRequiresClause | None:
if self.templates is None:
return None
last_list = self.templates[-1]
if not isinstance(last_list, ASTTemplateParams):
return None
return last_list.requiresClause # which may be None
def get_id_except_requires_clause_in_last(self, version: int) -> str:
assert version >= 2
# This is not part of the Itanium ABI mangling system.
res = []
last_index = len(self.templates) - 1
for i, t in enumerate(self.templates):
if isinstance(t, ASTTemplateParams):
res.append(t.get_id(version, excludeRequires=(i == last_index)))
else:
res.append(t.get_id(version))
return ''.join(res)
def _stringify(self, transform: StringifyTransform) -> str:
return ''.join(map(transform, self.templates))
def describe_signature(
self,
signode: desc_signature,
mode: str,
env: BuildEnvironment,
symbol: Symbol,
lineSpec: bool,
) -> None:
verify_description_mode(mode)
assert self.templates is not None
for t in self.templates:
t.describe_signature_as_introducer(
signode, 'lastIsName', env, symbol, lineSpec
)
| ASTTemplateDeclarationPrefix |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 116736,
"end": 117363
} | class ____(TestDataLoader):
def setUp(self):
super().setUp()
self.persistent_workers = True
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
# See NOTE [ DataLoader on Linux and open files limit ]
import subprocess
subprocess.check_output(
[
sys.executable,
"-c",
"""\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
| TestDataLoaderPersistentWorkers |
python | streamlit__streamlit | lib/tests/streamlit/delta_generator_test.py | {
"start": 16247,
"end": 21330
} | class ____(DeltaGeneratorTestCase):
"""Test the `with DG` feature"""
def test_with(self):
# Same as test_container_paths, but using `with` syntax
level3 = st.container().container().container()
with level3:
st.markdown("hi")
st.markdown("bye")
msg = self.get_message_from_queue()
assert (
make_delta_path(RootContainer.MAIN, (0, 0, 0), 1) == msg.metadata.delta_path
)
# Now we're out of the `with` block, commands should use the main dg
st.markdown("outside")
msg = self.get_message_from_queue()
assert make_delta_path(RootContainer.MAIN, (), 1) == msg.metadata.delta_path
def test_nested_with(self):
with st.container():
with st.container():
st.markdown("Level 2 with")
msg = self.get_message_from_queue()
assert (
make_delta_path(RootContainer.MAIN, (0, 0), 0)
== msg.metadata.delta_path
)
st.markdown("Level 1 with")
msg = self.get_message_from_queue()
assert (
make_delta_path(RootContainer.MAIN, (0,), 1) == msg.metadata.delta_path
)
def test_threads_with(self):
"""
Tests that with statements work correctly when multiple threads are involved.
The test sequence is as follows:
Main Thread | Worker Thread
-----------------------------------------------------
with container1: |
| with container2:
st.markdown("Object 1") |
| st.markdown("Object 2")
We check that Object1 is created in container1 and object2 is created in container2.
"""
container1 = st.container()
container2 = st.container()
with_1 = threading.Event()
with_2 = threading.Event()
object_1 = threading.Event()
def thread():
with_1.wait()
with container2:
with_2.set()
object_1.wait()
st.markdown("Object 2")
msg = self.get_message_from_queue()
assert (
make_delta_path(RootContainer.MAIN, (1,), 0)
== msg.metadata.delta_path
)
worker_thread = threading.Thread(target=thread)
add_script_run_ctx(worker_thread)
worker_thread.start()
with container1:
with_1.set()
with_2.wait()
st.markdown("Object in container 1")
msg = self.get_message_from_queue()
assert (
make_delta_path(RootContainer.MAIN, (0,), 0) == msg.metadata.delta_path
)
object_1.set()
worker_thread.join()
def test_asyncio_with(self):
"""
Tests that with statements work correctly when multiple async tasks are involved.
The test sequence is as follows:
Task 1 | Task 2
-----------------------------------------------------
with container1:
asyncio.create_task() ->
| st.markdown("Object 1a")
| with container2:
st.markdown("Object 1b") |
| st.markdown("Object 2")
In this scenario, Task 2 should inherit the container1 context from Task 1
when it is created, so Objects 1a and 1b will both go in container 1,
and object 2 will go in container 2.
"""
container1 = st.container()
container2 = st.container()
async def runner():
with_2 = asyncio.Event()
object_1 = asyncio.Event()
async def task2():
st.markdown("Object 1a")
msg = self.get_message_from_queue()
assert (
make_delta_path(RootContainer.MAIN, (0,), 0)
== msg.metadata.delta_path
)
with container2:
with_2.set()
st.markdown("Object 2")
msg = self.get_message_from_queue()
assert (
make_delta_path(RootContainer.MAIN, (1,), 0)
== msg.metadata.delta_path
)
await object_1.wait()
async def task1():
with container1:
task = asyncio.create_task(task2())
await with_2.wait()
st.markdown("Object 1b")
msg = self.get_message_from_queue()
assert (
make_delta_path(RootContainer.MAIN, (0,), 1)
== msg.metadata.delta_path
)
object_1.set()
await task
await task1()
asyncio.run(runner())
| DeltaGeneratorWithTest |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 22107,
"end": 23193
} | class ____(Var):
def _lower(self):
v = Var(*self.operands)
return MapPartitions(
v,
func=np.sqrt,
meta=v._meta,
enforce_metadata=True,
transform_divisions=True,
clear_divisions=True,
)
def _mean_chunk(df, *by, observed=None, dropna=None):
if is_series_like(df):
df = df.to_frame()
g = _groupby_raise_unaligned(df, by=by, observed=observed, dropna=dropna)
x = g.sum(numeric_only=True)
n = g[x.columns].count().rename(columns=lambda c: c + "-count")
return concat([x, n], axis=1)
def _mean_combine(g, levels, sort=False, observed=None, dropna=None):
return g.groupby(level=levels, sort=sort, observed=observed, dropna=dropna).sum()
def _mean_agg(g, levels, sort=False, observed=False, dropna=True):
result = g.groupby(level=levels, sort=sort, observed=observed, dropna=dropna).sum()
s = result[result.columns[: len(result.columns) // 2]]
c = result[result.columns[len(result.columns) // 2 :]]
c.columns = s.columns
return s / c
| Std |
python | jazzband__django-oauth-toolkit | tests/test_models.py | {
"start": 6111,
"end": 10710
} | class ____(BaseTestModels):
def test_custom_application_model(self):
"""
If a custom application model is installed, it should be present in
the related objects and not the swapped out one.
See issue #90 (https://github.com/django-oauth/django-oauth-toolkit/issues/90)
"""
related_object_names = [
f.name
for f in UserModel._meta.get_fields()
if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete
]
self.assertNotIn("oauth2_provider:application", related_object_names)
self.assertIn("tests_sampleapplication", related_object_names)
def test_custom_application_model_incorrect_format(self):
# Patch oauth2 settings to use a custom Application model
self.oauth2_settings.APPLICATION_MODEL = "IncorrectApplicationFormat"
self.assertRaises(ValueError, get_application_model)
def test_custom_application_model_not_installed(self):
# Patch oauth2 settings to use a custom Application model
self.oauth2_settings.APPLICATION_MODEL = "tests.ApplicationNotInstalled"
self.assertRaises(LookupError, get_application_model)
def test_custom_access_token_model(self):
"""
If a custom access token model is installed, it should be present in
the related objects and not the swapped out one.
"""
# Django internals caches the related objects.
related_object_names = [
f.name
for f in UserModel._meta.get_fields()
if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete
]
self.assertNotIn("oauth2_provider:access_token", related_object_names)
self.assertIn("tests_sampleaccesstoken", related_object_names)
def test_custom_access_token_model_incorrect_format(self):
# Patch oauth2 settings to use a custom AccessToken model
self.oauth2_settings.ACCESS_TOKEN_MODEL = "IncorrectAccessTokenFormat"
self.assertRaises(ValueError, get_access_token_model)
def test_custom_access_token_model_not_installed(self):
# Patch oauth2 settings to use a custom AccessToken model
self.oauth2_settings.ACCESS_TOKEN_MODEL = "tests.AccessTokenNotInstalled"
self.assertRaises(LookupError, get_access_token_model)
def test_custom_refresh_token_model(self):
"""
If a custom refresh token model is installed, it should be present in
the related objects and not the swapped out one.
"""
# Django internals caches the related objects.
related_object_names = [
f.name
for f in UserModel._meta.get_fields()
if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete
]
self.assertNotIn("oauth2_provider:refresh_token", related_object_names)
self.assertIn("tests_samplerefreshtoken", related_object_names)
def test_custom_refresh_token_model_incorrect_format(self):
# Patch oauth2 settings to use a custom RefreshToken model
self.oauth2_settings.REFRESH_TOKEN_MODEL = "IncorrectRefreshTokenFormat"
self.assertRaises(ValueError, get_refresh_token_model)
def test_custom_refresh_token_model_not_installed(self):
# Patch oauth2 settings to use a custom AccessToken model
self.oauth2_settings.REFRESH_TOKEN_MODEL = "tests.RefreshTokenNotInstalled"
self.assertRaises(LookupError, get_refresh_token_model)
def test_custom_grant_model(self):
"""
If a custom grant model is installed, it should be present in
the related objects and not the swapped out one.
"""
# Django internals caches the related objects.
related_object_names = [
f.name
for f in UserModel._meta.get_fields()
if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete
]
self.assertNotIn("oauth2_provider:grant", related_object_names)
self.assertIn("tests_samplegrant", related_object_names)
def test_custom_grant_model_incorrect_format(self):
# Patch oauth2 settings to use a custom Grant model
self.oauth2_settings.GRANT_MODEL = "IncorrectGrantFormat"
self.assertRaises(ValueError, get_grant_model)
def test_custom_grant_model_not_installed(self):
# Patch oauth2 settings to use a custom AccessToken model
self.oauth2_settings.GRANT_MODEL = "tests.GrantNotInstalled"
self.assertRaises(LookupError, get_grant_model)
| TestCustomModels |
python | keras-team__keras | keras/src/metrics/iou_metrics_test.py | {
"start": 3883,
"end": 8641
} | class ____(testing.TestCase):
def test_config(self):
obj = metrics.BinaryIoU(
target_class_ids=[1, 0], threshold=0.1, name="iou_class_1_0"
)
self.assertEqual(obj.name, "iou_class_1_0")
self.assertAlmostEqual(obj.threshold, 0.1)
self.assertEqual(obj.target_class_ids, [1, 0])
obj2 = metrics.BinaryIoU.from_config(obj.get_config())
self.assertEqual(obj.name, "iou_class_1_0")
self.assertAlmostEqual(obj2.threshold, 0.1)
self.assertEqual(obj.target_class_ids, [1, 0])
def test_different_thresholds_weighted(self):
y_true = [0, 1, 0, 1]
y_pred = [0.1, 0.2, 0.4, 0.7]
sample_weight = np.array([0.2, 0.3, 0.4, 0.1])
# with threshold = 0.3, y_pred will be converted to [0, 0, 1, 1]
# cm = [[0.2, 0.4],
# [0.3, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
# 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (
0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
) / 2
obj = metrics.BinaryIoU(
target_class_ids=[0, 1], threshold=0.3, dtype="float32"
)
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, expected_result, atol=1e-3)
sample_weight = np.array([0.1, 0.2, 0.4, 0.3])
# with threshold = 0.5, y_pred will be converted to [0, 0, 0, 1]
# cm = [[0.1+0.4, 0],
# [0.2, 0.3]]
# sum_row = [0.5, 0.5], sum_col = [0.7, 0.3], true_positives = [0.5,
# 0.3]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (
0.5 / (0.5 + 0.7 - 0.5) + 0.3 / (0.5 + 0.3 - 0.3)
) / 2
obj = metrics.BinaryIoU(
target_class_ids=[0, 1], threshold=0.5, dtype="float32"
)
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, expected_result, atol=1e-3)
def test_different_thresholds_unweighted(self):
y_true = [0, 1, 0, 1]
y_pred = [0.1, 0.2, 0.4, 0.7]
# with threshold = 0.3, y_pred will be converted to [0, 0, 1, 1]
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.3)
result = obj(y_true, y_pred)
self.assertAllClose(result, expected_result, atol=1e-3)
# with threshold = 0.5, y_pred will be converted to [0, 0, 0, 1]
# cm = [[2, 0],
# [1, 1]]
# sum_row = [2, 2], sum_col = [3, 1], true_positives = [2, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (2 / (2 + 3 - 2) + 1 / (2 + 1 - 1)) / 2
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=0.5)
result = obj(y_true, y_pred)
self.assertAllClose(result, expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_true = np.array([[0, 1], [0, 1]], dtype=np.float32)
y_pred = np.array([[0.1, 0.7], [0.9, 0.3]])
threshold = 0.4 # y_pred will become [[0, 1], [1, 0]]
sample_weight = np.array([[0.2, 0.3], [0.4, 0.1]])
# cm = [[0.2, 0.4],
# [0.1, 0.3]]
# sum_row = [0.6, 0.4], sum_col = [0.3, 0.7], true_positives = [0.2,
# 0.3]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (
0.2 / (0.6 + 0.3 - 0.2) + 0.3 / (0.4 + 0.7 - 0.3)
) / 2
obj = metrics.BinaryIoU(
target_class_ids=[0, 1], threshold=threshold, dtype="float32"
)
result = obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, expected_result, atol=1e-3)
def test_zero_valid_entries(self):
obj = metrics.BinaryIoU(target_class_ids=[0, 1])
self.assertAllClose(obj.result(), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = np.array([0.6], dtype=np.float32)
threshold = 0.5
y_true = np.array([1])
obj = metrics.BinaryIoU(target_class_ids=[0, 1], threshold=threshold)
result = obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = 1 / (1 + 1 - 1)
self.assertAllClose(result, expected_result, atol=1e-3)
| BinaryIoUTest |
python | PrefectHQ__prefect | tests/test_futures.py | {
"start": 1320,
"end": 6905
} | class ____:
def test_wait(self):
mock_futures = [MockFuture(data=i) for i in range(5)]
futures = wait(mock_futures)
assert futures.not_done == set()
for future in mock_futures:
assert future.state.is_completed()
@pytest.mark.timeout(method="thread")
def test_wait_with_timeout(self):
mock_futures = [MockFuture(data=i) for i in range(5)]
hanging_future = Future()
mock_futures.append(PrefectConcurrentFuture(uuid.uuid4(), hanging_future))
futures = wait(mock_futures, timeout=0.01)
assert futures.not_done == {mock_futures[-1]}
def test_wait_monitors_all_futures_concurrently_with_timeout(self):
"""Test that wait() with timeout monitors all futures concurrently, not sequentially."""
import threading
import time
# Create a slow future first, then fast ones
# If wait() is sequential, it will timeout on the slow one and miss the fast ones
futures = []
# Slow future that won't complete within timeout
slow_future = Future()
futures.append(PrefectConcurrentFuture(uuid.uuid4(), slow_future))
# Fast futures that complete quickly
for i in range(1, 4):
future = Future()
wrapped = PrefectConcurrentFuture(uuid.uuid4(), future)
futures.append(wrapped)
# Complete after short delay
def complete_after(f, delay, result):
time.sleep(delay * 0.01)
f.set_result(Completed(data=result))
thread = threading.Thread(target=complete_after, args=(future, i, i))
thread.daemon = True
thread.start()
# Wait with timeout that allows fast futures to complete
done, not_done = wait(futures, timeout=0.1)
# Should have captured all 3 fast futures
assert len(done) == 3
assert len(not_done) == 1 # Just the slow future
# Verify we got the right futures
done_results = sorted([f.result() for f in done])
assert done_results == [1, 2, 3]
def test_as_completed(self):
mock_futures = [MockFuture(data=i) for i in range(5)]
for future in as_completed(mock_futures):
assert future.state.is_completed()
@pytest.mark.timeout(method="thread")
def test_as_completed_with_timeout(self):
mock_futures = [MockFuture(data=i) for i in range(5)]
hanging_future = Future()
mock_futures.append(PrefectConcurrentFuture(uuid.uuid4(), hanging_future))
with pytest.raises(TimeoutError) as exc_info:
for future in as_completed(mock_futures, timeout=0.01):
assert future.state.is_completed()
assert (
exc_info.value.args[0] == f"1 (of {len(mock_futures)}) futures unfinished"
)
@pytest.mark.usefixtures("use_hosted_api_server")
def test_as_completed_yields_correct_order(self):
@task
def my_test_task(seconds):
import time
time.sleep(seconds)
return seconds
with ThreadPoolTaskRunner() as runner:
futures = []
timings = [1, 5, 10]
for i in reversed(timings):
parameters = {"seconds": i}
future = runner.submit(my_test_task, parameters)
future.parameters = parameters
futures.append(future)
results = []
for future in as_completed(futures):
results.append(future.result())
assert results == timings
def test_as_completed_timeout(self):
@task
def my_test_task(seconds):
import time
time.sleep(seconds)
return seconds
with ThreadPoolTaskRunner() as runner:
futures = []
timings = [1, 5, 10]
for i in reversed(timings):
parameters = {"seconds": i}
future = runner.submit(my_test_task, parameters)
future.parameters = parameters
futures.append(future)
results = []
with pytest.raises(TimeoutError) as exc_info:
for future in as_completed(futures, timeout=5):
results.append(future.result())
assert exc_info.value.args[0] == f"2 (of {len(timings)}) futures unfinished"
async def test_as_completed_yields_correct_order_dist(self, events_pipeline):
@task
async def my_task(seconds):
import time
time.sleep(seconds)
return seconds
futures = []
timings = [1, 5, 10]
task_runs = []
for i in reversed(timings):
task_run = await my_task.create_run(parameters={"seconds": i})
future = PrefectDistributedFuture(task_run_id=task_run.id)
futures.append(future)
task_run = asyncio.create_task(
run_task_async(
task=my_task,
task_run_id=future.task_run_id,
task_run=task_run,
parameters={"seconds": i},
return_type="state",
)
)
task_runs.append(task_run)
await asyncio.gather(*task_runs)
await events_pipeline.process_events()
results = []
with pytest.raises(MissingResult):
for future in as_completed(futures):
results.append(future.result())
assert results == timings
| TestUtilityFunctions |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/await1.py | {
"start": 288,
"end": 325
} | class ____(Generic[T]):
body: T
| Msg |
python | pytorch__pytorch | torch/fx/experimental/symbolic_shapes.py | {
"start": 77712,
"end": 81118
} | class ____(SymbolicContext, Generic[_P1, _T1]):
"""
Create symbols in ``create_symbolic_sizes_strides_storage_offset`` via
a symbolic_context determination as given by ``DimDynamic`` and ``DimConstraint``.
This will cause fresh symbols to be allocated
"""
dynamic_sizes: DimList[DimDynamic]
dynamic_strides: DimList[DimDynamic] = None # type: ignore[assignment]
constraint_sizes: DimList[DimConstraint] = None # type: ignore[assignment]
constraint_strides: DimList[DimConstraint] = None # type: ignore[assignment]
specialize_on: Optional[list[list[Callable[_P1, _T1]]]] = None
# If the tensor is a view, this should be populated for the base. It contains
# information on how to allocate symbols when recursively fakeifying the base
# during view fake-ification.
view_base_context: Optional[SymbolicContext] = None
# TODO: add storage offset and stride symbolic_context
def __post_init__(self) -> None:
if self.specialize_on is None:
object.__setattr__(
self,
"specialize_on",
[[]] * len(self.dynamic_sizes),
)
if self.dynamic_strides is None:
object.__setattr__(
self,
"dynamic_strides",
[DimDynamic.INFER_STRIDE] * len(self.dynamic_sizes),
)
if self.constraint_sizes is None:
object.__setattr__(
self, "constraint_sizes", [None] * len(self.dynamic_sizes)
)
if self.constraint_strides is None:
object.__setattr__(
self, "constraint_strides", [None] * len(self.dynamic_sizes)
)
assert all(
stride in (DimDynamic.INFER_STRIDE, DimDynamic.DYNAMIC, DimDynamic.DUCK)
for stride in self.dynamic_strides
)
# note [Tensor Fakification and Symbol Caching]
#
# As of the time of this note, dynamo creates a fresh fake tensor mode for backends.
# The reason we do this is because there are certain classes of operations, namely,
# metadata mutations, that change tensor size, stride, etc. This means that the fake tensor
# state at the end of a dynamo trace is different than the fake tensor state at the beginning
# of a trace. Backends like aot_autograd need a fresh fake tensor to correctly track metadata mutation,
# view relationships, etc.
#
# As we create a new fake mode, we also lose the memoization that comes with it. Rather than
# transfer the memoization cache, we instead transfer the shape env. However, with this
# comes nuance - as dynamo is selective in how it makes symbolic shapes. Due to strategies in
# automatic dynamic and constraints, the policy for which dims are dynamic is nuanced and varies across
# recompilations.
#
# In order to preserve the symbolic decisions made during dynamo tensor fakification, we pass
# a StatefulSymbolicContext at creation time. This object is tracked, per tensor, on the TracingContext.
# The lifecycle of this object should match the lifecycle of the original dynamo tracked tensor, and it is
# safe to reuse this object as many times as necessary to create a fake tensor. Fake tensors
# created with new fake modes should produce the same exact symbols as the original, providing the same shape_env
# is used.
# TODO(voz): Shape env validation
@dataclass(frozen=True)
| StatelessSymbolicContext |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 22868,
"end": 22968
} | class ____(DagsterError):
"""Indicates than an asset check failed."""
| DagsterAssetCheckFailedError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/streams.py | {
"start": 19401,
"end": 20478
} | class ____(GithubStreamABC):
"""
API docs: https://docs.github.com/en/rest/orgs/orgs?apiVersion=2022-11-28#list-organizations
"""
# GitHub pagination could be from 1 to 100.
page_size = 100
def __init__(self, organizations: List[str], access_token_type: str = "", **kwargs):
super().__init__(**kwargs)
self.organizations = organizations
self.access_token_type = access_token_type
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
for organization in self.organizations:
yield {"organization": organization}
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return f"orgs/{stream_slice['organization']}"
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
yield response.json()
def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]:
record["organization"] = stream_slice["organization"]
return record
| Organizations |
python | PrefectHQ__prefect | src/prefect/events/filters.py | {
"start": 4085,
"end": 5211
} | class ____(EventDataFilter):
id: Optional[list[str]] = Field(
default=None, description="Only include events for resources with these IDs"
)
id_prefix: Optional[list[str]] = Field(
default=None,
description=(
"Only include events for resources with IDs starting with these prefixes."
),
)
labels: Optional[ResourceSpecification] = Field(
default=None, description="Only include events for resources with these labels"
)
distinct: bool = Field(
default=False,
description="Only include events for distinct resources",
)
def includes(self, event: Event) -> bool:
if self.id:
if not any(event.resource.id == resource_id for resource_id in self.id):
return False
if self.id_prefix:
if not any(
event.resource.id.startswith(prefix) for prefix in self.id_prefix
):
return False
if self.labels:
if not self.labels.matches(event.resource):
return False
return True
| EventResourceFilter |
python | wandb__wandb | wandb/sdk/lib/paths.py | {
"start": 649,
"end": 4565
} | class ____(str):
"""A string that represents a path relative to an artifact or run.
The format of the string is always as a POSIX path, e.g. "foo/bar.txt".
A neat trick is that you can use this class as if it were a PurePosixPath. E.g.:
```
>>> path = LogicalPath("foo/bar.txt")
>>> path.parts
('foo', 'bar.txt')
>>> path.parent / "baz.txt"
'foo/baz.txt'
>>> type(path.relative_to("foo"))
LogicalPath
```
"""
# It should probably always be a relative path, but that would be a behavior change.
#
# These strings used to be the output of `to_forward_slash_path`, which only works
# with strings and whose behavior is pretty simple:
# ```
# if platform.system() == "Windows":
# path = path.replace("\\", "/")
# ```
#
# This results in some weird things, such as backslashes being allowed from
# non-Windows platforms (which would probably break if such an artifact was used
# from Windows) and anchors or absolute paths being allowed. E.g., the Windows path
# "C:\foo\bar.txt" becomes "C:/foo/bar.txt", which then would mount as
# "./artifacts/artifact_name:v0/C:/foo/bar.txt" on MacOS and as
# "./artifacts/artifact_name-v0/C-/foo/bar.txt" on Windows.
#
# This implementation preserves behavior for strings but attempts to sanitize other
# formerly unsupported inputs more aggressively. It uses the `.as_posix()` form of
# pathlib objects rather than the `str()` form to reduce how often identical inputs
# will result in different outputs on different platforms; however, it doesn't alter
# absolute paths or check for prohibited characters etc.
def __new__(cls, path: StrPath) -> LogicalPath:
if isinstance(path, LogicalPath):
return super().__new__(cls, path)
if hasattr(path, "as_posix"):
path = PurePosixPath(path.as_posix())
return super().__new__(cls, str(path))
if hasattr(path, "__fspath__"):
path = path.__fspath__() # Can be str or bytes.
if isinstance(path, bytes):
path = os.fsdecode(path)
# For historical reasons we have to convert backslashes to forward slashes, but
# only on Windows, and need to do it before any pathlib operations.
if platform.system() == "Windows":
path = path.replace("\\", "/")
# This weird contortion and the one above are because in some unusual cases
# PurePosixPath(path.as_posix()).as_posix() != path.as_posix().
path = PurePath(path).as_posix()
return super().__new__(cls, str(PurePosixPath(path)))
def to_path(self) -> PurePosixPath:
"""Convert this path to a PurePosixPath."""
return PurePosixPath(self)
def __getattr__(self, name: str) -> Any:
"""Act like a subclass of PurePosixPath for all methods not defined on str."""
try:
attr = getattr(self.to_path(), name)
except AttributeError:
classname = type(self).__qualname__
raise AttributeError(f"{classname!r} has no attribute {name!r}") from None
if isinstance(attr, PurePosixPath):
return LogicalPath(attr)
# If the result is a callable (a method), wrap it so that it has the same
# behavior: if the call result returns a PurePosixPath, return a LogicalPath.
if callable(fn := attr):
@wraps(fn)
def wrapper(*args: Any, **kwargs: Any) -> Any:
if isinstance(res := fn(*args, **kwargs), PurePosixPath):
return LogicalPath(res)
return res
return wrapper
return attr
def __truediv__(self, other: StrPath) -> LogicalPath:
"""Act like a PurePosixPath for the / operator, but return a LogicalPath."""
return LogicalPath(self.to_path() / LogicalPath(other))
| LogicalPath |
python | django-debug-toolbar__django-debug-toolbar | debug_toolbar/panels/sql/panel.py | {
"start": 4069,
"end": 12881
} | class ____(Panel):
"""
Panel that displays information about the SQL queries run while processing
the request.
"""
is_async = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sql_time = 0
self._queries = []
self._databases = {}
# synthetic transaction IDs, keyed by DB alias
self._transaction_ids = {}
def new_transaction_id(self, alias):
"""
Generate and return a new synthetic transaction ID for the specified DB alias.
"""
trans_id = uuid.uuid4().hex
self._transaction_ids[alias] = trans_id
return trans_id
def current_transaction_id(self, alias):
"""
Return the current synthetic transaction ID for the specified DB alias.
"""
trans_id = self._transaction_ids.get(alias)
# Sometimes it is not possible to detect the beginning of the first transaction,
# so current_transaction_id() will be called before new_transaction_id(). In
# that case there won't yet be a transaction ID. so it is necessary to generate
# one using new_transaction_id().
if trans_id is None:
trans_id = self.new_transaction_id(alias)
return trans_id
def record(self, **kwargs):
kwargs["djdt_query_id"] = uuid.uuid4().hex
self._queries.append(kwargs)
alias = kwargs["alias"]
if alias not in self._databases:
self._databases[alias] = {
"time_spent": kwargs["duration"],
"num_queries": 1,
}
else:
self._databases[alias]["time_spent"] += kwargs["duration"]
self._databases[alias]["num_queries"] += 1
self._sql_time += kwargs["duration"]
# Implement the Panel API
nav_title = _("SQL")
@property
def nav_subtitle(self):
stats = self.get_stats()
query_count = len(stats.get("queries", []))
return ngettext(
"%(query_count)d query in %(sql_time).2fms",
"%(query_count)d queries in %(sql_time).2fms",
query_count,
) % {
"query_count": query_count,
"sql_time": stats.get("sql_time"),
}
@property
def title(self):
count = len(self.get_stats().get("databases"))
return ngettext(
"SQL queries from %(count)d connection",
"SQL queries from %(count)d connections",
count,
) % {"count": count}
template = "debug_toolbar/panels/sql.html"
@classmethod
def get_urls(cls):
return [
path("sql_select/", views.sql_select, name="sql_select"),
path("sql_explain/", views.sql_explain, name="sql_explain"),
path("sql_profile/", views.sql_profile, name="sql_profile"),
]
async def aenable_instrumentation(self):
"""
Async version of enable instrumentation.
For async capable panels having async logic for instrumentation.
"""
await sync_to_async(self.enable_instrumentation)()
def enable_instrumentation(self):
# This is thread-safe because database connections are thread-local.
for connection in connections.all():
wrap_cursor(connection)
connection._djdt_logger = self
def disable_instrumentation(self):
for connection in connections.all():
connection._djdt_logger = None
def generate_stats(self, request, response):
similar_query_groups = defaultdict(list)
duplicate_query_groups = defaultdict(list)
if self._queries:
sql_warning_threshold = dt_settings.get_config()["SQL_WARNING_THRESHOLD"]
width_ratio_tally = 0
factor = int(256.0 / (len(self._databases) * 2.5))
for n, db in enumerate(self._databases.values()):
rgb = [0, 0, 0]
color = n % 3
rgb[color] = 256 - n // 3 * factor
nn = color
# XXX: pretty sure this is horrible after so many aliases
while rgb[color] < factor:
nc = min(256 - rgb[color], 256)
rgb[color] += nc
nn += 1
if nn > 2:
nn = 0
rgb[nn] = nc
db["rgb_color"] = rgb
# the last query recorded for each DB alias
last_by_alias = {}
for query in self._queries:
alias = query["alias"]
similar_query_groups[(alias, _similar_query_key(query))].append(query)
duplicate_query_groups[(alias, _duplicate_query_key(query))].append(
query
)
trans_id = query.get("trans_id")
prev_query = last_by_alias.get(alias, {})
prev_trans_id = prev_query.get("trans_id")
# If two consecutive queries for a given DB alias have different
# transaction ID values, a transaction started, finished, or both, so
# annotate the queries as appropriate.
if trans_id != prev_trans_id:
if prev_trans_id is not None:
prev_query["ends_trans"] = True
if trans_id is not None:
query["starts_trans"] = True
if trans_id is not None:
query["in_trans"] = True
if "iso_level" in query:
query["iso_level"] = get_isolation_level_display(
query["vendor"], query["iso_level"]
)
if "trans_status" in query:
query["trans_status"] = get_transaction_status_display(
query["vendor"], query["trans_status"]
)
query["is_slow"] = query["duration"] > sql_warning_threshold
query["is_select"] = is_select_query(query["raw_sql"])
query["rgb_color"] = self._databases[alias]["rgb_color"]
try:
query["width_ratio"] = (query["duration"] / self._sql_time) * 100
except ZeroDivisionError:
query["width_ratio"] = 0
query["start_offset"] = width_ratio_tally
query["end_offset"] = query["width_ratio"] + query["start_offset"]
width_ratio_tally += query["width_ratio"]
last_by_alias[alias] = query
# Close out any transactions that were in progress, since there is no
# explicit way to know when a transaction finishes.
for final_query in last_by_alias.values():
if final_query.get("trans_id") is not None:
final_query["ends_trans"] = True
group_colors = contrasting_color_generator()
_process_query_groups(
similar_query_groups, self._databases, group_colors, "similar"
)
_process_query_groups(
duplicate_query_groups, self._databases, group_colors, "duplicate"
)
self.record_stats(
{
"databases": sorted(
self._databases.items(), key=lambda x: -x[1]["time_spent"]
),
"queries": self._queries,
"sql_time": self._sql_time,
}
)
def generate_server_timing(self, request, response):
stats = self.get_stats()
title = "SQL {} queries".format(len(stats.get("queries", [])))
value = stats.get("sql_time", 0)
self.record_server_timing("sql_time", title, value)
# Cache the content property since it manipulates the queries in the stats
# This allows the caller to treat content as idempotent
@cached_property
def content(self):
if self.has_content:
stats = self.get_stats()
colors = contrasting_color_generator()
trace_colors = defaultdict(lambda: next(colors))
for query in stats.get("queries", []):
query["sql"] = reformat_sql(query["sql"], with_toggle=True)
query["form"] = SignedDataForm(
auto_id=None,
initial=SQLSelectForm(
initial={
"djdt_query_id": query["djdt_query_id"],
"request_id": self.toolbar.request_id,
}
).initial,
)
query["stacktrace"] = render_stacktrace(query["stacktrace"])
query["trace_color"] = trace_colors[query["stacktrace"]]
return render_to_string(self.template, stats)
| SQLPanel |
python | python__mypy | mypy/nodes.py | {
"start": 15522,
"end": 16091
} | class ____(ImportBase):
"""from m import x [as y], ..."""
__slots__ = ("id", "names", "relative")
__match_args__ = ("id", "names", "relative")
id: str
relative: int
names: list[tuple[str, str | None]] # Tuples (name, as name)
def __init__(self, id: str, relative: int, names: list[tuple[str, str | None]]) -> None:
super().__init__()
self.id = id
self.names = names
self.relative = relative
def accept(self, visitor: StatementVisitor[T]) -> T:
return visitor.visit_import_from(self)
| ImportFrom |
python | readthedocs__readthedocs.org | readthedocs/oauth/migrations/0004_drop_github_and_bitbucket_models.py | {
"start": 472,
"end": 1690
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("oauth", "0003_move_github"),
]
operations = [
migrations.RemoveField(
model_name="bitbucketproject",
name="organization",
),
migrations.RemoveField(
model_name="bitbucketproject",
name="users",
),
migrations.RemoveField(
model_name="bitbucketteam",
name="users",
),
migrations.RemoveField(
model_name="githuborganization",
name="users",
),
migrations.RemoveField(
model_name="githubproject",
name="organization",
),
migrations.RemoveField(
model_name="githubproject",
name="users",
),
migrations.DeleteModel(
name="BitbucketProject",
),
migrations.DeleteModel(
name="BitbucketTeam",
),
migrations.DeleteModel(
name="GithubOrganization",
),
migrations.DeleteModel(
name="GithubProject",
),
migrations.RunPython(forwards_remove_content_types),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/polymorphic_function_test.py | {
"start": 5279,
"end": 6013
} | class ____(test.Benchmark):
"""Benchmark the tf.function implementation."""
def benchmark_repeat_captures_property_access(self):
n_iters = 1000000
n_captures = 100
vs = []
for _ in range(n_captures):
vs.append(variables.Variable(1.0))
def f():
result = 0
for idx in range(n_captures):
result += vs[idx]
return result
pf = polymorphic_function.function(f)
g = pf.get_concrete_function().graph
start_time = time.time()
for _ in range(n_iters):
temp = g.captures # pylint: disable=unused-variable
duration = time.time() - start_time
self.report_benchmark(iters=n_iters, wall_time=duration / float(n_iters))
@dataclasses.dataclass
| FunctionBenchmark |
python | neetcode-gh__leetcode | python/0763-partition-labels.py | {
"start": 0,
"end": 486
} | class ____:
def partitionLabels(self, S: str) -> List[int]:
count = {}
res = []
i, length = 0, len(S)
for j in range(length):
c = S[j]
count[c] = j
curLen = 0
goal = 0
while i < length:
c = S[i]
goal = max(goal, count[c])
curLen += 1
if goal == i:
res.append(curLen)
curLen = 0
i += 1
return res
| Solution |
python | spyder-ide__spyder | spyder/api/config/mixins.py | {
"start": 7026,
"end": 13326
} | class ____(SpyderConfigurationAccessor):
"""
Concrete implementation of the protocol
:class:`spyder.config.types.ConfigurationObserver`.
This mixin enables a class to receive configuration updates seamlessly,
by registering methods using the
:func:`spyder.api.config.decorators.on_conf_change` decorator, which
receives a configuration section and option to observe.
When a change occurs on any of the registered configuration options,
the corresponding registered method is called with the new value.
"""
def __init__(self):
super().__init__()
if self.CONF_SECTION is None:
warnings.warn(
'A SpyderConfigurationObserver must define a `CONF_SECTION` '
f'class attribute! Hint: {self} or its parent should define '
'the section.'
)
self._configuration_listeners = {}
self._multi_option_listeners = set({})
self._gather_observers()
self._merge_none_observers()
# Register class to listen for changes in all registered options
for section in self._configuration_listeners:
section = self.CONF_SECTION if section is None else section
observed_options = self._configuration_listeners[section]
for option in observed_options:
# Avoid a crash at startup due to MRO
if not PYSIDE6:
logger.debug(
f'{self} is observing option "{option}" in section '
f'"{section}"'
)
CONF.observe_configuration(self, section, option)
def __del__(self):
# Remove object from the configuration observer
CONF.unobserve_configuration(self)
def _gather_observers(self):
"""Gather all the methods decorated with `on_conf_change`."""
for method_name in dir(self):
# Avoid crash at startup due to MRO
if PYSIDE6 and method_name in {
# PySide seems to require that the class is instantiated to
# access this method
"painters",
# Method is debounced
"restart_kernel",
}:
continue
method = getattr(self, method_name, None)
if hasattr(method, '_conf_listen'):
info = method._conf_listen
if len(info) > 1:
self._multi_option_listeners |= {method_name}
for section, option in info:
self._add_listener(method_name, option, section)
def _merge_none_observers(self):
"""Replace observers that declared section as None by CONF_SECTION."""
default_selectors = self._configuration_listeners.get(None, {})
section_selectors = self._configuration_listeners.get(
self.CONF_SECTION, {})
for option in default_selectors:
default_option_receivers = default_selectors.get(option, [])
section_option_receivers = section_selectors.get(option, [])
merged_receivers = (
default_option_receivers + section_option_receivers)
section_selectors[option] = merged_receivers
self._configuration_listeners[self.CONF_SECTION] = section_selectors
self._configuration_listeners.pop(None, None)
def _add_listener(
self, func: Callable, option: ConfigurationKey, section: str
):
"""
Add a callable as listener of the option `option` on section `section`.
Parameters
----------
func: Callable
Function/method that will be called when `option` changes.
option: ConfigurationKey
Configuration option to observe.
section: str
Name of the section where `option` is contained.
"""
section_listeners = self._configuration_listeners.get(section, {})
option_listeners = section_listeners.get(option, [])
option_listeners.append(func)
section_listeners[option] = option_listeners
self._configuration_listeners[section] = section_listeners
def on_configuration_change(self, option: ConfigurationKey, section: str,
value: Any):
"""
Handle configuration updates for the option `option` on the section
`section`, whose new value corresponds to `value`.
Parameters
----------
option: ConfigurationKey
Configuration option that did change.
section: str
Name of the section where `option` is contained.
value: Any
New value of the configuration option that produced the event.
"""
section_receivers = self._configuration_listeners.get(section, {})
option_receivers = section_receivers.get(option, [])
for receiver in option_receivers:
method = (
receiver if callable(receiver) else getattr(self, receiver)
)
if receiver in self._multi_option_listeners:
method(option, value)
else:
method(value)
def add_configuration_observer(
self, func: Callable, option: str, section: Optional[str] = None
):
"""
Add a callable to observe the option `option` on section `section`.
Parameters
----------
func: Callable
Function that will be called when `option` changes.
option: ConfigurationKey
Configuration option to observe.
section: str
Name of the section where `option` is contained.
Notes
-----
- This is only necessary if you need to add a callable that is not a
class method to observe an option. Otherwise, you simply need to
decorate your method with
:func:`spyder.api.config.decorators.on_conf_change`.
"""
if section is None:
section = self.CONF_SECTION
logger.debug(
f'{self} is observing "{option}" option on section "{section}"'
)
self._add_listener(func, option, section)
CONF.observe_configuration(self, section, option)
| SpyderConfigurationObserver |
python | walkccc__LeetCode | solutions/1566. Detect Pattern of Length M Repeated K or More Times/1566.py | {
"start": 0,
"end": 253
} | class ____:
def containsPattern(self, arr: list[int], m: int, k: int) -> bool:
count = 0
for i in range(m, len(arr)):
count = count + 1 if arr[i] == arr[i - m] else 0
if count == m * k - m:
return True
return False
| Solution |
python | matplotlib__matplotlib | lib/mpl_toolkits/axes_grid1/anchored_artists.py | {
"start": 2875,
"end": 5187
} | class ____(AnchoredOffsetbox):
def __init__(self, transform, loc,
pad=0.4, borderpad=0.5, prop=None, frameon=True, **kwargs):
"""
An anchored container with transformed coordinates.
Artists added to the *drawing_area* are scaled according to the
coordinates of the transformation used. The dimensions of this artist
will scale to contain the artists added.
Parameters
----------
transform : `~matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`!matplotlib.axes.Axes.transData`.
loc : str
Location of this artist. Valid locations are
'upper left', 'upper center', 'upper right',
'center left', 'center', 'center right',
'lower left', 'lower center', 'lower right'.
For backward compatibility, numeric values are accepted as well.
See the parameter *loc* of `.Legend` for details.
pad : float, default: 0.4
Padding around the child objects, in fraction of the font size.
borderpad : float, default: 0.5
Border padding, in fraction of the font size.
prop : `~matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, default: True
If True, draw a box around this artist.
**kwargs
Keyword arguments forwarded to `.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `~matplotlib.offsetbox.AuxTransformBox`
A container for artists to display.
Examples
--------
To display an ellipse in the upper left, with a width of 0.1 and
height of 0.4 in data coordinates:
>>> box = AnchoredAuxTransformBox(ax.transData, loc='upper left')
>>> el = Ellipse((0, 0), width=0.1, height=0.4, angle=30)
>>> box.drawing_area.add_artist(el)
>>> ax.add_artist(box)
"""
self.drawing_area = AuxTransformBox(transform)
super().__init__(loc, pad=pad, borderpad=borderpad,
child=self.drawing_area, prop=prop, frameon=frameon,
**kwargs)
| AnchoredAuxTransformBox |
python | astropy__astropy | astropy/extern/ply/yacc.py | {
"start": 82667,
"end": 83030
} | class ____(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
| LALRError |
python | celery__celery | t/unit/security/test_key.py | {
"start": 311,
"end": 1570
} | class ____(SecurityCase):
def test_valid_private_key(self):
PrivateKey(KEY1)
PrivateKey(KEY2)
PrivateKey(ENCKEY1, KEYPASSWORD)
PrivateKey(ENCKEY2, KEYPASSWORD)
def test_invalid_private_key(self):
with pytest.raises((SecurityError, TypeError)):
PrivateKey(None)
with pytest.raises(SecurityError):
PrivateKey('')
with pytest.raises(SecurityError):
PrivateKey('foo')
with pytest.raises(SecurityError):
PrivateKey(KEY1[:20] + KEY1[21:])
with pytest.raises(SecurityError):
PrivateKey(ENCKEY1, KEYPASSWORD+b"wrong")
with pytest.raises(SecurityError):
PrivateKey(ENCKEY2, KEYPASSWORD+b"wrong")
with pytest.raises(SecurityError):
PrivateKey(CERT1)
with pytest.raises(SecurityError):
PrivateKey(KEY_ECDSA)
def test_sign(self):
pkey = PrivateKey(KEY1)
pkey.sign(ensure_bytes('test'), get_digest_algorithm())
with pytest.raises(AttributeError):
pkey.sign(ensure_bytes('test'), get_digest_algorithm('unknown'))
# pkey = PrivateKey(KEY_ECDSA)
# pkey.sign(ensure_bytes('test'), get_digest_algorithm())
| test_PrivateKey |
python | hynek__structlog | tests/test_twisted.py | {
"start": 3884,
"end": 6539
} | class ____:
"""
Some tests here are redundant because they predate _extractStuffAndWhy.
"""
def test_EventAdapterFormatsLog(self):
"""
EventAdapter formats log entries correctly.
"""
la = EventAdapter(_render_repr)
assert "{'foo': 'bar'}" == la(None, "msg", {"foo": "bar"})
def test_transforms_whyIntoEvent(self):
"""
log.err(_stuff=exc, _why='foo') makes the output 'event="foo"'
"""
la = EventAdapter(_render_repr)
error = ValueError("test")
rv = la(None, "err", {"_stuff": error, "_why": "foo", "event": None})
assert () == rv[0]
assert isinstance(rv[1]["_stuff"], Failure)
assert error == rv[1]["_stuff"].value
assert "{'event': 'foo'}" == rv[1]["_why"]
def test_worksUsualCase(self):
"""
log.err(exc, _why='foo') makes the output 'event="foo"'
"""
la = EventAdapter(_render_repr)
error = ValueError("test")
rv = la(None, "err", {"event": error, "_why": "foo"})
assert () == rv[0]
assert isinstance(rv[1]["_stuff"], Failure)
assert error == rv[1]["_stuff"].value
assert "{'event': 'foo'}" == rv[1]["_why"]
def test_allKeywords(self):
"""
log.err(_stuff=exc, _why='event')
"""
la = EventAdapter(_render_repr)
error = ValueError("test")
rv = la(None, "err", {"_stuff": error, "_why": "foo"})
assert () == rv[0]
assert isinstance(rv[1]["_stuff"], Failure)
assert error == rv[1]["_stuff"].value
assert "{'event': 'foo'}" == rv[1]["_why"]
def test_noFailure(self):
"""
log.err('event')
"""
la = EventAdapter(_render_repr)
assert ((), {"_stuff": None, "_why": "{'event': 'someEvent'}"}) == la(
None, "err", {"event": "someEvent"}
)
def test_noFailureWithKeyword(self):
"""
log.err(_why='event')
"""
la = EventAdapter(_render_repr)
assert ((), {"_stuff": None, "_why": "{'event': 'someEvent'}"}) == la(
None, "err", {"_why": "someEvent"}
)
def test_catchesConflictingEventAnd_why(self):
"""
Passing both _why and event raises a ValueError.
"""
la = EventAdapter(_render_repr)
with pytest.raises(
ValueError, match="Both `_why` and `event` supplied"
):
la(None, "err", {"event": "someEvent", "_why": "someReason"})
@pytest.fixture
def jr():
"""
A plain Twisted JSONRenderer.
"""
return JSONRenderer()
| TestEventAdapter |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass9.py | {
"start": 1078,
"end": 1149
} | class ____(metaclass=Meta2, param1=1, param2="", param3=""): ...
| Class2_1 |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/transformer_wmt.py | {
"start": 5435,
"end": 6488
} | class ____(nn.Module):
"""Transformer MLP / feed-forward block.
Attributes:
config: TransformerConfig dataclass containing hyperparameters.
out_dim: optionally specify out dimension.
"""
config: TransformerConfig
out_dim: int | None = None
@nn.compact
def __call__(self, inputs):
"""Applies Transformer MlpBlock module."""
config = self.config
actual_out_dim = (inputs.shape[-1] if self.out_dim is None
else self.out_dim)
x = nn.Dense(
config.mlp_dim,
dtype=config.dtype,
kernel_init=config.kernel_init,
bias_init=config.bias_init)(
inputs)
x = nn.relu(x)
x = nn.Dropout(rate=config.dropout_rate)(
x, deterministic=config.deterministic)
output = nn.Dense(
actual_out_dim,
dtype=config.dtype,
kernel_init=config.kernel_init,
bias_init=config.bias_init)(
x)
output = nn.Dropout(rate=config.dropout_rate)(
output, deterministic=config.deterministic)
return output
| MlpBlock |
python | huggingface__transformers | examples/pytorch/language-modeling/run_clm.py | {
"start": 5723,
"end": 30205
} | class ____:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
block_size: Optional[int] = field(
default=None,
metadata={
"help": (
"Optional input sequence length after tokenization. "
"The training dataset will be truncated in block of this size for training. "
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
keep_linebreaks: bool = field(
default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
)
def __post_init__(self):
if self.streaming:
require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def split_streaming_dataset(
full_streaming_dataset,
validation_percentage: int = 5,
) -> IterableDatasetDict:
"""
Splits a streaming dataset into
training and validation IterableDatasets, and supports methods like .map(), .filter(),
.take() and properties like .features on the resulting streams.
Args:
full_streaming_dataset (Dataset): The name of the dataset to load (e.g., "HuggingFaceFW/fineweb").
validation_percentage (int): The proportion of the dataset to be used for validation split.
Returns:
IterableDatasetDict: An IterableDatasetDict containing two IterableDataset objects: (train_stream, validation_stream).
"""
if not (0 < validation_percentage < 100):
raise ValueError(
f"validation_percentage must be between 0 and 100 (exclusive). Passed: {validation_percentage}"
)
def split_generator(is_train: bool):
for i, example in enumerate(full_streaming_dataset):
if is_train:
if i % 100 > validation_percentage:
yield example
else:
if i % 100 < validation_percentage:
yield example
features = full_streaming_dataset.features
train_stream = IterableDataset.from_generator(split_generator, gen_kwargs={"is_train": True}, features=features)
validation_stream = IterableDataset.from_generator(
split_generator, gen_kwargs={"is_train": False}, features=features
)
return IterableDatasetDict({"train": train_stream, "validation": validation_stream})
def main():
    """Train and/or evaluate a causal language model.

    Pipeline: parse CLI/JSON arguments, configure logging, load the raw
    dataset (Hub or local csv/json/txt files, optionally streamed), build the
    config/tokenizer/model, tokenize and chunk the corpus into fixed-size
    blocks, then run `Trainer` for training and/or evaluation and optionally
    push the result to the Hub.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
            streaming=data_args.streaming,
            trust_remote_code=model_args.trust_remote_code,
        )
        if "validation" not in raw_datasets:
            if data_args.streaming:
                # Streaming datasets cannot be sliced with `train[:N%]`, so
                # re-open the train split and carve out validation on the fly.
                dataset_stream = load_dataset(
                    data_args.dataset_name,
                    data_args.dataset_config_name,
                    split="train",
                    cache_dir=model_args.cache_dir,
                    token=model_args.token,
                    streaming=data_args.streaming,
                    trust_remote_code=model_args.trust_remote_code,
                )
                raw_datasets = split_streaming_dataset(dataset_stream, data_args.validation_split_percentage)
            else:
                raw_datasets["validation"] = load_dataset(
                    data_args.dataset_name,
                    data_args.dataset_config_name,
                    split=f"train[:{data_args.validation_split_percentage}%]",
                    cache_dir=model_args.cache_dir,
                    token=model_args.token,
                    streaming=data_args.streaming,
                    trust_remote_code=model_args.trust_remote_code,
                )
                raw_datasets["train"] = load_dataset(
                    data_args.dataset_name,
                    data_args.dataset_config_name,
                    split=f"train[{data_args.validation_split_percentage}%:]",
                    cache_dir=model_args.cache_dir,
                    token=model_args.token,
                    streaming=data_args.streaming,
                    trust_remote_code=model_args.trust_remote_code,
                )
    else:
        # Local files: infer the loader ("csv"/"json"/"text") from the file
        # extension, validated earlier in DataTrainingArguments.__post_init__.
        data_files = {}
        dataset_args = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = (
            data_args.train_file.split(".")[-1]
            if data_args.train_file is not None
            else data_args.validation_file.split(".")[-1]
        )
        if extension == "txt":
            extension = "text"
            dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
            **dataset_args,
        )
        # If no validation data is there, validation_split_percentage will be used to divide the dataset.
        if "validation" not in raw_datasets:
            if data_args.streaming:
                dataset_stream = load_dataset(
                    extension,
                    data_files=data_files,
                    split="train",
                    cache_dir=model_args.cache_dir,
                    token=model_args.token,
                    **dataset_args,
                )
                raw_datasets = split_streaming_dataset(dataset_stream, data_args.validation_split_percentage)
            else:
                raw_datasets["validation"] = load_dataset(
                    extension,
                    data_files=data_files,
                    split=f"train[:{data_args.validation_split_percentage}%]",
                    cache_dir=model_args.cache_dir,
                    token=model_args.token,
                    **dataset_args,
                )
                raw_datasets["train"] = load_dataset(
                    extension,
                    data_files=data_files,
                    split=f"train[{data_args.validation_split_percentage}%:]",
                    cache_dir=model_args.cache_dir,
                    token=model_args.token,
                    **dataset_args,
                )

    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "token": model_args.token,
        "trust_remote_code": model_args.trust_remote_code,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "token": model_args.token,
        "trust_remote_code": model_args.trust_remote_code,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        dtype = model_args.dtype if model_args.dtype in ["auto", None] else getattr(torch, model_args.dtype)
        model = AutoModelForCausalLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
            dtype=dtype,
        )
    else:
        model = AutoModelForCausalLM.from_config(config, trust_remote_code=model_args.trust_remote_code)
        # De-duplicate shared parameters by data pointer before counting.
        n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
        logger.info(f"Training new model from scratch - Total size={n_params / 2**20:.2f}M params")

    # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
    # on a small vocab and want a smaller embedding size, remove this test.
    embedding_size = model.get_input_embeddings().weight.shape[0]
    if len(tokenizer) > embedding_size:
        model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = list(raw_datasets["train"].features)
    else:
        column_names = list(raw_datasets["validation"].features)
    text_column_name = "text" if "text" in column_names else column_names[0]

    # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
    tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")

    def tokenize_function(examples):
        with CaptureLogger(tok_logger) as cl:
            output = tokenizer(examples[text_column_name])
        # clm input could be much much longer than block_size
        if "Token indices sequence length is longer than the" in cl.out:
            tok_logger.warning(
                "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits"
                " before being passed to the model."
            )
        return output

    with training_args.main_process_first(desc="dataset map tokenization"):
        if not data_args.streaming:
            tokenized_datasets = raw_datasets.map(
                tokenize_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on dataset",
            )
        else:
            # Streaming map supports neither num_proc nor on-disk caching.
            tokenized_datasets = raw_datasets.map(
                tokenize_function,
                batched=True,
                remove_columns=column_names,
            )

    if hasattr(config, "max_position_embeddings"):
        max_pos_embeddings = config.max_position_embeddings
    else:
        # Define a default value if the attribute is missing in the config.
        max_pos_embeddings = 1024

    if data_args.block_size is None:
        block_size = tokenizer.model_max_length
        if block_size > max_pos_embeddings:
            logger.warning(
                f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                f"Using block_size={min(1024, max_pos_embeddings)} instead. You can change that default value by passing --block_size xxx."
            )
            if max_pos_embeddings > 0:
                block_size = min(1024, max_pos_embeddings)
            else:
                block_size = 1024
    else:
        if data_args.block_size > tokenizer.model_max_length:
            logger.warning(
                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model "
                f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
            )
        block_size = min(data_args.block_size, tokenizer.model_max_length)

    # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, and if the total_length < block_size we exclude this batch and return an empty dict.
        # We could add padding if the model supported it instead of this drop, you can customize this part to your needs.
        total_length = (total_length // block_size) * block_size
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
        # Causal LM: labels are the inputs; the shift happens inside the model.
        result["labels"] = result["input_ids"].copy()
        return result

    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
    # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
    # to preprocess.
    #
    # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
    # https://huggingface.co/docs/datasets/process#map
    with training_args.main_process_first(desc="grouping texts together"):
        if not data_args.streaming:
            lm_datasets = tokenized_datasets.map(
                group_texts,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                desc=f"Grouping texts in chunks of {block_size}",
            )
        else:
            lm_datasets = tokenized_datasets.map(
                group_texts,
                batched=True,
            )

    if training_args.do_train:
        if "train" not in tokenized_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = lm_datasets["train"]
        if data_args.max_train_samples is not None:
            if data_args.streaming:
                # Streaming datasets have no len()/select(); use take().
                train_dataset = train_dataset.take(data_args.max_train_samples)
            else:
                max_train_samples = min(len(train_dataset), data_args.max_train_samples)
                train_dataset = train_dataset.select(range(max_train_samples))

    if training_args.do_eval:
        if "validation" not in tokenized_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = lm_datasets["validation"]
        if data_args.max_eval_samples is not None:
            if data_args.streaming:
                eval_dataset = eval_dataset.take(data_args.max_eval_samples)
            else:
                max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
                eval_dataset = eval_dataset.select(range(max_eval_samples))

        def preprocess_logits_for_metrics(logits, labels):
            if isinstance(logits, tuple):
                # Depending on the model and config, logits may contain extra tensors,
                # like past_key_values, but logits always come first
                logits = logits[0]
            return logits.argmax(dim=-1)

        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

        def compute_metrics(eval_preds):
            preds, labels = eval_preds
            # preds have the same shape as the labels, after the argmax(-1) has been calculated
            # by preprocess_logits_for_metrics but we need to shift the labels
            labels = labels[:, 1:].reshape(-1)
            preds = preds[:, :-1].reshape(-1)
            return metric.compute(predictions=preds, references=labels)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        processing_class=tokenizer,
        # Data collator will default to DataCollatorWithPadding, so we change it.
        data_collator=default_data_collator,
        compute_metrics=compute_metrics if training_args.do_eval and not is_torch_xla_available() else None,
        preprocess_logits_for_metrics=preprocess_logits_for_metrics
        if training_args.do_eval and not is_torch_xla_available()
        else None,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        if data_args.streaming:
            metrics["train_samples"] = max_train_samples
        else:
            metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        if data_args.streaming:
            metrics["eval_samples"] = max_eval_samples
        else:
            metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        try:
            # Perplexity is exp(cross-entropy loss); overflow means the loss
            # is too large for a finite float.
            perplexity = math.exp(metrics["eval_loss"])
        except OverflowError:
            perplexity = float("inf")
        metrics["perplexity"] = perplexity

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Metadata for the auto-generated model card / Hub upload.
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # `index` is the per-process index supplied by the spawner; it is unused
    # because `main()` reads its configuration from the command line.
    main()
# Standard script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
| DataTrainingArguments |
python | tensorflow__tensorflow | tensorflow/python/ops/data_flow_ops.py | {
"start": 92106,
"end": 95712
} | class ____:
"""RecordInput asynchronously reads and randomly yields TFRecords.
A RecordInput Op will continuously read a batch of records asynchronously
into a buffer of some fixed capacity. It can also asynchronously yield
random records from this buffer.
It will not start yielding until at least `buffer_size / 2` elements have been
placed into the buffer so that sufficient randomization can take place.
The order the files are read will be shifted each epoch by `shift_amount` so
that the data is presented in a different order every epoch.
"""
def __init__(self,
file_pattern,
batch_size=1,
buffer_size=1,
parallelism=1,
shift_ratio=0,
seed=0,
name=None,
batches=None,
compression_type=None):
"""Constructs a RecordInput Op.
Args:
file_pattern: File path to the dataset, possibly containing wildcards.
All matching files will be iterated over each epoch.
batch_size: How many records to return at a time.
buffer_size: The maximum number of records the buffer will contain.
parallelism: How many reader threads to use for reading from files.
shift_ratio: What percentage of the total number files to move the start
file forward by each epoch.
seed: Specify the random number seed used by generator that randomizes
records.
name: Optional name for the operation.
batches: None by default, creating a single batch op. Otherwise specifies
how many batches to create, which are returned as a list when
`get_yield_op()` is called. An example use case is to split processing
between devices on one computer.
compression_type: The type of compression for the file. Currently ZLIB and
GZIP are supported. Defaults to none.
Raises:
ValueError: If one of the arguments is invalid.
"""
self._batch_size = batch_size
if batches is not None:
self._batch_size *= batches
self._batches = batches
self._file_pattern = file_pattern
self._buffer_size = buffer_size
self._parallelism = parallelism
self._shift_ratio = shift_ratio
self._seed = seed
self._name = name
self._compression_type = python_io.TFRecordCompressionType.NONE
if compression_type is not None:
self._compression_type = compression_type
def get_yield_op(self):
"""Adds a node that yields a group of records every time it is executed.
If RecordInput `batches` parameter is not None, it yields a list of
record batches with the specified `batch_size`.
"""
compression_type = python_io.TFRecordOptions.get_compression_type_string(
python_io.TFRecordOptions(self._compression_type))
records = gen_data_flow_ops.record_input(
file_pattern=self._file_pattern,
file_buffer_size=self._buffer_size,
file_parallelism=self._parallelism,
file_shuffle_shift_ratio=self._shift_ratio,
batch_size=self._batch_size,
file_random_seed=self._seed,
compression_type=compression_type,
name=self._name)
if self._batches is None:
return records
else:
with ops.name_scope(self._name):
batch_list = [[] for _ in range(self._batches)]
records = array_ops.split(records, self._batch_size, 0)
for index, protobuf in enumerate(records):
batch_index = index % self._batches
batch_list[batch_index].append(array_ops.reshape(protobuf, []))
return batch_list
| RecordInput |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-pinecone/destination_pinecone/config.py | {
"start": 958,
"end": 1034
} | class ____(VectorDBConfigModel):
indexing: PineconeIndexingModel
| ConfigModel |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_index_tricks.py | {
"start": 15570,
"end": 17041
} | class ____(TestCase):
def test_regression_1(self):
# Test empty untyped inputs create outputs of indexing type, gh-5804
(a,) = ix_(range(0))
assert_equal(a.dtype, np.intp)
(a,) = ix_([])
assert_equal(a.dtype, np.intp)
# but if the type is specified, don't change it
(a,) = ix_(np.array([], dtype=np.float32))
assert_equal(a.dtype, np.float32)
def test_shape_and_dtype(self):
sizes = (4, 5, 3, 2)
# Test both lists and arrays
for func in (range, np.arange):
arrays = ix_(*[func(sz) for sz in sizes])
for k, (a, sz) in enumerate(zip(arrays, sizes)):
assert_equal(a.shape[k], sz)
assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
assert_(np.issubdtype(a.dtype, np.integer))
def test_bool(self):
bool_a = [True, False, True, True]
(int_a,) = np.nonzero(bool_a)
assert_equal(ix_(bool_a)[0], int_a)
def test_1d_only(self):
idx2d = [[1, 2, 3], [4, 5, 6]]
assert_raises(ValueError, ix_, idx2d)
def test_repeated_input(self):
length_of_vector = 5
x = np.arange(length_of_vector)
out = ix_(x, x)
assert_equal(out[0].shape, (length_of_vector, 1))
assert_equal(out[1].shape, (1, length_of_vector))
# check that input shape is not modified
assert_equal(x.shape, (length_of_vector,))
| TestIx_ |
python | getsentry__sentry | tests/sentry/api/endpoints/test_api_authorizations.py | {
"start": 1661,
"end": 6815
} | class ____(ApiAuthorizationsTest):
method = "delete"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(email="test@example.com")
self.login_as(user=self.user)
self.application = ApiApplication.objects.create(owner=self.create_user(), name="test")
self.authorization = ApiAuthorization.objects.create(
user=self.user,
application=self.application,
)
def test_simple(self) -> None:
app = ApiApplication.objects.create(name="test", owner=self.user)
auth = ApiAuthorization.objects.create(application=app, user=self.user)
token = ApiToken.objects.create(application=app, user=self.user)
self.get_success_response(authorization=auth.id, status_code=204)
assert not ApiAuthorization.objects.filter(id=auth.id).exists()
assert not ApiToken.objects.filter(id=token.id).exists()
def test_with_org(self) -> None:
org1 = self.organization
org2 = self.create_organization(owner=self.user, slug="test-org-2")
app_with_org = ApiApplication.objects.create(
name="test-app", owner=self.user, requires_org_level_access=True
)
org1_auth = ApiAuthorization.objects.create(
application=app_with_org, user=self.user, organization_id=org1.id
)
org2_auth = ApiAuthorization.objects.create(
application=app_with_org, user=self.user, organization_id=org2.id
)
org1_token = ApiToken.objects.create(
application=app_with_org, user=self.user, scoping_organization_id=org1.id
)
org2_token = ApiToken.objects.create(
application=app_with_org, user=self.user, scoping_organization_id=org2.id
)
self.get_success_response(authorization=org1_auth.id, status_code=204)
assert not ApiAuthorization.objects.filter(id=org1_auth.id).exists()
assert not ApiToken.objects.filter(id=org1_token.id).exists()
assert ApiAuthorization.objects.filter(id=org2_auth.id).exists()
assert ApiToken.objects.filter(id=org2_token.id).exists()
def test_delete_authorization_cleans_up_grants(self) -> None:
# Create API grants associated with this authorization
grant_1 = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
expires_at=timezone.now() + timedelta(minutes=10),
)
grant_2 = ApiGrant.objects.create(
user=self.user,
application=self.application,
redirect_uri="https://example.com",
expires_at=timezone.now() + timedelta(minutes=10),
)
# Only exchange one of the grants
token = ApiToken.from_grant(grant_1)
token.save()
# grant_2 should still exist at this point
assert ApiGrant.objects.filter(id=grant_2.id).exists()
# Make the delete request
self.get_success_response(
authorization=self.authorization.id,
status_code=204,
)
# Verify the authorization is deleted
assert not ApiAuthorization.objects.filter(id=self.authorization.id).exists()
# Verify associated grants are deleted
assert not ApiGrant.objects.filter(id=grant_1.id).exists()
assert not ApiGrant.objects.filter(id=grant_2.id).exists()
# Verify associated tokens are deleted
assert not ApiToken.objects.filter(id=token.id).exists()
def test_delete_authorization_with_no_grants(self) -> None:
"""Test that deletion works when there are no associated grants"""
self.get_success_response(
authorization=self.authorization.id,
status_code=204,
)
assert not ApiAuthorization.objects.filter(id=self.authorization.id).exists()
def test_delete_authorization_with_organization_scoped_grants(self) -> None:
"""Test that only grants for the specific org are deleted"""
org1 = self.create_organization()
org2 = self.create_organization()
# Create grants for different orgs
grant1 = ApiGrant.objects.create(
user=self.user,
application=self.application,
organization_id=org1.id,
redirect_uri="https://example.com",
)
grant2 = ApiGrant.objects.create(
user=self.user,
application=self.application,
organization_id=org2.id,
redirect_uri="https://example.com",
)
# Create authorization for org1
auth1 = ApiAuthorization.objects.create(
user=self.user,
application=self.application,
organization_id=org1.id,
)
# Delete authorization for org1
self.get_success_response(
authorization=auth1.id,
status_code=204,
)
# Verify only org1's grant is deleted
assert not ApiGrant.objects.filter(id=grant1.id).exists()
assert ApiGrant.objects.filter(id=grant2.id).exists()
| ApiAuthorizationsDeleteTest |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 65626,
"end": 65848
} | class ____(BaseModel):
"""
Asset collection response.
"""
assets: Annotated[list[AssetResponse], Field(title="Assets")]
total_entries: Annotated[int, Field(title="Total Entries")]
| AssetCollectionResponse |
python | pypa__pipenv | pipenv/patched/pip/_internal/exceptions.py | {
"start": 18051,
"end": 20133
} | class ____(HashError):
"""
Distribution file hash values don't match.
:ivar package_name: The name of the package that triggered the hash
mismatch. Feel free to write to this after the exception is raise to
improve its error message.
"""
order = 4
head = (
"THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS "
"FILE. If you have updated the package versions, please update "
"the hashes. Otherwise, examine the package contents carefully; "
"someone may have tampered with them."
)
def __init__(self, allowed: Dict[str, List[str]], gots: Dict[str, "_Hash"]) -> None:
"""
:param allowed: A dict of algorithm names pointing to lists of allowed
hex digests
:param gots: A dict of algorithm names pointing to hashes we
actually got from the files under suspicion
"""
self.allowed = allowed
self.gots = gots
def body(self) -> str:
return f" {self._requirement_name()}:\n{self._hash_comparison()}"
def _hash_comparison(self) -> str:
"""
Return a comparison of actual and expected hash values.
Example::
Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
or 123451234512345123451234512345123451234512345
Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
"""
def hash_then_or(hash_name: str) -> "chain[str]":
# For now, all the decent hashes have 6-char names, so we can get
# away with hard-coding space literals.
return chain([hash_name], repeat(" or"))
lines: List[str] = []
for hash_name, expecteds in self.allowed.items():
prefix = hash_then_or(hash_name)
lines.extend((f" Expected {next(prefix)} {e}") for e in expecteds)
lines.append(
f" Got {self.gots[hash_name].hexdigest()}\n"
)
return "\n".join(lines)
| HashMismatch |
python | neetcode-gh__leetcode | python/0138-copy-list-with-random-pointer.py | {
"start": 29,
"end": 203
} | class ____:
def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):
self.val = int(x)
self.next = next
self.random = random
"""
| Node |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.