language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/models/informer/test_modeling_informer.py | {
"start": 7699,
"end": 19398
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (InformerModel, InformerForPrediction) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": InformerModel} if is_torch_available() else {}
is_encoder_decoder = True
test_missing_keys = False
test_inputs_embeds = False
def setUp(self):
self.model_tester = InformerModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=InformerConfig,
has_text_modality=False,
prediction_length=self.model_tester.prediction_length,
)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, _ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.context_length
if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
seq_length = seq_length * self.model_tester.chunk_length
else:
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "prediction_length", seq_len)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="Informer does not have tokens embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip
def test_model_outputs_equivalence(self):
pass
@unittest.skip
def test_determinism(self):
pass
@unittest.skip(reason="randomly selects U keys while calculating attentions")
def test_batching_equivalence(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
# # Input is 'static_categorical_features' not 'input_ids'
def test_model_main_input_name(self):
model_signature = inspect.signature(getattr(InformerModel, "forward"))
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys())[1]
self.assertEqual(InformerModel.main_input_name, observed_main_input_name)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
expected_arg_names.extend(
[
"future_observed_mask",
"decoder_attention_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
]
if "future_observed_mask" in arg_names
else [
"decoder_attention_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
context_length = getattr(self.model_tester, "context_length", seq_len)
prediction_length = getattr(self.model_tester, "prediction_length", seq_len)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, context_length],
)
out_len = len(outputs)
correct_outlen = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, prediction_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_seq_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 2, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, context_length],
)
@is_flaky()
def test_retain_grad_hidden_states_attentions(self):
super().test_retain_grad_hidden_states_attentions()
@unittest.skip(reason="Model does not have input embeddings")
def test_model_get_set_embeddings(self):
pass
def prepare_batch(filename="train-batch.pt"):
file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
check_torch_load_is_safe()
batch = torch.load(file, map_location=torch_device, weights_only=True)
return batch
@require_torch
@slow
| InformerModelTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_storage_transfer_service.py | {
"start": 14572,
"end": 17515
} | class ____(GoogleCloudBaseOperator):
"""
Delete a transfer job.
This is a soft delete. After a transfer job is deleted, the job and all the transfer
executions are subject to garbage collection. Transfer jobs become eligible for garbage
collection 30 days after soft delete.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataTransferServiceDeleteJobOperator`
:param job_name: (Required) Name of the TRANSFER operation
:param project_id: (Optional) the ID of the project that owns the Transfer
Job. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param api_version: API version used (e.g. v1).
:param google_impersonation_chain: Optional Google service account to impersonate using
short-term credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_transfer_job_delete_template_fields]
template_fields: Sequence[str] = (
"job_name",
"project_id",
"gcp_conn_id",
"api_version",
"google_impersonation_chain",
)
# [END gcp_transfer_job_delete_template_fields]
def __init__(
self,
*,
job_name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
project_id: str = PROVIDE_PROJECT_ID,
google_impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_name = job_name
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.google_impersonation_chain = google_impersonation_chain
self._validate_inputs()
def _validate_inputs(self) -> None:
if not self.job_name:
raise AirflowException("The required parameter 'job_name' is empty or None")
def execute(self, context: Context) -> None:
self._validate_inputs()
hook = CloudDataTransferServiceHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.google_impersonation_chain,
)
hook.delete_transfer_job(job_name=self.job_name, project_id=self.project_id)
| CloudDataTransferServiceDeleteJobOperator |
python | getsentry__sentry | tests/sentry/preprod/size_analysis/test_size_analysis_tasks.py | {
"start": 22794,
"end": 26153
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.organization = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.organization)
def _create_size_metrics(self, **kwargs):
"""Helper to create PreprodArtifactSizeMetrics."""
artifact = PreprodArtifact.objects.create(
project=self.project,
app_id="com.example.app",
state=PreprodArtifact.ArtifactState.PROCESSED,
)
return PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
**kwargs,
)
def test_manual_size_analysis_comparison_success(self):
"""Test manual_size_analysis_comparison with valid metrics."""
head_size_metrics = self._create_size_metrics()
base_size_metrics = self._create_size_metrics()
with patch(
"sentry.preprod.size_analysis.tasks._run_size_analysis_comparison"
) as mock_run_comparison:
manual_size_analysis_comparison(
project_id=self.project.id,
org_id=self.organization.id,
head_artifact_id=head_size_metrics.preprod_artifact.id,
base_artifact_id=base_size_metrics.preprod_artifact.id,
)
mock_run_comparison.assert_called_once_with(
self.organization.id,
head_size_metrics,
base_size_metrics,
)
def test_manual_size_analysis_comparison_nonexistent_head_metric(self):
"""Test manual_size_analysis_comparison with nonexistent head metric."""
base_size_metrics = self._create_size_metrics()
with patch("sentry.preprod.size_analysis.tasks.logger") as mock_logger:
manual_size_analysis_comparison(
project_id=self.project.id,
org_id=self.organization.id,
head_artifact_id=99999,
base_artifact_id=base_size_metrics.preprod_artifact.id,
)
mock_logger.exception.assert_called_once()
call_args = mock_logger.exception.call_args
assert "preprod.size_analysis.compare.manual.head_artifact_not_found" in call_args[0]
assert call_args[1]["extra"]["head_artifact_id"] == 99999
def test_manual_size_analysis_comparison_nonexistent_base_metric(self):
"""Test manual_size_analysis_comparison with nonexistent base metric."""
head_size_metrics = self._create_size_metrics()
with patch("sentry.preprod.size_analysis.tasks.logger") as mock_logger:
manual_size_analysis_comparison(
project_id=self.project.id,
org_id=self.organization.id,
head_artifact_id=head_size_metrics.preprod_artifact.id,
base_artifact_id=99999,
)
mock_logger.exception.assert_called_once()
call_args = mock_logger.exception.call_args
assert "preprod.size_analysis.compare.manual.base_artifact_not_found" in call_args[0]
assert call_args[1]["extra"]["base_artifact_id"] == 99999
| ManualSizeAnalysisComparisonTest |
python | zostera__django-bootstrap4 | src/bootstrap4/renderers.py | {
"start": 992,
"end": 2609
} | class ____:
"""A content renderer."""
def __init__(self, *args, **kwargs):
self.layout = kwargs.get("layout", "")
self.form_group_class = kwargs.get("form_group_class", FORM_GROUP_CLASS)
self.field_class = kwargs.get("field_class", "")
self.label_class = kwargs.get("label_class", "")
self.show_help = kwargs.get("show_help", True)
self.show_label = kwargs.get("show_label", True)
self.exclude = kwargs.get("exclude", "")
self.set_placeholder = kwargs.get("set_placeholder", True)
self.size = self.parse_size(kwargs.get("size", ""))
self.horizontal_label_class = kwargs.get(
"horizontal_label_class", get_bootstrap_setting("horizontal_label_class")
)
self.horizontal_field_class = kwargs.get(
"horizontal_field_class", get_bootstrap_setting("horizontal_field_class")
)
def parse_size(self, size):
size = text_value(size).lower().strip()
if size in ("sm", "small"):
return "small"
if size in ("lg", "large"):
return "large"
if size in ("md", "medium", ""):
return "medium"
raise BootstrapError(f'Invalid value "{size}" for parameter "size" (expected "sm", "md", "lg" or "").')
def get_size_class(self, prefix="form-control"):
if self.size == "small":
return prefix + "-sm"
if self.size == "large":
return prefix + "-lg"
return ""
def _render(self):
return ""
def render(self):
return mark_safe(self._render())
| BaseRenderer |
python | gevent__gevent | src/greentest/3.13/test_queue.py | {
"start": 22846,
"end": 23000
} | class ____(PriorityQueueTest, unittest.TestCase):
queue = c_queue
# A Queue subclass that can provoke failure at a moment's notice :)
| CPriorityQueueTest |
python | chroma-core__chroma | chromadb/utils/embedding_functions/schemas/bm25_tokenizer.py | {
"start": 4519,
"end": 5159
} | class ____:
def __init__(self, seed: int = 0) -> None:
try:
import mmh3
except ImportError:
raise ValueError(
"The murmurhash3 python package is not installed. Please install it with `pip install murmurhash3`"
)
self.hasher = mmh3.hash
self.seed = seed
def hash(self, token: str) -> int:
return cast(int, abs(self.hasher(token, seed=self.seed)))
__all__ = [
"Bm25Tokenizer",
"DEFAULT_CHROMA_BM25_STOPWORDS",
"DEFAULT_ENGLISH_STOPWORDS",
"SnowballStemmer",
"get_english_stemmer",
"Murmur3AbsHasher",
]
| Murmur3AbsHasher |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_properties06.py | {
"start": 315,
"end": 1769
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("properties06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
company_guid = "2096f6a2-d2f7-48be-b329-b73aaa526e5d"
site_id = "cb46c030-1825-4e81-a295-151c039dbf02"
action_id = "88124cf5-1340-457d-90e1-0000a9427c99"
workbook.set_custom_property(
f"MSIP_Label_{company_guid}_Enabled", "true", "text"
)
workbook.set_custom_property(
f"MSIP_Label_{company_guid}_SetDate", "2024-01-01T12:00:00Z", "text"
)
workbook.set_custom_property(
f"MSIP_Label_{company_guid}_Method", "Privileged", "text"
)
workbook.set_custom_property(
f"MSIP_Label_{company_guid}_Name", "Confidential", "text"
)
workbook.set_custom_property(
f"MSIP_Label_{company_guid}_SiteId", site_id, "text"
)
workbook.set_custom_property(
f"MSIP_Label_{company_guid}_ActionId", action_id, "text"
)
workbook.set_custom_property(
f"MSIP_Label_{company_guid}_ContentBits", "2", "text"
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | numba__numba | numba/core/typing/cmathdecl.py | {
"start": 971,
"end": 1202
} | class ____(ConcreteTemplate):
# unary cmath.log()
cases = [signature(tp, tp) for tp in sorted(types.complex_domain)]
# binary cmath.log()
cases += [signature(tp, tp, tp) for tp in sorted(types.complex_domain)]
| Cmath_log |
python | run-llama__llama_index | llama-index-integrations/callbacks/llama-index-callbacks-openinference/llama_index/callbacks/openinference/base.py | {
"start": 4167,
"end": 9844
} | class ____(BaseCallbackHandler):
"""
Callback handler for storing generation data in OpenInference format.
OpenInference is an open standard for capturing and storing AI model
inferences. It enables production LLMapp servers to seamlessly integrate
with LLM observability solutions such as Arize and Phoenix.
For more information on the specification, see
https://github.com/Arize-ai/open-inference-spec
"""
def __init__(
self,
callback: Optional[Callable[[List[QueryData], List[NodeData]], None]] = None,
) -> None:
"""
Initializes the OpenInferenceCallbackHandler.
Args:
callback (Optional[Callable[[List[QueryData], List[NodeData]], None]], optional): A
callback function that will be called when a query trace is
completed, often used for logging or persisting query data.
"""
super().__init__(event_starts_to_ignore=[], event_ends_to_ignore=[])
self._callback = callback
self._trace_data = TraceData()
self._query_data_buffer: List[QueryData] = []
self._node_data_buffer: List[NodeData] = []
def start_trace(self, trace_id: Optional[str] = None) -> None:
if trace_id == "query" or trace_id == "chat":
self._trace_data = TraceData()
self._trace_data.query_data.timestamp = datetime.now().isoformat()
self._trace_data.query_data.id = _generate_random_id()
def end_trace(
self,
trace_id: Optional[str] = None,
trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
if trace_id == "query" or trace_id == "chat":
self._query_data_buffer.append(self._trace_data.query_data)
self._node_data_buffer.extend(self._trace_data.node_datas)
self._trace_data = TraceData()
if self._callback is not None:
self._callback(self._query_data_buffer, self._node_data_buffer)
def on_event_start(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
parent_id: str = "",
**kwargs: Any,
) -> str:
if payload is not None:
if event_type is CBEventType.QUERY:
query_text = payload[EventPayload.QUERY_STR]
self._trace_data.query_data.query_text = query_text
elif event_type is CBEventType.LLM:
if prompt := payload.get(EventPayload.PROMPT, None):
self._trace_data.query_data.llm_prompt = prompt
if messages := payload.get(EventPayload.MESSAGES, None):
self._trace_data.query_data.llm_messages = [
(m.role.value, m.content) for m in messages
]
# For chat engines there is no query event and thus the
# query text will be None, in this case we set the query
# text to the last message passed to the LLM
if self._trace_data.query_data.query_text is None:
self._trace_data.query_data.query_text = messages[-1].content
return event_id
def on_event_end(
self,
event_type: CBEventType,
payload: Optional[Dict[str, Any]] = None,
event_id: str = "",
**kwargs: Any,
) -> None:
if payload is None:
return
if event_type is CBEventType.RETRIEVE:
for node_with_score in payload[EventPayload.NODES]:
node = node_with_score.node
score = node_with_score.score
self._trace_data.query_data.node_ids.append(node.hash)
self._trace_data.query_data.scores.append(score)
self._trace_data.node_datas.append(
NodeData(
id=node.hash,
node_text=node.text,
)
)
elif event_type is CBEventType.LLM:
if self._trace_data.query_data.response_text is None:
if response := payload.get(EventPayload.RESPONSE, None):
if isinstance(response, ChatResponse):
# If the response is of class ChatResponse the string
# representation has the format "<role>: <message>",
# but we want just the message
response_text = response.message.content
else:
response_text = str(response)
self._trace_data.query_data.response_text = response_text
elif completion := payload.get(EventPayload.COMPLETION, None):
self._trace_data.query_data.response_text = str(completion)
elif event_type is CBEventType.EMBEDDING:
self._trace_data.query_data.query_embedding = payload[
EventPayload.EMBEDDINGS
][0]
def flush_query_data_buffer(self) -> List[QueryData]:
"""
Clears the query data buffer and returns the data.
Returns:
List[QueryData]: The query data.
"""
query_data_buffer = self._query_data_buffer
self._query_data_buffer = []
return query_data_buffer
def flush_node_data_buffer(self) -> List[NodeData]:
"""
Clears the node data buffer and returns the data.
Returns:
List[NodeData]: The node data.
"""
node_data_buffer = self._node_data_buffer
self._node_data_buffer = []
return node_data_buffer
| OpenInferenceCallbackHandler |
python | openai__openai-python | src/openai/_base_client.py | {
"start": 47023,
"end": 64733
} | class ____(BaseClient[httpx.AsyncClient, AsyncStream[Any]]):
_client: httpx.AsyncClient
_default_stream_cls: type[AsyncStream[Any]] | None = None
def __init__(
self,
*,
version: str,
base_url: str | URL,
_strict_response_validation: bool,
max_retries: int = DEFAULT_MAX_RETRIES,
timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.AsyncClient | None = None,
custom_headers: Mapping[str, str] | None = None,
custom_query: Mapping[str, object] | None = None,
) -> None:
if not is_given(timeout):
# if the user passed in a custom http client with a non-default
# timeout set then we use that timeout.
#
# note: there is an edge case here where the user passes in a client
# where they've explicitly set the timeout to match the default timeout
# as this check is structural, meaning that we'll think they didn't
# pass in a timeout and will ignore it
if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT:
timeout = http_client.timeout
else:
timeout = DEFAULT_TIMEOUT
if http_client is not None and not isinstance(http_client, httpx.AsyncClient): # pyright: ignore[reportUnnecessaryIsInstance]
raise TypeError(
f"Invalid `http_client` argument; Expected an instance of `httpx.AsyncClient` but got {type(http_client)}"
)
super().__init__(
version=version,
base_url=base_url,
# cast to a valid type because mypy doesn't understand our type narrowing
timeout=cast(Timeout, timeout),
max_retries=max_retries,
custom_query=custom_query,
custom_headers=custom_headers,
_strict_response_validation=_strict_response_validation,
)
self._client = http_client or AsyncHttpxClientWrapper(
base_url=base_url,
# cast to a valid type because mypy doesn't understand our type narrowing
timeout=cast(Timeout, timeout),
)
def is_closed(self) -> bool:
return self._client.is_closed
async def close(self) -> None:
"""Close the underlying HTTPX client.
The client will *not* be usable after this.
"""
await self._client.aclose()
async def __aenter__(self: _T) -> _T:
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
await self.close()
async def _prepare_options(
self,
options: FinalRequestOptions, # noqa: ARG002
) -> FinalRequestOptions:
"""Hook for mutating the given options"""
return options
async def _prepare_request(
self,
request: httpx.Request, # noqa: ARG002
) -> None:
"""This method is used as a callback for mutating the `Request` object
after it has been constructed.
This is useful for cases where you want to add certain headers based off of
the request properties, e.g. `url`, `method` etc.
"""
return None
@overload
async def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
*,
stream: Literal[False] = False,
) -> ResponseT: ...
@overload
async def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
*,
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
) -> _AsyncStreamT: ...
@overload
async def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
*,
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT: ...
async def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
*,
stream: bool = False,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT:
if self._platform is None:
# `get_platform` can make blocking IO calls so we
# execute it earlier while we are in an async context
self._platform = await asyncify(get_platform)()
cast_to = self._maybe_override_cast_to(cast_to, options)
# create a copy of the options we were given so that if the
# options are mutated later & we then retry, the retries are
# given the original options
input_options = model_copy(options)
if input_options.idempotency_key is None and input_options.method.lower() != "get":
# ensure the idempotency key is reused between requests
input_options.idempotency_key = self._idempotency_key()
response: httpx.Response | None = None
max_retries = input_options.get_max_retries(self.max_retries)
retries_taken = 0
for retries_taken in range(max_retries + 1):
options = model_copy(input_options)
options = await self._prepare_options(options)
remaining_retries = max_retries - retries_taken
request = self._build_request(options, retries_taken=retries_taken)
await self._prepare_request(request)
kwargs: HttpxSendArgs = {}
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth
if options.follow_redirects is not None:
kwargs["follow_redirects"] = options.follow_redirects
log.debug("Sending HTTP Request: %s %s", request.method, request.url)
response = None
try:
response = await self._client.send(
request,
stream=stream or self._should_stream_response_body(request=request),
**kwargs,
)
except httpx.TimeoutException as err:
log.debug("Encountered httpx.TimeoutException", exc_info=True)
if remaining_retries > 0:
await self._sleep_for_retry(
retries_taken=retries_taken,
max_retries=max_retries,
options=input_options,
response=None,
)
continue
log.debug("Raising timeout error")
raise APITimeoutError(request=request) from err
except Exception as err:
log.debug("Encountered Exception", exc_info=True)
if remaining_retries > 0:
await self._sleep_for_retry(
retries_taken=retries_taken,
max_retries=max_retries,
options=input_options,
response=None,
)
continue
log.debug("Raising connection error")
raise APIConnectionError(request=request) from err
log.debug(
'HTTP Response: %s %s "%i %s" %s',
request.method,
request.url,
response.status_code,
response.reason_phrase,
response.headers,
)
log.debug("request_id: %s", response.headers.get("x-request-id"))
try:
response.raise_for_status()
except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
if remaining_retries > 0 and self._should_retry(err.response):
await err.response.aclose()
await self._sleep_for_retry(
retries_taken=retries_taken,
max_retries=max_retries,
options=input_options,
response=response,
)
continue
# If the response is streamed then we need to explicitly read the response
# to completion before attempting to access the response text.
if not err.response.is_closed:
await err.response.aread()
log.debug("Re-raising status error")
raise self._make_status_error_from_response(err.response) from None
break
assert response is not None, "could not resolve response (should never happen)"
return await self._process_response(
cast_to=cast_to,
options=options,
response=response,
stream=stream,
stream_cls=stream_cls,
retries_taken=retries_taken,
)
async def _sleep_for_retry(
self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
) -> None:
remaining_retries = max_retries - retries_taken
if remaining_retries == 1:
log.debug("1 retry left")
else:
log.debug("%i retries left", remaining_retries)
timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None)
log.info("Retrying request to %s in %f seconds", options.url, timeout)
await anyio.sleep(timeout)
async def _process_response(
self,
*,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
response: httpx.Response,
stream: bool,
stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None,
retries_taken: int = 0,
) -> ResponseT:
if response.request.headers.get(RAW_RESPONSE_HEADER) == "true":
return cast(
ResponseT,
LegacyAPIResponse(
raw=response,
client=self,
cast_to=cast_to,
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)
origin = get_origin(cast_to) or cast_to
if (
inspect.isclass(origin)
and issubclass(origin, BaseAPIResponse)
# we only want to actually return the custom BaseAPIResponse class if we're
# returning the raw response, or if we're not streaming SSE, as if we're streaming
# SSE then `cast_to` doesn't actively reflect the type we need to parse into
and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
):
if not issubclass(origin, AsyncAPIResponse):
raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}")
response_cls = cast("type[BaseAPIResponse[Any]]", cast_to)
return cast(
"ResponseT",
response_cls(
raw=response,
client=self,
cast_to=extract_response_type(response_cls),
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
),
)
if cast_to == httpx.Response:
return cast(ResponseT, response)
api_response = AsyncAPIResponse(
raw=response,
client=self,
cast_to=cast("type[ResponseT]", cast_to), # pyright: ignore[reportUnnecessaryCast]
stream=stream,
stream_cls=stream_cls,
options=options,
retries_taken=retries_taken,
)
if bool(response.request.headers.get(RAW_RESPONSE_HEADER)):
return cast(ResponseT, api_response)
return await api_response.parse()
def _request_api_list(
self,
model: Type[_T],
page: Type[AsyncPageT],
options: FinalRequestOptions,
) -> AsyncPaginator[_T, AsyncPageT]:
return AsyncPaginator(client=self, options=options, page_cls=page, model=model)
@overload
async def get(
self,
path: str,
*,
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: Literal[False] = False,
) -> ResponseT: ...
@overload
async def get(
self,
path: str,
*,
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
) -> _AsyncStreamT: ...
@overload
async def get(
self,
path: str,
*,
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT: ...
async def get(
self,
path: str,
*,
cast_to: Type[ResponseT],
options: RequestOptions = {},
stream: bool = False,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT:
opts = FinalRequestOptions.construct(method="get", url=path, **options)
return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
@overload
async def post(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: Literal[False] = False,
) -> ResponseT: ...
@overload
async def post(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
) -> _AsyncStreamT: ...
@overload
async def post(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT: ...
async def post(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
stream: bool = False,
stream_cls: type[_AsyncStreamT] | None = None,
) -> ResponseT | _AsyncStreamT:
opts = FinalRequestOptions.construct(
method="post", url=path, json_data=body, files=await async_to_httpx_files(files), **options
)
return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
async def patch(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
) -> ResponseT:
opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options)
return await self.request(cast_to, opts)
async def put(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
files: RequestFiles | None = None,
options: RequestOptions = {},
) -> ResponseT:
opts = FinalRequestOptions.construct(
method="put", url=path, json_data=body, files=await async_to_httpx_files(files), **options
)
return await self.request(cast_to, opts)
async def delete(
self,
path: str,
*,
cast_to: Type[ResponseT],
body: Body | None = None,
options: RequestOptions = {},
) -> ResponseT:
opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options)
return await self.request(cast_to, opts)
def get_api_list(
self,
path: str,
*,
model: Type[_T],
page: Type[AsyncPageT],
body: Body | None = None,
options: RequestOptions = {},
method: str = "get",
) -> AsyncPaginator[_T, AsyncPageT]:
opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options)
return self._request_api_list(model, page, opts)
def make_request_options(
*,
query: Query | None = None,
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
idempotency_key: str | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
post_parser: PostParser | NotGiven = not_given,
) -> RequestOptions:
"""Create a dict of type RequestOptions without keys of NotGiven values."""
options: RequestOptions = {}
if extra_headers is not None:
options["headers"] = extra_headers
if extra_body is not None:
options["extra_json"] = cast(AnyMapping, extra_body)
if query is not None:
options["params"] = query
if extra_query is not None:
options["params"] = {**options.get("params", {}), **extra_query}
if not isinstance(timeout, NotGiven):
options["timeout"] = timeout
if idempotency_key is not None:
options["idempotency_key"] = idempotency_key
if is_given(post_parser):
# internal
options["post_parser"] = post_parser # type: ignore
return options
| AsyncAPIClient |
python | getsentry__sentry | tests/sentry/models/test_rule.py | {
"start": 70,
"end": 2603
} | class ____(TestCase):
def setUp(self) -> None:
self.action_uuid = str(uuid4())
self.action = {
"targetType": "IssueOwners",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": "",
"uuid": self.action_uuid,
}
self.notify_issue_owners_action = [
self.action,
{
"targetType": "IssueOwners",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": "",
"uuid": str(uuid4()),
},
]
self.rule = self.create_project_rule(
project=self.project, action_data=self.notify_issue_owners_action
)
def test_simple(self) -> None:
result = self.rule.get_rule_action_details_by_uuid(self.action_uuid)
assert result == self.action
def test_returns_none(self) -> None:
result = self.rule.get_rule_action_details_by_uuid(str(uuid4()))
assert result is None
def test_when_no_actions_are_in_rule(self) -> None:
rule = self.create_project_rule(
project=self.project,
action_data=[],
)
result = rule.get_rule_action_details_by_uuid(str(uuid4()))
assert result is None
def test_when_actions_have_missing_uuid_key(self) -> None:
rule = self.create_project_rule(
project=self.project,
action_data=[
{
"targetType": "IssueOwners",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": "",
}
],
)
result = rule.get_rule_action_details_by_uuid(str(uuid4()))
assert result is None
def test_when_action_has_missing_uuid_value(self) -> None:
rule = self.create_project_rule(
project=self.project,
action_data=[
{
"targetType": "IssueOwners",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": "",
"uuid": "",
}
],
)
result = rule.get_rule_action_details_by_uuid(str(uuid4()))
assert result is None
| TestRule_GetRuleActionDetailsByUuid |
python | openai__gym | gym/error.py | {
"start": 4453,
"end": 4929
} | class ____(Exception):
"""Raised when `reset`, or `step` is called asynchronously (e.g. with `reset_async`, or `step_async` respectively), and `reset_async`, or `step_async` (respectively) is called again (without a complete call to `reset_wait`, or `step_wait` respectively)."""
def __init__(self, message: str, name: str):
"""Initialises the exception with name attributes."""
super().__init__(message)
self.name = name
| AlreadyPendingCallError |
python | Farama-Foundation__Gymnasium | gymnasium/core.py | {
"start": 26706,
"end": 27979
} | class ____(Wrapper[ObsType, ActType, ObsType, ActType]):
"""Superclass of wrappers that can modify the returning reward from a step.
If you would like to apply a function to the reward that is returned by the base environment before
passing it to learning code, you can simply inherit from :class:`RewardWrapper` and overwrite the method
:meth:`reward` to implement that transformation.
"""
def __init__(self, env: Env[ObsType, ActType]):
"""Constructor for the Reward wrapper.
Args:
env: Environment to be wrapped.
"""
Wrapper.__init__(self, env)
def step(
self, action: ActType
) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:
"""Modifies the :attr:`env` :meth:`step` reward using :meth:`self.reward`."""
observation, reward, terminated, truncated, info = self.env.step(action)
return observation, self.reward(reward), terminated, truncated, info
def reward(self, reward: SupportsFloat) -> SupportsFloat:
"""Returns a modified environment ``reward``.
Args:
reward: The :attr:`env` :meth:`step` reward
Returns:
The modified `reward`
"""
raise NotImplementedError
| RewardWrapper |
python | pennersr__django-allauth | allauth/socialaccount/providers/openid_connect/provider.py | {
"start": 1047,
"end": 4239
} | class ____(OAuth2Provider):
id = "openid_connect"
name = "OpenID Connect"
account_class = OpenIDConnectProviderAccount
oauth2_adapter_class = OpenIDConnectOAuth2Adapter
supports_token_authentication = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = self.app.name
@property
def server_url(self):
url = self.app.settings["server_url"]
return self.wk_server_url(url)
def wk_server_url(self, url):
well_known_uri = "/.well-known/openid-configuration"
if "/.well-known/" not in url:
url += well_known_uri
return url
def get_login_url(self, request, **kwargs):
url = reverse(
self.app.provider + "_login", kwargs={"provider_id": self.app.provider_id}
)
if kwargs:
url = url + "?" + urlencode(kwargs)
return url
def get_callback_url(self):
return reverse(
self.app.provider + "_callback",
kwargs={"provider_id": self.app.provider_id},
)
@property
def token_auth_method(self):
return self.app.settings.get("token_auth_method")
def get_default_scope(self):
return ["openid", "profile", "email"]
def extract_uid(self, data):
data = _pick_data(data)
return str(data["sub"])
def extract_common_fields(self, data):
data = _pick_data(data)
return dict(
email=data.get("email"),
username=data.get("preferred_username"),
name=data.get("name"),
user_id=data.get("user_id"),
picture=data.get("picture"),
last_name=data.get("family_name"),
first_name=data.get("given_name"),
)
def extract_email_addresses(self, data):
data = _pick_data(data)
addresses = []
email = data.get("email")
if email:
addresses.append(
EmailAddress(
email=email,
verified=data.get("email_verified", False),
primary=True,
)
)
return addresses
def get_oauth2_adapter(self, request):
return self.oauth2_adapter_class(request, self.app.provider_id)
def verify_token(self, request, token):
id_token = token.get("id_token")
if not id_token:
raise get_adapter().validation_error("invalid_token")
try:
oauth2_adapter = self.get_oauth2_adapter(request)
openid_config = oauth2_adapter.openid_config
identity_data = jwtkit.verify_and_decode(
credential=id_token,
keys_url=openid_config["jwks_uri"],
issuer=openid_config["issuer"],
audience=[self.app.client_id],
lookup_kid=jwtkit.lookup_kid_jwk,
)
except (OAuth2Error, requests.RequestException) as e:
raise get_adapter().validation_error("invalid_token") from e
login = self.sociallogin_from_response(request, identity_data)
return login
provider_classes = [OpenIDConnectProvider]
| OpenIDConnectProvider |
python | pytorch__pytorch | test/dynamo/test_functions.py | {
"start": 118142,
"end": 118342
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.m = ModuleWithDefaultTensorArgsMethod()
def forward(self):
return self.m()
| WrapperModule |
python | donnemartin__interactive-coding-challenges | arrays_strings/fizz_buzz/test_fizz_buzz.py | {
"start": 18,
"end": 761
} | class ____(unittest.TestCase):
def test_fizz_buzz(self):
solution = Solution()
self.assertRaises(TypeError, solution.fizz_buzz, None)
self.assertRaises(ValueError, solution.fizz_buzz, 0)
expected = [
'1',
'2',
'Fizz',
'4',
'Buzz',
'Fizz',
'7',
'8',
'Fizz',
'Buzz',
'11',
'Fizz',
'13',
'14',
'FizzBuzz'
]
self.assertEqual(solution.fizz_buzz(15), expected)
print('Success: test_fizz_buzz')
def main():
test = TestFizzBuzz()
test.test_fizz_buzz()
if __name__ == '__main__':
main()
| TestFizzBuzz |
python | pytorch__pytorch | torch/cuda/graphs.py | {
"start": 7897,
"end": 28121
} | class ____:
r"""Context-manager that captures CUDA work into a :class:`torch.cuda.CUDAGraph` object for later replay.
See :ref:`CUDA Graphs <cuda-graph-semantics>` for a general introduction,
detailed use, and constraints.
Arguments:
cuda_graph (torch.cuda.CUDAGraph): Graph object used for capture.
pool (optional): Opaque token (returned by a call to :func:`~torch.cuda.graph_pool_handle()` or
:meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) hinting this graph's capture
may share memory from the specified pool. See :ref:`Graph memory management<graph-memory-management>`.
stream (torch.cuda.Stream, optional): If supplied, will be set as the current stream in the context.
If not supplied, ``graph`` sets its own internal side stream as the current stream in the context.
capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream.
Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc,
may be unsafe. "global" will error on actions in other threads, "thread_local" will only error for
actions in the current thread, and "relaxed" will not error on actions. Do NOT change this setting
unless you're familiar with `cudaStreamCaptureMode <https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85>`_
.. note::
For effective memory sharing, if you pass a ``pool`` used by a previous capture and the previous capture
used an explicit ``stream`` argument, you should pass the same ``stream`` argument to this capture.
.. warning::
This API is in beta and may change in future releases.
.. _cudaStreamCaptureMode:
https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85
""" # noqa: B950
default_capture_stream: Optional[torch.cuda.Stream] = None
def __init__(
self,
cuda_graph: CUDAGraph,
pool: Optional[_POOL_HANDLE] = None,
stream: Optional[torch.cuda.Stream] = None,
capture_error_mode: str = "global",
):
# Lazy-init of default_capture_stream helps avoid circular-import errors.
# Not thread safe, but graphs already have the general (explicitly documented)
# restriction that only one capture may be underway at a time in the process.
if self.__class__.default_capture_stream is None:
self.__class__.default_capture_stream = torch.cuda.Stream()
self.pool: Union[tuple[()], tuple[_POOL_HANDLE]] = (
() if pool is None else (pool,)
)
self.capture_stream = (
stream if stream is not None else self.__class__.default_capture_stream
)
assert self.capture_stream is not None
self.stream_ctx = torch.cuda.stream(self.capture_stream)
self.cuda_graph = cuda_graph
self.capture_error_mode = capture_error_mode
def __enter__(self) -> None:
# Free as much memory as we can for the graph
torch.cuda.synchronize()
if torch.compiler.config.force_cudagraph_gc:
# Originally we unconditionally garbage collected here. On one hand
# that's nice because we have a chance to collect more memory, but
# on the other hand it is REALLY expensive, especially for doing
# multiple cudagraph captures in a row. In theory it will only help
# when a dead python cycle is holding onto CUDA memory.
gc.collect()
torch.cuda.empty_cache()
# Stackoverflow seems comfortable with this pattern
# https://stackoverflow.com/questions/26635684/calling-enter-and-exit-manually#39172487
self.stream_ctx.__enter__()
self.cuda_graph.capture_begin(
# type: ignore[misc]
*self.pool,
# pyrefly: ignore [bad-keyword-argument]
capture_error_mode=self.capture_error_mode,
)
def __exit__(self, *args: object) -> None:
self.cuda_graph.capture_end()
self.stream_ctx.__exit__(*args)
# returning None should propagate exceptions from either capture_end or stream_ctx.__exit__()
_ModuleOrCallable: TypeAlias = Union["torch.nn.Module", Callable[..., object]]
@overload
def make_graphed_callables(
callables: _ModuleOrCallable,
sample_args: tuple[Tensor, ...],
num_warmup_iters: int = 3,
allow_unused_input: bool = False,
pool: Optional[_POOL_HANDLE] = None,
) -> _ModuleOrCallable: ...
@overload
def make_graphed_callables(
callables: tuple[_ModuleOrCallable, ...],
sample_args: tuple[tuple[Tensor, ...], ...],
num_warmup_iters: int = 3,
allow_unused_input: bool = False,
pool: Optional[_POOL_HANDLE] = None,
) -> tuple[_ModuleOrCallable, ...]: ...
def make_graphed_callables(
callables: Union[_ModuleOrCallable, tuple[_ModuleOrCallable, ...]],
sample_args: Union[tuple[Tensor, ...], tuple[tuple[Tensor, ...], ...]],
num_warmup_iters: int = 3,
allow_unused_input: bool = False,
pool: Optional[_POOL_HANDLE] = None,
) -> Union[_ModuleOrCallable, tuple[_ModuleOrCallable, ...]]:
r"""Accept callables (functions or :class:`nn.Module<torch.nn.Module>`\ s) and returns graphed versions.
Each graphed callable's forward pass runs its source callable's
forward CUDA work as a CUDA graph inside a single autograd node.
The graphed callable's forward pass also appends
a backward node to the autograd graph. During backward, this node runs the
callable's backward work as a CUDA graph.
Therefore, each graphed callable should be a drop-in replacement for its source callable
in an autograd-enabled training loop.
See :ref:`Partial-network capture<partial-network-capture>` for detailed use and constraints.
If you pass a tuple of several callables, their captures will use the same memory pool.
See :ref:`Graph memory management<graph-memory-management>` for when this is appropriate.
Arguments:
callables (torch.nn.Module or Python function, or tuple of these): Callable or callables to graph.
See :ref:`Graph memory management<graph-memory-management>` for when passing a tuple of callables
is appropriate. If you pass a tuple of callables, their order in the tuple must be the same order
they'll run in the live workload.
sample_args (tuple of Tensors, or tuple of tuples of Tensors): Samples args for each callable.
If a single callable was passed, ``sample_args`` must be a single tuple of argument Tensors.
If a tuple of callables was passed, ``sample_args`` must be tuple of tuples of argument Tensors.
num_warmup_iters (int): The number of warmup iterations. Currently, ``DataDistributedParallel`` needs
11 iterations for warm up. Default: ``3``.
allow_unused_input (bool): If False, specifying inputs that were not used when computing outputs
(and therefore their grad is always zero) is an error. Defaults to False.
pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or
:meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) that hints this graph may share memory
with the indicated pool. See :ref:`Graph memory management<graph-memory-management>`.
.. note::
The ``requires_grad`` state of each Tensor in ``sample_args`` must match the state
that's expected for the corresponding real input in the training loop.
.. warning::
This API is in beta and may change in future releases.
.. warning::
``sample_args`` for each callable must contain only Tensors. Other types are not allowed.
.. warning::
Returned callables do not support higher order differentiation (e.g., double backward).
.. warning::
In any :class:`~torch.nn.Module` passed to :func:`~make_graphed_callables`, only parameters
may be trainable. Buffers must have ``requires_grad=False``.
.. warning::
After you pass a :class:`torch.nn.Module` through :func:`~make_graphed_callables`,
you may not add or remove any of that Module's parameters or buffers.
.. warning::
:class:`torch.nn.Module`\s passed to :func:`~torch.cuda.make_graphed_callables` must not have module hooks
registered on them at the time they are passed. However, registering hooks on modules *after* passing them
through :func:`~torch.cuda.make_graphed_callables` is allowed.
.. warning::
When running a graphed callable, you must pass its arguments in the same order and format
they appeared in that callable's ``sample_args``.
.. warning::
The automatic mixed precision is supported in :func:`~torch.cuda.make_graphed_callables` only with disabled
caching. The context manager `torch.cuda.amp.autocast()` must have `cache_enabled=False`.
"""
if torch.is_autocast_enabled() and torch.is_autocast_cache_enabled():
raise RuntimeError(
"make_graphed_callables does not support the autocast caching. Please set `cache_enabled=False`."
)
just_one_callable = False
_sample_args: tuple[tuple[Tensor, ...], ...]
if not isinstance(callables, tuple):
just_one_callable = True
callables = (callables,)
_sample_args = (typing.cast(tuple[Tensor, ...], sample_args),)
else:
_sample_args = typing.cast(tuple[tuple[Tensor, ...], ...], sample_args)
flatten_sample_args = []
for c, args in zip(callables, _sample_args):
if isinstance(c, torch.nn.Module):
assert (
len(c._backward_hooks) == 0
and len(c._forward_hooks) == 0
and len(c._forward_pre_hooks) == 0
), (
"Modules must not have hooks registered at the time they are passed. However, registering hooks "
+ "on modules after passing them through make_graphed_callables is allowed."
)
assert all(b.requires_grad is False for b in c.buffers()), (
"In any :class:`~torch.nn.Module` passed to "
+ ":func:`~make_graphed_callables`, only parameters may be trainable. All buffers must have "
+ "``requires_grad=False``."
)
flatten_arg = torch.utils._pytree.arg_tree_leaves(*args)
flatten_sample_args.append(tuple(flatten_arg))
assert all(isinstance(arg, torch.Tensor) for arg in flatten_arg), (
"In the beta API, sample_args "
+ "for each callable must contain only Tensors. Other types are not allowed."
)
# If a callable is an nn.Module, its graph's full input surface is the args the user explicitly
# passes to forward (ie, its sample_args) AND the module's parameter attributes.
per_callable_len_user_args = [len(args) for args in flatten_sample_args]
per_callable_module_params = [
tuple(c.parameters()) if isinstance(c, torch.nn.Module) else ()
for c in callables
]
per_callable_static_input_surfaces = [
flatten_sample_args[i] + per_callable_module_params[i]
for i in range(len(callables))
]
fwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))]
bwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))]
mempool = graph_pool_handle() if pool is None else pool
# Warmup
# Hopefully prevents cudnn benchmarking and other lazy-initialization cuda work
# from ending up in any captures.
torch.cuda.synchronize()
with torch.cuda.stream(torch.cuda.Stream()):
for func, args, static_input_surface in zip(
callables, _sample_args, per_callable_static_input_surfaces
):
grad_inputs, outputs, outputs_grad = None, None, None
for _ in range(num_warmup_iters):
outputs = torch.utils._pytree.tree_leaves(func(*args))
outputs_grad = tuple(o for o in outputs if o.requires_grad)
if len(outputs_grad) > 0:
grad_inputs = torch.autograd.grad(
outputs=outputs_grad,
inputs=tuple(
i for i in static_input_surface if i.requires_grad
),
grad_outputs=tuple(
torch.empty_like(o) for o in outputs if o.requires_grad
),
only_inputs=True,
allow_unused=allow_unused_input,
)
for v in [outputs, outputs_grad, grad_inputs]:
del v
torch.cuda.synchronize()
# All captures here share a mempool. To avoid replays corrupting each other's memory,
# the safest approach is to capture all passes in the same order they'll run:
# fwd 1, fwd 2, ... fwd N, then bwd N, bwd N-1, ... bwd 1.
# Capture forward graphs
per_callable_static_outputs = []
per_callable_output_unflatten_spec = []
for func, args, fwd_graph in zip(callables, _sample_args, fwd_graphs):
with torch.cuda.graph(fwd_graph, pool=mempool):
func_outputs = func(*args)
flatten_outputs, spec = torch.utils._pytree.tree_flatten(func_outputs)
per_callable_static_outputs.append(tuple(flatten_outputs))
per_callable_output_unflatten_spec.append(spec)
# Capture backward graphs in reverse order
per_callable_static_grad_outputs = []
per_callable_static_grad_inputs = []
for static_input_surface, static_outputs, bwd_graph in zip(
reversed(per_callable_static_input_surfaces),
reversed(per_callable_static_outputs),
reversed(bwd_graphs),
):
# For now, assumes all static_outputs require grad
# assert all(o.requires_grad for o in static_outputs), "Outputs of graphed callables must require grad."
static_grad_outputs = tuple(
torch.empty_like(o) if o.requires_grad else None for o in static_outputs
)
outputs_grad = tuple(o for o in static_outputs if o.requires_grad)
grad_inputs = None
if len(outputs_grad) > 0:
with torch.cuda.graph(bwd_graph, pool=mempool):
grad_inputs = torch.autograd.grad(
outputs=outputs_grad,
inputs=tuple(i for i in static_input_surface if i.requires_grad),
grad_outputs=tuple(o for o in static_grad_outputs if o is not None),
only_inputs=True,
allow_unused=allow_unused_input,
)
# Constructs a tuple suitable for returning from Graphed.backward:
# Pads out the actually-needed grads with Nones in gradient slots for inputs that don't require grad.
# I couldn't think of a slick one-liner for this pattern.
static_grad_inputs = []
grad_idx = 0
for arg in static_input_surface:
if arg.requires_grad and grad_inputs is not None:
static_grad_inputs.append(grad_inputs[grad_idx])
grad_idx += 1
else:
static_grad_inputs.append(None) # type: ignore[arg-type]
static_grad_inputs = tuple(static_grad_inputs) # type: ignore[assignment]
per_callable_static_grad_outputs.append(static_grad_outputs)
per_callable_static_grad_inputs.append(static_grad_inputs)
# Reverses the most recent two lists
per_callable_static_grad_outputs.reverse()
per_callable_static_grad_inputs.reverse()
# Now for every per_callable list, per_callable_*[i] holds the stuff for the ith callable.
def make_graphed_autograd_function(
fwd_graph: CUDAGraph,
bwd_graph: CUDAGraph,
module_params: tuple[torch.nn.Parameter, ...],
len_user_args: int,
output_unflatten_spec: torch.utils._pytree.TreeSpec,
static_input_surface: tuple[Tensor, ...],
static_outputs: tuple[Tensor, ...],
static_grad_outputs: tuple[Optional[Tensor], ...],
static_grad_inputs: tuple[Tensor, ...],
) -> Callable[..., object]:
class Graphed(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx: object, *inputs: Tensor) -> tuple[Tensor, ...]:
# At this stage, only the user args may (potentially) be new tensors.
for i in range(len_user_args):
if static_input_surface[i].data_ptr() != inputs[i].data_ptr():
static_input_surface[i].copy_(inputs[i])
fwd_graph.replay()
assert isinstance(static_outputs, tuple)
return tuple(o.detach() for o in static_outputs)
@staticmethod
@torch.autograd.function.once_differentiable
# pyrefly: ignore [bad-override]
def backward(ctx: object, *grads: Tensor) -> tuple[Tensor, ...]:
assert len(grads) == len(static_grad_outputs)
for g, grad in zip(static_grad_outputs, grads):
if g is not None:
# don't copy if autograd gods have been kind and the
# incoming grad is already in the right place
if g.data_ptr() != grad.data_ptr():
g.copy_(grad)
bwd_graph.replay()
# Input args that didn't require grad expect a None gradient.
assert isinstance(static_grad_inputs, tuple)
return tuple(
# pyrefly: ignore [bad-argument-type]
b.detach() if b is not None else b
for b in static_grad_inputs
)
def functionalized(*user_args: object) -> object:
# Runs the autograd function with inputs == all inputs to the graph that might require grad
# (explicit user args + module parameters)
# Assumes module params didn't change since capture.
flatten_user_args = torch.utils._pytree.arg_tree_leaves(*user_args)
out = Graphed.apply(*(tuple(flatten_user_args) + module_params))
return torch.utils._pytree.tree_unflatten(out, output_unflatten_spec)
return functionalized
# Put together the final graphed callables
ret: list[_ModuleOrCallable] = []
for i, func in enumerate(callables):
graphed = make_graphed_autograd_function(
fwd_graphs[i],
bwd_graphs[i],
per_callable_module_params[i],
per_callable_len_user_args[i],
per_callable_output_unflatten_spec[i],
per_callable_static_input_surfaces[i],
per_callable_static_outputs[i],
per_callable_static_grad_outputs[i],
per_callable_static_grad_inputs[i],
)
if isinstance(func, torch.nn.Module):
def make_graphed_forward(
func: torch.nn.Module,
graph_training_state: bool,
graphed: Callable[_P, _R],
orig_fwd: Callable[_P, _R],
) -> Callable[_P, _R]:
def new_fwd(*user_args: _P.args, **user_kwargs: _P.kwargs) -> _R:
# If the module's training-or-eval state matches what we graphed,
# run the graph, otherwise run the original forward method
if func.training == graph_training_state:
return graphed(*user_args, **user_kwargs)
else:
return orig_fwd(*user_args, **user_kwargs)
return new_fwd
func.forward = make_graphed_forward(
func, func.training, graphed, func.forward
)
ret.append(func)
else:
ret.append(graphed)
if just_one_callable:
return ret[0]
return tuple(ret)
| graph |
python | ray-project__ray | doc/source/serve/doc_code/load_shedding.py | {
"start": 637,
"end": 1671
} | class ____:
async def do_request(self) -> int:
async with aiohttp.ClientSession("http://localhost:8000/") as session:
return (await session.get("/")).status
r = Requester.remote()
serve.run(SlowDeployment.bind())
# Send 4 requests first.
# 2 of these will be sent to the replica. These requests take a few seconds to execute.
first_refs = [r.do_request.remote() for _ in range(2)]
_, pending = ray.wait(first_refs, timeout=1)
assert len(pending) == 2
# 2 will be queued in the proxy.
queued_refs = [r.do_request.remote() for _ in range(2)]
_, pending = ray.wait(queued_refs, timeout=0.1)
assert len(pending) == 2
# Send an additional 5 requests. These will be rejected immediately because
# the replica and the proxy queue are already full.
for status_code in ray.get([r.do_request.remote() for _ in range(5)]):
assert status_code == 503
# The initial requests will finish successfully.
for ref in first_refs:
print(f"Request finished with status code {ray.get(ref)}.")
# __client_test_end__
| Requester |
python | getsentry__sentry | src/sentry/integrations/utils/atlassian_connect.py | {
"start": 571,
"end": 6175
} | class ____(Exception):
pass
def get_query_hash(
uri: str, method: str, query_params: Mapping[str, str | Sequence[str]] | None = None
) -> str:
# see
# https://developer.atlassian.com/static/connect/docs/latest/concepts/understanding-jwt.html#qsh
uri = uri.rstrip("/")
method = method.upper()
if query_params is None:
query_params = {}
sorted_query = []
for k, v in sorted(query_params.items()):
# don't include jwt query param
if k != "jwt":
if isinstance(v, str):
param_val = percent_encode(v)
else:
param_val = ",".join(percent_encode(val) for val in v)
sorted_query.append(f"{percent_encode(k)}={param_val}")
query_string = "{}&{}&{}".format(method, uri, "&".join(sorted_query))
return hashlib.sha256(query_string.encode("utf8")).hexdigest()
def get_token(request: HttpRequest) -> str:
try:
# request.headers = {"Authorization": "JWT abc123def456"}
auth_header: str = request.META["HTTP_AUTHORIZATION"]
return auth_header.split(" ", 1)[1]
except (KeyError, IndexError):
raise AtlassianConnectValidationError("Missing/Invalid authorization header")
def get_integration_from_jwt(
token: str | None,
path: str,
provider: str,
query_params: Mapping[str, str] | None,
method: str = "GET",
) -> RpcIntegration:
# https://developer.atlassian.com/static/connect/docs/latest/concepts/authentication.html
# Extract the JWT token from the request's jwt query
# parameter or the authorization header.
if token is None:
raise AtlassianConnectValidationError("No token parameter")
# Decode the JWT token, without verification. This gives
# you a header JSON object, a claims JSON object, and a signature.
claims = jwt.peek_claims(token)
headers = jwt.peek_header(token)
# Extract the issuer ('iss') claim from the decoded, unverified
# claims object. This is the clientKey for the tenant - an identifier
# for the Atlassian application making the call
issuer = claims.get("iss")
# Look up the sharedSecret for the clientKey, as stored
# by the add-on during the installation handshake
integration = integration_service.get_integration(provider=provider, external_id=issuer)
if not integration:
raise AtlassianConnectValidationError("No integration found")
# Verify the signature with the sharedSecret and the algorithm specified in the header's
# alg field. We only need the token + shared secret and do not want to provide an
# audience to the JWT validation that is require to match. Bitbucket does give us an
# audience claim however, so disable verification of this.
key_id = headers.get("kid")
try:
# We only authenticate asymmetrically (through the CDN) if the event provides a key ID
# in its JWT headers. This should only appear for install/uninstall events.
decoded_claims = (
authenticate_asymmetric_jwt(token, key_id)
if key_id
else jwt.decode(token, integration.metadata["shared_secret"], audience=False)
)
except InvalidSignatureError as e:
raise AtlassianConnectValidationError("Signature is invalid") from e
except ExpiredSignatureError as e:
raise AtlassianConnectValidationError("Signature is expired") from e
verify_claims(decoded_claims, path, query_params, method)
return integration
def verify_claims(
    claims: Mapping[str, str],
    path: str,
    query_params: Mapping[str, str] | None,
    method: str,
) -> None:
    """Validate the qsh (query string hash) claim of an already-verified JWT.

    Recomputes the query hash from the request's path, HTTP method and query
    parameters and requires it to match the token's ``qsh`` claim, proving the
    query was not tampered with in transit.

    Raises:
        AtlassianConnectValidationError: if the computed hash differs from the claim.
    """
    expected_hash = get_query_hash(path, method, query_params)
    if claims["qsh"] != expected_hash:
        raise AtlassianConnectValidationError("Query hash mismatch")
def authenticate_asymmetric_jwt(token: str | None, key_id: str) -> dict[str, str]:
    """
    Allows for Atlassian Connect installation lifecycle security improvements (i.e. verified senders)
    See: https://community.developer.atlassian.com/t/action-required-atlassian-connect-installation-lifecycle-security-improvements/49046

    Fetches the public key identified by ``key_id`` from Atlassian's CDN and
    verifies the token's signature asymmetrically against it.

    Raises:
        AtlassianConnectValidationError: if the token is missing, the public key
            cannot be fetched, or the decoded claims are empty.
    """
    if token is None:
        raise AtlassianConnectValidationError("No token parameter")
    headers = jwt.peek_header(token)
    # requests has no default timeout; without one a slow or unreachable CDN
    # would hang request handling indefinitely.
    key_response = requests.get(
        f"https://connect-install-keys.atlassian.com/{key_id}", timeout=10
    )
    if key_response.status_code != 200:
        # Surface key-fetch failures explicitly instead of letting jwt.decode
        # fail later with a confusing error about an invalid key.
        raise AtlassianConnectValidationError("Unable to fetch public key for asymmetric JWT")
    public_key = key_response.content.decode("utf-8").strip()
    decoded_claims = jwt.decode(
        token, public_key, audience=absolute_uri(), algorithms=[headers.get("alg")]
    )
    if not decoded_claims:
        raise AtlassianConnectValidationError("Unable to verify asymmetric installation JWT")
    return decoded_claims
def get_integration_from_request(request: HttpRequest, provider: str) -> RpcIntegration:
    """Resolve the integration for a request carrying its JWT in the ``jwt`` query parameter."""
    params = request.GET
    return get_integration_from_jwt(
        token=params.get("jwt"),
        path=request.path,
        provider=provider,
        query_params=params,
    )
@control_silo_function
def parse_integration_from_request(request: HttpRequest, provider: str) -> Integration | None:
    """Resolve the ORM ``Integration`` for a JWT-authenticated request.

    The token is taken from the ``Authorization`` header when present,
    otherwise from the ``jwt`` query parameter. Returns ``None`` when no
    matching Integration row exists.
    """
    # Prefer the Authorization header token; fall back to the query string.
    if request.META.get("HTTP_AUTHORIZATION") is not None:
        token = get_token(request=request)
    else:
        token = request.GET.get("jwt")
    rpc_integration = get_integration_from_jwt(
        token=token,
        path=request.path,
        provider=provider,
        query_params=request.GET,
        method=request.method or "POST",
    )
    return Integration.objects.filter(id=rpc_integration.id).first()
| AtlassianConnectValidationError |
python | docker__docker-py | docker/types/services.py | {
"start": 32722,
"end": 33328
} | class ____(dict):
"""
Network attachment options for a service.
Args:
target (str): The target network for attachment.
Can be a network name or ID.
aliases (:py:class:`list`): A list of discoverable alternate names
for the service.
options (:py:class:`dict`): Driver attachment options for the
network target.
"""
def __init__(self, target, aliases=None, options=None):
self['Target'] = target
self['Aliases'] = aliases
self['DriverOpts'] = options
| NetworkAttachmentConfig |
python | huggingface__transformers | src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py | {
"start": 37930,
"end": 50217
class ____(KyutaiSpeechToTextPreTrainedModel):
    """Decoder-only transformer backbone: token embeddings -> stacked decoder
    layers -> final RMS norm, with optional KV caching for generation."""

    def __init__(self, config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = KyutaiSpeechToTextEmbeddings(config)
        self.layers = nn.ModuleList(
            [
                KyutaiSpeechToTextDecoderLayer(config, layer_idx, use_flexible_linear=False)
                for layer_idx in range(config.num_hidden_layers)
            ]
        )
        self.norm = KyutaiSpeechToTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[tuple, BaseModelOutputWithPast]:
        # Fall back to config defaults for any output option not given explicitly.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        if cache_position is None:
            # Positions continue from however many tokens are already cached.
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)
        causal_mask = None
        if attention_mask is not None:
            causal_mask = self._update_causal_mask(
                attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
            )
        # embed positions
        hidden_states = inputs_embeds
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
        hidden_states = self.norm(hidden_states)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        if not return_dict:
            return tuple(
                v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None
            )
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        # Builds (or passes through / drops) the attention mask in the form the
        # configured attention implementation expects.
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and past_key_values is not None:
                is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
                if is_padding_right:
                    raise ValueError(
                        "You are attempting to perform batched generation with padding_side='right'"
                        " this may lead to unexpected behaviour for Flash Attention version of KyutaiSpeechToText. Make sure to "
                        " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
                    )
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask
        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)
        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                sliding_window=self.config.sliding_window,
                is_training=self.training,
            ):
                return None
        dtype = input_tensor.dtype
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        # StaticCache
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        # DynamicCache or no cache
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )
        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
            config=self.config,
            past_key_values=past_key_values,
        )
        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
        return causal_mask
    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        config: KyutaiSpeechToTextConfig,
        past_key_values: Cache,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
            config (`KyutaiSpeechToTextConfig`):
                The model's configuration class
            past_key_values (`Cache`):
                The cache class that is being used currently to generate
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            # True where a key position is strictly in the future of the query position.
            diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
                -1, 1
            )
            text_config = config.get_text_config()
            if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
                # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
                # the check is needed to verify is current checkpoint was trained with sliding window or not
                is_static_sliding_cache = isinstance(past_key_values, StaticCache) and all(past_key_values.is_sliding)
                if not is_static_sliding_cache or sequence_length > target_length:
                    sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (
                        cache_position.reshape(-1, 1) - text_config.sliding_window
                    )
                    diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
            causal_mask *= diagonal_attend_mask
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                if attention_mask.shape[-1] > target_length:
                    attention_mask = attention_mask[:, :target_length]
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )
        return causal_mask
@auto_docstring
| KyutaiSpeechToTextModel |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/conv2d_test.py | {
"start": 3935,
"end": 4663
class ____(trt_test.TfTrtIntegrationTestBase):
  """Testing conversion of Conv2D (data_format=NHWC) in TF-TRT conversion."""

  def GraphFn(self, inp):
    np.random.seed(1234)
    # "channels_last" is the NHWC layout; input shape below is [N, H, W, C].
    return build_graph(
        inp=inp,
        dtype=dtypes.float32,
        num_filters=5,
        data_format="channels_last",
        kernel_sizes=[(3, 3), (3, 2)],
        dilation_rates=[(1, 1), (2, 3)])

  def GetParams(self):
    # TODO(aaroey): test graph with different dtypes.
    # Input [13, 7, 11, 3] -> output [13, 7, 11, 5]: only the channel dim
    # changes (3 -> num_filters=5) since the convolutions pad to "same" size.
    return self.BuildParams(self.GraphFn, dtypes.float32, [[13, 7, 11, 3]],
                            [[13, 7, 11, 5]])

  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    return ["TRTEngineOp_000"]
| Conv2DNHWCTest |
python | pytorch__pytorch | torch/utils/data/datapipes/datapipe.py | {
"start": 16416,
"end": 17117
class ____(_DataPipeSerializationWrapper, IterDataPipe):
    """Serialization wrapper for an ``IterDataPipe`` that is itself iterable.

    ``__iter__`` must be called before ``__next__``; it (re)starts iteration
    over the wrapped datapipe.
    """

    def __init__(self, datapipe: IterDataPipe[_T_co]) -> None:
        super().__init__(datapipe)
        # Live iterator over the wrapped datapipe; None until __iter__ runs.
        # pyrefly: ignore [invalid-type-var]
        self._datapipe_iter: Iterator[_T_co] | None = None

    def __iter__(self) -> "_IterDataPipeSerializationWrapper":
        self._datapipe_iter = iter(self._datapipe)
        return self

    def __next__(self) -> _T_co:  # type: ignore[type-var]
        current_iter = self._datapipe_iter
        if current_iter is None:
            raise AssertionError(
                "Iterator has not been initialized; call __iter__() before __next__()"
            )
        return next(current_iter)
| _IterDataPipeSerializationWrapper |
python | matplotlib__matplotlib | galleries/examples/widgets/annotated_cursor.py | {
"start": 877,
"end": 13331
class ____(Cursor):
    """
    A crosshair cursor like `~matplotlib.widgets.Cursor` with a text showing \
    the current coordinates.
    For the cursor to remain responsive you must keep a reference to it.
    The data of the axis specified as *dataaxis* must be in ascending
    order. Otherwise, the `numpy.searchsorted` call might fail and the text
    disappears. You can satisfy the requirement by sorting the data you plot.
    Usually the data is already sorted (if it was created e.g. using
    `numpy.linspace`), but e.g. scatter plots might cause this problem.
    The cursor sticks to the plotted line.
    Parameters
    ----------
    line : `matplotlib.lines.Line2D`
        The plot line from which the data coordinates are displayed.
    numberformat : `python format string <https://docs.python.org/3/\
library/string.html#formatstrings>`_, optional, default: "{0:.4g};{1:.4g}"
        The displayed text is created by calling *format()* on this string
        with the two coordinates.
    offset : (float, float) default: (5, 5)
        The offset in display (pixel) coordinates of the text position
        relative to the cross-hair.
    dataaxis : {"x", "y"}, optional, default: "x"
        If "x" is specified, the vertical cursor line sticks to the mouse
        pointer. The horizontal cursor line sticks to *line*
        at that x value. The text shows the data coordinates of *line*
        at the pointed x value. If you specify "y", it works in the opposite
        manner. But: For the "y" value, where the mouse points to, there might
        be multiple matching x values, if the plotted function is not biunique.
        Cursor and text coordinate will always refer to only one x value.
        So if you use the parameter value "y", ensure that your function is
        biunique.
    Other Parameters
    ----------------
    textprops : `matplotlib.text` properties as dictionary
        Specifies the appearance of the rendered text object.
    **cursorargs : `matplotlib.widgets.Cursor` properties
        Arguments passed to the internal `~matplotlib.widgets.Cursor` instance.
        The `matplotlib.axes.Axes` argument is mandatory! The parameter
        *useblit* can be set to *True* in order to achieve faster rendering.
    """
    def __init__(self, line, numberformat="{0:.4g};{1:.4g}", offset=(5, 5),
                 dataaxis='x', textprops=None, **cursorargs):
        if textprops is None:
            textprops = {}
        # The line object, for which the coordinates are displayed
        self.line = line
        # The format string, on which .format() is called for creating the text
        self.numberformat = numberformat
        # Text position offset
        self.offset = np.array(offset)
        # The axis in which the cursor position is looked up
        self.dataaxis = dataaxis
        # First call baseclass constructor.
        # Draws cursor and remembers background for blitting.
        # Saves ax as class attribute.
        super().__init__(**cursorargs)
        # Default value for position of text.
        self.set_position(self.line.get_xdata()[0], self.line.get_ydata()[0])
        # Create invisible animated text
        self.text = self.ax.text(
            self.ax.get_xbound()[0],
            self.ax.get_ybound()[0],
            "0, 0",
            animated=bool(self.useblit),
            visible=False, **textprops)
        # The position at which the cursor was last drawn
        self.lastdrawnplotpoint = None
    def onmove(self, event):
        """
        Overridden draw callback for cursor. Called when moving the mouse.
        """
        # Leave method under the same conditions as in overridden method
        if self.ignore(event):
            self.lastdrawnplotpoint = None
            return
        if not self.canvas.widgetlock.available(self):
            self.lastdrawnplotpoint = None
            return
        # If the mouse left drawable area, we now make the text invisible.
        # Baseclass will redraw complete canvas after, which makes both text
        # and cursor disappear.
        if event.inaxes != self.ax:
            self.lastdrawnplotpoint = None
            self.text.set_visible(False)
            super().onmove(event)
            return
        # Get the coordinates, which should be displayed as text,
        # if the event coordinates are valid.
        plotpoint = None
        if event.xdata is not None and event.ydata is not None:
            # Get plot point related to current x position.
            # These coordinates are displayed in text.
            plotpoint = self.set_position(event.xdata, event.ydata)
            # Modify event, such that the cursor is displayed on the
            # plotted line, not at the mouse pointer,
            # if the returned plot point is valid
            if plotpoint is not None:
                event.xdata = plotpoint[0]
                event.ydata = plotpoint[1]
        # If the plotpoint is given, compare to last drawn plotpoint and
        # return if they are the same.
        # Skip even the call of the base class, because this would restore the
        # background, draw the cursor lines and would leave us the job to
        # re-draw the text.
        if plotpoint is not None and plotpoint == self.lastdrawnplotpoint:
            return
        # Baseclass redraws canvas and cursor. Due to blitting,
        # the added text is removed in this call, because the
        # background is redrawn.
        super().onmove(event)
        # Check if the display of text is still necessary.
        # If not, just return.
        # This behaviour is also cloned from the base class.
        if not self.get_active() or not self.visible:
            return
        # Draw the widget, if event coordinates are valid.
        if plotpoint is not None:
            # Update position and displayed text.
            # Position: Where the event occurred.
            # Text: Determined by set_position() method earlier
            # Position is transformed to pixel coordinates,
            # an offset is added there and this is transformed back.
            temp = [event.xdata, event.ydata]
            temp = self.ax.transData.transform(temp)
            temp = temp + self.offset
            temp = self.ax.transData.inverted().transform(temp)
            self.text.set_position(temp)
            self.text.set_text(self.numberformat.format(*plotpoint))
            self.text.set_visible(self.visible)
            # Tell base class, that we have drawn something.
            # Baseclass needs to know, that it needs to restore a clean
            # background, if the cursor leaves our figure context.
            self.needclear = True
            # Remember the recently drawn cursor position, so events for the
            # same position (mouse moves slightly between two plot points)
            # can be skipped
            self.lastdrawnplotpoint = plotpoint
        # otherwise, make text invisible
        else:
            self.text.set_visible(False)
        # Draw changes. Cannot use _update method of baseclass,
        # because it would first restore the background, which
        # is done already and is not necessary.
        if self.useblit:
            self.ax.draw_artist(self.text)
            self.canvas.blit(self.ax.bbox)
        else:
            # If blitting is deactivated, the overridden _update call made
            # by the base class immediately returned.
            # We still have to draw the changes.
            self.canvas.draw_idle()
    def set_position(self, xpos, ypos):
        """
        Finds the coordinates, which have to be shown in text.
        The behaviour depends on the *dataaxis* attribute. Function looks
        up the matching plot coordinate for the given mouse position.
        Parameters
        ----------
        xpos : float
            The current x position of the cursor in data coordinates.
            Important if *dataaxis* is set to 'x'.
        ypos : float
            The current y position of the cursor in data coordinates.
            Important if *dataaxis* is set to 'y'.
        Returns
        -------
        ret : {2D array-like, None}
            The coordinates which should be displayed.
            *None* is the fallback value.
        """
        # Get plot line data
        xdata = self.line.get_xdata()
        ydata = self.line.get_ydata()
        # The dataaxis attribute decides, in which axis we look up which cursor
        # coordinate.
        if self.dataaxis == 'x':
            pos = xpos
            data = xdata
            lim = self.ax.get_xlim()
        elif self.dataaxis == 'y':
            pos = ypos
            data = ydata
            lim = self.ax.get_ylim()
        else:
            raise ValueError(f"The data axis specifier {self.dataaxis} should "
                             f"be 'x' or 'y'")
        # If position is valid and in valid plot data range.
        if pos is not None and lim[0] <= pos <= lim[-1]:
            # Find the insertion index of pos in the sorted data vector, i.e.
            # the first value >= pos (not necessarily the numerically closest
            # one). This requires the plotted data to be sorted.
            index = np.searchsorted(data, pos)
            # Return none, if this index is out of range.
            # (searchsorted returns len(data) when pos exceeds all values.)
            if index < 0 or index >= len(data):
                return None
            # Return plot point as tuple.
            return (xdata[index], ydata[index])
        # Return none if there is no good related point for this x position.
        return None
    def clear(self, event):
        """
        Overridden clear callback for cursor, called before drawing the figure.
        """
        # The base class saves the clean background for blitting.
        # Text and cursor are invisible,
        # until the first mouse move event occurs.
        super().clear(event)
        if self.ignore(event):
            return
        self.text.set_visible(False)
    def _update(self):
        """
        Overridden method for either blitting or drawing the widget canvas.
        Passes call to base class if blitting is activated, only.
        In other cases, one draw_idle call is enough, which is placed
        explicitly in this class (see *onmove()*).
        In that case, `~matplotlib.widgets.Cursor` is not supposed to draw
        something using this method.
        """
        if self.useblit:
            super()._update()
# Demo 1: cursor tracks the mouse's x position along a parabola.
fig, ax = plt.subplots(figsize=(8, 6))
ax.set_title("Cursor Tracking x Position")
x = np.linspace(-5, 5, 1000)
y = x**2
line, = ax.plot(x, y)
ax.set_xlim(-5, 5)
ax.set_ylim(0, 25)
# A minimum call
# Set useblit=True on most backends for enhanced performance
# and pass the ax parameter to the Cursor base class.
# cursor = AnnotatedCursor(line=lin[0], ax=ax, useblit=True)
# A more advanced call. Properties for text and lines are passed.
# Watch the passed color names and the color of cursor line and text, to
# relate the passed options to graphical elements.
# The dataaxis parameter is still the default.
cursor = AnnotatedCursor(
    line=line,
    numberformat="{0:.2f}\n{1:.2f}",
    dataaxis='x', offset=[10, 10],
    textprops={'color': 'blue', 'fontweight': 'bold'},
    ax=ax,
    useblit=True,
    color='red',
    linewidth=2)
# Simulate a mouse move to (-2, 10), needed for online docs
t = ax.transData
MouseEvent(
    "motion_notify_event", ax.figure.canvas, *t.transform((-2, 10))
)._process()
plt.show()
# %%
# Trouble with non-biunique functions
# -----------------------------------
# A call demonstrating problems with the *dataaxis=y* parameter.
# The text now looks up the matching x value for the current cursor y position
# instead of vice versa. Hover your cursor to y=4. There are two x values
# producing this y value: -2 and 2. The function is only unique,
# but not biunique. Only one value is shown in the text.
fig, ax = plt.subplots(figsize=(8, 6))
ax.set_title("Cursor Tracking y Position")
line, = ax.plot(x, y)
ax.set_xlim(-5, 5)
ax.set_ylim(0, 25)
cursor = AnnotatedCursor(
    line=line,
    numberformat="{0:.2f}\n{1:.2f}",
    dataaxis='y', offset=[10, 10],
    textprops={'color': 'blue', 'fontweight': 'bold'},
    ax=ax,
    useblit=True,
    color='red', linewidth=2)
# Simulate a mouse move to (-2, 10), needed for online docs
t = ax.transData
MouseEvent(
    "motion_notify_event", ax.figure.canvas, *t.transform((-2, 10))
)._process()
plt.show()
| AnnotatedCursor |
python | sphinx-doc__sphinx | sphinx/domains/__init__.py | {
"start": 1759,
"end": 12447
class ____:
    """A Domain is meant to be a group of "object" description directives for
    objects of a similar nature, and corresponding roles to create references to
    them. Examples would be Python modules, classes, functions etc., elements
    of a templating language, Sphinx roles and directives, etc.
    Each domain has a separate storage for information about existing objects
    and how to reference them in `self.data`, which must be a dictionary. It
    also must implement several functions that expose the object information in
    a uniform way to parts of Sphinx that allow the user to reference or search
    for objects in a domain-agnostic way.
    About `self.data`: since all object and cross-referencing information is
    stored on a BuildEnvironment instance, the `domain.data` object is also
    stored in the `env.domaindata` dict under the key `domain.name`. Before the
    build process starts, every active domain is instantiated and given the
    environment object; the `domaindata` dict must then either be nonexistent or
    a dictionary whose 'version' key is equal to the domain class'
    :attr:`data_version` attribute. Otherwise, `OSError` is raised and the
    pickled environment is discarded.
    """
    #: domain name: should be short, but unique
    name: ClassVar[str] = ''
    #: domain label: longer, more descriptive (used in messages)
    label: ClassVar[str] = ''
    #: type (usually directive) name -> ObjType instance
    object_types: ClassVar[dict[str, ObjType]] = {}
    #: directive name -> directive class
    directives: ClassVar[dict[str, type[Directive]]] = {}
    #: role name -> role callable
    roles: ClassVar[dict[str, RoleFunction | XRefRole]] = {}
    #: a list of Index subclasses
    indices: ClassVar[list[type[Index]]] = []
    #: role name -> a warning message if reference is missing
    dangling_warnings: ClassVar[dict[str, str]] = {}
    #: node_class -> (enum_node_type, title_getter)
    enumerable_nodes: ClassVar[dict[type[Node], tuple[str, TitleGetter | None]]] = {}
    #: data value for a fresh environment
    initial_data: ClassVar[dict[str, Any]] = {}
    #: data value
    data: dict[str, Any]
    #: data version, bump this when the format of `self.data` changes
    data_version: ClassVar[int] = 0
    def __init__(self, env: BuildEnvironment) -> None:
        domain_data: dict[str, dict[str, Any]] = env.domaindata
        self.env: BuildEnvironment = env
        # Per-instance caches for the role()/directive() adapter factories.
        self._role_cache: dict[str, RoleFunction] = {}
        self._directive_cache: dict[str, type[Directive]] = {}
        # Reverse mappings derived from object_types below.
        self._role2type: dict[str, list[str]] = {}
        self._type2role: dict[str, str] = {}
        # convert class variables to instance one (to enhance through API)
        self.object_types = dict(self.object_types)  # type: ignore[misc]
        self.directives = dict(self.directives)  # type: ignore[misc]
        self.roles = dict(self.roles)  # type: ignore[misc]
        self.indices = list(self.indices)  # type: ignore[misc]
        if self.name not in domain_data:
            # Fresh environment: deep-copy initial_data so instances never
            # share (and mutate) the class-level default.
            assert isinstance(self.initial_data, dict)
            new_data = copy.deepcopy(self.initial_data)
            new_data['version'] = self.data_version
            self.data = domain_data[self.name] = new_data
        else:
            self.data = domain_data[self.name]
            if self.data['version'] != self.data_version:
                # Stale pickled environment; the caller discards it (see class docstring).
                raise OSError('data of %r domain out of date' % self.label)
        for name, obj in self.object_types.items():
            for rolename in obj.roles:
                self._role2type.setdefault(rolename, []).append(name)
            self._type2role[name] = obj.roles[0] if obj.roles else ''
        # Bound-method aliases exposing the reverse mappings as lookup callables.
        self.objtypes_for_role = self._role2type.get
        self.role_for_objtype = self._type2role.get
    def setup(self) -> None:
        """Set up domain object."""
        # Add special hyperlink target for index pages (ex. py-modindex)
        std = self.env.domains.standard_domain
        for index in self.indices:
            if index.name and index.localname:
                docname = f'{self.name}-{index.name}'
                std.note_hyperlink_target(docname, docname, '', index.localname)
    def add_object_type(self, name: str, objtype: ObjType) -> None:
        """Add an object type."""
        self.object_types[name] = objtype
        if objtype.roles:
            self._type2role[name] = objtype.roles[0]
        else:
            self._type2role[name] = ''
        for role in objtype.roles:
            self._role2type.setdefault(role, []).append(name)
    def role(self, name: str) -> RoleFunction | None:
        """Return a role adapter function that always gives the registered
        role its full name ('domain:name') as the first argument.
        """
        if name in self._role_cache:
            return self._role_cache[name]
        if name not in self.roles:
            return None
        fullname = f'{self.name}:{name}'
        def role_adapter(
            typ: str,
            rawtext: str,
            text: str,
            lineno: int,
            inliner: Inliner,
            options: dict[str, Any] | None = None,
            content: Sequence[str] = (),
        ) -> tuple[list[Node], list[nodes.system_message]]:
            return self.roles[name](
                fullname, rawtext, text, lineno, inliner, options or {}, content
            )
        self._role_cache[name] = role_adapter
        return role_adapter
    def directive(self, name: str) -> type[Directive] | None:
        """Return a directive adapter class that always gives the registered
        directive its full name ('domain:name') as ``self.name``.
        """
        if name in self._directive_cache:
            return self._directive_cache[name]
        if name not in self.directives:
            return None
        fullname = f'{self.name}:{name}'
        BaseDirective = self.directives[name]
        class DirectiveAdapter(BaseDirective):  # type: ignore[valid-type,misc]
            def run(self) -> list[Node]:
                self.name = fullname
                return super().run()
        self._directive_cache[name] = DirectiveAdapter
        return DirectiveAdapter
    # methods that should be overwritten
    def clear_doc(self, docname: str) -> None:
        """Remove traces of a document in the domain-specific inventories."""
        pass
    def merge_domaindata(self, docnames: Set[str], otherdata: dict[str, Any]) -> None:
        """Merge in data regarding *docnames* from a different domaindata
        inventory (coming from a subprocess in parallel builds).
        """
        msg = (
            f'merge_domaindata must be implemented in {self.__class__} '
            'to be able to do parallel builds!'
        )
        raise NotImplementedError(msg)
    def process_doc(
        self, env: BuildEnvironment, docname: str, document: nodes.document
    ) -> None:
        """Process a document after it is read by the environment."""
        pass
    def check_consistency(self) -> None:
        """Do consistency checks (**experimental**)."""
        pass
    def process_field_xref(self, pnode: pending_xref) -> None:
        """Process a pending xref created in a doc field.
        For example, attach information about the current scope.
        """
        pass
    def resolve_xref(
        self,
        env: BuildEnvironment,
        fromdocname: str,
        builder: Builder,
        typ: str,
        target: str,
        node: pending_xref,
        contnode: Element,
    ) -> nodes.reference | None:
        """Resolve the pending_xref *node* with the given *typ* and *target*.
        This method should return a new node, to replace the xref node,
        containing the *contnode* which is the markup content of the
        cross-reference.
        If no resolution can be found, None can be returned; the xref node will
        then given to the :event:`missing-reference` event, and if that yields no
        resolution, replaced by *contnode*.
        The method can also raise :exc:`sphinx.environment.NoUri` to suppress
        the :event:`missing-reference` event being emitted.
        """
        pass
    def resolve_any_xref(
        self,
        env: BuildEnvironment,
        fromdocname: str,
        builder: Builder,
        target: str,
        node: pending_xref,
        contnode: Element,
    ) -> list[tuple[str, nodes.reference]]:
        """Resolve the pending_xref *node* with the given *target*.
        The reference comes from an "any" or similar role, which means that we
        don't know the type. Otherwise, the arguments are the same as for
        :meth:`resolve_xref`.
        The method must return a list (potentially empty) of tuples
        ``('domain:role', newnode)``, where ``'domain:role'`` is the name of a
        role that could have created the same reference, e.g. ``'py:func'``.
        ``newnode`` is what :meth:`resolve_xref` would return.
        .. versionadded:: 1.3
        """
        raise NotImplementedError
    def get_objects(self) -> Iterable[tuple[str, str, str, str, str, int]]:
        """Return an iterable of "object descriptions".
        Object descriptions are tuples with six items:
        ``name``
          Fully qualified name.
        ``dispname``
          Name to display when searching/linking.
        ``type``
          Object type, a key in ``self.object_types``.
        ``docname``
          The document where it is to be found.
        ``anchor``
          The anchor name for the object.
        ``priority``
          How "important" the object is (determines placement in search
          results). One of:
          ``1``
            Default priority (placed before full-text matches).
          ``0``
            Object is important (placed before default-priority objects).
          ``2``
            Object is unimportant (placed after full-text matches).
          ``-1``
            Object should not show up in search at all.
        """
        return []
    def get_type_name(self, type: ObjType, primary: bool = False) -> str:
        """Return full name for given ObjType."""
        if primary:
            return type.lname
        return _('%s %s') % (self.label, type.lname)
    def get_enumerable_node_type(self, node: Node) -> str | None:
        """Get type of enumerable nodes (experimental)."""
        enum_node_type, _ = self.enumerable_nodes.get(node.__class__, (None, None))
        return enum_node_type
    def get_full_qualified_name(self, node: Element) -> str | None:
        """Return full qualified name for given node."""
        pass
| Domain |
python | PrefectHQ__prefect | src/prefect/filesystems.py | {
"start": 18280,
"end": 21751
} | class ____(WritableFileSystem, WritableDeploymentStorage):
"""
Store data as a file on a SMB share.
Example:
Load stored SMB config:
```python
from prefect.filesystems import SMB
smb_block = SMB.load("BLOCK_NAME")
```
"""
_block_type_name = "SMB"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/3f624663f7beb97d011d011bffd51ecf6c499efc-195x195.png"
_documentation_url = (
"https://docs.prefect.io/latest/develop/results#specifying-a-default-filesystem"
)
share_path: str = Field(
default=...,
description="SMB target (requires <SHARE>, followed by <PATH>).",
examples=["/SHARE/dir/subdir"],
)
smb_username: Optional[SecretStr] = Field(
default=None,
title="SMB Username",
description="Username with access to the target SMB SHARE.",
)
smb_password: Optional[SecretStr] = Field(
default=None, title="SMB Password", description="Password for SMB access."
)
smb_host: str = Field(
default=..., title="SMB server/hostname", description="SMB server/hostname."
)
smb_port: Optional[int] = Field(
default=None, title="SMB port", description="SMB port (default: 445)."
)
_remote_file_system: RemoteFileSystem = None
@property
def basepath(self) -> str:
return f"smb://{self.smb_host.rstrip('/')}/{self.share_path.lstrip('/')}"
@property
def filesystem(self) -> RemoteFileSystem:
settings = {}
if self.smb_username:
settings["username"] = self.smb_username.get_secret_value()
if self.smb_password:
settings["password"] = self.smb_password.get_secret_value()
if self.smb_host:
settings["host"] = self.smb_host
if self.smb_port:
settings["port"] = self.smb_port
self._remote_file_system = RemoteFileSystem(
basepath=f"smb://{self.smb_host.rstrip('/')}/{self.share_path.lstrip('/')}",
settings=settings,
)
return self._remote_file_system
@sync_compatible
async def get_directory(
self, from_path: Optional[str] = None, local_path: Optional[str] = None
) -> bytes:
"""
Downloads a directory from a given remote path to a local directory.
Defaults to downloading the entire contents of the block's basepath to the current working directory.
"""
return await self.filesystem.get_directory(
from_path=from_path, local_path=local_path
)
@sync_compatible
async def put_directory(
self,
local_path: Optional[str] = None,
to_path: Optional[str] = None,
ignore_file: Optional[str] = None,
) -> int:
"""
Uploads a directory from a given local path to a remote directory.
Defaults to uploading the entire contents of the current working directory to the block's basepath.
"""
return await self.filesystem.put_directory(
local_path=local_path,
to_path=to_path,
ignore_file=ignore_file,
overwrite=False,
)
@sync_compatible
async def read_path(self, path: str) -> bytes:
return await self.filesystem.read_path(path)
@sync_compatible
async def write_path(self, path: str, content: bytes) -> str:
return await self.filesystem.write_path(path=path, content=content)
| SMB |
python | pytorch__pytorch | torch/distributed/checkpoint/planner.py | {
"start": 10408,
"end": 16263
} | class ____:
"""
Abstract class defining the protocol used by load_state_dict to plan the load process.
LoadPlanner are stateful objects that can be used to customize the whole load process.
LoadPlanner acts as an access proxy to the state_dict, so any transformation done to it
will be visible to the whole process.
A planner subclass can expect the following sequence of calls during load_state_dict:
1) set_up_planner - called on all ranks.
Signals the start of loading a checkpoint.
2) create_local_plan - called on all ranks.
Process the state_dict and produces a `LoadPlan` that will be sent for global planning.
3) create_global_plan - called on the coordinator rank only.
Takes the LoadPlan from all ranks and make any global decision.
4) load_bytes - called multiple times on each rank
This is called once per non-tensor value in state_dict.
5) resolve_tensor and commit_tensor - called multiple times on each rank
They are called in pair for each Tensor value in state_dict.
Users are recommended to extend DefaultLoadPlanner instead of this interface directly as
most changes can be expressed by changes in a single method.
There are two usual patterns of extension:
Rewriting state_dict. This is the simplest way to extend the load process as it
doesn't requite understanding the intrincacies of how LoadPlan works. We need
to keep a reference to the original state_dict as load happens in place so
we need to be able to perform it in place
>>> # xdoctest: +SKIP("undefined vars")
>>> class RenamePlanner(DefaultLoadPlanner):
>>> def set_up_planner(
>>> self,
>>> state_dict: STATE_DICT_TYPE,
>>> metadata: Metadata,
>>> is_coordinator: bool,
>>> ) -> None:
>>> self.original_state_dict = state_dict
>>> state_dict = {"foo_" + k: v for k, v in state_dict.items()}
>>>
>>> if self.flatten_sharded_tensors:
>>> state_dict = _flatten_sharded_tensors(state_dict)
>>>
>>> if self.flatten_state_dict:
>>> state_dict, self.mappings = flatten_state_dict(state_dict)
>>>
>>> self.state_dict = state_dict
>>> self.metadata = metadata
>>> self.is_coordinator = is_coordinator
>>>
>>> def load_bytes(self, read_item, value):
>>> # Remove the "foo_" prefix
>>> self.original_state_dict[read_item.dest_index.fqn[4:]] = torch.load(value, weights_only=False)
Modifying resolve_tensor and commit_tensor to handle load time transformation.
>>> # xdoctest: +SKIP("undefined vars")
>>> class MetaModelMaterialize(DefaultSavePlanner):
>>> def resolve_tensor(self, read_item):
>>> tensor = super().resolve_tensor(read_item)
>>> return torch.empty_like(tensor, device="cpu")
>>>
>>> def commit_tensor(self, read_item, tensor):
>>> self.state_dict[read_item.dest_index.fqn] = tensor
"""
@abc.abstractmethod
def set_up_planner(
self,
state_dict: STATE_DICT_TYPE,
metadata: Optional[Metadata] = None,
is_coordinator: bool = False,
) -> None:
"""
Initialize this instance to load data into ``state_dict``.
. N.B. This is called on every rank.
"""
@abc.abstractmethod
def create_local_plan(self) -> LoadPlan:
"""
Create a LoadPlan based on state_dict and metadata provided by set_up_planner.
. N.B. This is called on every rank.
"""
@abc.abstractmethod
def create_global_plan(self, global_plan: list[LoadPlan]) -> list[LoadPlan]:
"""
Compute the global load plan and return plans for each rank.
. N.B. This is called on the coordinator rank only
"""
@abc.abstractmethod
def finish_plan(self, central_plan: LoadPlan) -> LoadPlan:
"""Accept the plan from coordinator and return final LoadPlan."""
@abc.abstractmethod
def load_bytes(self, read_item: ReadItem, value: io.BytesIO) -> None:
"""
Load the item described by ``read_item``and ``value``.
This method is expected to modify in-place the underlying state_dict.
The contents of ``value`` are defined by the SavePlanner used to produce
the checkpoint being loaded.
"""
def resolve_bytes(self, read_item: ReadItem) -> io.BytesIO:
"""
Return the BytesIO to be used by the StorageReader to load `read_item`.
The BytesIO should alias with one on the underlying state_dict as StorageReader will replace its contents.
"""
raise NotImplementedError("LoadPlanner.resolve_bytes is not implemented")
@abc.abstractmethod
def resolve_tensor(self, read_item: ReadItem) -> torch.Tensor:
"""
Return the tensor described by ``read_item`` to be used by the StorageReader to load `read_item`.
The tensor should alias with one on the underlying state_dict as StorageReader will replace its contents.
If, for any reason, that's not possible, the planner can use the ``commit_tensor`` method to copy the data
back to the one in state_dict.
"""
@abc.abstractmethod
def commit_tensor(self, read_item: ReadItem, tensor: torch.Tensor) -> None:
"""
Call once the StorageReader finished loading data into ``tensor``.
The provided tensor is the same one returned by the call to ``resolve_tensor``.
This method is only needed if this LoadPlanner needs to post process ``tensor`` prior to
copying it back to the one in the state_dict.
The contents of tensor will follow its device synchronization model.
"""
| LoadPlanner |
python | jupyterlab__jupyterlab | jupyterlab/handlers/error_handler.py | {
"start": 463,
"end": 831
} | class ____(ExtensionHandlerMixin, JupyterHandler):
def initialize(self, messages=None, name=None):
super().initialize(name=name)
self.messages = messages
@web.authenticated
@web.removeslash
def get(self):
msgs = [f"<h2>{msg}</h2>" for msg in self.messages]
self.write(TEMPLATE.format(messages="\n".join(msgs)))
| ErrorHandler |
python | pytorch__pytorch | test/test_cuda_multigpu.py | {
"start": 50725,
"end": 70835
} | class ____(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if (
input.is_cuda and input.get_device() == i
): # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [
torch.empty_like(input, device=0),
torch.empty_like(input, device=1),
]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(
RuntimeError, r"Exactly one of 'devices' and 'out'"
):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(
RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1",
):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(
RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1",
):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
self.genSparseTensor((2, 3), 2, 1, False, "cuda", torch.float64)[0],
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
self.genSparseTensor((2, 3), 2, 10, False, "cuda", torch.float64)[0],
self.genSparseTensor((2, 3), 2, 5, False, "cuda", torch.float64)[0],
self.genSparseTensor((3, 3), 2, 7, False, "cuda", torch.int64)[0],
self.genSparseTensor((2, 3), 2, 2, False, "cuda", torch.float32)[0],
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
self.genSparseTensor((2, 7), 2, 3, False, "cuda", torch.int64)[0],
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda(),
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r.coalesce() if r.is_sparse else r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
self.genSparseTensor((2, 3), 2, 1, False, "cuda", torch.float64)[0],
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
self.genSparseTensor((2, 3), 2, 10, False, "cuda", torch.float64)[0],
self.genSparseTensor((2, 3), 2, 5, False, "cuda", torch.float64)[0],
self.genSparseTensor((3, 3), 2, 7, False, "cuda", torch.int64)[0],
self.genSparseTensor((2, 3), 2, 2, False, "cuda", torch.float32)[0],
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
self.genSparseTensor((2, 7), 2, 3, False, "cuda", torch.int64)[0],
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(
r.data_ptr(), input.data_ptr()
) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(
RuntimeError, r"Expected devices and chunk_sizes to be of same length"
):
comm.scatter(
input,
[0 for _ in range(len(chunk_sizes) + 1)],
dim=dim,
chunk_sizes=chunk_sizes,
)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(
RuntimeError, r"Expected at least one device to scatter to"
):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(
RuntimeError, r"Expected at least one output tensor to scatter to"
):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(
RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0",
):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(
RuntimeError, r"Output tensor at index 0 has incorrect shape"
):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(
RuntimeError,
r"Total size for output tensors along scatter dim \d+ does not match",
):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device("cuda:0"), torch.device("cpu")]
if torch.cuda.device_count() > 2:
destinations.append(torch.device("cuda:2"))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device("cuda", torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(
RuntimeError, r"'destination' must not be specified"
):
comm.gather(
(x, y),
dim,
destination="cpu",
out=torch.empty(expected_size, device="cpu"),
)
with self.assertRaisesRegex(
RuntimeError, r"Expected at least one tensor to gather from"
):
comm.gather(())
with self.assertRaisesRegex(
RuntimeError, r"Expected all input tensors to be CUDA tensors, "
):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(
RuntimeError,
r"Expected all input tensors to have the same number of dimensions",
):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(
RuntimeError, r"Input tensor at index 1 has invalid shape"
):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device="cpu").contiguous(
memory_format=torch.channels_last
)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ["a", "b"]
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, "cpu") # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to("cpu"), outputs[1][i].to("cpu")))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, "cpu") # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to("cpu"), outputs[1][i].to("cpu")))
self.assertTrue(torch.equal(x, cat))
instantiate_parametrized_tests(TestCudaMultiGPU)
if __name__ == "__main__":
run_tests()
| TestCudaComm |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_test.py | {
"start": 2450,
"end": 3947
} | class ____(linalg.LinearOperator):
"""LinearOperator that wraps a [batch] matrix and implements matmul/solve."""
def __init__(self,
matrix,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None):
parameters = dict(
matrix=matrix,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square
)
self._matrix = ops.convert_to_tensor(matrix, name="matrix")
super(LinearOperatorMatmulSolve, self).__init__(
dtype=self._matrix.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters)
def _shape(self):
return self._matrix.shape
def _shape_tensor(self):
return array_ops.shape(self._matrix)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
x = ops.convert_to_tensor(x, name="x")
return math_ops.matmul(
self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
rhs = ops.convert_to_tensor(rhs, name="rhs")
assert not adjoint_arg, "Not implemented for this test class."
return linalg_ops.matrix_solve(self._matrix, rhs, adjoint=adjoint)
@test_util.run_all_in_graph_and_eager_modes
| LinearOperatorMatmulSolve |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_unittest/test_assertions.py | {
"start": 1463,
"end": 6472
} | class ____(__TestCase):
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.1, 1.0, places=1)
self.assertAlmostEqual(0, .1+.1j, places=0)
self.assertNotAlmostEqual(0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 0, .1+.1j, places=0)
self.assertAlmostEqual(float('inf'), float('inf'))
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
float('inf'), float('inf'))
def test_AmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertAlmostEqual(1.0, 1.0, delta=0.5)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.0, 1.0, delta=0.5)
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.1, 1.0, delta=0.5)
self.assertRaises(TypeError, self.assertAlmostEqual,
1.1, 1.0, places=2, delta=2)
self.assertRaises(TypeError, self.assertNotAlmostEqual,
1.1, 1.0, places=2, delta=2)
first = datetime.datetime.now()
second = first + datetime.timedelta(seconds=10)
self.assertAlmostEqual(first, second,
delta=datetime.timedelta(seconds=20))
self.assertNotAlmostEqual(first, second,
delta=datetime.timedelta(seconds=5))
def test_assertRaises(self):
def _raise(e):
raise e
self.assertRaises(KeyError, _raise, KeyError)
self.assertRaises(KeyError, _raise, KeyError("key"))
try:
self.assertRaises(KeyError, lambda: None)
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
self.assertRaises(KeyError, _raise, ValueError)
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
with self.assertRaises(KeyError) as cm:
try:
raise KeyError
except Exception as e:
exc = e
raise
self.assertIs(cm.exception, exc)
with self.assertRaises(KeyError):
raise KeyError("key")
try:
with self.assertRaises(KeyError):
pass
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
with self.assertRaises(KeyError):
raise ValueError
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
def test_assertRaises_frames_survival(self):
# Issue #9815: assertRaises should avoid keeping local variables
# in a traceback alive.
class A:
pass
wr = None
class Foo(unittest.TestCase):
def foo(self):
nonlocal wr
a = A()
wr = weakref.ref(a)
try:
raise OSError
except OSError:
raise ValueError
def test_functional(self):
self.assertRaises(ValueError, self.foo)
def test_with(self):
with self.assertRaises(ValueError):
self.foo()
Foo("test_functional").run()
gc_collect() # For PyPy or other GCs.
self.assertIsNone(wr())
Foo("test_with").run()
gc_collect() # For PyPy or other GCs.
self.assertIsNone(wr())
def testAssertNotRegex(self):
self.assertNotRegex('Ala ma kota', r'r+')
try:
self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
except self.failureException as e:
self.assertIn('Message', e.args[0])
else:
self.fail('assertNotRegex should have failed.')
| Test_Assertions |
python | ray-project__ray | python/ray/llm/_internal/serve/core/ingress/ingress.py | {
"start": 11006,
"end": 24443
} | class ____(DeploymentProtocol):
def __init__(
self,
llm_deployments: List[DeploymentHandle],
*,
_get_lora_model_metadata_func: Optional[
Callable[[str, LLMConfig], Awaitable[Dict[str, Any]]]
] = None,
):
self._default_serve_handles: Dict[str, DeploymentHandle] = {}
self._llm_configs: Dict[str, LLMConfig] = {}
# Configuring a ServeHandle with .options() creates a new ServeHandle
# object, which contains a new metrics pusher and long-polling call.
# Creating too many ServeHandles can impact event-loop and Serve Controller
# performance, so we save configured ServeHandles here and reuse them.
self._configured_serve_handles: Dict[str, DeploymentHandle] = {}
self._get_lora_model_metadata_func = (
_get_lora_model_metadata_func or self._default_get_lora_model_metadata_func
)
# Setup _default_serve_handles and _llm_configs asynchronously.
self._init_completed = asyncio.Event()
self.running_setup_task = get_or_create_event_loop().create_task(
self._setup_handle_and_config_maps(llm_deployments=llm_deployments)
)
async def _default_get_lora_model_metadata_func(
self, model_id: str, llm_config: LLMConfig
) -> Dict[str, Any]:
return await get_lora_model_metadata(model_id, llm_config)
async def _setup_handle_and_config_maps(
self, llm_deployments: List[DeploymentHandle]
):
for handle in llm_deployments:
llm_config = await handle.llm_config.remote()
self._default_serve_handles[llm_config.model_id] = handle
self._llm_configs[llm_config.model_id] = llm_config
# Note (genesu): Even though we have already checked model id uniqueness in
# `router_application()` under run.py. When we OSS this router component, users
# would be able to directly use the lower level api and bypass that check. We
# check it again here to ensure all the model ids are unique.
if len(llm_deployments) != len(self._llm_configs):
raise ValueError("Duplicate models found. Make sure model ids are unique.")
self._init_completed.set()
async def check_health(self):
await self._init_completed.wait()
def _get_configured_serve_handle(self, model_id: str):
"""Gets a ServeHandle to a model deployment.
Configures the handle's options, and stores it in a cache.
If the model_id includes LoRA suffix, we set the model ID as
the multiplexed_model_id, so the request uses Serve's multiplexed
routing logic.
If the model_id is a base model- even if the model has LoRA
adapters- we don't set multiplexed_model_id. Setting
multiplexed_model_id would cause base model requests to be
sent to a single model replica, instead of being load
balanced across all replicas. This is undesirable for base
model requests (unlike LoRA requests) because all the replicas
have a copy of the base model.
"""
if model_id not in self._configured_serve_handles:
base_model_id = get_base_model_id(model_id)
if base_model_id in self._default_serve_handles:
if model_id == base_model_id:
default_handle = self._default_serve_handles[model_id]
configured_handle = default_handle.options(stream=True)
self._configured_serve_handles[model_id] = configured_handle
else:
default_handle = self._default_serve_handles[base_model_id]
configured_handle = default_handle.options(
stream=True,
multiplexed_model_id=model_id,
)
self._configured_serve_handles[model_id] = configured_handle
else:
raise HTTPException(
status.HTTP_404_NOT_FOUND,
f'Could not find model with id "{model_id}".',
)
return self._configured_serve_handles[model_id]
async def _get_response(
self,
*,
body: Union[
CompletionRequest,
ChatCompletionRequest,
EmbeddingRequest,
TranscriptionRequest,
ScoreRequest,
],
call_method: str,
) -> AsyncGenerator[
Union[
LLMChatResponse,
LLMCompletionsResponse,
LLMEmbeddingsResponse,
LLMTranscriptionResponse,
LLMScoreResponse,
],
None,
]:
"""Calls the model deployment and returns the stream."""
model: str = body.model
base_model_id = get_base_model_id(model)
if base_model_id not in self._llm_configs:
raise HTTPException(
status.HTTP_404_NOT_FOUND,
f'Got request for model "{model}". '
f'Could not find base model with ID "{base_model_id}".',
)
model_handle = self._get_configured_serve_handle(model)
# TODO(seiji): Remove when we update to Pydantic v2.11+ with the fix
# for tool calling ValidatorIterator serialization issue.
if isinstance(body, ChatCompletionRequest):
body = _sanitize_chat_completion_request(body)
async for response in getattr(model_handle, call_method).remote(body):
yield response
async def model(self, model_id: str) -> Optional[ModelCard]:
if model_id in self._llm_configs:
return to_model_metadata(model_id, self._llm_configs[model_id])
base_model_id = get_base_model_id(model_id)
if (
base_model_id in self._llm_configs
and self._llm_configs[base_model_id].lora_config
):
try:
overrides = await self._get_lora_model_metadata_func(
model_id, self._llm_configs[base_model_id]
)
return to_model_metadata(
model_id=model_id,
model_config=self._llm_configs[base_model_id],
overrides=overrides,
)
except HTTPException:
logger.exception(
"Unable to retrieve LoRA adapter config file for "
f'"{model_id}". Omitting it from list of available models. '
"Check that adapter config file exists in cloud bucket."
)
async def models(self) -> ModelList:
"""OpenAI API-compliant endpoint to get all rayllm models."""
all_models = dict()
for base_model_id, llm_config in self._llm_configs.items():
# Add the base model.
all_models[base_model_id] = await self.model(base_model_id)
if llm_config.lora_config is not None:
# Add all the fine-tuned models.
lora_model_ids = get_lora_model_ids(
dynamic_lora_loading_path=llm_config.lora_config.dynamic_lora_loading_path,
base_model_id=base_model_id,
)
for lora_id in lora_model_ids:
model_data = await self.model(lora_id)
if model_data is not None:
all_models[lora_id] = model_data
return ModelList(data=list(all_models.values()))
async def model_data(self, model: str) -> ModelCard:
"""OpenAI API-compliant endpoint to get one rayllm model.
:param model: The model ID (e.g. "amazon/LightGPT")
"""
model = replace_prefix(model)
model_data = await self.model(model)
if model_data is None:
raise OpenAIHTTPException(
message=f"Unable to find {model}. Please ensure that the model exists and you have permission.",
status_code=status.HTTP_404_NOT_FOUND,
type="InvalidModel",
)
return model_data
async def _process_llm_request(
self,
body: Union[CompletionRequest, ChatCompletionRequest, TranscriptionRequest],
call_method: str,
) -> Response:
async with router_request_timeout(DEFAULT_LLM_ROUTER_HTTP_TIMEOUT):
gen = self._get_response(body=body, call_method=call_method)
# In streaming with batching enabled, this first response can be a list of chunks.
initial_response, gen = await _peek_at_generator(gen)
if isinstance(initial_response, list):
first_chunk = initial_response[0]
else:
first_chunk = initial_response
if isinstance(first_chunk, ErrorResponse):
raise OpenAIHTTPException(
message=first_chunk.error.message,
status_code=first_chunk.error.code,
type=first_chunk.error.type,
)
if isinstance(first_chunk, NON_STREAMING_RESPONSE_TYPES):
# Not streaming, first chunk should be a single response
return JSONResponse(content=first_chunk.model_dump())
# In case of streaming we need to iterate over the chunks and yield them
openai_stream_generator = _openai_json_wrapper(gen)
return StreamingResponse(
openai_stream_generator, media_type="text/event-stream"
)
async def completions(self, body: CompletionRequest) -> Response:
"""Given a prompt, the model will return one or more predicted completions,
and can also return the probabilities of alternative tokens at each position.
Args:
body: The CompletionRequest object.
Returns:
A response object with completions.
"""
return await self._process_llm_request(
body, call_method=CallMethod.COMPLETIONS.value
)
async def chat(self, body: ChatCompletionRequest) -> Response:
"""Given a prompt, the model will return one or more predicted completions,
and can also return the probabilities of alternative tokens at each position.
Args:
body: The ChatCompletionRequest object.
Returns:
A response object with completions.
"""
return await self._process_llm_request(body, call_method=CallMethod.CHAT.value)
async def embeddings(self, body: EmbeddingRequest) -> Response:
"""Create embeddings for the provided input.
Args:
body: The EmbeddingRequest object.
Returns:
A response object with embeddings.
"""
async with router_request_timeout(DEFAULT_LLM_ROUTER_HTTP_TIMEOUT):
results = self._get_response(body=body, call_method="embeddings")
result = await results.__anext__()
if isinstance(result, ErrorResponse):
raise OpenAIHTTPException(
message=result.error.message,
status_code=result.error.code,
type=result.error.type,
)
if isinstance(result, EmbeddingResponse):
return JSONResponse(content=result.model_dump())
# Annotated[..., Form()] is wrapper that is used to handle multiple form data, which is how audio is sent in transcription requests.
# vLLM implementation for handling transcription requests: https://github.com/vllm-project/vllm/blob/0825197bee8dea547f2ab25f48afd8aea0cd2578/vllm/entrypoints/openai/api_server.py#L839.
async def transcriptions(
self, body: Annotated[TranscriptionRequest, Form()]
) -> Response:
"""Create transcription for the provided audio input.
Args:
body: The TranscriptionRequest object.
Returns:
A response object with transcriptions.
"""
return await self._process_llm_request(
body, call_method=CallMethod.TRANSCRIPTIONS.value
)
async def score(self, body: ScoreRequest) -> Response:
"""Create scores for the provided text pairs.
Note: This is a vLLM specific endpoint.
Args:
body: The score request containing input text pairs to score.
Returns:
A response object with scores.
"""
async with router_request_timeout(DEFAULT_LLM_ROUTER_HTTP_TIMEOUT):
results = self._get_response(body=body, call_method="score")
result = await results.__anext__()
if isinstance(result, ErrorResponse):
raise OpenAIHTTPException(
message=result.message,
status_code=result.code,
type=result.type,
)
if isinstance(result, ScoreResponse):
return JSONResponse(content=result.model_dump())
@classmethod
def get_deployment_options(
cls, llm_configs: Optional[List[LLMConfig]] = None
) -> Dict[str, Any]:
"""Get the deployment options for the ingress deployment.
Args:
llm_configs: The LLM configs to infer the number of ingress replicas from.
Returns:
A dictionary containing the deployment options for the ingress deployment.
"""
return DEFAULT_INGRESS_OPTIONS
| OpenAiIngress |
python | tensorflow__tensorflow | tensorflow/python/autograph/core/converter_test.py | {
"start": 1157,
"end": 1998
} | class ____(converter_testing.TestCase):
def test_to_ast(self):
opts = converter.ConversionOptions()
opts_ast = opts.to_ast()
template = '''
def f():
return opts_ast
'''
opts_packed = templates.replace(template, opts_ast=opts_ast)
reparsed, _, _ = loader.load_ast(opts_packed)
fake_ag = types.ModuleType('fake_ag')
fake_ag.ConversionOptions = converter.ConversionOptions
fake_ag.Feature = converter.Feature
reparsed.ag__ = fake_ag
reparsed_opts = reparsed.f()
self.assertEqual(opts.recursive, reparsed_opts.recursive)
self.assertEqual(opts.user_requested, False)
self.assertEqual(
opts.internal_convert_user_code,
reparsed_opts.internal_convert_user_code)
self.assertEqual(opts.optional_features, reparsed_opts.optional_features)
| ConversionOptionsTest |
python | run-llama__llama_index | llama-index-integrations/retrievers/llama-index-retrievers-tldw/llama_index/retrievers/tldw/base.py | {
"start": 969,
"end": 2887
} | class ____(BaseRetriever):
r"""
A retriever that searches for relevant video moments from the TL;DW collection.
Args:
api_key (str): The API key for authentication.
collection_id (str): The ID of the video collection to search within.
callback_manager (Optional[CallbackManager]): Optional callback manager for logging and event handling.
"""
def __init__(
self,
api_key: str,
collection_id: str,
callback_manager: Optional[CallbackManager] = None,
) -> None:
self._api_key = api_key
self._collection_id = collection_id
super().__init__(
callback_manager=callback_manager,
)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
headers = {
"Authorization": f"Bearer {self._api_key}",
}
res = requests.post(
f"{API_ENDPOINT}/search",
headers=headers,
json={
"collection_id": self._collection_id,
"search_term": query_bundle.query_str,
},
)
search_results = SearchResult.model_validate(res.json())
# Return individual fragments as nodes
return [
NodeWithScore(
node=TextNode(
text=fragment.description,
metadata={
"scene_index": idx,
"media_id": scene.media_id,
"start_ms": fragment.start_ms,
"end_ms": fragment.end_ms,
"scene_start_ms": scene.start_ms,
"scene_end_ms": scene.end_ms,
},
),
score=fragment.similarity,
)
for idx, scene in enumerate(search_results.scenes)
for fragment in scene.fragments
]
| TldwRetriever |
python | ray-project__ray | python/ray/tune/tests/execution/utils.py | {
"start": 661,
"end": 933
} | class ____(FixedResourceManager):
def __init__(self, total_resources: Dict[str, float]):
self._allow_strict_pack = True
self._total_resources = total_resources
self._requested_resources = []
self._used_resources = []
| BudgetResourceManager |
python | apache__airflow | providers/qdrant/src/airflow/providers/qdrant/operators/qdrant.py | {
"start": 1348,
"end": 4122
} | class ____(BaseOperator):
"""
Upload points to a Qdrant collection.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:QdrantIngestOperator`
:param conn_id: The connection id to connect to a Qdrant instance.
:param collection_name: The name of the collection to ingest data into.
:param vectors: An iterable over vectors to upload.
:param payload: Iterable of vector payloads, Optional. Defaults to None.
:param ids: Iterable of custom vector ids, Optional. Defaults to None.
:param batch_size: Number of points to upload per-request. Defaults to 64.
:param parallel: Number of parallel upload processes. Defaults to 1.
:param method: Start method for parallel processes. Defaults to 'forkserver'.
:param max_retries: Number of retries for failed requests. Defaults to 3.
:param wait: Await for the results to be applied on the server side. Defaults to True.
:param kwargs: Additional keyword arguments passed to the BaseOperator constructor.
"""
template_fields: Sequence[str] = (
"collection_name",
"vectors",
"payload",
"ids",
"batch_size",
"parallel",
"method",
"max_retries",
"wait",
)
def __init__(
self,
*,
conn_id: str = QdrantHook.default_conn_name,
collection_name: str,
vectors: Iterable[VectorStruct],
payload: Iterable[dict[str, Any]] | None = None,
ids: Iterable[int | str] | None = None,
batch_size: int = 64,
parallel: int = 1,
method: str | None = None,
max_retries: int = 3,
wait: bool = True,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.conn_id = conn_id
self.collection_name = collection_name
self.vectors = vectors
self.payload = payload
self.ids = ids
self.batch_size = batch_size
self.parallel = parallel
self.method = method
self.max_retries = max_retries
self.wait = wait
@cached_property
def hook(self) -> QdrantHook:
"""Return an instance of QdrantHook."""
return QdrantHook(conn_id=self.conn_id)
def execute(self, context: Context) -> None:
"""Upload points to a Qdrant collection."""
self.hook.conn.upload_collection(
collection_name=self.collection_name,
vectors=self.vectors,
payload=self.payload,
ids=self.ids,
batch_size=self.batch_size,
parallel=self.parallel,
method=self.method,
max_retries=self.max_retries,
wait=self.wait,
)
| QdrantIngestOperator |
python | justquick__django-activity-stream | runtests/testapp/tests/test_drf.py | {
"start": 391,
"end": 908
} | class ____(BaseDRFTestCase):
def test_urls(self):
self._check_urls('actions', 'follows', 'groups', 'sites',
'players', 'nested-models', 'my-users')
def test_serializers(self):
models = (Group, MyUser, Player, Site, NestedModel)
self.assertSetEqual(serializers.keys(), models, domap=False)
groups = self.get(reverse('group-list'))
assert len(groups) == 2
self.assertSetEqual(GroupSerializer.Meta.fields, groups[0].keys())
| DRFTestAppTests |
python | mlflow__mlflow | dev/set_matrix.py | {
"start": 26811,
"end": 28581
} | class ____(json.JSONEncoder):
def default(self, o):
if isinstance(o, MatrixItem):
return o.model_dump(exclude_none=True)
elif isinstance(o, Version):
return str(o)
return super().default(o)
def set_action_output(name, value):
with open(os.getenv("GITHUB_OUTPUT"), "a") as f:
f.write(f"{name}={value}\n")
def split(matrix, n):
grouped_by_name = defaultdict(list)
for item in matrix:
grouped_by_name[item.name].append(item)
num = len(matrix) // n
chunk = []
for group in grouped_by_name.values():
chunk.extend(group)
if len(chunk) >= num:
yield chunk
chunk = []
if chunk:
yield chunk
def main(args):
# https://docs.github.com/en/actions/learn-github-actions/usage-limits-billing-and-administration#usage-limits
# > A job matrix can generate a maximum of 256 jobs per workflow run.
MAX_ITEMS = 256
NUM_JOBS = 2
print(divider("Parameters"))
print(json.dumps(args, indent=2))
matrix = generate_matrix(args)
matrix = sorted(matrix, key=lambda x: (x.name, x.category, x.version))
assert len(matrix) <= MAX_ITEMS * 2, f"Too many jobs: {len(matrix)} > {MAX_ITEMS * NUM_JOBS}"
for idx, mat in enumerate(split(matrix, NUM_JOBS), start=1):
mat = {"include": mat, "job_name": [x.job_name for x in mat]}
print(divider(f"Matrix {idx}"))
print(json.dumps(mat, indent=2, cls=CustomEncoder))
if "GITHUB_ACTIONS" in os.environ:
set_action_output(f"matrix{idx}", json.dumps(mat, cls=CustomEncoder))
set_action_output(f"is_matrix{idx}_empty", "true" if len(mat) == 0 else "false")
if __name__ == "__main__":
main(sys.argv[1:])
| CustomEncoder |
python | dask__dask | dask/dataframe/tseries/resample.py | {
"start": 5094,
"end": 5770
} | class ____(Blockwise):
_parameters = [
"frame",
"divisions_left",
"divisions_right",
"closed",
"rule",
"kwargs",
"how",
"fill_value",
"how_args",
"how_kwargs",
]
operation = staticmethod(_resample_series)
@functools.cached_property
def _meta(self):
return self.frame._meta
def _divisions(self):
return list(self.divisions_left.iterable) + [self.divisions_right.iterable[-1]]
def _blockwise_arg(self, arg, i):
if isinstance(arg, BlockwiseDep):
return arg.iterable[i]
return super()._blockwise_arg(arg, i)
| ResampleAggregation |
python | doocs__leetcode | solution/0200-0299/0225.Implement Stack using Queues/Solution.py | {
"start": 0,
"end": 608
} | class ____:
def __init__(self):
self.q1 = deque()
self.q2 = deque()
def push(self, x: int) -> None:
self.q2.append(x)
while self.q1:
self.q2.append(self.q1.popleft())
self.q1, self.q2 = self.q2, self.q1
def pop(self) -> int:
return self.q1.popleft()
def top(self) -> int:
return self.q1[0]
def empty(self) -> bool:
return len(self.q1) == 0
# Your MyStack object will be instantiated and called as such:
# obj = MyStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.empty()
| MyStack |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarTuple30.py | {
"start": 104,
"end": 208
} | class ____[*Ts]:
def __init__(self, *args: *Ts): ...
def method(self):
Child(self)
| Parent |
python | jazzband__prettytable | tests/test_prettytable.py | {
"start": 62222,
"end": 63599
} | class ____:
def test_fields_at_class_declaration(self) -> None:
table = PrettyTable(
field_names=CITY_DATA_HEADER,
fields=["City name", "Annual Rainfall"],
)
for row in CITY_DATA:
table.add_row(row)
assert (
"""+-----------+-----------------+
| City name | Annual Rainfall |
+-----------+-----------------+
| Adelaide | 600.5 |
| Brisbane | 1146.4 |
| Darwin | 1714.7 |
| Hobart | 619.5 |
| Sydney | 1214.8 |
| Melbourne | 646.9 |
| Perth | 869.4 |
+-----------+-----------------+"""
== table.get_string().strip()
)
def test_fields(self) -> None:
table = PrettyTable()
table.field_names = CITY_DATA_HEADER
table.fields = ["City name", "Annual Rainfall"]
for row in CITY_DATA:
table.add_row(row)
assert (
"""+-----------+-----------------+
| City name | Annual Rainfall |
+-----------+-----------------+
| Adelaide | 600.5 |
| Brisbane | 1146.4 |
| Darwin | 1714.7 |
| Hobart | 619.5 |
| Sydney | 1214.8 |
| Melbourne | 646.9 |
| Perth | 869.4 |
+-----------+-----------------+"""
== table.get_string().strip()
)
| TestFields |
python | ray-project__ray | ci/ray_ci/linux_container.py | {
"start": 309,
"end": 3904
} | class ____(Container):
def __init__(
self,
docker_tag: str,
volumes: Optional[List[str]] = None,
envs: Optional[List[str]] = None,
python_version: Optional[str] = None,
tmp_filesystem: Optional[str] = None,
architecture: Optional[str] = None,
privileged: bool = False,
) -> None:
super().__init__(docker_tag, volumes, envs)
if tmp_filesystem is not None:
if tmp_filesystem != "tmpfs":
raise ValueError("Only tmpfs is supported for tmp filesystem")
self.python_version = python_version or DEFAULT_PYTHON_VERSION
self.tmp_filesystem = tmp_filesystem
self.privileged = privileged
if architecture is None:
architecture = platform.machine()
if architecture.lower() == "amd64":
architecture = "x86_64"
if architecture == "arm64":
architecture = "aarch64"
self.architecture = architecture
def install_ray(
self, build_type: Optional[str] = None, mask: Optional[str] = None
) -> List[str]:
cache_readonly = os.environ.get("BUILDKITE_CACHE_READONLY", "")
env = os.environ.copy()
env["DOCKER_BUILDKIT"] = "1"
build_cmd = [
"docker",
"build",
"--pull",
"--progress=plain",
"-t",
self._get_docker_image(),
"--build-arg",
f"BASE_IMAGE={self._get_docker_image()}",
"--build-arg",
f"BUILD_TYPE={build_type or ''}",
"--build-arg",
f"BUILDKITE_CACHE_READONLY={cache_readonly}",
]
if not build_type or build_type == "optimized":
python_version = self.python_version
core_image_tag = f"ray-core-py{python_version}"
if self.architecture != DEFAULT_ARCHITECTURE:
core_image_tag += f"-{self.architecture}"
ray_core_image = get_docker_image(core_image_tag)
build_cmd += ["--build-arg", f"RAY_CORE_IMAGE={ray_core_image}"]
ray_dashboard_image = get_docker_image("ray-dashboard")
build_cmd += ["--build-arg", f"RAY_DASHBOARD_IMAGE={ray_dashboard_image}"]
if mask:
build_cmd += ["--build-arg", "RAY_INSTALL_MASK=" + mask]
build_cmd += ["-f", "ci/ray_ci/tests.env.Dockerfile", "/ray"]
subprocess.check_call(
build_cmd,
env=env,
stdout=sys.stdout,
stderr=sys.stderr,
)
def get_run_command_shell(self) -> List[str]:
return ["/bin/bash", "-iecuo", "pipefail", "--"]
def get_run_command_extra_args(
self,
gpu_ids: Optional[List[int]] = None,
) -> List[str]:
extra_args = [
"--add-host",
"rayci.localhost:host-gateway",
]
if self.tmp_filesystem:
extra_args += [
"--mount",
f"type={self.tmp_filesystem},destination=/tmp",
]
if self.privileged:
extra_args += ["--privileged"]
else:
for cap in _DOCKER_CAP_ADD:
extra_args += ["--cap-add", cap]
if gpu_ids:
extra_args += ["--gpus", f'"device={",".join(map(str, gpu_ids))}"']
extra_args += [
"--workdir",
"/rayci",
"--shm-size=2.5gb",
]
return extra_args
def get_artifact_mount(self) -> Tuple[str, str]:
return ("/tmp/artifacts", "/artifact-mount")
| LinuxContainer |
python | pytorch__pytorch | benchmarks/dynamo/common.py | {
"start": 2678,
"end": 23700
} | class ____(NamedTuple):
backend: str # aot_eager or inductor
training: bool
dynamic: bool = False
device: str = "cuda"
CI_SKIP_OPTIMIZER = {
# HF
"MobileBertForMaskedLM", # Stack issue in fx
}
try:
from .fb.common import INTERNAL_CI_SKIP_DYNAMIC_BATCH_ONLY
except ImportError:
INTERNAL_CI_SKIP_DYNAMIC_BATCH_ONLY = set()
try:
from pytorch.benchmark.fb.run_utils import trace_handler
except ImportError:
trace_handler = None
CI_SKIP_DYNAMIC_BATCH_ONLY = {
"sam",
# See https://github.com/mindee/doctr/blob/f2114758d529ed8d3d0030581638f0520b6b98d8/doctr/models/detection/core.py#L89
# It iterates over the batch, which is dynamic, and dynamo chokes
# We should be able to graphbreak there.
"doctr_det_predictor",
"dlrm",
"pyhpc_isoneutral_mixing",
"pyhpc_equation_of_state",
"pyhpc_turbulent_kinetic_energy",
"detectron2_fcos_r_50_fpn",
"detectron2_fasterrcnn_r_101_c4",
"detectron2_fasterrcnn_r_101_dc5",
"detectron2_fasterrcnn_r_101_fpn",
"detectron2_fasterrcnn_r_50_c4",
"detectron2_fasterrcnn_r_50_dc5",
"detectron2_fasterrcnn_r_50_fpn",
"Reformer",
"llama",
}.union(INTERNAL_CI_SKIP_DYNAMIC_BATCH_ONLY)
# These models currently fail accuracy with eager Adam optimizer
# so we use SGD when running the full benchmarks
# https://github.com/pytorch/pytorch/issues/115966
BENCHMARK_USE_SGD = {
# TorchBench
"BERT_pytorch",
"LearningToPaint",
"alexnet",
"dcgan",
"demucs",
"densenet121",
"dlrm",
"fastNLP_Bert",
"mobilenet_v2",
"phlippe_densenet",
"phlippe_resnet",
"pytorch_stargan",
"resnet18",
"shufflenet_v2_x1_0",
"speech_transformer",
"squeezenet1_1",
"stable_diffusion_text_encoder",
"vgg16",
# HF
"AlbertForMaskedLM",
"BartForCausalLM",
"ElectraForCausalLM",
"M2M100ForConditionalGeneration",
"MBartForCausalLM",
"OPTForCausalLM",
"PLBartForCausalLM",
"PegasusForCausalLM",
"TrOCRForCausalLM",
"XGLMForCausalLM",
# TIMM
"adv_inception_v3",
"tf_efficientnet_b0",
"ghostnet_100",
}
# These models OOM in CI
# due to the extra memory of Adam optimizer states,
# so we fall back to SGD in CI
CI_USE_SGD = {
"torchrec_dlrm",
"demucs",
"detectron2_fasterrcnn_r_101_c4",
"detectron2_fasterrcnn_r_101_dc5",
"detectron2_fasterrcnn_r_101_fpn",
"detectron2_fasterrcnn_r_50_c4",
"detectron2_fasterrcnn_r_50_dc5",
"detectron2_fasterrcnn_r_50_fpn",
"detectron2_maskrcnn_r_101_c4",
"detectron2_maskrcnn_r_101_fpn",
"detectron2_maskrcnn_r_50_c4",
"detectron2_maskrcnn_r_50_fpn",
"llama_v2_7b_16h",
"mobilenet_v2_quantized_qat",
"phi_1_5 resnet50_quantized_qat",
"BlenderbotForCausalLM",
"DALLE2_pytorch",
"moco",
"timm_efficientdet",
"ghostnet_100",
"inception_v3",
"mobilevit_s",
"pytorch_CycleGAN_and_pix2pix",
"vision_maskrcnn",
"dlrm",
"resnet50",
"dm_nfnet_f0",
}
DO_NOT_CAST_INPUTS = {"stable_diffusion"}
# Maps a benchmark model name to a list of status codes. For any listed entry, we'll
# capture TORCH_COMPILE_DEBUG logs in CI runs and preserve them (i.e., for upload) if
# the result status matches one listed.
CI_PRESERVE_COMPILE_DEBUG = {
# For example:
# "mnasnet1_0": ["fail_accuracy"],
}
@functools.lru_cache(maxsize=1)
def load_yaml_file(filename):
filepath = os.path.join(os.path.dirname(__file__), filename)
with open(filepath) as f:
data = yaml.safe_load(f)
internal_file_path = os.path.join(os.path.dirname(__file__), "fb", filename)
if os.path.exists(internal_file_path):
with open(internal_file_path) as f:
internal_data = yaml.safe_load(f)
data.update(internal_data)
def flatten(lst):
for item in lst:
if isinstance(item, list):
yield from flatten(item)
else:
yield item
def maybe_list_to_set(obj):
if isinstance(obj, dict):
return {k: maybe_list_to_set(v) for k, v in obj.items()}
if isinstance(obj, list):
return set(flatten(obj))
return obj
return maybe_list_to_set(data)
def model_specified_by_path(path_and_class_str):
return ":" in path_and_class_str
def load_model_from_path(path_and_class_str):
configs = {}
for kvstr in path_and_class_str.split(","):
k, v = kvstr.split(":")
configs[k] = v
for name in ["path", "class"]:
if name not in configs:
raise RuntimeError(
"Invalid --only arguments. Check help message for the correct format"
)
path = configs["path"]
class_name = configs["class"]
if path[:1] != "/":
raise RuntimeError(
"Use absolute path since dynamo may change the current working directory which makes using relative path tricky"
)
spec = importlib.util.spec_from_file_location("module_name", path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
model_class = getattr(module, class_name)
assert issubclass(model_class, torch.nn.Module)
model = model_class()
assert hasattr(model, "get_example_inputs")
inputs = model.get_example_inputs()
return model, inputs
def write_outputs(filename, headers, row, upload_to_benchmark_db: bool = True):
"""
Write both CSV and JSON outputs using the original CSV output interface
"""
global disable_output
if disable_output:
return
output_csv(filename, headers, row)
if upload_to_benchmark_db:
output_json(filename, headers, row)
def output_csv(filename, headers, row):
if os.path.exists(filename):
with open(filename) as fd:
lines = list(csv.reader(fd)) or [[]]
if headers and len(headers) > len(lines[0]):
# if prior results failed the header might not be filled in yet
lines[0] = headers
else:
headers = lines[0]
else:
lines = [headers]
lines.append([(f"{x:.6f}" if isinstance(x, float) else x) for x in row])
with open(filename, "w") as fd:
writer = csv.writer(fd, lineterminator="\n")
for line in lines:
writer.writerow(list(line) + ["0"] * (len(headers) - len(line)))
def output_json(filename, headers, row):
"""
Write the result into JSON format, so that it can be uploaded to the benchmark database
to be displayed on OSS dashboard. The JSON format is defined at
https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
"""
origin = ""
if "torchbench" in filename:
origin = "torchbench"
elif "huggingface" in filename:
origin = "huggingface"
elif "timm_models" in filename:
origin = "timm_models"
extra_info = {
"device": current_device,
"quantization": current_quantization,
"batch_size": current_batch_size,
}
if current_settings:
extra_info.update(current_settings)
mapping_headers = {headers[i]: v for i, v in enumerate(row)}
with open(f"{os.path.splitext(filename)[0]}.json", "a") as f:
for header, value in mapping_headers.items():
# These headers are not metric names
if header in ("dev", "name", "batch_size"):
continue
# Make sure that the record is valid
if not current_name:
continue
record = {
"benchmark": {
"name": "TorchInductor",
"mode": current_mode,
"dtype": current_dtype,
"extra_info": extra_info,
},
"model": {
"name": current_name,
"type": "OSS model",
"backend": current_backend,
"origins": [origin],
},
}
# NB: When the metric is accuracy, its value is actually a string, i.e. pass, and
# not a number. ClickHouse doesn't support mix types atm. It has a Variant type
# https://clickhouse.com/docs/en/sql-reference/data-types/variant, but this isn't
# recommended by CH team themselves. The workaround here is to store that value
# in the extra_info field instead.
if isinstance(value, str):
record["metric"] = {
"name": header,
"extra_info": {"benchmark_values": [value]},
}
else:
record["metric"] = {
"name": header,
"benchmark_values": [value],
}
print(json.dumps(record), file=f)
def get_suite_from_model_iter_fn(model_iter_fn):
# TODO: This is a bit of a hack
suite = None
if (runner := getattr(model_iter_fn, "__self__", None)) and hasattr(
runner, "suite_name"
):
suite = runner.suite_name
return suite
def output_signpost(data, args, suite, error=None):
from torch.utils._stats import simple_call_counter
data = data.copy()
if "name" not in data:
data["name"] = current_name
if "dev" not in data:
data["dev"] = current_device
filtered_args = vars(args).copy()
# I generated this list by reading through all the configs and dropping
# ones that looked irrelevant or redundant
for k in [
"filter",
"exclude",
"exclude_exact",
"dump_raw_metrics",
"log_operator_inputs",
"distributed_master_port",
"skip_accuracy_check",
"generate_aot_autograd_stats",
"output",
"output_directory",
"disable_output",
"export_profiler_trace",
"profiler_trace_name",
"explain",
"stats",
"print_memory",
"print_compilation_time",
"print_dataframe_summary",
"print_graph_breaks",
"log_graph_breaks",
"timing",
"progress",
"timeout",
"per_process_memory_fraction",
"minify",
"verbose",
"quiet",
"print_fx",
"print_aten_ops",
"log_conv_args",
"recompile_profiler",
"find_batch_sizes",
# Redundant
"batch_size",
"batch_size_file",
"only",
"diff_branch",
"tag",
"coverage",
"overhead",
"speedup_dynamo_ts",
"speedup_fx2trt",
"speedup_fx2trt_fp16",
"accuracy",
"performance",
"tolerance",
]:
del filtered_args[k]
event_name = "unknown"
if args.accuracy:
event_name = "accuracy"
elif args.quantization:
event_name = "quantization"
elif args.performance:
event_name = "performance"
from torch._dynamo.utils import calculate_time_spent, compilation_time_metrics
wall_time_by_phase = calculate_time_spent()
open_source_signpost(
subsystem="dynamo_benchmark",
name=event_name,
parameters=json.dumps(
{
**data,
# TODO: Arguably the rest of these should be in the CSV too
"suite": suite,
# Better than using compile_times utils directly
# NB: Externally, compilation_metrics colloquially refers to
# the coarse-grained phase timings, even though internally
# they are called something else
"compilation_metrics": wall_time_by_phase,
"agg_compilation_metrics": {
k: sum(v) for k, v in compilation_time_metrics.items()
},
"detailed_compilation_metrics": compilation_time_metrics,
"simple_call_counter": simple_call_counter,
# NB: args has training vs inference
"args": filtered_args,
"error": error,
}
),
)
return wall_time_by_phase["total_wall_time"]
def nothing(f):
return f
@functools.cache
def patch_torch_manual_seed():
    """Make torch manual seed deterministic. Helps with accuracy testing."""
    def deterministic_torch_manual_seed(*args, **kwargs):
        from torch._C import default_generator
        # Ignore any caller-supplied seed and always use a fixed value so
        # that every benchmark run draws the same random numbers.
        seed = 1337
        if HAS_CUDA:
            import torch.cuda
            # Only touch the CUDA RNG when it is safe to (re)initialize CUDA
            # in this process (see torch.cuda._is_in_bad_fork).
            if not torch.cuda._is_in_bad_fork():
                torch.cuda.manual_seed_all(seed)
        if HAS_XPU:
            import torch.xpu
            if not torch.xpu._is_in_bad_fork():
                torch.xpu.manual_seed_all(seed)
        return default_generator.manual_seed(seed)
    # Monkey-patch: all subsequent torch.manual_seed calls go through the
    # fixed-seed version above. @functools.cache makes re-patching a no-op.
    torch.manual_seed = deterministic_torch_manual_seed
def empty_gpu_cache(device):
    """
    Explicitly empty gpu cache to avoid OOM in subsequent run.

    Recognized devices are "cuda", "xpu" and "mps"; for any other device this
    logs a warning and does nothing.
    """
    if device not in ["cuda", "xpu", "mps"]:
        # Fix: the warning previously listed only [cuda, xpu] even though the
        # check above also accepts "mps"; keep the message in sync.
        log.warning(
            "Trying to call the empty_gpu_cache for device: %s, which is not in list [cuda, xpu, mps]",
            device,
        )
        return
    # torch.cuda / torch.xpu / torch.mps all expose an empty_cache() API.
    getattr(torch, device).empty_cache()
def synchronize():
    """No-op device-synchronization placeholder."""
def summarize_graph_break(filename):
    """
    Sorts and de-dupes the graphs breaks on the reason string. Note that this
    function is just a best effort to reduce the logging information. We could
    miss some graph breaks because of de-duping. We can further refine this
    function as need arises.

    Reads ``<filename minus .csv>_graph_breaks.csv`` (if it exists) and writes
    a ``*_deduped.csv`` next to it.
    """
    # Fix: str.removesuffix strips the literal ".csv" suffix. The previous
    # rstrip(".csv") removed any trailing run of the characters {., c, s, v},
    # mangling names such as "metrics.csv" -> "metri".
    log_file = f"{filename.removesuffix('.csv')}_graph_breaks.csv"
    if os.path.exists(log_file):
        df = pd.read_csv(log_file)
        df = df.sort_values("reason").drop_duplicates(subset="reason")
        # Specialize for multi tensor sgd as reason is not identical
        multi_tensor_sgd_row = df.loc[df["reason"].str.contains("_multi_tensor_sgd")]
        if len(multi_tensor_sgd_row):
            df = df[
                ~df["reason"].str.contains("_multi_tensor_sgd")
            ]  # Drop all sgd rows
            df = pd.concat(
                [df, pd.DataFrame([multi_tensor_sgd_row.iloc[0]])], axis=0
            )  # Add back a single row
        df.to_csv(f"{log_file.removesuffix('.csv')}_deduped.csv", index=False)
def print_summary(filename, print_dataframe=False):
    """Print per-tag (or whole-file) summaries of a benchmark results CSV,
    then summarize its graph-break log.

    No-op when *filename* is falsy or does not exist.
    """
    if not filename:
        return
    if not os.path.exists(filename):
        return
    df = pd.read_csv(filename)
    if "tag" not in df.columns:
        print_summary_table(df, print_dataframe=print_dataframe)
    else:
        for tag in df.tag.unique():
            # A tag of "0.0000" marks a failed run; skip it.
            if tag == "0.0000":
                continue
            print(f"\nSummary for tag={tag}:")
            print_summary_table(df[df.tag == tag], print_dataframe=print_dataframe)
    summarize_graph_break(filename)
def print_summary_table(data, print_dataframe=False):
    """
    Print one aggregate-statistic line per column of a benchmark results
    dataframe; when *print_dataframe* is True, dump the full dataframe first.
    """
    if print_dataframe:
        pd.options.display.max_rows = 1000
        pd.options.display.max_columns = 1000
        pd.options.display.width = 2000
        print(data)
    width = max(map(len, data.columns))
    for col in data.columns:
        try:
            if col in ("dev", "name", "batch_size", "tag"):
                continue  # identifying columns; nothing to aggregate
            elif col in ("pct_ops", "pct_time"):
                print(col.ljust(width), f"{data[col].mean():.3%}")
            elif col in ("graphs", "graph_calls", "captured_ops", "total_ops"):
                print(col.ljust(width), f"{data[col].mean():.3f}")
            # Fix: these three tests previously read `col in ("name")`-style,
            # which is a *substring* check against a plain string (parentheses
            # without a comma do not form a tuple), so e.g. a column named
            # "latency" would have matched "compilation_latency". Use equality.
            elif col == "compilation_latency":
                print(col.ljust(width), f"mean={data[col].mean():.3f} seconds")
            elif col == "compression_ratio":
                print(col.ljust(width), f"mean={data[col].mean():.3f}x")
            elif col == "accuracy":
                pass_rate = (data[col] == "pass").mean()
                print(col.ljust(width), f"pass_rate={100 * pass_rate:.2f}%")
            else:
                cdata = data[col]
                print(
                    col.ljust(width),
                    f"gmean={gmean(cdata):.2f}x mean={cdata.mean():.3f}x",
                )
        except Exception:
            # Best effort: a malformed column must not abort the summary.
            pass
def tensor_is_on_xla(tensors):
    """Return True when any tensor nested inside *tensors* lives on an XLA device."""
    found = False

    def visit(t: torch.Tensor):
        nonlocal found
        if t.device.type == "xla":
            found = True

    tree_map_only(torch.Tensor, visit, tensors)
    return found
def timed(
    model,
    model_iter_fn,
    example_inputs,
    times=1,
    return_result=False,
    collect_outputs=False,
    batch_size=None,
):
    """Run ``model_iter_fn(model, example_inputs)`` *times* times and return
    the accumulated wall-clock time (optionally also the last result).

    When *batch_size* is given, each iteration re-runs with the batch
    dimension randomly resized by up to +/-20% (a dynamic-shape stress test).
    For XLA tensors, ``xm.mark_step()`` / ``xm.wait_device_ops()`` bracket the
    timed region so pending device work is included in the measurement.
    """
    use_xla = tensor_is_on_xla(example_inputs)
    synchronize()
    if batch_size:
        # Fixed seeding so the random batch-size variation below is
        # reproducible across runs.
        patch_torch_manual_seed()
    if use_xla:
        # Flush and drain any pre-existing XLA work before timing starts.
        xm.mark_step()
        xm.wait_device_ops()
    def vary_batch(t: torch.Tensor, new_batch_size) -> torch.Tensor:
        # Resize the first dimension whose extent equals the original batch
        # size; other dimensions are left untouched.
        for i, s in enumerate(t.size()):
            if s == batch_size:
                # If new batch is smaller, we truncate
                if new_batch_size < batch_size:
                    indexer = [slice(None)] * t.ndim
                    indexer[i] = slice(0, new_batch_size)
                    t = t[tuple(indexer)]
                # If new batch is greater, we just duplicate the last row
                # over and over until we hit the desired batch size
                elif new_batch_size > batch_size:
                    indexer = [slice(None)] * t.ndim
                    indexer[i] = -1
                    last_slice = t[tuple(indexer)].unsqueeze(i)
                    repeat_shape = list(t.shape)
                    repeat_shape[i] = new_batch_size - batch_size
                    padding = last_slice.expand(*repeat_shape)
                    t = torch.cat([t, padding], dim=i)
                break
        return t
    time_total = 0
    # Dont collect outputs to correctly measure timing
    for i in range(times):
        # If batch_size is 1, it too often collides with other non batch size
        # dimensions resulting in errors.
        if batch_size and batch_size > 1:
            # Calculate new batch size by varying the original batch size by up to 20%
            # Ensure it's at least greater than 1
            variation = random.uniform(0.8, 1.2)
            new_batch_size = max(2, int(batch_size * variation))
            example_inputs = tree_map_only(
                torch.Tensor, lambda x: vary_batch(x, new_batch_size), example_inputs
            )
        # Put this call inside the loop to reset the seed for each iteration.
        # Don't include reset_rng_state() to correctly measure timing
        reset_rng_state(use_xla)
        t_iter_begin = time.perf_counter()
        result = model_iter_fn(model, example_inputs, collect_outputs=collect_outputs)
        # instead of calling sync on result_list, we should call mark_step.
        # In training case, result_list may be empty, but we want to
        # send all the pending graphs for compilation.
        if use_xla:
            # For the model running on regular torchxla (baseline), we need the
            # mark step to send the accumulated graph for compilation.
            #
            # For the model running with dynamo/torchxla bridge, in training case,
            # we need the mark step to send the optimizer graph out for
            # compilation.
            xm.mark_step()
        t_iter_end = time.perf_counter()
        time_total += t_iter_end - t_iter_begin
    # Account for device work still in flight after the last iteration.
    t_0 = time.perf_counter()
    if use_xla:
        xm.wait_device_ops()
    synchronize()
    t_1 = time.perf_counter()
    time_total += t_1 - t_0
    return (time_total, result) if return_result else time_total
# Typing overloads only (no runtime behavior): a dict input is passed through
# as the kwargs mapping, any other sequence becomes the positional-args tuple.
@overload
def _normalize_bench_inputs(example_inputs: _D) -> tuple[tuple[()], _D]: ...
@overload
def _normalize_bench_inputs(
    example_inputs: Sequence[_T],
) -> tuple[tuple[_T, ...], dict[str, Any]]: ...
def _normalize_bench_inputs(example_inputs):
# NOTE(bowbao): For huggingface benchmark, example_inputs are formatted as dictionary,
# and consumed like `model(**example_inputs)`.
# For other benchmarks, example_inputs are formatted as tuple and consumed
# like `model(*example_inputs)`.
if isinstance(example_inputs, dict):
return (), example_inputs
else:
return tuple(example_inputs), {}
def _register_dataclass_output_as_pytree(example_outputs) -> None:
# NOTE(angelayi): For huggingface benchmark, some example outputs are
# formatted as a dataclass which pytree cannot consume. So we want
# to register the pytree implementation here
example_outputs_flat = pytree.tree_leaves(example_outputs)
output_dataclass_types = [
type(out) for out in example_outputs_flat if dataclasses.is_dataclass(type(out))
]
for output_type in output_dataclass_types:
from torch._export.utils import register_dataclass_as_pytree_node
register_dataclass_as_pytree_node(
output_type,
serialized_type_name=f"{output_type.__module__}.{output_type.__name__}",
)
| CI |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/input_util_test.py | {
"start": 19286,
"end": 21052
} | class ____(test_util.DTensorBaseTest):
@parameterized.parameters(
{
'mesh_dims': [(MESH_DIM_BATCH, 8)],
'layout_specs': [UNSHARDED],
'batch_dim': None,
'counts': [1],
}, {
'mesh_dims': [(MESH_DIM_BATCH, 8)],
'layout_specs': [MESH_DIM_BATCH],
'batch_dim': None,
'counts': [8],
}, {
'mesh_dims': [(MESH_DIM_BATCH, 8)],
'layout_specs': [MESH_DIM_BATCH],
'batch_dim': MESH_DIM_BATCH,
'counts': [1],
}, {
'mesh_dims': [(MESH_DIM_BATCH, 2),
(MESH_DIM_HEIGHT, 4),
(MESH_DIM_WIDTH, 2)],
'layout_specs': [UNSHARDED, MESH_DIM_HEIGHT],
'batch_dim': None,
'counts': [1, 4],
}, {
'mesh_dims': [(MESH_DIM_BATCH, 2),
(MESH_DIM_HEIGHT, 4),
(MESH_DIM_WIDTH, 2)],
'layout_specs': [MESH_DIM_BATCH, MESH_DIM_WIDTH, MESH_DIM_HEIGHT],
'batch_dim': None,
'counts': [2, 2, 4],
}, {
'mesh_dims': [(MESH_DIM_BATCH, 2),
(MESH_DIM_HEIGHT, 4),
(MESH_DIM_WIDTH, 2)],
'layout_specs': [MESH_DIM_BATCH, MESH_DIM_WIDTH, MESH_DIM_HEIGHT],
'batch_dim': MESH_DIM_BATCH,
'counts': [1, 2, 4],
})
def testShardCounts(self, mesh_dims, layout_specs, batch_dim, counts):
num_devices = np.prod([size for _, size in mesh_dims])
mesh = mesh_util.create_mesh(
mesh_dims=mesh_dims, devices=['CPU:%d' % i for i in range(num_devices)])
layout = Layout(layout_specs, mesh)
self.assertEqual(input_util._shard_counts(layout, batch_dim), counts)
| InputUtilHelpersTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed3.py | {
"start": 274,
"end": 394
} | class ____(Parent1, extra_items=int | None):
pass
# This should generate an error because of a type mismatch.
| Child1_1 |
python | spyder-ide__spyder | spyder/plugins/completion/providers/languageserver/transport/main.py | {
"start": 2990,
"end": 3076
} | class ____(Exception):
"""Terminal exception descriptor."""
pass
| TerminateSignal |
python | pennersr__django-allauth | allauth/socialaccount/providers/trainingpeaks/views.py | {
"start": 228,
"end": 1896
} | class ____(OAuth2Adapter):
# https://github.com/TrainingPeaks/PartnersAPI/wiki/OAuth
provider_id = "trainingpeaks"
def get_settings(self):
"""Provider settings"""
return app_settings.PROVIDERS.get(self.provider_id, {})
def get_hostname(self):
"""Return hostname depending on sandbox setting"""
settings = self.get_settings()
if settings.get("USE_PRODUCTION"):
return "trainingpeaks.com"
return "sandbox.trainingpeaks.com"
@property
def access_token_url(self):
return "https://oauth." + self.get_hostname() + "/oauth/token"
@property
def authorize_url(self):
return "https://oauth." + self.get_hostname() + "/OAuth/Authorize"
@property
def profile_url(self):
return "https://api." + self.get_hostname() + "/v1/athlete/profile"
@property
def api_hostname(self):
"""Return https://api.hostname.tld"""
return "https://api." + self.get_hostname()
# https://oauth.sandbox.trainingpeaks.com/oauth/deauthorize
scope_delimiter = " "
def complete_login(self, request, app, token, **kwargs):
headers = {"Authorization": "Bearer {0}".format(token.token)}
response = (
get_adapter().get_requests_session().get(self.profile_url, headers=headers)
)
response.raise_for_status()
extra_data = response.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(TrainingPeaksOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(TrainingPeaksOAuth2Adapter)
| TrainingPeaksOAuth2Adapter |
python | eth-brownie__brownie | brownie/_cli/console.py | {
"start": 3942,
"end": 12150
} | class ____(code.InteractiveConsole):
# This value is used as the `input` arg when initializing `prompt_toolkit.PromptSession`.
# During testing there is a conflict with how pytest suppresses stdin/out, so stdin is
# replaced with `prompt_toolkit.input.defaults.create_pipe_input`
prompt_input = None
def __init__(
self,
project: Optional[Project] = None,
extra_locals: Optional[Dict[str, Any]] = None,
exit_on_continue: bool = False,
):
"""
Launch the Brownie console.
Arguments
---------
project : `Project`, optional
Active Brownie project to include in the console's local namespace.
extra_locals: dict, optional
Additional variables to add to the console namespace.
exit_on_continue: bool, optional
If True, the `continue` command causes the console to
raise a SystemExit with error message "continue".
"""
console_settings: Dict[str, Any] = CONFIG.settings["console"]
locals_dict = {i: getattr(brownie, i) for i in brownie.__all__}
locals_dict.update(
_dir=dir, dir=self._dir, exit=_Quitter("exit"), quit=_Quitter("quit"), _console=self
)
self.exit_on_continue: Final = exit_on_continue
if exit_on_continue:
# add continue to the locals so we can quickly reach it via completion hints
locals_dict["continue"] = True
if project:
project._update_and_register(locals_dict) # type: ignore [arg-type]
# only make GUI available if Tkinter is installed
try:
Gui = import_module("brownie._gui").Gui
locals_dict["Gui"] = Gui
except ImportError:
pass
if extra_locals:
locals_dict.update(extra_locals)
# create prompt session object
history_file = str(_get_data_folder().joinpath(".history").absolute())
kwargs: Dict[str, Any] = {}
if console_settings["show_colors"]:
kwargs.update(
lexer=PygmentsLexer(PythonLexer),
style=style_from_pygments_cls(get_style_by_name(console_settings["color_style"])),
include_default_pygments_style=False,
)
if console_settings["auto_suggest"]:
kwargs["auto_suggest"] = ConsoleAutoSuggest(self, locals_dict)
if console_settings["completions"]:
kwargs["completer"] = ConsoleCompleter(self, locals_dict)
if console_settings["editing_mode"]:
kwargs["editing_mode"] = EditingMode(console_settings["editing_mode"].upper())
self.compile_mode = "single"
self.prompt_session = PromptSession( # type: ignore [var-annotated]
history=SanitizedFileHistory(history_file, locals_dict),
input=self.prompt_input,
key_bindings=KeyBindings(),
**kwargs,
)
# add custom bindings
key_bindings = self.prompt_session.key_bindings
key_bindings.add(Keys.BracketedPaste)(self.paste_event)
key_bindings.add("c-i")(self.tab_event)
key_bindings.get_bindings_for_keys(("c-i",))[-1].filter = lambda: not self.tab_filter()
# modify default bindings
key_bindings = load_key_bindings()
key_bindings.get_bindings_for_keys(("c-i",))[-1].filter = self.tab_filter
if console_settings["auto_suggest"]:
# remove the builtin binding for auto-suggest acceptance
key_bindings = self.prompt_session.app.key_bindings
accept_binding = key_bindings.get_bindings_for_keys(("right",))[0]
key_bindings._bindings2.remove(accept_binding.handler)
# this is required because of a pytest conflict when using the debugging console
if sys.platform == "win32":
import colorama
colorama.init()
self.console_printer = ConsolePrinter(self)
super().__init__(locals_dict)
def _dir(self, obj: Any = None) -> None:
# console dir method, for simplified and colorful output
if obj is None:
results = [(k, v) for k, v in self.locals.items() if not k.startswith("_")]
elif hasattr(obj, "__console_dir__"):
results = [(i, getattr(obj, i)) for i in obj.__console_dir__]
else:
results = [(i, getattr(obj, i)) for i in dir(obj) if not i.startswith("_")]
results = sorted(results, key=lambda k: k[0])
self.write(f"[{f'{color}, '.join(_dir_color(i[1]) + i[0] for i in results)}{color}]\n")
def _console_write(self, obj: Any) -> None:
text = repr(obj)
try:
if obj and isinstance(obj, dict):
text = color.pretty_dict(obj)
elif obj and isinstance(obj, (tuple, list, set)):
text = color.pretty_sequence(obj)
except (SyntaxError, NameError):
pass
if CONFIG.settings["console"]["show_colors"]:
text = color.highlight(text)
self.write(text)
def interact(self, *args: Any, **kwargs: Any) -> None:
# temporarily modify mode so that container repr's display correctly for console
cli_mode = CONFIG.argv["cli"]
CONFIG.argv["cli"] = "console"
try:
super().interact(*args, **kwargs)
finally:
CONFIG.argv["cli"] = cli_mode
def raw_input(self, prompt=""):
self.console_printer.start()
try:
return self.prompt_session.prompt(prompt)
finally:
self.console_printer.finish()
def showsyntaxerror(self, filename: Optional[str] = None) -> None: # type: ignore [override]
tb = color.format_tb(sys.exc_info()[1]) # type: ignore [arg-type]
self.write(tb + "\n")
def showtraceback(self) -> None:
tb = color.format_tb(sys.exc_info()[1], start=1) # type: ignore [arg-type]
self.write(tb + "\n")
def resetbuffer(self) -> None:
# reset the input buffer and parser cache
_parser_cache.clear()
return super().resetbuffer()
def runsource(self, source, filename="<input>", symbol="single"):
mode = self.compile_mode
self.compile_mode = "single"
if source == "continue" and self.exit_on_continue:
# used to differentiate exit and continue for pytest interactive debugging
raise SystemExit("continue")
try:
code = self.compile(source, filename, mode)
except (OverflowError, SyntaxError, ValueError):
self.showsyntaxerror(filename)
return False
if code is None:
# multiline statement
return True
try:
self.compile(source, filename, "eval")
code = self.compile(f"__ret_value__ = {source}", filename, "exec")
except Exception:
pass
self.runcode(code) # type: ignore [arg-type]
if "__ret_value__" in self.locals and self.locals["__ret_value__"] is not None:
return_value = self.locals.pop("__ret_value__") # type: ignore [attr-defined]
self._console_write(return_value)
return False
def paste_event(self, event):
# pasting multiline data temporarily switches to multiline mode
data: str = event.data
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
if "\n" in data:
self.compile_mode = "exec"
event.current_buffer.insert_text(data)
def tab_event(self, event):
# for multiline input, pressing tab at the start of a new line adds four spaces
event.current_buffer.insert_text(" ")
def tab_filter(self):
# detect multiline input with no meaningful text on the current line
return not self.buffer or self.prompt_session.app.current_buffer.text.strip()
def _dir_color(obj: Any) -> str:
if type(obj).__name__ == "module":
return color("brownie blue")
elif hasattr(obj, "_dir_color"):
return color(obj._dir_color)
return bright_cyan if callable(obj) else bright_blue
@final
@mypyc_attr(native_class=False)
| Console |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_sensors.py | {
"start": 25218,
"end": 27228
} | class ____(ReadonlyGraphQLContextTestMatrix):
def test_start_sensor_failure(self, graphql_context: WorkspaceRequestContext):
sensor_selector = infer_sensor_selector(
graphql_context, "always_no_config_sensor_with_tags_and_metadata"
)
result = execute_dagster_graphql(
graphql_context,
START_SENSORS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data
assert result.data["startSensor"]["__typename"] == "UnauthorizedError"
def test_stop_sensor_failure(self, graphql_context: WorkspaceRequestContext):
sensor_selector = infer_sensor_selector(
graphql_context, "always_no_config_sensor_with_tags_and_metadata"
)
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_STATUS_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data["sensorOrError"]["sensorState"]["hasStartPermission"] is False
assert result.data["sensorOrError"]["sensorState"]["hasStopPermission"] is False
sensor_id = result.data["sensorOrError"]["sensorState"]["id"]
stop_result = execute_dagster_graphql(
graphql_context,
STOP_SENSORS_QUERY,
variables={
"id": sensor_id,
},
)
assert stop_result.data["stopSensor"]["__typename"] == "UnauthorizedError"
def test_set_cursor_failure(self, graphql_context: WorkspaceRequestContext):
selector = infer_sensor_selector(
graphql_context, "always_no_config_sensor_with_tags_and_metadata"
)
result = execute_dagster_graphql(
graphql_context,
SET_SENSOR_CURSOR_MUTATION,
variables={"sensorSelector": selector, "cursor": "foo"},
)
assert result.data
assert result.data["setSensorCursor"]["__typename"] == "UnauthorizedError"
| TestReadonlySensorPermissions |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 4860,
"end": 6395
} | class ____(ASTBase):
def __init__(
self,
identOrOp: ASTIdentifier | ASTOperator,
templateArgs: ASTTemplateArgs | None,
) -> None:
self.identOrOp = identOrOp
self.templateArgs = templateArgs
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTNestedNameElement):
return NotImplemented
return (
self.identOrOp == other.identOrOp
and self.templateArgs == other.templateArgs
)
def __hash__(self) -> int:
return hash((self.identOrOp, self.templateArgs))
def is_operator(self) -> bool:
return False
def get_id(self, version: int) -> str:
res = self.identOrOp.get_id(version)
if self.templateArgs:
res += self.templateArgs.get_id(version)
return res
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.identOrOp)
if self.templateArgs:
res += transform(self.templateArgs)
return res
def describe_signature(
self,
signode: TextElement,
mode: str,
env: BuildEnvironment,
prefix: str,
symbol: Symbol,
) -> None:
t_args = str(self.templateArgs) if self.templateArgs is not None else ''
self.identOrOp.describe_signature(signode, mode, env, prefix, t_args, symbol)
if self.templateArgs is not None:
self.templateArgs.describe_signature(signode, 'markType', env, symbol)
| ASTNestedNameElement |
python | langchain-ai__langchain | libs/partners/openai/tests/unit_tests/chat_models/test_base.py | {
"start": 11607,
"end": 36379
} | class ____:
def __init__(self, chunk_list: list) -> None:
self.current_chunk = 0
self.chunk_list = chunk_list
self.chunk_num = len(chunk_list)
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
tb: TracebackType | None,
) -> None:
pass
def __iter__(self) -> MockSyncContextManager:
return self
def __next__(self) -> dict:
if self.current_chunk < self.chunk_num:
chunk = self.chunk_list[self.current_chunk]
self.current_chunk += 1
return chunk
raise StopIteration
GLM4_STREAM_META = """{"id":"20240722102053e7277a4f94e848248ff9588ed37fb6e6","created":1721614853,"model":"glm-4","choices":[{"index":0,"delta":{"role":"assistant","content":"\u4eba\u5de5\u667a\u80fd"}}]}
{"id":"20240722102053e7277a4f94e848248ff9588ed37fb6e6","created":1721614853,"model":"glm-4","choices":[{"index":0,"delta":{"role":"assistant","content":"\u52a9\u624b"}}]}
{"id":"20240722102053e7277a4f94e848248ff9588ed37fb6e6","created":1721614853,"model":"glm-4","choices":[{"index":0,"delta":{"role":"assistant","content":","}}]}
{"id":"20240722102053e7277a4f94e848248ff9588ed37fb6e6","created":1721614853,"model":"glm-4","choices":[{"index":0,"delta":{"role":"assistant","content":"\u4f60\u53ef\u4ee5"}}]}
{"id":"20240722102053e7277a4f94e848248ff9588ed37fb6e6","created":1721614853,"model":"glm-4","choices":[{"index":0,"delta":{"role":"assistant","content":"\u53eb\u6211"}}]}
{"id":"20240722102053e7277a4f94e848248ff9588ed37fb6e6","created":1721614853,"model":"glm-4","choices":[{"index":0,"delta":{"role":"assistant","content":"AI"}}]}
{"id":"20240722102053e7277a4f94e848248ff9588ed37fb6e6","created":1721614853,"model":"glm-4","choices":[{"index":0,"delta":{"role":"assistant","content":"\u52a9\u624b"}}]}
{"id":"20240722102053e7277a4f94e848248ff9588ed37fb6e6","created":1721614853,"model":"glm-4","choices":[{"index":0,"delta":{"role":"assistant","content":"。"}}]}
{"id":"20240722102053e7277a4f94e848248ff9588ed37fb6e6","created":1721614853,"model":"glm-4","choices":[{"index":0,"finish_reason":"stop","delta":{"role":"assistant","content":""}}],"usage":{"prompt_tokens":13,"completion_tokens":10,"total_tokens":23}}
[DONE]""" # noqa: E501
@pytest.fixture
def mock_glm4_completion() -> list:
list_chunk_data = GLM4_STREAM_META.split("\n")
result_list = []
for msg in list_chunk_data:
if msg != "[DONE]":
result_list.append(json.loads(msg))
return result_list
async def test_glm4_astream(mock_glm4_completion: list) -> None:
llm_name = "glm-4"
llm = ChatOpenAI(model=llm_name, stream_usage=True)
mock_client = AsyncMock()
async def mock_create(*args: Any, **kwargs: Any) -> MockAsyncContextManager:
return MockAsyncContextManager(mock_glm4_completion)
mock_client.create = mock_create
usage_chunk = mock_glm4_completion[-1]
usage_metadata: UsageMetadata | None = None
with patch.object(llm, "async_client", mock_client):
async for chunk in llm.astream("你的名字叫什么?只回答名字"):
assert isinstance(chunk, AIMessageChunk)
if chunk.usage_metadata is not None:
usage_metadata = chunk.usage_metadata
assert usage_metadata is not None
assert usage_metadata["input_tokens"] == usage_chunk["usage"]["prompt_tokens"]
assert usage_metadata["output_tokens"] == usage_chunk["usage"]["completion_tokens"]
assert usage_metadata["total_tokens"] == usage_chunk["usage"]["total_tokens"]
def test_glm4_stream(mock_glm4_completion: list) -> None:
llm_name = "glm-4"
llm = ChatOpenAI(model=llm_name, stream_usage=True)
mock_client = MagicMock()
def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager:
return MockSyncContextManager(mock_glm4_completion)
mock_client.create = mock_create
usage_chunk = mock_glm4_completion[-1]
usage_metadata: UsageMetadata | None = None
with patch.object(llm, "client", mock_client):
for chunk in llm.stream("你的名字叫什么?只回答名字"):
assert isinstance(chunk, AIMessageChunk)
if chunk.usage_metadata is not None:
usage_metadata = chunk.usage_metadata
assert usage_metadata is not None
assert usage_metadata["input_tokens"] == usage_chunk["usage"]["prompt_tokens"]
assert usage_metadata["output_tokens"] == usage_chunk["usage"]["completion_tokens"]
assert usage_metadata["total_tokens"] == usage_chunk["usage"]["total_tokens"]
DEEPSEEK_STREAM_DATA = """{"id":"d3610c24e6b42518a7883ea57c3ea2c3","choices":[{"index":0,"delta":{"content":"","role":"assistant"},"finish_reason":null,"logprobs":null}],"created":1721630271,"model":"deepseek-chat","system_fingerprint":"fp_7e0991cad4","object":"chat.completion.chunk","usage":null}
{"choices":[{"delta":{"content":"我是","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"Deep","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"Seek","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":" Chat","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":",","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"一个","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"由","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"深度","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"求","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"索","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"公司","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"开发的","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"智能","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"助手","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"。","role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":null}
{"choices":[{"delta":{"content":"","role":null},"finish_reason":"stop","index":0,"logprobs":null}],"created":1721630271,"id":"d3610c24e6b42518a7883ea57c3ea2c3","model":"deepseek-chat","object":"chat.completion.chunk","system_fingerprint":"fp_7e0991cad4","usage":{"completion_tokens":15,"prompt_tokens":11,"total_tokens":26}}
[DONE]""" # noqa: E501
@pytest.fixture
def mock_deepseek_completion() -> list[dict]:
list_chunk_data = DEEPSEEK_STREAM_DATA.split("\n")
result_list = []
for msg in list_chunk_data:
if msg != "[DONE]":
result_list.append(json.loads(msg))
return result_list
async def test_deepseek_astream(mock_deepseek_completion: list) -> None:
llm_name = "deepseek-chat"
llm = ChatOpenAI(model=llm_name, stream_usage=True)
mock_client = AsyncMock()
async def mock_create(*args: Any, **kwargs: Any) -> MockAsyncContextManager:
return MockAsyncContextManager(mock_deepseek_completion)
mock_client.create = mock_create
usage_chunk = mock_deepseek_completion[-1]
usage_metadata: UsageMetadata | None = None
with patch.object(llm, "async_client", mock_client):
async for chunk in llm.astream("你的名字叫什么?只回答名字"):
assert isinstance(chunk, AIMessageChunk)
if chunk.usage_metadata is not None:
usage_metadata = chunk.usage_metadata
assert usage_metadata is not None
assert usage_metadata["input_tokens"] == usage_chunk["usage"]["prompt_tokens"]
assert usage_metadata["output_tokens"] == usage_chunk["usage"]["completion_tokens"]
assert usage_metadata["total_tokens"] == usage_chunk["usage"]["total_tokens"]
def test_deepseek_stream(mock_deepseek_completion: list) -> None:
llm_name = "deepseek-chat"
llm = ChatOpenAI(model=llm_name, stream_usage=True)
mock_client = MagicMock()
def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager:
return MockSyncContextManager(mock_deepseek_completion)
mock_client.create = mock_create
usage_chunk = mock_deepseek_completion[-1]
usage_metadata: UsageMetadata | None = None
with patch.object(llm, "client", mock_client):
for chunk in llm.stream("你的名字叫什么?只回答名字"):
assert isinstance(chunk, AIMessageChunk)
if chunk.usage_metadata is not None:
usage_metadata = chunk.usage_metadata
assert usage_metadata is not None
assert usage_metadata["input_tokens"] == usage_chunk["usage"]["prompt_tokens"]
assert usage_metadata["output_tokens"] == usage_chunk["usage"]["completion_tokens"]
assert usage_metadata["total_tokens"] == usage_chunk["usage"]["total_tokens"]
OPENAI_STREAM_DATA = """{"id":"chatcmpl-9nhARrdUiJWEMd5plwV1Gc9NCjb9M","object":"chat.completion.chunk","created":1721631035,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_18cc0f1fa0","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}],"usage":null}
{"id":"chatcmpl-9nhARrdUiJWEMd5plwV1Gc9NCjb9M","object":"chat.completion.chunk","created":1721631035,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_18cc0f1fa0","choices":[{"index":0,"delta":{"content":"我是"},"logprobs":null,"finish_reason":null}],"usage":null}
{"id":"chatcmpl-9nhARrdUiJWEMd5plwV1Gc9NCjb9M","object":"chat.completion.chunk","created":1721631035,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_18cc0f1fa0","choices":[{"index":0,"delta":{"content":"助手"},"logprobs":null,"finish_reason":null}],"usage":null}
{"id":"chatcmpl-9nhARrdUiJWEMd5plwV1Gc9NCjb9M","object":"chat.completion.chunk","created":1721631035,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_18cc0f1fa0","choices":[{"index":0,"delta":{"content":"。"},"logprobs":null,"finish_reason":null}],"usage":null}
{"id":"chatcmpl-9nhARrdUiJWEMd5plwV1Gc9NCjb9M","object":"chat.completion.chunk","created":1721631035,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_18cc0f1fa0","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null}
{"id":"chatcmpl-9nhARrdUiJWEMd5plwV1Gc9NCjb9M","object":"chat.completion.chunk","created":1721631035,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_18cc0f1fa0","choices":[],"usage":{"prompt_tokens":14,"completion_tokens":3,"total_tokens":17}}
[DONE]""" # noqa: E501
@pytest.fixture
def mock_openai_completion() -> list[dict]:
list_chunk_data = OPENAI_STREAM_DATA.split("\n")
result_list = []
for msg in list_chunk_data:
if msg != "[DONE]":
result_list.append(json.loads(msg))
return result_list
async def test_openai_astream(mock_openai_completion: list) -> None:
llm_name = "gpt-4o"
llm = ChatOpenAI(model=llm_name)
assert llm.stream_usage
mock_client = AsyncMock()
async def mock_create(*args: Any, **kwargs: Any) -> MockAsyncContextManager:
return MockAsyncContextManager(mock_openai_completion)
mock_client.create = mock_create
usage_chunk = mock_openai_completion[-1]
usage_metadata: UsageMetadata | None = None
with patch.object(llm, "async_client", mock_client):
async for chunk in llm.astream("你的名字叫什么?只回答名字"):
assert isinstance(chunk, AIMessageChunk)
if chunk.usage_metadata is not None:
usage_metadata = chunk.usage_metadata
assert usage_metadata is not None
assert usage_metadata["input_tokens"] == usage_chunk["usage"]["prompt_tokens"]
assert usage_metadata["output_tokens"] == usage_chunk["usage"]["completion_tokens"]
assert usage_metadata["total_tokens"] == usage_chunk["usage"]["total_tokens"]
def test_openai_stream(mock_openai_completion: list) -> None:
llm_name = "gpt-4o"
llm = ChatOpenAI(model=llm_name)
assert llm.stream_usage
mock_client = MagicMock()
call_kwargs = []
def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager:
call_kwargs.append(kwargs)
return MockSyncContextManager(mock_openai_completion)
mock_client.create = mock_create
usage_chunk = mock_openai_completion[-1]
usage_metadata: UsageMetadata | None = None
with patch.object(llm, "client", mock_client):
for chunk in llm.stream("你的名字叫什么?只回答名字"):
assert isinstance(chunk, AIMessageChunk)
if chunk.usage_metadata is not None:
usage_metadata = chunk.usage_metadata
assert call_kwargs[-1]["stream_options"] == {"include_usage": True}
assert usage_metadata is not None
assert usage_metadata["input_tokens"] == usage_chunk["usage"]["prompt_tokens"]
assert usage_metadata["output_tokens"] == usage_chunk["usage"]["completion_tokens"]
assert usage_metadata["total_tokens"] == usage_chunk["usage"]["total_tokens"]
# Verify no streaming outside of default base URL or clients
for param, value in {
"stream_usage": False,
"openai_proxy": "http://localhost:7890",
"openai_api_base": "https://example.com/v1",
"base_url": "https://example.com/v1",
"client": mock_client,
"root_client": mock_client,
"async_client": mock_client,
"root_async_client": mock_client,
"http_client": httpx.Client(),
"http_async_client": httpx.AsyncClient(),
}.items():
llm = ChatOpenAI(model=llm_name, **{param: value}) # type: ignore[arg-type]
assert not llm.stream_usage
with patch.object(llm, "client", mock_client):
_ = list(llm.stream("..."))
assert "stream_options" not in call_kwargs[-1]
@pytest.fixture
def mock_completion() -> dict:
return {
"id": "chatcmpl-7fcZavknQda3SQ",
"object": "chat.completion",
"created": 1689989000,
"model": "gpt-3.5-turbo-0613",
"choices": [
{
"index": 0,
"message": {"role": "assistant", "content": "Bar Baz", "name": "Erick"},
"finish_reason": "stop",
}
],
}
@pytest.fixture
def mock_client(mock_completion: dict) -> MagicMock:
rtn = MagicMock()
mock_create = MagicMock()
mock_resp = MagicMock()
mock_resp.headers = {"content-type": "application/json"}
mock_resp.parse.return_value = mock_completion
mock_create.return_value = mock_resp
rtn.with_raw_response.create = mock_create
rtn.create.return_value = mock_completion
return rtn
@pytest.fixture
def mock_async_client(mock_completion: dict) -> AsyncMock:
rtn = AsyncMock()
mock_create = AsyncMock()
mock_resp = MagicMock()
mock_resp.parse.return_value = mock_completion
mock_create.return_value = mock_resp
rtn.with_raw_response.create = mock_create
rtn.create.return_value = mock_completion
return rtn
def test_openai_invoke(mock_client: MagicMock) -> None:
llm = ChatOpenAI()
with patch.object(llm, "client", mock_client):
res = llm.invoke("bar")
assert res.content == "Bar Baz"
# headers are not in response_metadata if include_response_headers not set
assert "headers" not in res.response_metadata
assert mock_client.with_raw_response.create.called
async def test_openai_ainvoke(mock_async_client: AsyncMock) -> None:
llm = ChatOpenAI()
with patch.object(llm, "async_client", mock_async_client):
res = await llm.ainvoke("bar")
assert res.content == "Bar Baz"
# headers are not in response_metadata if include_response_headers not set
assert "headers" not in res.response_metadata
assert mock_async_client.with_raw_response.create.called
@pytest.mark.parametrize(
"model",
[
"gpt-3.5-turbo",
"gpt-4",
"gpt-3.5-0125",
"gpt-4-0125-preview",
"gpt-4-turbo-preview",
"gpt-4-vision-preview",
],
)
def test__get_encoding_model(model: str) -> None:
ChatOpenAI(model=model)._get_encoding_model()
def test_openai_invoke_name(mock_client: MagicMock) -> None:
llm = ChatOpenAI()
with patch.object(llm, "client", mock_client):
messages = [HumanMessage(content="Foo", name="Katie")]
res = llm.invoke(messages)
call_args, call_kwargs = mock_client.with_raw_response.create.call_args
assert len(call_args) == 0 # no positional args
call_messages = call_kwargs["messages"]
assert len(call_messages) == 1
assert call_messages[0]["role"] == "user"
assert call_messages[0]["content"] == "Foo"
assert call_messages[0]["name"] == "Katie"
# check return type has name
assert res.content == "Bar Baz"
assert res.name == "Erick"
def test_function_calls_with_tool_calls(mock_client: MagicMock) -> None:
# Test that we ignore function calls if tool_calls are present
llm = ChatOpenAI(model="gpt-4.1-mini")
tool_call_message = AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "get_weather",
"arguments": '{"location": "Boston"}',
}
},
tool_calls=[
{
"name": "get_weather",
"args": {"location": "Boston"},
"id": "abc123",
"type": "tool_call",
}
],
)
messages = [
HumanMessage("What's the weather in Boston?"),
tool_call_message,
ToolMessage(content="It's sunny.", name="get_weather", tool_call_id="abc123"),
]
with patch.object(llm, "client", mock_client):
_ = llm.invoke(messages)
_, call_kwargs = mock_client.with_raw_response.create.call_args
call_messages = call_kwargs["messages"]
tool_call_message_payload = call_messages[1]
assert "tool_calls" in tool_call_message_payload
assert "function_call" not in tool_call_message_payload
# Test we don't ignore function calls if tool_calls are not present
cast(AIMessage, messages[1]).tool_calls = []
with patch.object(llm, "client", mock_client):
_ = llm.invoke(messages)
_, call_kwargs = mock_client.with_raw_response.create.call_args
call_messages = call_kwargs["messages"]
tool_call_message_payload = call_messages[1]
assert "function_call" in tool_call_message_payload
assert "tool_calls" not in tool_call_message_payload
def test_custom_token_counting() -> None:
def token_encoder(text: str) -> list[int]:
return [1, 2, 3]
llm = ChatOpenAI(custom_get_token_ids=token_encoder)
assert llm.get_token_ids("foo") == [1, 2, 3]
def test_format_message_content() -> None:
content: Any = "hello"
assert content == _format_message_content(content)
content = None
assert content == _format_message_content(content)
content = []
assert content == _format_message_content(content)
content = [
{"type": "text", "text": "What is in this image?"},
{"type": "image_url", "image_url": {"url": "url.com"}},
]
assert content == _format_message_content(content)
content = [
{"type": "text", "text": "hello"},
{
"type": "tool_use",
"id": "toolu_01A09q90qw90lq917835lq9",
"name": "get_weather",
"input": {"location": "San Francisco, CA", "unit": "celsius"},
},
]
assert _format_message_content(content) == [{"type": "text", "text": "hello"}]
# Standard multi-modal inputs
contents = [
{"type": "image", "source_type": "url", "url": "https://..."}, # v0
{"type": "image", "url": "https://..."}, # v1
]
expected = [{"type": "image_url", "image_url": {"url": "https://..."}}]
for content in contents:
assert expected == _format_message_content([content])
contents = [
{
"type": "image",
"source_type": "base64",
"data": "<base64 data>",
"mime_type": "image/png",
},
{"type": "image", "base64": "<base64 data>", "mime_type": "image/png"},
]
expected = [
{
"type": "image_url",
"image_url": {"url": "data:image/png;base64,<base64 data>"},
}
]
for content in contents:
assert expected == _format_message_content([content])
contents = [
{
"type": "file",
"source_type": "base64",
"data": "<base64 data>",
"mime_type": "application/pdf",
"filename": "my_file",
},
{
"type": "file",
"base64": "<base64 data>",
"mime_type": "application/pdf",
"filename": "my_file",
},
]
expected = [
{
"type": "file",
"file": {
"filename": "my_file",
"file_data": "data:application/pdf;base64,<base64 data>",
},
}
]
for content in contents:
assert expected == _format_message_content([content])
# Test warn if PDF is missing a filename
pdf_block = {
"type": "file",
"base64": "<base64 data>",
"mime_type": "application/pdf",
}
expected = [
# N.B. this format is invalid for OpenAI
{
"type": "file",
"file": {"file_data": "data:application/pdf;base64,<base64 data>"},
}
]
with pytest.warns(match="filename"):
assert expected == _format_message_content([pdf_block])
contents = [
{"type": "file", "source_type": "id", "id": "file-abc123"},
{"type": "file", "file_id": "file-abc123"},
]
expected = [{"type": "file", "file": {"file_id": "file-abc123"}}]
for content in contents:
assert expected == _format_message_content([content])
| MockSyncContextManager |
python | getsentry__sentry | src/sentry/grouping/api.py | {
"start": 4887,
"end": 5088
} | class ____(ProjectGroupingConfigLoader):
"""The currently active grouping config"""
option_name = "sentry:grouping_config"
cache_prefix = "grouping-enhancements:"
| PrimaryGroupingConfigLoader |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/73_class_generic_tuple_default.py | {
"start": 0,
"end": 43
} | class ____[*T = *tuple[int, str]]:
x: T
| Foo |
python | lepture__authlib | authlib/oauth2/rfc6749/requests.py | {
"start": 4705,
"end": 5095
} | class ____:
def __init__(self, method, uri, headers=None):
self.method = method
self.uri = uri
self.headers = headers or {}
self.payload = None
@property
def data(self):
deprecate(
"'request.data' is deprecated in favor of 'request.payload.data'",
version="1.8",
)
return self.payload.data
| JsonRequest |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 144341,
"end": 145322
} | class ____(TestCase):
def test_defaults(self):
self.assertEqual(mi.only([]), None)
self.assertEqual(mi.only([1]), 1)
self.assertRaises(ValueError, lambda: mi.only([1, 2]))
def test_custom_value(self):
self.assertEqual(mi.only([], default='!'), '!')
self.assertEqual(mi.only([1], default='!'), 1)
self.assertRaises(ValueError, lambda: mi.only([1, 2], default='!'))
def test_custom_exception(self):
self.assertEqual(mi.only([], too_long=RuntimeError), None)
self.assertEqual(mi.only([1], too_long=RuntimeError), 1)
self.assertRaises(
RuntimeError, lambda: mi.only([1, 2], too_long=RuntimeError)
)
def test_default_exception_message(self):
self.assertRaisesRegex(
ValueError,
"Expected exactly one item in iterable, "
"but got 'foo', 'bar', and perhaps more",
lambda: mi.only(['foo', 'bar', 'baz']),
)
| OnlyTests |
python | falconry__falcon | falcon/errors.py | {
"start": 85023,
"end": 87373
} | class ____(HTTPError):
"""508 Loop Detected.
The 508 (Loop Detected) status code indicates that the server
terminated an operation because it encountered an infinite loop while
processing a request with "Depth: infinity". This status indicates
that the entire operation failed.
(See also: RFC 5842, Section 7.2)
All the arguments are defined as keyword-only.
Keyword Args:
title (str): Error title (default '508 Loop Detected').
description (str): Human-friendly description of the error, along with
a helpful suggestion or two.
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
*,
title: str | None = None,
description: str | None = None,
headers: HeaderArg | None = None,
**kwargs: HTTPErrorKeywordArguments,
):
super().__init__(
status.HTTP_508,
title=title,
description=description,
headers=headers,
**kwargs, # type: ignore[arg-type]
)
| HTTPLoopDetected |
python | explosion__spaCy | spacy/lang/zh/__init__.py | {
"start": 917,
"end": 1303
} | class ____(str, Enum):
char = "char"
jieba = "jieba"
pkuseg = "pkuseg"
@classmethod
def values(cls):
return list(cls.__members__.keys())
def create_chinese_tokenizer(segmenter: Segmenter = Segmenter.char):
def chinese_tokenizer_factory(nlp):
return ChineseTokenizer(nlp.vocab, segmenter=segmenter)
return chinese_tokenizer_factory
| Segmenter |
python | wandb__wandb | wandb/automations/_utils.py | {
"start": 2465,
"end": 4259
} | class ____(TriggeredActionConfig):
"""Prepares action configuration data for saving an automation."""
# NOTE: `QueueJobActionInput` for defining a Launch job is deprecated,
# so while it's allowed here to update EXISTING mutations, we don't
# currently expose it through the public API to create NEW automations.
queue_job_action_input: Optional[QueueJobActionInput] = None
notification_action_input: Optional[SendNotification] = None
generic_webhook_action_input: Optional[SendWebhook] = None
no_op_action_input: Optional[DoNothing] = None
def prepare_action_config_input(obj: SavedAction | InputAction) -> dict[str, Any]:
"""Nests the action input under the correct key for `TriggeredActionConfig`.
This is necessary to conform to the schemas for:
- `CreateFilterTriggerInput`
- `UpdateFilterTriggerInput`
"""
# Delegate to inner validators to convert SavedAction -> InputAction types, if needed.
obj = parse_input_action(obj)
return InputActionConfig(**{ACTION_CONFIG_KEYS[obj.action_type]: obj}).model_dump()
def prepare_event_filter_input(
obj: _WrappedSavedEventFilter | MongoLikeFilter | RunMetricFilter,
) -> str:
"""Unnests (if needed) and serializes an `EventFilter` input to JSON.
This is necessary to conform to the schemas for:
- `CreateFilterTriggerInput`
- `UpdateFilterTriggerInput`
"""
# Input event filters are nested one level deeper than saved event filters.
# Note that this is NOT the case for run/run metric filters.
#
# Yes, this is confusing. It's also necessary to conform to under-the-hood
# schemas and logic in the backend.
if isinstance(obj, _WrappedSavedEventFilter):
return to_json(obj.filter)
return to_json(obj)
| InputActionConfig |
python | walkccc__LeetCode | solutions/345. Reverse Vowels of a String/345.py | {
"start": 0,
"end": 372
} | class ____:
def reverseVowels(self, s: str) -> str:
chars = list(s)
VOWELS = 'aeiouAEIOU'
l = 0
r = len(s) - 1
while l < r:
while l < r and chars[l] not in VOWELS:
l += 1
while l < r and chars[r] not in VOWELS:
r -= 1
chars[l], chars[r] = chars[r], chars[l]
l += 1
r -= 1
return ''.join(chars)
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 394726,
"end": 396618
} | class ____(VegaLiteSchema):
"""
Feature schema wrapper.
A feature object which contains a geometry and associated properties.
https://tools.ietf.org/html/rfc7946#section-3.2
Parameters
----------
geometry : dict, :class:`Point`, :class:`Polygon`, :class:`Geometry`, :class:`LineString`, :class:`MultiPoint`, :class:`MultiPolygon`, :class:`MultiLineString`, :class:`GeometryCollection`
The feature's geometry
properties : dict, :class:`GeoJsonProperties`, None
Properties associated with this feature.
type : Literal['Feature']
Specifies the type of GeoJSON object.
bbox : :class:`BBox`, Sequence[float]
Bounding box of the coordinate range of the object's Geometries, Features, or
Feature Collections. The value of the bbox member is an array of length 2*n where n
is the number of dimensions represented in the contained geometries, with all axes
of the most southwesterly point followed by all axes of the more northeasterly
point. The axes order of a bbox follows the axes order of geometries.
https://tools.ietf.org/html/rfc7946#section-5
id : str, float
A value that uniquely identifies this feature in a
https://tools.ietf.org/html/rfc7946#section-3.2.
"""
_schema = {"$ref": "#/definitions/Feature"}
def __init__(
self,
geometry: Optional[SchemaBase | Map] = Undefined,
properties: Optional[SchemaBase | Map | None] = Undefined,
type: Optional[Literal["Feature"]] = Undefined,
bbox: Optional[SchemaBase | Sequence[float]] = Undefined,
id: Optional[str | float] = Undefined,
**kwds,
):
super().__init__(
geometry=geometry,
properties=properties,
type=type,
bbox=bbox,
id=id,
**kwds,
)
| Feature |
python | justquick__django-activity-stream | actstream/gfk.py | {
"start": 195,
"end": 478
} | class ____(Manager):
"""
A manager that returns a GFKQuerySet instead of a regular QuerySet.
"""
def get_query_set(self):
return GFKQuerySet(self.model)
get_queryset = get_query_set
def none(self):
return self.get_queryset().none()
| GFKManager |
python | huggingface__transformers | src/transformers/models/openai/modeling_openai.py | {
"start": 4421,
"end": 5286
} | class ____(nn.Module):
def __init__(self, n_positions, config, scale=False):
super().__init__()
nx = config.n_embd
self.attn = Attention(nx, n_positions, config, scale)
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x, attention_mask=None, output_attentions=False):
attn_outputs = self.attn(
x,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
a = attn_outputs[0]
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
outputs = [h] + attn_outputs[1:]
return outputs
# Copied from transformers.models.xlm.modeling_xlm.XLMSequenceSummary with XLM->OpenAIGPT
| Block |
python | sympy__sympy | sympy/polys/matrices/ddm.py | {
"start": 2957,
"end": 32362
} | class ____(list):
"""Dense matrix based on polys domain elements
This is a list subclass and is a wrapper for a list of lists that supports
basic matrix arithmetic +, -, *, **.
"""
fmt = 'dense'
is_DFM = False
is_DDM = True
def __init__(self, rowslist, shape, domain):
if not (isinstance(rowslist, list) and all(type(row) is list for row in rowslist)):
raise DMBadInputError("rowslist must be a list of lists")
m, n = shape
if len(rowslist) != m or any(len(row) != n for row in rowslist):
raise DMBadInputError("Inconsistent row-list/shape")
super().__init__([i.copy() for i in rowslist])
self.shape = (m, n)
self.rows = m
self.cols = n
self.domain = domain
def getitem(self, i, j):
return self[i][j]
def setitem(self, i, j, value):
self[i][j] = value
def extract_slice(self, slice1, slice2):
ddm = [row[slice2] for row in self[slice1]]
rows = len(ddm)
cols = len(ddm[0]) if ddm else len(range(self.shape[1])[slice2])
return DDM(ddm, (rows, cols), self.domain)
def extract(self, rows, cols):
ddm = []
for i in rows:
rowi = self[i]
ddm.append([rowi[j] for j in cols])
return DDM(ddm, (len(rows), len(cols)), self.domain)
@classmethod
def from_list(cls, rowslist, shape, domain):
"""
Create a :class:`DDM` from a list of lists.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.ddm import DDM
>>> A = DDM.from_list([[ZZ(0), ZZ(1)], [ZZ(-1), ZZ(0)]], (2, 2), ZZ)
>>> A
[[0, 1], [-1, 0]]
>>> A == DDM([[ZZ(0), ZZ(1)], [ZZ(-1), ZZ(0)]], (2, 2), ZZ)
True
See Also
========
from_list_flat
"""
return cls(rowslist, shape, domain)
@classmethod
def from_ddm(cls, other):
return other.copy()
def to_list(self):
"""
Convert to a list of lists.
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.ddm import DDM
>>> A = DDM([[1, 2], [3, 4]], (2, 2), QQ)
>>> A.to_list()
[[1, 2], [3, 4]]
See Also
========
to_list_flat
sympy.polys.matrices.domainmatrix.DomainMatrix.to_list
"""
return [row[:] for row in self]
def to_list_flat(self):
"""
Convert to a flat list of elements.
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.ddm import DDM
>>> A = DDM([[1, 2], [3, 4]], (2, 2), QQ)
>>> A.to_list_flat()
[1, 2, 3, 4]
>>> A == DDM.from_list_flat(A.to_list_flat(), A.shape, A.domain)
True
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.to_list_flat
"""
flat = []
for row in self:
flat.extend(row)
return flat
@classmethod
def from_list_flat(cls, flat, shape, domain):
"""
Create a :class:`DDM` from a flat list of elements.
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.ddm import DDM
>>> A = DDM.from_list_flat([1, 2, 3, 4], (2, 2), QQ)
>>> A
[[1, 2], [3, 4]]
>>> A == DDM.from_list_flat(A.to_list_flat(), A.shape, A.domain)
True
See Also
========
to_list_flat
sympy.polys.matrices.domainmatrix.DomainMatrix.from_list_flat
"""
assert type(flat) is list
rows, cols = shape
if not (len(flat) == rows*cols):
raise DMBadInputError("Inconsistent flat-list shape")
lol = [flat[i*cols:(i+1)*cols] for i in range(rows)]
return cls(lol, shape, domain)
def flatiter(self):
return chain.from_iterable(self)
def flat(self):
items = []
for row in self:
items.extend(row)
return items
def to_flat_nz(self):
"""
Convert to a flat list of nonzero elements and data.
Explanation
===========
This is used to operate on a list of the elements of a matrix and then
reconstruct a matrix using :meth:`from_flat_nz`. Zero elements are
included in the list but that may change in the future.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> A = DDM([[1, 2], [3, 4]], (2, 2), QQ)
>>> elements, data = A.to_flat_nz()
>>> elements
[1, 2, 3, 4]
>>> A == DDM.from_flat_nz(elements, data, A.domain)
True
See Also
========
from_flat_nz
sympy.polys.matrices.sdm.SDM.to_flat_nz
sympy.polys.matrices.domainmatrix.DomainMatrix.to_flat_nz
"""
return self.to_sdm().to_flat_nz()
@classmethod
def from_flat_nz(cls, elements, data, domain):
"""
Reconstruct a :class:`DDM` after calling :meth:`to_flat_nz`.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> A = DDM([[1, 2], [3, 4]], (2, 2), QQ)
>>> elements, data = A.to_flat_nz()
>>> elements
[1, 2, 3, 4]
>>> A == DDM.from_flat_nz(elements, data, A.domain)
True
See Also
========
to_flat_nz
sympy.polys.matrices.sdm.SDM.from_flat_nz
sympy.polys.matrices.domainmatrix.DomainMatrix.from_flat_nz
"""
return SDM.from_flat_nz(elements, data, domain).to_ddm()
def to_dod(self):
"""
Convert to a dictionary of dictionaries (dod) format.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> A = DDM([[1, 2], [3, 4]], (2, 2), QQ)
>>> A.to_dod()
{0: {0: 1, 1: 2}, 1: {0: 3, 1: 4}}
See Also
========
from_dod
sympy.polys.matrices.sdm.SDM.to_dod
sympy.polys.matrices.domainmatrix.DomainMatrix.to_dod
"""
dod = {}
for i, row in enumerate(self):
row = {j:e for j, e in enumerate(row) if e}
if row:
dod[i] = row
return dod
@classmethod
def from_dod(cls, dod, shape, domain):
"""
Create a :class:`DDM` from a dictionary of dictionaries (dod) format.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> dod = {0: {0: 1, 1: 2}, 1: {0: 3, 1: 4}}
>>> A = DDM.from_dod(dod, (2, 2), QQ)
>>> A
[[1, 2], [3, 4]]
See Also
========
to_dod
sympy.polys.matrices.sdm.SDM.from_dod
sympy.polys.matrices.domainmatrix.DomainMatrix.from_dod
"""
rows, cols = shape
lol = [[domain.zero] * cols for _ in range(rows)]
for i, row in dod.items():
for j, element in row.items():
lol[i][j] = element
return DDM(lol, shape, domain)
def to_dok(self):
"""
Convert :class:`DDM` to dictionary of keys (dok) format.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> A = DDM([[1, 2], [3, 4]], (2, 2), QQ)
>>> A.to_dok()
{(0, 0): 1, (0, 1): 2, (1, 0): 3, (1, 1): 4}
See Also
========
from_dok
sympy.polys.matrices.sdm.SDM.to_dok
sympy.polys.matrices.domainmatrix.DomainMatrix.to_dok
"""
dok = {}
for i, row in enumerate(self):
for j, element in enumerate(row):
if element:
dok[i, j] = element
return dok
@classmethod
def from_dok(cls, dok, shape, domain):
"""
Create a :class:`DDM` from a dictionary of keys (dok) format.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> dok = {(0, 0): 1, (0, 1): 2, (1, 0): 3, (1, 1): 4}
>>> A = DDM.from_dok(dok, (2, 2), QQ)
>>> A
[[1, 2], [3, 4]]
See Also
========
to_dok
sympy.polys.matrices.sdm.SDM.from_dok
sympy.polys.matrices.domainmatrix.DomainMatrix.from_dok
"""
rows, cols = shape
lol = [[domain.zero] * cols for _ in range(rows)]
for (i, j), element in dok.items():
lol[i][j] = element
return DDM(lol, shape, domain)
def iter_values(self):
"""
Iterate over the non-zero values of the matrix.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> A = DDM([[QQ(1), QQ(0)], [QQ(3), QQ(4)]], (2, 2), QQ)
>>> list(A.iter_values())
[1, 3, 4]
See Also
========
iter_items
to_list_flat
sympy.polys.matrices.domainmatrix.DomainMatrix.iter_values
"""
for row in self:
yield from filter(None, row)
def iter_items(self):
"""
Iterate over indices and values of nonzero elements of the matrix.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> A = DDM([[QQ(1), QQ(0)], [QQ(3), QQ(4)]], (2, 2), QQ)
>>> list(A.iter_items())
[((0, 0), 1), ((1, 0), 3), ((1, 1), 4)]
See Also
========
iter_values
to_dok
sympy.polys.matrices.domainmatrix.DomainMatrix.iter_items
"""
for i, row in enumerate(self):
for j, element in enumerate(row):
if element:
yield (i, j), element
def to_ddm(self):
"""
Convert to a :class:`DDM`.
This just returns ``self`` but exists to parallel the corresponding
method in other matrix types like :class:`~.SDM`.
See Also
========
to_sdm
to_dfm
to_dfm_or_ddm
sympy.polys.matrices.sdm.SDM.to_ddm
sympy.polys.matrices.domainmatrix.DomainMatrix.to_ddm
"""
return self
def to_sdm(self):
"""
Convert to a :class:`~.SDM`.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> A = DDM([[1, 2], [3, 4]], (2, 2), QQ)
>>> A.to_sdm()
{0: {0: 1, 1: 2}, 1: {0: 3, 1: 4}}
>>> type(A.to_sdm())
<class 'sympy.polys.matrices.sdm.SDM'>
See Also
========
SDM
sympy.polys.matrices.sdm.SDM.to_ddm
"""
return SDM.from_list(self, self.shape, self.domain)
@doctest_depends_on(ground_types=['flint'])
def to_dfm(self):
"""
Convert to :class:`~.DDM` to :class:`~.DFM`.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> A = DDM([[1, 2], [3, 4]], (2, 2), QQ)
>>> A.to_dfm()
[[1, 2], [3, 4]]
>>> type(A.to_dfm())
<class 'sympy.polys.matrices._dfm.DFM'>
See Also
========
DFM
sympy.polys.matrices._dfm.DFM.to_ddm
"""
return DFM(list(self), self.shape, self.domain)
@doctest_depends_on(ground_types=['flint'])
def to_dfm_or_ddm(self):
"""
Convert to :class:`~.DFM` if possible or otherwise return self.
Examples
========
>>> from sympy.polys.matrices.ddm import DDM
>>> from sympy import QQ
>>> A = DDM([[1, 2], [3, 4]], (2, 2), QQ)
>>> A.to_dfm_or_ddm()
[[1, 2], [3, 4]]
>>> type(A.to_dfm_or_ddm())
<class 'sympy.polys.matrices._dfm.DFM'>
See Also
========
to_dfm
to_ddm
sympy.polys.matrices.domainmatrix.DomainMatrix.to_dfm_or_ddm
"""
if DFM._supports_domain(self.domain):
return self.to_dfm()
return self
def convert_to(self, K):
Kold = self.domain
if K == Kold:
return self.copy()
rows = [[K.convert_from(e, Kold) for e in row] for row in self]
return DDM(rows, self.shape, K)
def __str__(self):
rowsstr = ['[%s]' % ', '.join(map(str, row)) for row in self]
return '[%s]' % ', '.join(rowsstr)
def __repr__(self):
cls = type(self).__name__
rows = list.__repr__(self)
return '%s(%s, %s, %s)' % (cls, rows, self.shape, self.domain)
def __eq__(self, other):
if not isinstance(other, DDM):
return False
return (super().__eq__(other) and self.domain == other.domain)
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def zeros(cls, shape, domain):
z = domain.zero
m, n = shape
rowslist = [[z] * n for _ in range(m)]
return DDM(rowslist, shape, domain)
@classmethod
def ones(cls, shape, domain):
one = domain.one
m, n = shape
rowlist = [[one] * n for _ in range(m)]
return DDM(rowlist, shape, domain)
@classmethod
def eye(cls, size, domain):
if isinstance(size, tuple):
m, n = size
elif isinstance(size, int):
m = n = size
one = domain.one
ddm = cls.zeros((m, n), domain)
for i in range(min(m, n)):
ddm[i][i] = one
return ddm
def copy(self):
copyrows = [row[:] for row in self]
return DDM(copyrows, self.shape, self.domain)
def transpose(self):
rows, cols = self.shape
if rows:
ddmT = ddm_transpose(self)
else:
ddmT = [[]] * cols
return DDM(ddmT, (cols, rows), self.domain)
def __add__(a, b):
if not isinstance(b, DDM):
return NotImplemented
return a.add(b)
def __sub__(a, b):
if not isinstance(b, DDM):
return NotImplemented
return a.sub(b)
def __neg__(a):
return a.neg()
def __mul__(a, b):
if b in a.domain:
return a.mul(b)
else:
return NotImplemented
def __rmul__(a, b):
if b in a.domain:
return a.mul(b)
else:
return NotImplemented
def __matmul__(a, b):
if isinstance(b, DDM):
return a.matmul(b)
else:
return NotImplemented
@classmethod
def _check(cls, a, op, b, ashape, bshape):
if a.domain != b.domain:
msg = "Domain mismatch: %s %s %s" % (a.domain, op, b.domain)
raise DMDomainError(msg)
if ashape != bshape:
msg = "Shape mismatch: %s %s %s" % (a.shape, op, b.shape)
raise DMShapeError(msg)
def add(a, b):
"""a + b"""
a._check(a, '+', b, a.shape, b.shape)
c = a.copy()
ddm_iadd(c, b)
return c
def sub(a, b):
"""a - b"""
a._check(a, '-', b, a.shape, b.shape)
c = a.copy()
ddm_isub(c, b)
return c
def neg(a):
"""-a"""
b = a.copy()
ddm_ineg(b)
return b
def mul(a, b):
c = a.copy()
ddm_imul(c, b)
return c
def rmul(a, b):
c = a.copy()
ddm_irmul(c, b)
return c
def matmul(a, b):
"""a @ b (matrix product)"""
m, o = a.shape
o2, n = b.shape
a._check(a, '*', b, o, o2)
c = a.zeros((m, n), a.domain)
ddm_imatmul(c, a, b)
return c
def mul_elementwise(a, b):
assert a.shape == b.shape
assert a.domain == b.domain
c = [[aij * bij for aij, bij in zip(ai, bi)] for ai, bi in zip(a, b)]
return DDM(c, a.shape, a.domain)
def hstack(A, *B):
"""Horizontally stacks :py:class:`~.DDM` matrices.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import DDM
>>> A = DDM([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> B = DDM([[ZZ(5), ZZ(6)], [ZZ(7), ZZ(8)]], (2, 2), ZZ)
>>> A.hstack(B)
[[1, 2, 5, 6], [3, 4, 7, 8]]
>>> C = DDM([[ZZ(9), ZZ(10)], [ZZ(11), ZZ(12)]], (2, 2), ZZ)
>>> A.hstack(B, C)
[[1, 2, 5, 6, 9, 10], [3, 4, 7, 8, 11, 12]]
"""
Anew = list(A.copy())
rows, cols = A.shape
domain = A.domain
for Bk in B:
Bkrows, Bkcols = Bk.shape
assert Bkrows == rows
assert Bk.domain == domain
cols += Bkcols
for i, Bki in enumerate(Bk):
Anew[i].extend(Bki)
return DDM(Anew, (rows, cols), A.domain)
def vstack(A, *B):
"""Vertically stacks :py:class:`~.DDM` matrices.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import DDM
>>> A = DDM([[ZZ(1), ZZ(2)], [ZZ(3), ZZ(4)]], (2, 2), ZZ)
>>> B = DDM([[ZZ(5), ZZ(6)], [ZZ(7), ZZ(8)]], (2, 2), ZZ)
>>> A.vstack(B)
[[1, 2], [3, 4], [5, 6], [7, 8]]
>>> C = DDM([[ZZ(9), ZZ(10)], [ZZ(11), ZZ(12)]], (2, 2), ZZ)
>>> A.vstack(B, C)
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]
"""
Anew = list(A.copy())
rows, cols = A.shape
domain = A.domain
for Bk in B:
Bkrows, Bkcols = Bk.shape
assert Bkcols == cols
assert Bk.domain == domain
rows += Bkrows
Anew.extend(Bk.copy())
return DDM(Anew, (rows, cols), A.domain)
def applyfunc(self, func, domain):
elements = [list(map(func, row)) for row in self]
return DDM(elements, self.shape, domain)
def nnz(a):
"""Number of non-zero entries in :py:class:`~.DDM` matrix.
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.nnz
"""
return sum(sum(map(bool, row)) for row in a)
def scc(a):
"""Strongly connected components of a square matrix *a*.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import DDM
>>> A = DDM([[ZZ(1), ZZ(0)], [ZZ(0), ZZ(1)]], (2, 2), ZZ)
>>> A.scc()
[[0], [1]]
See also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.scc
"""
return a.to_sdm().scc()
@classmethod
def diag(cls, values, domain):
"""Returns a square diagonal matrix with *values* on the diagonal.
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import DDM
>>> DDM.diag([ZZ(1), ZZ(2), ZZ(3)], ZZ)
[[1, 0, 0], [0, 2, 0], [0, 0, 3]]
See also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.diag
"""
return SDM.diag(values, domain).to_ddm()
def rref(a):
"""Reduced-row echelon form of a and list of pivots.
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.rref
Higher level interface to this function.
sympy.polys.matrices.dense.ddm_irref
The underlying algorithm.
"""
b = a.copy()
K = a.domain
partial_pivot = K.is_RealField or K.is_ComplexField
pivots = ddm_irref(b, _partial_pivot=partial_pivot)
return b, pivots
def rref_den(a):
"""Reduced-row echelon form of a with denominator and list of pivots
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.rref_den
Higher level interface to this function.
sympy.polys.matrices.dense.ddm_irref_den
The underlying algorithm.
"""
b = a.copy()
K = a.domain
denom, pivots = ddm_irref_den(b, K)
return b, denom, pivots
def nullspace(a):
"""Returns a basis for the nullspace of a.
The domain of the matrix must be a field.
See Also
========
rref
sympy.polys.matrices.domainmatrix.DomainMatrix.nullspace
"""
rref, pivots = a.rref()
return rref.nullspace_from_rref(pivots)
def nullspace_from_rref(a, pivots=None):
"""Compute the nullspace of a matrix from its rref.
The domain of the matrix can be any domain.
Returns a tuple (basis, nonpivots).
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.nullspace
The higher level interface to this function.
"""
m, n = a.shape
K = a.domain
if pivots is None:
pivots = []
last_pivot = -1
for i in range(m):
ai = a[i]
for j in range(last_pivot+1, n):
if ai[j]:
last_pivot = j
pivots.append(j)
break
if not pivots:
return (a.eye(n, K), list(range(n)))
# After rref the pivots are all one but after rref_den they may not be.
pivot_val = a[0][pivots[0]]
basis = []
nonpivots = []
for i in range(n):
if i in pivots:
continue
nonpivots.append(i)
vec = [pivot_val if i == j else K.zero for j in range(n)]
for ii, jj in enumerate(pivots):
vec[jj] -= a[ii][i]
basis.append(vec)
basis_ddm = DDM(basis, (len(basis), n), K)
return (basis_ddm, nonpivots)
def particular(a):
return a.to_sdm().particular().to_ddm()
def det(a):
"""Determinant of a"""
m, n = a.shape
if m != n:
raise DMNonSquareMatrixError("Determinant of non-square matrix")
b = a.copy()
K = b.domain
deta = ddm_idet(b, K)
return deta
def inv(a):
"""Inverse of a"""
m, n = a.shape
if m != n:
raise DMNonSquareMatrixError("Determinant of non-square matrix")
ainv = a.copy()
K = a.domain
ddm_iinv(ainv, a, K)
return ainv
def lu(a):
"""L, U decomposition of a"""
m, n = a.shape
K = a.domain
U = a.copy()
L = a.eye(m, K)
swaps = ddm_ilu_split(L, U, K)
return L, U, swaps
def _fflu(self):
"""
Private method for Phase 1 of fraction-free LU decomposition.
Performs row operations and elimination to compute U and permutation indices.
Returns:
LU : decomposition as a single matrix.
perm (list): Permutation indices for row swaps.
"""
rows, cols = self.shape
K = self.domain
LU = self.copy()
perm = list(range(rows))
rank = 0
for j in range(min(rows, cols)):
# Skip columns where all entries are zero
if all(LU[i][j] == K.zero for i in range(rows)):
continue
# Find the first non-zero pivot in the current column
pivot_row = -1
for i in range(rank, rows):
if LU[i][j] != K.zero:
pivot_row = i
break
# If no pivot is found, skip column
if pivot_row == -1:
continue
# Swap rows to bring the pivot to the current rank
if pivot_row != rank:
LU[rank], LU[pivot_row] = LU[pivot_row], LU[rank]
perm[rank], perm[pivot_row] = perm[pivot_row], perm[rank]
# Found pivot - (Gauss-Bareiss elimination)
pivot = LU[rank][j]
for i in range(rank + 1, rows):
multiplier = LU[i][j]
# Denominator is previous pivot or 1
denominator = LU[rank - 1][rank - 1] if rank > 0 else K.one
for k in range(j + 1, cols):
LU[i][k] = K.exquo(pivot * LU[i][k] - LU[rank][k] * multiplier, denominator)
# Keep the multiplier for L matrix
LU[i][j] = multiplier
rank += 1
return LU, perm
def fflu(self):
"""
Fraction-free LU decomposition of DDM.
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.fflu
The higher-level interface to this function.
"""
rows, cols = self.shape
K = self.domain
# Phase 1: Perform row operations and get permutation
U, perm = self._fflu()
# Phase 2: Construct P, L, D matrices
# Create P from permutation
P = self.zeros((rows, rows), K)
for i, pi in enumerate(perm):
P[i][pi] = K.one
# Create L matrix
L = self.zeros((rows, rows), K)
i = j = 0
while i < rows and j < cols:
if U[i][j] != K.zero:
# Found non-zero pivot
# Diagonal entry is the pivot
L[i][i] = U[i][j]
for l in range(i + 1, rows):
# Off-diagonal entries are the multipliers
L[l][i] = U[l][j]
# zero out the entries in U
U[l][j] = K.zero
i += 1
j += 1
# Fill remaining diagonal of L with ones
for i in range(i, rows):
L[i][i] = K.one
# Create D matrix - using FLINT's approach with accumulator
D = self.zeros((rows, rows), K)
if rows >= 1:
D[0][0] = L[0][0]
di = K.one
for i in range(1, rows):
# Accumulate product of pivots
di = L[i - 1][i - 1] * L[i][i]
D[i][i] = di
return P, L, D, U
def qr(self):
"""
QR decomposition for DDM.
Returns:
- Q: Orthogonal matrix as a DDM.
- R: Upper triangular matrix as a DDM.
See Also
========
sympy.polys.matrices.domainmatrix.DomainMatrix.qr
The higher-level interface to this function.
"""
rows, cols = self.shape
K = self.domain
Q = self.copy()
R = self.zeros((min(rows, cols), cols), K)
# Check that the domain is a field
if not K.is_Field:
raise DMDomainError("QR decomposition requires a field (e.g. QQ).")
dot_cols = lambda i, j: K.sum(Q[k][i] * Q[k][j] for k in range(rows))
for j in range(cols):
for i in range(min(j, rows)):
dot_ii = dot_cols(i, i)
if dot_ii != K.zero:
R[i][j] = dot_cols(i, j) / dot_ii
for k in range(rows):
Q[k][j] -= R[i][j] * Q[k][i]
if j < rows:
dot_jj = dot_cols(j, j)
if dot_jj != K.zero:
R[j][j] = K.one
Q = Q.extract(range(rows), range(min(rows, cols)))
return Q, R
def lu_solve(a, b):
"""x where a*x = b"""
m, n = a.shape
m2, o = b.shape
a._check(a, 'lu_solve', b, m, m2)
if not a.domain.is_Field:
raise DMDomainError("lu_solve requires a field")
L, U, swaps = a.lu()
x = a.zeros((n, o), a.domain)
ddm_ilu_solve(x, L, U, swaps, b)
return x
def charpoly(a):
"""Coefficients of characteristic polynomial of a"""
K = a.domain
m, n = a.shape
if m != n:
raise DMNonSquareMatrixError("Charpoly of non-square matrix")
vec = ddm_berk(a, K)
coeffs = [vec[i][0] for i in range(n+1)]
return coeffs
def is_zero_matrix(self):
"""
Says whether this matrix has all zero entries.
"""
zero = self.domain.zero
return all(Mij == zero for Mij in self.flatiter())
def is_upper(self):
"""
Says whether this matrix is upper-triangular. True can be returned
even if the matrix is not square.
"""
zero = self.domain.zero
return all(Mij == zero for i, Mi in enumerate(self) for Mij in Mi[:i])
def is_lower(self):
"""
Says whether this matrix is lower-triangular. True can be returned
even if the matrix is not square.
"""
zero = self.domain.zero
return all(Mij == zero for i, Mi in enumerate(self) for Mij in Mi[i+1:])
def is_diagonal(self):
"""
Says whether this matrix is diagonal. True can be returned even if
the matrix is not square.
"""
return self.is_upper() and self.is_lower()
def diagonal(self):
"""
Returns a list of the elements from the diagonal of the matrix.
"""
m, n = self.shape
return [self[i][i] for i in range(min(m, n))]
def lll(A, delta=QQ(3, 4)):
return ddm_lll(A, delta=delta)
def lll_transform(A, delta=QQ(3, 4)):
return ddm_lll_transform(A, delta=delta)
from .sdm import SDM
from .dfm import DFM
| DDM |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 39299,
"end": 45984
} | class ____:
async def foo(self):
msg = ""
for candidate in CANDIDATES:
msg += (
"**{candidate.object_type} {candidate.rev}**"
" - {candidate.description}\n"
)
temp_msg = (
f"{f'{humanize_number(pos)}.': <{pound_len+2}} "
f"{balance: <{bal_len + 5}} "
f"<<{author.display_name}>>\n"
)
assert (
str(suffix_arr)
== "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert (
str(suffix_arr)
!= "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert (
str(suffix_arr)
<= "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert (
str(suffix_arr)
>= "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert (
str(suffix_arr)
< "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert (
str(suffix_arr)
> "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert (
str(suffix_arr)
in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$',"
" 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$', 'rykangaroo$',"
" 'ykangaroo$']"
)
assert (
str(suffix_arr)
not in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$',"
" 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$',"
" 'rykangaroo$', 'ykangaroo$']"
)
message = (
f"1. Go to Google Developers Console and log in with your Google account."
f"(https://console.developers.google.com/)"
f"2. You should be prompted to create a new project (name does not matter)."
f"3. Click on Enable APIs and Services at the top."
f"4. In the list of APIs choose or search for YouTube Data API v3 and "
f"click on it. Choose Enable."
f"5. Click on Credentials on the left navigation bar."
f"6. Click on Create Credential at the top."
f'7. At the top click the link for "API key".'
f"8. No application restrictions are needed. Click Create at the bottom."
f"9. You now have a key to add to `{{prefix}}set api youtube api_key`"
)
message = (
f"1. Go to Google Developers Console and log in with your Google account."
f"(https://console.developers.google.com/)"
f"2. You should be prompted to create a new project (name does not matter)."
f"3. Click on Enable APIs and Services at the top."
f"4. In the list of APIs choose or search for YouTube Data API v3 and "
f"click on it. Choose Enable."
f"5. Click on Credentials on the left navigation bar."
f"6. Click on Create Credential at the top."
f'7. At the top click the link for "API key".'
f"8. No application restrictions are needed. Click Create at the bottom."
f"9. You now have a key to add to `{{prefix}}set api youtube api_key`"
)
message = (
"1. Go to Google Developers Console and log in with your Google account."
"(https://console.developers.google.com/)"
"2. You should be prompted to create a new project (name does not matter)."
"3. Click on Enable APIs and Services at the top."
"4. In the list of APIs choose or search for YouTube Data API v3 and "
"click on it. Choose Enable."
"5. Click on Credentials on the left navigation bar."
"6. Click on Create Credential at the top."
'7. At the top click the link for "API key".'
"8. No application restrictions are needed. Click Create at the bottom."
f"9. You now have a key to add to `{prefix}set api youtube api_key`"
)
# It shouldn't matter if the string prefixes are capitalized.
temp_msg = (
f"{F'{humanize_number(pos)}.': <{pound_len+2}} "
f"{balance: <{bal_len + 5}} "
f"<<{author.display_name}>>\n"
)
fstring = f"We have to remember to escape {braces}. Like {{these}}. But not {this}."
welcome_to_programming = R"hello," R" world!"
fstring = (
f"f-strings definitely make things more {difficult} than they need to be for"
" {black}. But boy they sure are handy. The problem is that some lines will need"
f" to have the 'f' whereas others do not. This {line}, for example, needs one."
)
x = (
"This is a long string which contains an f-expr that should not split"
f" {{{[i for i in range(5)]}}}."
)
x = (
"\N{BLACK RIGHT-POINTING TRIANGLE WITH DOUBLE VERTICAL BAR}\N{VARIATION SELECTOR-16}"
)
xxxxxx_xxx_xxxx_xx_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxx_xxxx_xxxxx = xxxx.xxxxxx.xxxxxxxxx.xxxxxxxxxxxxxxxxxxxx(
xx_xxxxxx={
"x3_xxxxxxxx": (
"xxx3_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxxxxxx_xxxxxx_xxxxxxx"
),
},
)
# Regression test for https://github.com/psf/black/issues/3117.
some_dict = {
"something_something": (
r"Lorem ipsum dolor sit amet, an sed convenire eloquentiam \t"
r"signiferumque, duo ea vocibus consetetur scriptorem. Facer \t"
),
}
# Regression test for https://github.com/psf/black/issues/3459.
xxxx(
empty_str_as_first_split=(
""
f"xxxxxxx {xxxxxxxxxx} xxx xxxxxxxxxx xxxxx xxx xxx xx "
"xxxxx xxxxxxxxx xxxxxxx, xxx xxxxxxxxxxx xxx xxxxx. "
f"xxxxxxxxxxxxx xxxx xx xxxxxxxxxx. xxxxx: {x.xxx}"
),
empty_u_str_as_first_split=(
""
f"xxxxxxx {xxxxxxxxxx} xxx xxxxxxxxxx xxxxx xxx xxx xx "
"xxxxx xxxxxxxxx xxxxxxx, xxx xxxxxxxxxxx xxx xxxxx. "
f"xxxxxxxxxxxxx xxxx xx xxxxxxxxxx. xxxxx: {x.xxx}"
),
)
# Regression test for https://github.com/psf/black/issues/3455.
a_dict = {
"/this/is/a/very/very/very/very/very/very/very/very/very/very/long/key/without/spaces":
# And there is a comment before the value
("item1", "item2", "item3"),
}
# Regression test for https://github.com/psf/black/issues/3506.
# Regressed again by https://github.com/psf/black/pull/4498
s = (
"With single quote: ' "
f" {my_dict['foo']}"
' With double quote: " '
f' {my_dict["bar"]}'
)
s = (
"Lorem Ipsum is simply dummy text of the printing and typesetting"
f' industry:\'{my_dict["foo"]}\''
) | X |
python | ray-project__ray | python/ray/tune/tests/test_trial_scheduler.py | {
"start": 8188,
"end": 8370
} | class ____(_FutureTrainingResult):
def __init__(self, result):
self.result = result
def resolve(self, block: bool = True):
return self.result
| _FakeFutureResult |
python | ray-project__ray | python/ray/serve/tests/test_runtime_env.py | {
"start": 479,
"end": 1717
} | class ____:
def __call__(self, *args):
return open("hello").read()
handle = serve.run(Test.bind())
try:
handle.remote().result()
assert False, "Should not get here"
except FileNotFoundError:
pass
"""
run_string_as_driver(driver)
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_working_dir_basic(ray_start, tmp_dir, ray_shutdown):
with open("hello", "w") as f:
f.write("world")
print("Wrote file")
ray.init(address="auto", namespace="serve", runtime_env={"working_dir": "."})
print("Initialized Ray")
@serve.deployment
class Test:
def __call__(self, *args):
return open("hello").read()
handle = serve.run(Test.bind())
print("Deployed")
assert handle.remote().result() == "world"
@pytest.mark.skipif(sys.platform == "win32", reason="Fail to create temp dir.")
def test_working_dir_connect_from_new_driver(ray_start, tmp_dir):
with open("hello", "w") as f:
f.write("world")
driver1 = """
import ray
from ray import serve
job_config = ray.job_config.JobConfig(runtime_env={"working_dir": "."})
ray.init(address="auto", namespace="serve", job_config=job_config)
@serve.deployment
| Test |
python | huggingface__transformers | src/transformers/models/csm/processing_csm.py | {
"start": 1980,
"end": 16218
} | class ____(ProcessorMixin):
r"""
Constructs a Csm processor which wraps [`EncodecFeatureExtractor`] and
[`PretrainedTokenizerFast`] into a single processor that inherits both the audio feature extraction and
tokenizer functionalities. See the [`~CsmProcessor.__call__`] for more
information.
The preferred way of passing kwargs is as a dictionary per modality, see usage example below.
```python
from transformers import CsmProcessor
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
audio = ds[0]["audio"]["array"]
processor = CsmProcessor.from_pretrained("sesame/csm-1b")
processor(
text=["<|begin_of_text|>[0]What are you working on?<|end_of_text|><|AUDIO|><|audio_eos|><|begin_of_text|>[1]I'm figuring out my budget.<|end_of_text|>"],
audio=audio,
text_kwargs = {"padding": False},
audio_kwargs = {"sampling_rate": 16000},
common_kwargs = {"return_tensors": "pt"},
)
# this should error out because EncodecFeatureExtractor expects a 24kHz audio :)
```
Args:
feature_extractor ([`EncodecFeatureExtractor`]):
The feature extractor is a required input.
tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
def __init__(
self,
feature_extractor,
tokenizer,
chat_template=None,
):
if not hasattr(tokenizer, "audio_token"):
self.audio_token = "<|AUDIO|>"
self.audio_token_id = tokenizer.convert_tokens_to_ids(self.audio_token)
else:
self.audio_token = tokenizer.audio_token
self.audio_token_id = tokenizer.audio_token_id
if not hasattr(tokenizer, "audio_eos_token"):
self.audio_eos_token = "<|audio_eos|>"
self.audio_eos_token_id = tokenizer.convert_tokens_to_ids(self.audio_eos_token)
else:
self.audio_eos_token = tokenizer.audio_eos_token
self.audio_eos_token_id = tokenizer.audio_eos_token_id
super().__init__(feature_extractor, tokenizer, chat_template=chat_template)
@staticmethod
def _get_encoded_length(audio_length, kernel_sizes=None, strides=None, dilations=None, use_causal_conv=None):
"""
Compute the length of the encoded audio sequence.
Args:
audio_length (int): The length of the audio sequence.
kernel_sizes (list[int]): The kernel sizes for the convolutional layers.
strides (list[int]): The strides for the convolutional layers.
use_causal_conv (bool): Whether to use causal convolutions.
"""
cur_length = audio_length
if kernel_sizes is None or strides is None or dilations is None or use_causal_conv is None:
return cur_length
for kernel_size, stride, dilation in zip(kernel_sizes, strides, dilations):
effective_kernel_size = (kernel_size - 1) * dilation + 1
padding_total = kernel_size - stride
padding_right = padding_total // 2
padding_left = padding_total - padding_right
n_frames = (cur_length - effective_kernel_size + padding_total) / stride + 1
n_frames = math.ceil(n_frames) - 1
ideal_length = n_frames * stride + kernel_size - padding_total
extra_padding = ideal_length - cur_length
if use_causal_conv:
padding_left = padding_total
padding_right = extra_padding
else:
padding_right = padding_right + extra_padding
cur_length = cur_length + padding_left + padding_right
cur_length = (cur_length - dilation * (kernel_size - 1) - 1) // stride + 1
return cur_length
def save_audio(
self,
audio: AudioInput,
saving_path: Union[str, Path, list[Union[str, Path]]],
**kwargs: Unpack[CsmProcessorKwargs],
):
# TODO: @eustlb, this should be in AudioProcessor
if not is_soundfile_available():
raise ImportError("Please install `soundfile` to save audio files.")
# ensure correct audio input
audio = make_list_of_audio(audio)
# ensure correct saving path
if isinstance(saving_path, (str, Path)):
saving_path = [saving_path]
elif not (isinstance(saving_path, (list, tuple)) and all(isinstance(p, (str, Path)) for p in saving_path)):
raise ValueError("Invalid input path. Please provide a string, or a list of strings")
if len(audio) != len(saving_path):
raise ValueError("The number of audio and saving paths must be the same")
output_kwargs = self._merge_kwargs(
CsmProcessorKwargs,
**kwargs,
)
audio_kwargs = output_kwargs["audio_kwargs"]
sampling_rate = audio_kwargs["sampling_rate"]
for audio_value, p in zip(audio, saving_path):
if isinstance(audio_value, torch.Tensor):
audio_value = audio_value.cpu().float().numpy()
sf.write(p, audio_value, sampling_rate)
def __call__(
self,
text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]],
audio: Optional[AudioInput] = None,
output_labels: Optional[bool] = False,
depth_decoder_labels_ratio: Optional[float] = 1.0,
**kwargs: Unpack[CsmProcessorKwargs],
):
r"""
Main method to prepare text(s) and audio to be fed as input to the model. This method forwards the `text`
arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] to encode
the text. To prepare the audio, this method forwards the `audio` arguments to
EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`]. Please refer
to the docstring of the above two methods for more information.
Args:
audio (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
The audio or batch of audio to be prepared. Each audio can be a NumPy array or PyTorch
tensor.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
output_labels (bool, *optional*, default=False):
Whether to return labels for training. Indices will be in `[config.audio_token_id, -100, -101]`.
- `config.audio_token_id` indicates an audio frame (considering sequence length elements as frames)
- `-100` will be ignored in the loss computation
- `-101` indicates the audio frame will be used only for the backbone model (using the first codebook token as labels)
depth_decoder_labels_ratio (float, *optional*, default=1.0):
The ratio of audio frames to keep for the depth decoder labels.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **input_values** -- List of audio values to be fed to a model. Returned when `audio` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **labels** -- List of labels for the audio frames. Returned when `output_labels=True`.
"""
output_kwargs = self._merge_kwargs(
CsmProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
text_kwargs = output_kwargs["text_kwargs"]
audio_kwargs = output_kwargs["audio_kwargs"]
return_tensors = text_kwargs.get("return_tensors", None)
if return_tensors != "pt":
raise ValueError(f"{self.__class__.__name__} only supports `return_tensors='pt'`.")
if isinstance(text, str):
text = [text]
elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
n_audio_in_text = [t.count(self.audio_token) for t in text]
n_audio = 0
if audio is not None:
audio = make_list_of_audio(audio)
n_audio = len(audio)
if sum(n_audio_in_text) > 0 and n_audio != sum(n_audio_in_text):
if audio is None:
raise ValueError("No audio were provided, but there are audio tokens in the prompt")
else:
raise ValueError(
f"The number of audio tokens in each text ({n_audio_in_text}) should be the same as the "
f"number of provided audios ({n_audio})."
)
if audio is not None:
encoded_length_kwargs = audio_kwargs.pop("encoded_length_kwargs", {})
num_audio_tokens_list = [
self._get_encoded_length(audio_array.shape[-1], **encoded_length_kwargs) for audio_array in audio
]
num_audio_tokens_list_copy = num_audio_tokens_list.copy()
# expand the text to repeat the audio token for the corresponding number of frames
expanded_text = []
for sample in text:
replace_str = []
while self.audio_token in sample:
num_audio_tokens = num_audio_tokens_list_copy.pop(0)
expanded_audio_token = self.audio_token * num_audio_tokens
replace_str.append(expanded_audio_token)
sample = sample.replace(self.audio_token, "<placeholder>", 1)
while "<placeholder>" in sample:
sample = sample.replace("<placeholder>", replace_str.pop(0), 1)
expanded_text.append(sample)
text = expanded_text
encoding = self.tokenizer(text, **text_kwargs)
data = {}
data.update(encoding)
if audio is not None:
audio_kwargs.pop("return_attention_mask", None) # not supported by the feature extractor
concatenated_audio, input_values_cutoffs = [], []
offset = 0
for n_audio in n_audio_in_text:
if n_audio == 0:
concatenated_audio.append(np.zeros(0))
input_values_cutoffs.append(torch.tensor([-1]))
else:
concatenated_audio.append(
np.concatenate(
[
el.cpu().numpy() if isinstance(el, torch.Tensor) else el
for el in audio[offset : offset + n_audio]
],
axis=-1,
)
)
input_values_cutoffs.append(
torch.tensor([el.shape[-1] for el in audio[offset : offset + n_audio]]).cumsum(dim=-1)
)
offset += n_audio
audio_inputs = self.feature_extractor(concatenated_audio, **audio_kwargs)
audio_inputs.pop("padding_mask", None) # not applicable here
data.update(audio_inputs)
# pad and stack the audio cut idxs
max_len = max(cut_idxs.shape[-1] for cut_idxs in input_values_cutoffs)
input_values_cutoffs = [
torch.nn.functional.pad(cut_idxs, (0, max_len - cut_idxs.shape[-1]), value=-1)
for cut_idxs in input_values_cutoffs
]
data["input_values_cutoffs"] = torch.stack(input_values_cutoffs, dim=0)
if output_labels:
audio_frame_idxs = (data["input_ids"] == self.audio_token_id).nonzero()
n_audio_frames = audio_frame_idxs.shape[0]
if depth_decoder_labels_ratio <= 1.0:
rand_idxs = torch.randperm(n_audio_frames)[: int(n_audio_frames * (1 - depth_decoder_labels_ratio))]
skip_frames_idxs = audio_frame_idxs[rand_idxs]
else:
skip_frames_idxs = audio_frame_idxs
labels = torch.where(
(data["input_ids"] == self.audio_token_id) | (data["input_ids"] == self.audio_eos_token_id),
data["input_ids"],
-100,
)
labels[skip_frames_idxs[:, 0], skip_frames_idxs[:, 1]] = -101
data["labels"] = labels
return BatchFeature(data=data, tensor_type=return_tensors)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
feature_extractor_input_names = self.feature_extractor.model_input_names
# Remove `padding_mask`, it is popped and not used when processing. Make a copy of list when removing
# otherwise `self.feature_extractor.model_input_names` is also modified
feature_extractor_input_names = [name for name in feature_extractor_input_names if name != "padding_mask"]
return list(tokenizer_input_names + feature_extractor_input_names + ["input_values_cutoffs"])
__all__ = ["CsmProcessor"]
| CsmProcessor |
python | great-expectations__great_expectations | great_expectations/checkpoint/checkpoint.py | {
"start": 18485,
"end": 20483
} | class ____(BaseModel):
"""
The result of running a Checkpoint.
Contains information about Expectation successes and failures from running
each Validation Definition in the Checkpoint.
"""
run_id: RunIdentifier
run_results: Dict[ValidationResultIdentifier, ExpectationSuiteValidationResult]
checkpoint_config: Checkpoint
success: Optional[bool] = None
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator
def _root_validate_result(cls, values: dict) -> dict:
run_results = values["run_results"]
if len(run_results) == 0:
raise ValueError("CheckpointResult must contain at least one run result") # noqa: TRY003 # FIXME CoP
if values["success"] is None:
values["success"] = all(result.success for result in run_results.values())
return values
@property
def name(self) -> str:
return self.checkpoint_config.name
def describe_dict(self) -> CheckpointDescriptionDict:
success_count = sum(1 for r in self.run_results.values() if r.success)
run_result_descriptions = [r.describe_dict() for r in self.run_results.values()]
num_results = len(run_result_descriptions)
return {
"success": success_count == num_results,
"statistics": {
"evaluated_validations": num_results,
"success_percent": success_count / num_results * 100,
"successful_validations": success_count,
"unsuccessful_validations": num_results - success_count,
},
"validation_results": run_result_descriptions,
}
@public_api
def describe(self) -> str:
"""JSON string description of this CheckpointResult"""
return json.dumps(self.describe_dict(), indent=4)
# Necessary due to cyclic dependencies between Checkpoint and CheckpointResult
CheckpointResult.update_forward_refs()
| CheckpointResult |
python | coleifer__peewee | peewee.py | {
"start": 22073,
"end": 22248
} | class ____(Node):
def __init__(self, source):
self.source = source
def __sql__(self, ctx):
return ctx.sql(QualifiedNames(self.source)).literal('.*')
| Star |
python | celery__celery | celery/local.py | {
"start": 1360,
"end": 8056
} | class ____:
"""Proxy to another object."""
# Code stolen from werkzeug.local.Proxy.
__slots__ = ('__local', '__args', '__kwargs', '__dict__')
def __init__(self, local,
args=None, kwargs=None, name=None, __doc__=None):
object.__setattr__(self, '_Proxy__local', local)
object.__setattr__(self, '_Proxy__args', args or ())
object.__setattr__(self, '_Proxy__kwargs', kwargs or {})
if name is not None:
object.__setattr__(self, '__custom_name__', name)
if __doc__ is not None:
object.__setattr__(self, '__doc__', __doc__)
@_default_cls_attr('name', str, __name__)
def __name__(self):
try:
return self.__custom_name__
except AttributeError:
return self._get_current_object().__name__
@_default_cls_attr('qualname', str, __name__)
def __qualname__(self):
try:
return self.__custom_name__
except AttributeError:
return self._get_current_object().__qualname__
@_default_cls_attr('module', str, __module__)
def __module__(self):
return self._get_current_object().__module__
@_default_cls_attr('doc', str, __doc__)
def __doc__(self):
return self._get_current_object().__doc__
def _get_class(self):
return self._get_current_object().__class__
@property
def __class__(self):
return self._get_class()
def _get_current_object(self):
"""Get current object.
This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
loc = object.__getattribute__(self, '_Proxy__local')
if not hasattr(loc, '__release_local__'):
return loc(*self.__args, **self.__kwargs)
try: # pragma: no cover
# not sure what this is about
return getattr(loc, self.__name__)
except AttributeError: # pragma: no cover
raise RuntimeError(f'no object bound to {self.__name__}')
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError: # pragma: no cover
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError: # pragma: no cover
return f'<{self.__class__.__name__} unbound>'
return repr(obj)
def __bool__(self):
try:
return bool(self._get_current_object())
except RuntimeError: # pragma: no cover
return False
__nonzero__ = __bool__ # Py2
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError: # pragma: no cover
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
def __setattr__(self, name, value):
setattr(self._get_current_object(), name, value)
def __delattr__(self, name):
delattr(self._get_current_object(), name)
def __str__(self):
return str(self._get_current_object())
def __lt__(self, other):
return self._get_current_object() < other
def __le__(self, other):
return self._get_current_object() <= other
def __eq__(self, other):
return self._get_current_object() == other
def __ne__(self, other):
return self._get_current_object() != other
def __gt__(self, other):
return self._get_current_object() > other
def __ge__(self, other):
return self._get_current_object() >= other
def __hash__(self):
return hash(self._get_current_object())
def __call__(self, *a, **kw):
return self._get_current_object()(*a, **kw)
def __len__(self):
return len(self._get_current_object())
def __getitem__(self, i):
return self._get_current_object()[i]
def __iter__(self):
return iter(self._get_current_object())
def __contains__(self, i):
return i in self._get_current_object()
def __add__(self, other):
return self._get_current_object() + other
def __sub__(self, other):
return self._get_current_object() - other
def __mul__(self, other):
return self._get_current_object() * other
def __floordiv__(self, other):
return self._get_current_object() // other
def __mod__(self, other):
return self._get_current_object() % other
def __divmod__(self, other):
return self._get_current_object().__divmod__(other)
def __pow__(self, other):
return self._get_current_object() ** other
def __lshift__(self, other):
return self._get_current_object() << other
def __rshift__(self, other):
return self._get_current_object() >> other
def __and__(self, other):
return self._get_current_object() & other
def __xor__(self, other):
return self._get_current_object() ^ other
def __or__(self, other):
return self._get_current_object() | other
def __div__(self, other):
return self._get_current_object().__div__(other)
def __truediv__(self, other):
return self._get_current_object().__truediv__(other)
def __neg__(self):
return -(self._get_current_object())
def __pos__(self):
return +(self._get_current_object())
def __abs__(self):
return abs(self._get_current_object())
def __invert__(self):
return ~(self._get_current_object())
def __complex__(self):
return complex(self._get_current_object())
def __int__(self):
return int(self._get_current_object())
def __float__(self):
return float(self._get_current_object())
def __oct__(self):
return oct(self._get_current_object())
def __hex__(self):
return hex(self._get_current_object())
def __index__(self):
return self._get_current_object().__index__()
def __coerce__(self, other):
return self._get_current_object().__coerce__(other)
def __enter__(self):
return self._get_current_object().__enter__()
def __exit__(self, *a, **kw):
return self._get_current_object().__exit__(*a, **kw)
def __reduce__(self):
return self._get_current_object().__reduce__()
| Proxy |
python | apache__avro | lang/py/avro/test/test_schema.py | {
"start": 39104,
"end": 41094
} | class ____(unittest.TestCase):
"""Enable generating attribute test cases over all the other-prop test schema."""
_type_map = {
"cp_array": list,
"cp_boolean": bool,
"cp_float": float,
"cp_int": int,
"cp_null": type(None),
"cp_object": dict,
"cp_string": str,
}
def __init__(self, test_schema):
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super().__init__("check_attributes")
self.test_schema = test_schema
def _check_props(self, props):
for k, v in props.items():
self.assertIsInstance(v, self._type_map[k])
def check_attributes(self):
"""Other attributes and their types on a schema should be preserved."""
sch = self.test_schema.parse()
try:
self.assertNotEqual(sch, object(), "A schema is never equal to a non-schema instance.")
except AttributeError: # pragma: no coverage
self.fail("Comparing a schema to a non-schema should be False, but not error.")
round_trip = avro.schema.parse(str(sch))
self.assertEqual(
sch,
round_trip,
"A schema should be equal to another schema parsed from the same json.",
)
self.assertEqual(
sch.other_props,
round_trip.other_props,
"Properties were not preserved in a round-trip parse.",
)
self._check_props(sch.other_props)
if sch.type == "record":
field_props = [f.other_props for f in sch.fields if f.other_props]
self.assertEqual(len(field_props), len(sch.fields))
for p in field_props:
self._check_props(p)
| OtherAttributesTestCase |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 162943,
"end": 164095
} | class ____(ExprNode):
# Base class for indexing nodes.
#
# base ExprNode the value being indexed
def is_ephemeral(self):
# in most cases, indexing will return a safe reference to an object in a container,
# so we consider the result safe if the base object is
return self.base.is_ephemeral() or self.base.type in (
unicode_type, bytes_type, bytearray_type)
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
def is_lvalue(self):
# NOTE: references currently have both is_reference and is_ptr
# set. Since pointers and references have different lvalue
# rules, we must be careful to separate the two.
if self.type.is_reference:
if self.type.ref_base_type.is_array:
# fixed-sized arrays aren't l-values
return False
elif self.type.is_ptr:
# non-const pointers can always be reassigned
return True
# Just about everything else returned by the index operator
# can be an lvalue.
return True
| _IndexingBaseNode |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 1472,
"end": 1678
} | class ____(IncrementalShopifyStreamWithDeletedEvents):
data_field = "articles"
cursor_field = "id"
order_field = "id"
filter_field = "since_id"
deleted_events_api_name = "Article"
| Articles |
python | scrapy__scrapy | tests/test_utils_log.py | {
"start": 4771,
"end": 6593
} | class ____:
@pytest.fixture
def log_stream(self) -> StringIO:
return StringIO()
@pytest.fixture
def spider(self) -> LogSpider:
return LogSpider()
@pytest.fixture(autouse=True)
def logger(self, log_stream: StringIO) -> Generator[logging.Logger]:
handler = logging.StreamHandler(log_stream)
logger = logging.getLogger("log_spider")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
yield logger
logger.removeHandler(handler)
def test_debug_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
log_message = "Foo message"
spider.log_debug(log_message)
log_contents = log_stream.getvalue()
assert log_contents == f"{log_message}\n"
def test_info_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
log_message = "Bar message"
spider.log_info(log_message)
log_contents = log_stream.getvalue()
assert log_contents == f"{log_message}\n"
def test_warning_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
log_message = "Baz message"
spider.log_warning(log_message)
log_contents = log_stream.getvalue()
assert log_contents == f"{log_message}\n"
def test_error_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
log_message = "Foo bar message"
spider.log_error(log_message)
log_contents = log_stream.getvalue()
assert log_contents == f"{log_message}\n"
def test_critical_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
log_message = "Foo bar baz message"
spider.log_critical(log_message)
log_contents = log_stream.getvalue()
assert log_contents == f"{log_message}\n"
| TestLogging |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/delete/tutorial001.py | {
"start": 475,
"end": 2609
} | class ____(SQLModel):
name: Optional[str] = None
secret_name: Optional[str] = None
age: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate):
with Session(engine) as session:
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=List[HeroPublic])
def read_heroes(offset: int = 0, limit: int = Query(default=100, le=100)):
with Session(engine) as session:
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroPublic)
def update_hero(hero_id: int, hero: HeroUpdate):
with Session(engine) as session:
db_hero = session.get(Hero, hero_id)
if not db_hero:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.model_dump(exclude_unset=True)
db_hero.sqlmodel_update(hero_data)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.delete("/heroes/{hero_id}")
def delete_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
session.delete(hero)
session.commit()
return {"ok": True}
| HeroUpdate |
python | run-llama__llama_index | llama-index-integrations/voice_agents/llama-index-voice-agents-gemini-live/llama_index/voice_agents/gemini_live/events.py | {
"start": 219,
"end": 277
} | class ____(BaseVoiceAgentEvent):
text: str
| TextSentEvent |
python | sqlalchemy__sqlalchemy | test/orm/test_assorted_eager.py | {
"start": 22574,
"end": 26123
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"design_types",
metadata,
Column(
"design_type_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
)
Table(
"design",
metadata,
Column(
"design_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column(
"design_type_id",
Integer,
ForeignKey("design_types.design_type_id"),
),
)
Table(
"parts",
metadata,
Column(
"part_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("design_id", Integer, ForeignKey("design.design_id")),
Column(
"design_type_id",
Integer,
ForeignKey("design_types.design_type_id"),
),
)
Table(
"inherited_part",
metadata,
Column(
"ip_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("part_id", Integer, ForeignKey("parts.part_id")),
Column("design_id", Integer, ForeignKey("design.design_id")),
)
@classmethod
def setup_classes(cls):
class Part(cls.Basic):
pass
class Design(cls.Basic):
pass
class DesignType(cls.Basic):
pass
class InheritedPart(cls.Basic):
pass
def test_one(self):
(
Part,
inherited_part,
design_types,
DesignType,
parts,
design,
Design,
InheritedPart,
) = (
self.classes.Part,
self.tables.inherited_part,
self.tables.design_types,
self.classes.DesignType,
self.tables.parts,
self.tables.design,
self.classes.Design,
self.classes.InheritedPart,
)
p_m = self.mapper_registry.map_imperatively(Part, parts)
self.mapper_registry.map_imperatively(
InheritedPart,
inherited_part,
properties=dict(part=relationship(Part, lazy="joined")),
)
d_m = self.mapper_registry.map_imperatively(
Design,
design,
properties=dict(
inheritedParts=relationship(
InheritedPart,
cascade="all, delete-orphan",
backref="design",
)
),
)
self.mapper_registry.map_imperatively(DesignType, design_types)
d_m.add_property(
"type", relationship(DesignType, lazy="joined", backref="designs")
)
p_m.add_property(
"design",
relationship(
Design,
lazy="joined",
backref=backref("parts", cascade="all, delete-orphan"),
),
)
d = Design()
sess = fixture_session()
sess.add(d)
sess.flush()
sess.expunge_all()
x = sess.get(Design, 1)
x.inheritedParts
| EagerTest6 |
python | django__django | django/db/migrations/executor.py | {
"start": 250,
"end": 19029
} | class ____:
"""
End-to-end migration execution - load migrations and run them up or down
to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, return a list of (Migration instance,
backwards?).
"""
plan = []
if clean_start:
applied = {}
else:
applied = dict(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate
# everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.pop(migration)
# If the target is missing, it's likely a replaced migration.
# Reload the graph without replacements.
elif (
self.loader.replace_migrations
and target not in self.loader.graph.node_map
):
self.loader.replace_migrations = False
self.loader.build_graph()
return self.migration_plan(targets, clean_start=clean_start)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
# Don't migrate backwards all the way to the target node (that
# may roll back dependencies in other apps that don't need to
# be rolled back); instead roll back through target's immediate
# child(ren) in the same app, and no further.
next_in_app = sorted(
n
for n in self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.pop(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied[migration] = self.loader.graph.nodes[migration]
return plan
def _create_project_state(self, with_applied_migrations=False):
"""
Create a project state including all the applications without
migrations and applied migrations if with_applied_migrations=True.
"""
state = ProjectState(real_apps=self.loader.unmigrated_apps)
if with_applied_migrations:
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(
self.loader.graph.leaf_nodes(), clean_start=True
)
applied_migrations = {
self.loader.graph.nodes[key]
for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
for migration, _ in full_plan:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
return state
def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False):
"""
Migrate the database up to the given targets.
Django first needs to create all project states before a migration is
(un)applied and in a second step run all the database operations.
"""
# The django_migrations table must be present to record applied
# migrations, but don't create it if there are no migrations to apply.
if plan == []:
if not self.recorder.has_table():
return self._create_project_state(with_applied_migrations=False)
else:
self.recorder.ensure_schema()
if plan is None:
plan = self.migration_plan(targets)
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(
self.loader.graph.leaf_nodes(), clean_start=True
)
all_forwards = all(not backwards for mig, backwards in plan)
all_backwards = all(backwards for mig, backwards in plan)
if not plan:
if state is None:
# The resulting state should include applied migrations.
state = self._create_project_state(with_applied_migrations=True)
elif all_forwards == all_backwards:
# This should only happen if there's a mixed plan
raise InvalidMigrationPlan(
"Migration plans with both forwards and backwards migrations "
"are not supported. Please split your migration process into "
"separate plans of only forwards OR backwards migrations.",
plan,
)
elif all_forwards:
if state is None:
# The resulting state should still include applied migrations.
state = self._create_project_state(with_applied_migrations=True)
state = self._migrate_all_forwards(
state, plan, full_plan, fake=fake, fake_initial=fake_initial
)
else:
# No need to check for `elif all_backwards` here, as that condition
# would always evaluate to true.
state = self._migrate_all_backwards(plan, full_plan, fake=fake)
self.check_replacements()
return state
def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial):
"""
Take a list of 2-tuples of the form (migration instance, False) and
apply them in the order they occur in the full_plan.
"""
migrations_to_run = {m[0] for m in plan}
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from these sets so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if "apps" not in state.__dict__:
if self.progress_callback:
self.progress_callback("render_start")
state.apps # Render all -- performance critical
if self.progress_callback:
self.progress_callback("render_success")
state = self.apply_migration(
state, migration, fake=fake, fake_initial=fake_initial
)
migrations_to_run.remove(migration)
return state
def _migrate_all_backwards(self, plan, full_plan, fake):
"""
Take a list of 2-tuples of the form (migration instance, True) and
unapply them in reverse order they occur in the full_plan.
Since unapplying a migration requires the project state prior to that
migration, Django will compute the migration states before each of them
in a first run over the plan and then unapply them in a second run over
the plan.
"""
migrations_to_run = {m[0] for m in plan}
# Holds all migration states prior to the migrations being unapplied
states = {}
state = self._create_project_state()
applied_migrations = {
self.loader.graph.nodes[key]
for key in self.loader.applied_migrations
if key in self.loader.graph.nodes
}
if self.progress_callback:
self.progress_callback("render_start")
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
# process.
break
if migration in migrations_to_run:
if "apps" not in state.__dict__:
state.apps # Render all -- performance critical
# The state before this migration
states[migration] = state
# The old state keeps as-is, we continue with the new state
state = migration.mutate_state(state, preserve=True)
migrations_to_run.remove(migration)
elif migration in applied_migrations:
# Only mutate the state if the migration is actually applied
# to make sure the resulting state doesn't include changes
# from unrelated migrations.
migration.mutate_state(state, preserve=False)
if self.progress_callback:
self.progress_callback("render_success")
for migration, _ in plan:
self.unapply_migration(states[migration], migration, fake=fake)
applied_migrations.remove(migration)
# Generate the post migration state by starting from the state before
# the last migration is unapplied and mutating it to include all the
# remaining applied migrations.
last_unapplied_migration = plan[-1][0]
state = states[last_unapplied_migration]
# Avoid mutating state with apps rendered as it's an expensive
# operation.
del state.apps
for index, (migration, _) in enumerate(full_plan):
if migration == last_unapplied_migration:
for migration, _ in full_plan[index:]:
if migration in applied_migrations:
migration.mutate_state(state, preserve=False)
break
return state
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""Run a migration forwards."""
migration_recorded = False
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
# Test to see if this is an already-applied initial migration
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
# Alright, do it normally
with self.connection.schema_editor(
atomic=migration.atomic
) as schema_editor:
state = migration.apply(state, schema_editor)
if not schema_editor.deferred_sql:
self.record_migration(migration.app_label, migration.name)
migration_recorded = True
if not migration_recorded:
self.record_migration(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def record_migration(self, app_label, name, forward=True):
migration = self.loader.disk_migrations.get((app_label, name))
# For replacement migrations, record individual statuses
if migration and migration.replaces:
for replaced_app_label, replaced_name in migration.replaces:
self.record_migration(replaced_app_label, replaced_name, forward)
if forward:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_unapplied(app_label, name)
def unapply_migration(self, state, migration, fake=False):
"""Run a migration backwards."""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor(
atomic=migration.atomic
) as schema_editor:
state = migration.unapply(state, schema_editor)
self.record_migration(migration.app_label, migration.name, forward=False)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
"""
Mark replacement migrations applied if their replaced set all are.
Do this unconditionally on every migrate, rather than just when
migrations are applied or unapplied, to correctly handle the case
when a new squash migration is pushed to a deployment that already had
all its replaced migrations applied. In this case no new migration will
be applied, but the applied state of the squashed migration must be
maintained.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
if key not in applied and self.loader.all_replaced_applied(key, applied):
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
"""
Test whether a migration has been implicitly applied - that the
tables or columns it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel and AddField).
"""
def should_skip_detecting_model(migration, model):
"""
No need to detect tables for proxy models, unmanaged models, or
models that can't be migrated on the current database.
"""
return (
model._meta.proxy
or not model._meta.managed
or not router.allow_migrate(
self.connection.alias,
migration.app_label,
model_name=model._meta.model_name,
)
)
if migration.initial is None:
# Bail if the migration isn't the first one in its app
if any(app == migration.app_label for app, name in migration.dependencies):
return False, project_state
elif migration.initial is False:
# Bail if it's NOT an initial migration
return False, project_state
if project_state is None:
after_state = self.loader.project_state(
(migration.app_label, migration.name), at_end=True
)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_model_migration = False
found_add_field_migration = False
fold_identifier_case = self.connection.features.ignores_table_name_case
with self.connection.cursor() as cursor:
existing_table_names = set(
self.connection.introspection.table_names(cursor)
)
if fold_identifier_case:
existing_table_names = {
name.casefold() for name in existing_table_names
}
# Make sure all create model and add field operations are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
db_table = model._meta.db_table
if fold_identifier_case:
db_table = db_table.casefold()
if db_table not in existing_table_names:
return False, project_state
found_create_model_migration = True
elif isinstance(operation, migrations.AddField):
model = apps.get_model(migration.app_label, operation.model_name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if should_skip_detecting_model(migration, model):
continue
table = model._meta.db_table
field = model._meta.get_field(operation.name)
# Handle implicit many-to-many tables created by AddField.
if field.many_to_many:
through_db_table = field.remote_field.through._meta.db_table
if fold_identifier_case:
through_db_table = through_db_table.casefold()
if through_db_table not in existing_table_names:
return False, project_state
else:
found_add_field_migration = True
continue
with self.connection.cursor() as cursor:
columns = self.connection.introspection.get_table_description(
cursor, table
)
for column in columns:
field_column = field.column
column_name = column.name
if fold_identifier_case:
column_name = column_name.casefold()
field_column = field_column.casefold()
if column_name == field_column:
found_add_field_migration = True
break
else:
return False, project_state
# If we get this far and we found at least one CreateModel or AddField
# migration, the migration is considered implicitly applied.
return (found_create_model_migration or found_add_field_migration), after_state
| MigrationExecutor |
python | Textualize__textual | tests/test_binding_inheritance.py | {
"start": 7961,
"end": 9238
} | class ____(AppKeyRecorder):
"""An application with bindings."""
BINDINGS = AppKeyRecorder.make_bindings()
async def test_pressing_alpha_on_app() -> None:
"""Test that pressing the alpha key, when it's bound on the app, results in an action fire."""
async with AppWithMovementKeysBound().run_test() as pilot:
await pilot.press(*AppKeyRecorder.ALPHAS)
await pilot.pause()
assert pilot.app.pressed_keys == [*AppKeyRecorder.ALPHAS]
async def test_pressing_movement_keys_app() -> None:
"""Test that pressing the movement keys, when they're bound on the app, results in an action fire."""
async with AppWithMovementKeysBound().run_test() as pilot:
await pilot.press(*AppKeyRecorder.ALL_KEYS)
await pilot.pause()
pilot.app.all_recorded()
##############################################################################
# An app with a focused child widget with bindings.
#
# Now let's spin up an application, using the default screen, where the app
# itself is composing in a widget that can have, and has, focus. The widget
# also has bindings for all of the test keys. That child widget should be
# able to handle all of the test keys on its own and nothing else should
# grab them.
| AppWithMovementKeysBound |
python | Lightning-AI__lightning | src/lightning/fabric/strategies/single_xla.py | {
"start": 1101,
"end": 3250
} | class ____(SingleDeviceStrategy):
"""Strategy for training on a single XLA device."""
def __init__(
self,
device: _DEVICE,
accelerator: Optional[Accelerator] = None,
checkpoint_io: Optional[XLACheckpointIO] = None,
precision: Optional[XLAPrecision] = None,
):
if not _XLA_AVAILABLE:
raise ModuleNotFoundError(str(_XLA_AVAILABLE))
if isinstance(device, torch.device):
# unwrap the `torch.device` in favor of `xla_device`
device = device.index
import torch_xla.core.xla_model as xm
super().__init__(
accelerator=accelerator,
device=xm.xla_device(device),
checkpoint_io=checkpoint_io,
precision=precision,
)
@property
@override
def checkpoint_io(self) -> XLACheckpointIO:
plugin = self._checkpoint_io
if plugin is not None:
assert isinstance(plugin, XLACheckpointIO)
return plugin
return XLACheckpointIO()
@checkpoint_io.setter
@override
def checkpoint_io(self, io: Optional[CheckpointIO]) -> None:
if io is not None and not isinstance(io, XLACheckpointIO):
raise TypeError(f"The XLA strategy can only work with the `XLACheckpointIO` plugin, found {io}")
self._checkpoint_io = io
@property
@override
def precision(self) -> XLAPrecision:
plugin = self._precision
if plugin is not None:
assert isinstance(plugin, XLAPrecision)
return plugin
return XLAPrecision("32-true")
@precision.setter
@override
def precision(self, precision: Optional[Precision]) -> None:
if precision is not None and not isinstance(precision, XLAPrecision):
raise TypeError(f"The XLA strategy can only work with the `XLAPrecision` plugin, found {precision}")
self._precision = precision
@classmethod
@override
def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None:
strategy_registry.register("single_xla", cls, description=cls.__name__)
| SingleDeviceXLAStrategy |
python | tensorflow__tensorflow | tensorflow/python/saved_model/model_utils/mode_keys.py | {
"start": 1200,
"end": 1765
} | class ____(object):
"""Standard names for Estimator model modes.
The following standard keys are defined:
* `TRAIN`: training/fitting mode.
* `EVAL`: testing/evaluation mode.
* `PREDICT`: predication/inference mode.
"""
TRAIN = 'train'
EVAL = 'eval'
PREDICT = 'infer'
def is_predict(mode):
return mode in [KerasModeKeys.PREDICT, EstimatorModeKeys.PREDICT]
def is_eval(mode):
return mode in [KerasModeKeys.TEST, EstimatorModeKeys.EVAL]
def is_train(mode):
return mode in [KerasModeKeys.TRAIN, EstimatorModeKeys.TRAIN]
| EstimatorModeKeys |
python | huggingface__transformers | src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py | {
"start": 38203,
"end": 40524
} | class ____(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
# move reshapes to the beginning
# to make it user-friendly
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-5
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
def replace_batch_norm(model):
r"""
Recursively replace all `torch.nn.BatchNorm2d` with `RTDetrV2FrozenBatchNorm2d`.
Args:
model (torch.nn.Module):
input model
"""
for name, module in model.named_children():
if isinstance(module, nn.BatchNorm2d):
new_module = RTDetrV2FrozenBatchNorm2d(module.num_features)
if module.weight.device != torch.device("meta"):
new_module.weight.copy_(module.weight)
new_module.bias.copy_(module.bias)
new_module.running_mean.copy_(module.running_mean)
new_module.running_var.copy_(module.running_var)
model._modules[name] = new_module
if len(list(module.children())) > 0:
replace_batch_norm(module)
| RTDetrV2FrozenBatchNorm2d |
python | encode__django-rest-framework | tests/test_generics.py | {
"start": 705,
"end": 889
} | class ____(RESTFrameworkModel):
email = models.EmailField()
content = models.CharField(max_length=200)
created = models.DateTimeField(auto_now_add=True)
# Serializers
| Comment |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 375619,
"end": 376194
} | class ____:
def test_empty_ustring_array_is_falsey(self):
assert_(not np.array([''], dtype=np.str_))
def test_whitespace_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.str_)
a[0] = ' \0\0'
assert_(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.str_)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.str_)
a[0] = ' \0 \0'
assert_(a)
| TestUnicodeArrayNonzero |
python | plotly__plotly.py | plotly/graph_objs/scattersmith/_unselected.py | {
"start": 233,
"end": 3419
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattersmith"
_path_str = "scattersmith.unselected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattersmith.unselected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.scattersmith.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattersmith.unselected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.scattersmith.unselected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scattersmith.unselected.Ma
rker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scattersmith.unselected.Te
xtfont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattersmith.Unselected`
marker
:class:`plotly.graph_objects.scattersmith.unselected.Ma
rker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scattersmith.unselected.Te
xtfont` instance or dict with compatible properties
Returns
-------
Unselected
"""
super().__init__("unselected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattersmith.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.Unselected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Unselected |
python | pytorch__pytorch | test/dynamo/test_callback.py | {
"start": 517,
"end": 5770
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self._on_compile_start = Mock()
self._on_compile_end = Mock()
callback_handler.register_start_callback(self._on_compile_start)
callback_handler.register_end_callback(self._on_compile_end)
def tearDown(self) -> None:
callback_handler.clear()
return super().tearDown()
def test_callbacks_with_duplicate_prevention(self) -> None:
trigger = CallbackTrigger.DYNAMO
compile_id = CompileId(frame_id=0, frame_compile_id=0)
with (
callback_handler.install_callbacks(trigger, compile_id),
callback_handler.install_callbacks(trigger, compile_id),
):
self._on_compile_start.assert_called_once()
self._on_compile_end.assert_called_once()
def test_counter(self) -> None:
trigger = CallbackTrigger.DYNAMO
compile_id = CompileId(frame_id=0, frame_compile_id=0)
with callback_handler.install_callbacks(trigger, compile_id):
self.assertEqual(
callback_handler._CompilationCallbackHandler__pending_callbacks_counter,
1,
)
self.assertEqual(
callback_handler._CompilationCallbackHandler__pending_callbacks_counter, 0
)
def test_counter_assertion(self) -> None:
callback_handler._CompilationCallbackHandler__pending_callbacks_counter -= 1
with self.assertRaisesRegex(
AssertionError, "Pending callbacks counter cannot become negative."
):
trigger = CallbackTrigger.DYNAMO
compile_id = CompileId(frame_id=0, frame_compile_id=0)
with callback_handler.install_callbacks(trigger, str(compile_id)):
pass
self.assertEqual(
callback_handler._CompilationCallbackHandler__pending_callbacks_counter, 0
)
@unittest.skipIf(
TEST_WITH_ROCM, "ROCm outputs a different number of autotuning logs"
)
@requires_gpu
@torch._inductor.config.patch(force_disable_caches=True)
def test_triggers(self) -> None:
torch._dynamo.reset()
order = []
def on_start(args: CallbackArgs):
nonlocal order
order.append(f"start={args}")
def on_end(args: CallbackArgs):
nonlocal order
order.append(f"end={args}")
torch._dynamo.callback.on_compile_start(on_start)
torch._dynamo.callback.on_compile_start(on_end)
class TinyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(10, 10)
def forward(self, x):
temp = self.fc1(x)
temp = self.relu(temp)
torch._dynamo.graph_break()
return self.fc2(temp)
model = TinyModel().to(device_type)
compiled_model = torch.compile(model, mode="max-autotune")
x = torch.randn(10, 10, device=device_type)
loss = compiled_model(x).sum()
loss.backward()
self.assertExpectedInline(
"\n".join(order),
"""\
start=CallbackArgs(callback_trigger=<CallbackTrigger.DYNAMO: 1>, compile_id='0/0')
end=CallbackArgs(callback_trigger=<CallbackTrigger.DYNAMO: 1>, compile_id='0/0')
start=CallbackArgs(callback_trigger=<CallbackTrigger.DYNAMO: 1>, compile_id='1/0')
end=CallbackArgs(callback_trigger=<CallbackTrigger.DYNAMO: 1>, compile_id='1/0')
start=CallbackArgs(callback_trigger=<CallbackTrigger.LAZY_BACKWARD: 2>, compile_id='1/0')
end=CallbackArgs(callback_trigger=<CallbackTrigger.LAZY_BACKWARD: 2>, compile_id='1/0')
start=CallbackArgs(callback_trigger=<CallbackTrigger.LAZY_BACKWARD: 2>, compile_id='0/0')
end=CallbackArgs(callback_trigger=<CallbackTrigger.LAZY_BACKWARD: 2>, compile_id='0/0')""", # noqa: B950
)
order.clear()
if not HAS_CUDA_AND_TRITON:
return
compiled_model.zero_grad()
loss = compiled_model(x).sum()
loss.backward()
self.assertExpectedInline(
"\n".join(order),
"""\
start=CallbackArgs(callback_trigger=<CallbackTrigger.CUDAGRAPH_RECORDING: 4>, compile_id='0/0')
end=CallbackArgs(callback_trigger=<CallbackTrigger.CUDAGRAPH_RECORDING: 4>, compile_id='0/0')
start=CallbackArgs(callback_trigger=<CallbackTrigger.CUDAGRAPH_RECORDING: 4>, compile_id='1/0')
end=CallbackArgs(callback_trigger=<CallbackTrigger.CUDAGRAPH_RECORDING: 4>, compile_id='1/0')
start=CallbackArgs(callback_trigger=<CallbackTrigger.CUDAGRAPH_RECORDING: 4>, compile_id='1/0')
end=CallbackArgs(callback_trigger=<CallbackTrigger.CUDAGRAPH_RECORDING: 4>, compile_id='1/0')
start=CallbackArgs(callback_trigger=<CallbackTrigger.CUDAGRAPH_RECORDING: 4>, compile_id='0/0')
end=CallbackArgs(callback_trigger=<CallbackTrigger.CUDAGRAPH_RECORDING: 4>, compile_id='0/0')""", # noqa: B950
)
order.clear()
compiled_model.zero_grad()
loss = compiled_model(x).sum()
loss.backward()
self.assertEqual(len(order), 0)
if __name__ == "__main__":
run_tests()
| CallbackTests |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 20227,
"end": 20581
} | class ____(Helper):
"""A key, value pair for dicts."""
fields = ("key", "value")
key: Expr
value: Expr
def as_const(
self, eval_ctx: t.Optional[EvalContext] = None
) -> t.Tuple[t.Any, t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
| Pair |
python | simonw__sqlite-utils | sqlite_utils/db.py | {
"start": 4039,
"end": 4302
} | class ____(Exception):
pass
ForeignKeyIndicator = Union[
str,
ForeignKey,
Tuple[str, str],
Tuple[str, str, str],
Tuple[str, str, str, str],
]
ForeignKeysType = Union[Iterable[ForeignKeyIndicator], List[ForeignKeyIndicator]]
| TransformError |
python | django__django | tests/queryset_pickle/models.py | {
"start": 1958,
"end": 2057
} | class ____(Event):
class Meta:
abstract = True
ordering = ["title"]
| AbstractEvent |
python | encode__django-rest-framework | rest_framework/exceptions.py | {
"start": 2752,
"end": 4085
} | class ____(Exception):
"""
Base class for REST framework exceptions.
Subclasses should provide `.status_code` and `.default_detail` properties.
"""
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
default_detail = _('A server error occurred.')
default_code = 'error'
def __init__(self, detail=None, code=None):
if detail is None:
detail = self.default_detail
if code is None:
code = self.default_code
self.detail = _get_error_details(detail, code)
def __str__(self):
return str(self.detail)
def get_codes(self):
"""
Return only the code part of the error details.
Eg. {"name": ["required"]}
"""
return _get_codes(self.detail)
def get_full_details(self):
"""
Return both the message & code parts of the error details.
Eg. {"name": [{"message": "This field is required.", "code": "required"}]}
"""
return _get_full_details(self.detail)
# The recommended style for using `ValidationError` is to keep it namespaced
# under `serializers`, in order to minimize potential confusion with Django's
# built in `ValidationError`. For example:
#
# from rest_framework import serializers
# raise serializers.ValidationError('Value was invalid')
| APIException |
python | walkccc__LeetCode | solutions/2122. Recover the Original Array/2122.py | {
"start": 0,
"end": 633
} | class ____:
def recoverArray(self, nums: list[int]) -> list[int]:
nums = sorted(nums)
def getArray(x: int, count: collections.Counter) -> list[int]:
arr = []
for num in nums:
if count[num] == 0:
continue
if count[num + x] == 0:
return []
count[num] -= 1
count[num + x] -= 1
arr.append(num + x // 2)
return arr
count = collections.Counter(nums)
for i in range(1, len(nums)):
x = nums[i] - nums[0] # 2 * k
if x <= 0 or x % 2 == 1:
continue
arr = getArray(x, count.copy())
if arr:
return arr
| Solution |
python | plotly__plotly.py | plotly/graph_objs/heatmap/legendgrouptitle/_font.py | {
"start": 233,
"end": 9927
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "heatmap.legendgrouptitle"
_path_str = "heatmap.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.heatmap.legend
grouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.heatmap.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmap.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | django__django | tests/admin_views/tests.py | {
"start": 90128,
"end": 145291
} | class ____(TestCase):
"""Tests for Admin Views Permissions."""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super", password="secret", email="super@example.com"
)
cls.viewuser = User.objects.create_user(
username="viewuser", password="secret", is_staff=True
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
cls.joepublicuser = User.objects.create_user(
username="joepublic", password="secret"
)
cls.nostaffuser = User.objects.create_user(
username="nostaff", password="secret"
)
cls.s1 = Section.objects.create(name="Test section")
cls.a1 = Article.objects.create(
content="<p>Middle content</p>",
date=datetime.datetime(2008, 3, 18, 11, 54, 58),
section=cls.s1,
another_section=cls.s1,
)
cls.a2 = Article.objects.create(
content="<p>Oldest content</p>",
date=datetime.datetime(2000, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.a3 = Article.objects.create(
content="<p>Newest content</p>",
date=datetime.datetime(2009, 3, 18, 11, 54, 58),
section=cls.s1,
)
cls.p1 = PrePopulatedPost.objects.create(
title="A Long Title", published=True, slug="a-long-title"
)
# Setup permissions, for our users who can add, change, and delete.
opts = Article._meta
# User who can view Articles
cls.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("view", opts))
)
# User who can add Articles
cls.adduser.user_permissions.add(
get_perm(Article, get_permission_codename("add", opts))
)
# User who can change Articles
cls.changeuser.user_permissions.add(
get_perm(Article, get_permission_codename("change", opts))
)
cls.nostaffuser.user_permissions.add(
get_perm(Article, get_permission_codename("change", opts))
)
# User who can delete Articles
cls.deleteuser.user_permissions.add(
get_perm(Article, get_permission_codename("delete", opts))
)
cls.deleteuser.user_permissions.add(
get_perm(Section, get_permission_codename("delete", Section._meta))
)
# login POST dicts
cls.index_url = reverse("admin:index")
cls.super_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "super",
"password": "secret",
}
cls.super_email_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "super@example.com",
"password": "secret",
}
cls.super_email_bad_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "super@example.com",
"password": "notsecret",
}
cls.adduser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "adduser",
"password": "secret",
}
cls.changeuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "changeuser",
"password": "secret",
}
cls.deleteuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "deleteuser",
"password": "secret",
}
cls.nostaff_login = {
REDIRECT_FIELD_NAME: reverse("has_permission_admin:index"),
"username": "nostaff",
"password": "secret",
}
cls.joepublic_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "joepublic",
"password": "secret",
}
cls.viewuser_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"username": "viewuser",
"password": "secret",
}
cls.no_username_login = {
REDIRECT_FIELD_NAME: cls.index_url,
"password": "secret",
}
def test_login(self):
"""
Make sure only staff members can log in.
Successful posts to the login page will redirect to the original url.
Unsuccessful attempts will continue to render the login page with
a 200 status code.
"""
login_url = "%s?next=%s" % (reverse("admin:login"), reverse("admin:index"))
# Super User
response = self.client.get(self.index_url)
self.assertRedirects(response, login_url)
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Test if user enters email address
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# only correct passwords get a username hint
login = self.client.post(login_url, self.super_email_bad_login)
self.assertContains(login, ERROR_MESSAGE)
new_user = User(username="jondoe", password="secret", email="super@example.com")
new_user.save()
# check to ensure if there are multiple email addresses a user doesn't
# get a 500
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# View User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.viewuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Add User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.adduser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Change User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.changeuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Delete User
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.deleteuser_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
# Regular User should not be able to login.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.joepublic_login)
self.assertContains(login, ERROR_MESSAGE)
# Requests without username should not return 500 errors.
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.no_username_login)
self.assertEqual(login.status_code, 200)
self.assertFormError(
login.context["form"], "username", ["This field is required."]
)
def test_login_redirect_for_direct_get(self):
"""
Login redirect should be to the admin index page when going directly to
/admin/login/.
"""
response = self.client.get(reverse("admin:login"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context[REDIRECT_FIELD_NAME], reverse("admin:index"))
def test_login_redirect_when_logged_in(self):
self.client.force_login(self.superuser)
response = self.client.get(reverse("admin:login"))
self.assertRedirects(response, reverse("admin:index"))
def test_login_redirect_to_next_url_when_logged_in(self):
self.client.force_login(self.superuser)
next_url = reverse("admin:admin_views_article_add")
response = self.client.get(
reverse("admin:login"),
query_params={REDIRECT_FIELD_NAME: next_url},
)
self.assertRedirects(response, next_url)
def test_login_redirect_unsafe_next_url_when_logged_in(self):
self.client.force_login(self.superuser)
response = self.client.get(
reverse("admin:login"),
query_params={
REDIRECT_FIELD_NAME: "https://example.com/bad",
},
)
self.assertRedirects(
response, reverse("admin:index"), fetch_redirect_response=False
)
def test_login_has_permission(self):
# Regular User should not be able to login.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"), self.joepublic_login
)
self.assertContains(login, "permission denied")
# User with permissions should be able to login.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"), self.nostaff_login
)
self.assertRedirects(login, reverse("has_permission_admin:index"))
self.assertFalse(login.context)
self.client.post(reverse("has_permission_admin:logout"))
# Staff should be able to login.
response = self.client.get(reverse("has_permission_admin:index"))
self.assertEqual(response.status_code, 302)
login = self.client.post(
reverse("has_permission_admin:login"),
{
REDIRECT_FIELD_NAME: reverse("has_permission_admin:index"),
"username": "deleteuser",
"password": "secret",
},
)
self.assertRedirects(login, reverse("has_permission_admin:index"))
self.assertFalse(login.context)
self.client.post(reverse("has_permission_admin:logout"))
def test_login_successfully_redirects_to_original_URL(self):
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
query_string = "the-answer=42"
redirect_url = "%s?%s" % (self.index_url, query_string)
new_next = {REDIRECT_FIELD_NAME: redirect_url}
post_data = self.super_login.copy()
post_data.pop(REDIRECT_FIELD_NAME)
login = self.client.post(
"%s?%s" % (reverse("admin:login"), urlencode(new_next)), post_data
)
self.assertRedirects(login, redirect_url)
def test_double_login_is_not_allowed(self):
"""Regression test for #19327"""
login_url = "%s?next=%s" % (reverse("admin:login"), reverse("admin:index"))
response = self.client.get(self.index_url)
self.assertEqual(response.status_code, 302)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with non-admin user fails
login = self.client.post(login_url, self.joepublic_login)
self.assertContains(login, ERROR_MESSAGE)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
# Logging in with admin user while already logged in
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, self.index_url)
self.assertFalse(login.context)
self.client.post(reverse("admin:logout"))
def test_login_page_notice_for_non_staff_users(self):
"""
A logged-in non-staff user trying to access the admin index should be
presented with the login page and a hint indicating that the current
user doesn't have access to it.
"""
hint_template = "You are authenticated as {}"
# Anonymous user should not be shown the hint
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, "login-form")
self.assertNotContains(response, hint_template.format(""), status_code=200)
# Non-staff user should be shown the hint
self.client.force_login(self.nostaffuser)
response = self.client.get(self.index_url, follow=True)
self.assertContains(response, "login-form")
self.assertContains(
response, hint_template.format(self.nostaffuser.username), status_code=200
)
def test_add_view(self):
"""Test add view restricts access and actually adds items."""
add_dict = {
"title": "Døm ikke",
"content": "<p>great article</p>",
"date_0": "2008-03-18",
"date_1": "10:54:39",
"section": self.s1.pk,
}
# Change User should not have access to add articles
self.client.force_login(self.changeuser)
# make sure the view removes test cookie
self.assertIs(self.client.session.test_cookie_worked(), False)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
self.client.post(reverse("admin:logout"))
# View User should not have access to add articles
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.count(), 3)
# Now give the user permission to add but not change.
self.viewuser.user_permissions.add(
get_perm(Article, get_permission_codename("add", Article._meta))
)
response = self.client.get(reverse("admin:admin_views_article_add"))
self.assertEqual(response.context["title"], "Add article")
self.assertContains(response, "<title>Add article | Django site admin</title>")
self.assertContains(
response, '<input type="submit" value="Save and view" name="_continue">'
)
self.assertContains(
response,
'<h2 id="fieldset-0-0-heading" class="fieldset-heading">Some fields</h2>',
)
self.assertContains(
response,
'<h2 id="fieldset-0-1-heading" class="fieldset-heading">'
"Some other fields</h2>",
)
self.assertContains(
response,
'<h2 id="fieldset-0-2-heading" class="fieldset-heading">이름</h2>',
)
post = self.client.post(
reverse("admin:admin_views_article_add"), add_dict, follow=False
)
self.assertEqual(post.status_code, 302)
self.assertEqual(Article.objects.count(), 4)
article = Article.objects.latest("pk")
response = self.client.get(
reverse("admin:admin_views_article_change", args=(article.pk,))
)
self.assertContains(
response,
'<li class="success">The article “Døm ikke” was added successfully.</li>',
)
article.delete()
self.client.post(reverse("admin:logout"))
# Add user may login and POST to add view, then redirect to admin root
self.client.force_login(self.adduser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
change_list_link = '<a href="%s">Articles</a>' % reverse(
"admin:admin_views_article_changelist"
)
self.assertNotContains(
addpage,
change_list_link,
msg_prefix=(
"User restricted to add permission is given link to change list view "
"in breadcrumbs."
),
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, self.index_url)
self.assertEqual(Article.objects.count(), 4)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, "Greetings from a created object")
self.client.post(reverse("admin:logout"))
# The addition was logged correctly
addition_log = LogEntry.objects.all()[0]
new_article = Article.objects.last()
article_ct = ContentType.objects.get_for_model(Article)
self.assertEqual(addition_log.user_id, self.adduser.pk)
self.assertEqual(addition_log.content_type_id, article_ct.pk)
self.assertEqual(addition_log.object_id, str(new_article.pk))
self.assertEqual(addition_log.object_repr, "Døm ikke")
self.assertEqual(addition_log.action_flag, ADDITION)
self.assertEqual(addition_log.get_change_message(), "Added.")
# Super can add too, but is redirected to the change list view
self.client.force_login(self.superuser)
addpage = self.client.get(reverse("admin:admin_views_article_add"))
self.assertContains(
addpage,
change_list_link,
msg_prefix=(
"Unrestricted user is not given link to change list view in "
"breadcrumbs."
),
)
post = self.client.post(reverse("admin:admin_views_article_add"), add_dict)
self.assertRedirects(post, reverse("admin:admin_views_article_changelist"))
self.assertEqual(Article.objects.count(), 5)
self.client.post(reverse("admin:logout"))
# 8509 - if a normal user is already logged in, it is possible
# to change user into the superuser without error
self.client.force_login(self.joepublicuser)
# Check and make sure that if user expires, data still persists
self.client.force_login(self.superuser)
# make sure the view removes test cookie
self.assertIs(self.client.session.test_cookie_worked(), False)
@mock.patch("django.contrib.admin.options.InlineModelAdmin.has_change_permission")
def test_add_view_with_view_only_inlines(self, has_change_permission):
"""User with add permission to a section but view-only for inlines."""
self.viewuser.user_permissions.add(
get_perm(Section, get_permission_codename("add", Section._meta))
)
self.client.force_login(self.viewuser)
# Valid POST creates a new section.
data = {
"name": "New obj",
"article_set-TOTAL_FORMS": 0,
"article_set-INITIAL_FORMS": 0,
}
response = self.client.post(reverse("admin:admin_views_section_add"), data)
self.assertRedirects(response, reverse("admin:index"))
self.assertEqual(Section.objects.latest("id").name, data["name"])
# InlineModelAdmin.has_change_permission()'s obj argument is always
# None during object add.
self.assertEqual(
[obj for (request, obj), _ in has_change_permission.call_args_list],
[None, None],
)
    def test_change_view(self):
        """Change view should restrict access and allow users to edit items."""
        # A complete, valid payload for the Article change form.
        change_dict = {
            "title": "Ikke fordømt",
            "content": "<p>edited article</p>",
            "date_0": "2008-03-18",
            "date_1": "10:54:39",
            "section": self.s1.pk,
        }
        article_change_url = reverse(
            "admin:admin_views_article_change", args=(self.a1.pk,)
        )
        article_changelist_url = reverse("admin:admin_views_article_changelist")
        # add user should not be able to view the list of article or change any
        # of them
        self.client.force_login(self.adduser)
        response = self.client.get(article_changelist_url)
        self.assertEqual(response.status_code, 403)
        response = self.client.get(article_change_url)
        self.assertEqual(response.status_code, 403)
        post = self.client.post(article_change_url, change_dict)
        self.assertEqual(post.status_code, 403)
        self.client.post(reverse("admin:logout"))
        # view user can view articles but not make changes.
        self.client.force_login(self.viewuser)
        response = self.client.get(article_changelist_url)
        self.assertContains(
            response,
            "<title>Select article to view | Django site admin</title>",
        )
        self.assertContains(response, "<h1>Select article to view</h1>")
        self.assertEqual(response.context["title"], "Select article to view")
        response = self.client.get(article_change_url)
        # View-only users get the read-only rendering ("View article") with a
        # Close link instead of Save buttons.
        self.assertContains(response, "<title>View article | Django site admin</title>")
        self.assertContains(response, "<h1>View article</h1>")
        self.assertContains(response, "<label>Extra form field:</label>")
        self.assertContains(
            response,
            '<a role="button" href="/test_admin/admin/admin_views/article/" '
            'class="closelink">Close</a>',
        )
        self.assertEqual(response.context["title"], "View article")
        post = self.client.post(article_change_url, change_dict)
        self.assertEqual(post.status_code, 403)
        # The POST above must not have modified the article.
        self.assertEqual(
            Article.objects.get(pk=self.a1.pk).content, "<p>Middle content</p>"
        )
        self.client.post(reverse("admin:logout"))
        # change user can view all items and edit them
        self.client.force_login(self.changeuser)
        response = self.client.get(article_changelist_url)
        self.assertEqual(response.context["title"], "Select article to change")
        self.assertContains(
            response,
            "<title>Select article to change | Django site admin</title>",
        )
        self.assertContains(response, "<h1>Select article to change</h1>")
        response = self.client.get(article_change_url)
        self.assertEqual(response.context["title"], "Change article")
        self.assertContains(
            response,
            "<title>Change article | Django site admin</title>",
        )
        self.assertContains(response, "<h1>Change article</h1>")
        post = self.client.post(article_change_url, change_dict)
        self.assertRedirects(post, article_changelist_url)
        self.assertEqual(
            Article.objects.get(pk=self.a1.pk).content, "<p>edited article</p>"
        )
        # one error in form should produce singular error message, multiple
        # errors plural.
        change_dict["title"] = ""
        post = self.client.post(article_change_url, change_dict)
        self.assertContains(
            post,
            "Please correct the error below.",
            msg_prefix=(
                "Singular error message not found in response to post with one error"
            ),
        )
        change_dict["content"] = ""
        post = self.client.post(article_change_url, change_dict)
        self.assertContains(
            post,
            "Please correct the errors below.",
            msg_prefix=(
                "Plural error message not found in response to post with multiple "
                "errors"
            ),
        )
        self.client.post(reverse("admin:logout"))
        # Test redirection when using row-level change permissions. Refs
        # #11513.
        # Per the assertions below, the test ModelAdmin denies change on
        # odd-id rows (403) and allows it on even-id rows.
        r1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
        r2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
        r3 = RowLevelChangePermissionModel.objects.create(id=3, name="odd id mult 3")
        r6 = RowLevelChangePermissionModel.objects.create(id=6, name="even id mult 3")
        change_url_1 = reverse(
            "admin:admin_views_rowlevelchangepermissionmodel_change", args=(r1.pk,)
        )
        change_url_2 = reverse(
            "admin:admin_views_rowlevelchangepermissionmodel_change", args=(r2.pk,)
        )
        change_url_3 = reverse(
            "admin:admin_views_rowlevelchangepermissionmodel_change", args=(r3.pk,)
        )
        change_url_6 = reverse(
            "admin:admin_views_rowlevelchangepermissionmodel_change", args=(r6.pk,)
        )
        logins = [
            self.superuser,
            self.viewuser,
            self.adduser,
            self.changeuser,
            self.deleteuser,
        ]
        # Row-level permission beats every model-level permission, including
        # superuser: the outcome is identical for all staff users.
        for login_user in logins:
            with self.subTest(login_user.username):
                self.client.force_login(login_user)
                response = self.client.get(change_url_1)
                self.assertEqual(response.status_code, 403)
                response = self.client.post(change_url_1, {"name": "changed"})
                self.assertEqual(
                    RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
                )
                self.assertEqual(response.status_code, 403)
                response = self.client.get(change_url_2)
                self.assertEqual(response.status_code, 200)
                response = self.client.post(change_url_2, {"name": "changed"})
                self.assertEqual(
                    RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
                )
                self.assertRedirects(response, self.index_url)
                response = self.client.get(change_url_3)
                self.assertEqual(response.status_code, 200)
                response = self.client.post(change_url_3, {"name": "changed"})
                self.assertEqual(response.status_code, 403)
                self.assertEqual(
                    RowLevelChangePermissionModel.objects.get(id=3).name,
                    "odd id mult 3",
                )
                response = self.client.get(change_url_6)
                self.assertEqual(response.status_code, 200)
                response = self.client.post(change_url_6, {"name": "changed"})
                self.assertEqual(
                    RowLevelChangePermissionModel.objects.get(id=6).name, "changed"
                )
                self.assertRedirects(response, self.index_url)
                self.client.post(reverse("admin:logout"))
        # Non-staff users are bounced to the admin login form instead of 403,
        # and their POSTs never alter data.
        for login_user in [self.joepublicuser, self.nostaffuser]:
            with self.subTest(login_user.username):
                self.client.force_login(login_user)
                response = self.client.get(change_url_1, follow=True)
                self.assertContains(response, "login-form")
                response = self.client.post(
                    change_url_1, {"name": "changed"}, follow=True
                )
                self.assertEqual(
                    RowLevelChangePermissionModel.objects.get(id=1).name, "odd id"
                )
                self.assertContains(response, "login-form")
                response = self.client.get(change_url_2, follow=True)
                self.assertContains(response, "login-form")
                response = self.client.post(
                    change_url_2, {"name": "changed again"}, follow=True
                )
                self.assertEqual(
                    RowLevelChangePermissionModel.objects.get(id=2).name, "changed"
                )
                self.assertContains(response, "login-form")
                self.client.post(reverse("admin:logout"))
    def test_change_view_without_object_change_permission(self):
        """
        The object should be read-only if the user has permission to view it
        and change objects of that type but not to change the current object.
        """
        # NOTE(review): 'admin9' is presumably an AdminSite whose ModelAdmin
        # denies object-level change for this article — confirm in urls/admin.
        change_url = reverse("admin9:admin_views_article_change", args=(self.a1.pk,))
        self.client.force_login(self.viewuser)
        response = self.client.get(change_url)
        # Read-only rendering: "View article" title and a Close link pointing
        # back at the admin9 changelist.
        self.assertEqual(response.context["title"], "View article")
        self.assertContains(response, "<title>View article | Django site admin</title>")
        self.assertContains(response, "<h1>View article</h1>")
        self.assertContains(
            response,
            '<a role="button" href="/test_admin/admin9/admin_views/article/" '
            'class="closelink">Close</a>',
        )
    def test_change_view_save_as_new(self):
        """
        'Save as new' should raise PermissionDenied for users without the 'add'
        permission.
        """
        # "_saveasnew" makes the change view create a fresh object instead of
        # updating the existing one, so it requires the 'add' permission.
        change_dict_save_as_new = {
            "_saveasnew": "Save as new",
            "title": "Ikke fordømt",
            "content": "<p>edited article</p>",
            "date_0": "2008-03-18",
            "date_1": "10:54:39",
            "section": self.s1.pk,
        }
        article_change_url = reverse(
            "admin:admin_views_article_change", args=(self.a1.pk,)
        )
        # Add user can perform "Save as new".
        article_count = Article.objects.count()
        self.client.force_login(self.adduser)
        post = self.client.post(article_change_url, change_dict_save_as_new)
        self.assertRedirects(post, self.index_url)
        self.assertEqual(Article.objects.count(), article_count + 1)
        self.client.logout()
        # Change user cannot perform "Save as new" (no 'add' permission).
        article_count = Article.objects.count()
        self.client.force_login(self.changeuser)
        post = self.client.post(article_change_url, change_dict_save_as_new)
        self.assertEqual(post.status_code, 403)
        self.assertEqual(Article.objects.count(), article_count)
        # User with both add and change permissions should be redirected to the
        # change page for the newly created object.
        article_count = Article.objects.count()
        self.client.force_login(self.superuser)
        post = self.client.post(article_change_url, change_dict_save_as_new)
        self.assertEqual(Article.objects.count(), article_count + 1)
        new_article = Article.objects.latest("id")
        self.assertRedirects(
            post, reverse("admin:admin_views_article_change", args=(new_article.pk,))
        )
    def test_change_view_with_view_only_inlines(self):
        """
        User with change permission to a section but view-only for inlines.
        """
        self.viewuser.user_permissions.add(
            get_perm(Section, get_permission_codename("change", Section._meta))
        )
        self.client.force_login(self.viewuser)
        # GET shows inlines.
        response = self.client.get(
            reverse("admin:admin_views_section_change", args=(self.s1.pk,))
        )
        self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
        formset = response.context["inline_admin_formsets"][0]
        # 3 existing articles, no extra blank forms (view-only inline).
        self.assertEqual(len(formset.forms), 3)
        # Valid POST changes the name.
        data = {
            "name": "Can edit name with view-only inlines",
            "article_set-TOTAL_FORMS": 3,
            "article_set-INITIAL_FORMS": 3,
        }
        response = self.client.post(
            reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
        )
        self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
        self.assertEqual(Section.objects.get(pk=self.s1.pk).name, data["name"])
        # Invalid POST reshows inlines.
        # "name" is required, so removing it makes the form invalid.
        del data["name"]
        response = self.client.post(
            reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
        formset = response.context["inline_admin_formsets"][0]
        self.assertEqual(len(formset.forms), 3)
    def test_change_view_with_view_only_last_inline(self):
        """
        With only the 'view' permission on Section, the change view renders
        the existing inline rows read-only without an extra empty form.
        """
        self.viewuser.user_permissions.add(
            get_perm(Section, get_permission_codename("view", Section._meta))
        )
        self.client.force_login(self.viewuser)
        response = self.client.get(
            reverse("admin:admin_views_section_change", args=(self.s1.pk,))
        )
        self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
        formset = response.context["inline_admin_formsets"][0]
        # Only the 3 existing articles are rendered.
        self.assertEqual(len(formset.forms), 3)
        # The last inline is not marked as empty.
        self.assertContains(response, 'id="article_set-2"')
    def test_change_view_with_view_and_add_inlines(self):
        """User has view and add permissions on the inline model."""
        self.viewuser.user_permissions.add(
            get_perm(Section, get_permission_codename("change", Section._meta))
        )
        self.viewuser.user_permissions.add(
            get_perm(Article, get_permission_codename("add", Article._meta))
        )
        self.client.force_login(self.viewuser)
        # GET shows inlines.
        response = self.client.get(
            reverse("admin:admin_views_section_change", args=(self.s1.pk,))
        )
        self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
        formset = response.context["inline_admin_formsets"][0]
        # 3 existing articles plus 3 extra blank forms (add permission).
        self.assertEqual(len(formset.forms), 6)
        # Valid POST creates a new article.
        data = {
            "name": "Can edit name with view-only inlines",
            "article_set-TOTAL_FORMS": 6,
            "article_set-INITIAL_FORMS": 3,
            "article_set-3-id": [""],
            "article_set-3-title": ["A title"],
            "article_set-3-content": ["Added content"],
            "article_set-3-date_0": ["2008-3-18"],
            "article_set-3-date_1": ["11:54:58"],
            "article_set-3-section": [str(self.s1.pk)],
        }
        response = self.client.post(
            reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
        )
        self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
        self.assertEqual(Section.objects.get(pk=self.s1.pk).name, data["name"])
        # The 3 fixture articles plus the newly added one.
        self.assertEqual(Article.objects.count(), 4)
        # Invalid POST reshows inlines.
        del data["name"]
        response = self.client.post(
            reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context["inline_admin_formsets"]), 1)
        formset = response.context["inline_admin_formsets"][0]
        self.assertEqual(len(formset.forms), 6)
    def test_change_view_with_view_and_delete_inlines(self):
        """User has view and delete permissions on the inline model."""
        self.viewuser.user_permissions.add(
            get_perm(Section, get_permission_codename("change", Section._meta))
        )
        self.client.force_login(self.viewuser)
        data = {
            "name": "Name is required.",
            "article_set-TOTAL_FORMS": 6,
            "article_set-INITIAL_FORMS": 3,
            "article_set-0-id": [str(self.a1.pk)],
            "article_set-0-DELETE": ["on"],
        }
        # Inline POST details are ignored without delete permission.
        response = self.client.post(
            reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
        )
        self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
        # The DELETE flag was ignored: all 3 articles still exist.
        self.assertEqual(Article.objects.count(), 3)
        # Deletion successful when delete permission is added.
        self.viewuser.user_permissions.add(
            get_perm(Article, get_permission_codename("delete", Article._meta))
        )
        data = {
            "name": "Name is required.",
            "article_set-TOTAL_FORMS": 6,
            "article_set-INITIAL_FORMS": 3,
            "article_set-0-id": [str(self.a1.pk)],
            "article_set-0-DELETE": ["on"],
        }
        response = self.client.post(
            reverse("admin:admin_views_section_change", args=(self.s1.pk,)), data
        )
        self.assertRedirects(response, reverse("admin:admin_views_section_changelist"))
        # This time the flagged article was deleted.
        self.assertEqual(Article.objects.count(), 2)
    def test_delete_view(self):
        """Delete view should restrict access and actually delete items."""
        delete_dict = {"post": "yes"}
        delete_url = reverse("admin:admin_views_article_delete", args=(self.a1.pk,))
        # add user should not be able to delete articles
        self.client.force_login(self.adduser)
        response = self.client.get(delete_url)
        self.assertEqual(response.status_code, 403)
        post = self.client.post(delete_url, delete_dict)
        self.assertEqual(post.status_code, 403)
        self.assertEqual(Article.objects.count(), 3)
        self.client.logout()
        # view user should not be able to delete articles
        self.client.force_login(self.viewuser)
        response = self.client.get(delete_url)
        self.assertEqual(response.status_code, 403)
        post = self.client.post(delete_url, delete_dict)
        self.assertEqual(post.status_code, 403)
        self.assertEqual(Article.objects.count(), 3)
        self.client.logout()
        # Delete user can delete
        self.client.force_login(self.deleteuser)
        # Deleting the section would cascade to its 3 articles; the summary
        # reflects that.
        response = self.client.get(
            reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
        )
        self.assertContains(response, "<h1>Delete</h1>")
        self.assertContains(response, "<h2>Summary</h2>")
        self.assertContains(response, "<li>Articles: 3</li>")
        # test response contains link to related Article
        self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
        response = self.client.get(delete_url)
        self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
        self.assertContains(response, "<h2>Summary</h2>")
        self.assertContains(response, "<li>Articles: 1</li>")
        post = self.client.post(delete_url, delete_dict)
        self.assertRedirects(post, self.index_url)
        self.assertEqual(Article.objects.count(), 2)
        # The model's delete hook apparently sends mail — verify it fired once.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, "Greetings from a deleted object")
        # The deletion was recorded in the admin log.
        article_ct = ContentType.objects.get_for_model(Article)
        logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)
        self.assertEqual(logged.object_id, str(self.a1.pk))
    def test_delete_view_with_no_default_permissions(self):
        """
        The delete view allows users to delete collected objects without a
        'delete' permission (ReadOnlyPizza.Meta.default_permissions is empty).
        """
        pizza = ReadOnlyPizza.objects.create(name="Double Cheese")
        delete_url = reverse("admin:admin_views_readonlypizza_delete", args=(pizza.pk,))
        # adduser has no delete permission on ReadOnlyPizza, but none is
        # required since the model declares no default permissions.
        self.client.force_login(self.adduser)
        response = self.client.get(delete_url)
        self.assertContains(response, "admin_views/readonlypizza/%s/" % pizza.pk)
        self.assertContains(response, "<h2>Summary</h2>")
        self.assertContains(response, "<li>Read only pizzas: 1</li>")
        post = self.client.post(delete_url, {"post": "yes"})
        self.assertRedirects(
            post, reverse("admin:admin_views_readonlypizza_changelist")
        )
        self.assertEqual(ReadOnlyPizza.objects.count(), 0)
    def test_delete_view_nonexistent_obj(self):
        """
        Deleting a nonexistent object redirects to the admin index with an
        explanatory message rather than raising a 404.
        """
        self.client.force_login(self.deleteuser)
        url = reverse("admin:admin_views_article_delete", args=("nonexistent",))
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse("admin:index"))
        self.assertEqual(
            [m.message for m in response.context["messages"]],
            ["article with ID “nonexistent” doesn’t exist. Perhaps it was deleted?"],
        )
    def test_history_view(self):
        """History view should restrict access."""
        # add user should not be able to view the list of article or change any
        # of them
        self.client.force_login(self.adduser)
        response = self.client.get(
            reverse("admin:admin_views_article_history", args=(self.a1.pk,))
        )
        self.assertEqual(response.status_code, 403)
        self.client.post(reverse("admin:logout"))
        # view user can view all items
        self.client.force_login(self.viewuser)
        response = self.client.get(
            reverse("admin:admin_views_article_history", args=(self.a1.pk,))
        )
        self.assertEqual(response.status_code, 200)
        self.client.post(reverse("admin:logout"))
        # change user can view all items and edit them
        self.client.force_login(self.changeuser)
        response = self.client.get(
            reverse("admin:admin_views_article_history", args=(self.a1.pk,))
        )
        self.assertEqual(response.status_code, 200)
        # Test redirection when using row-level change permissions. Refs
        # #11513.
        # Per the assertions below, odd ids are denied (403) and even ids
        # allowed — the same row-level rule exercised in test_change_view.
        rl1 = RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
        rl2 = RowLevelChangePermissionModel.objects.create(id=2, name="even id")
        logins = [
            self.superuser,
            self.viewuser,
            self.adduser,
            self.changeuser,
            self.deleteuser,
        ]
        for login_user in logins:
            with self.subTest(login_user.username):
                self.client.force_login(login_user)
                url = reverse(
                    "admin:admin_views_rowlevelchangepermissionmodel_history",
                    args=(rl1.pk,),
                )
                response = self.client.get(url)
                self.assertEqual(response.status_code, 403)
                url = reverse(
                    "admin:admin_views_rowlevelchangepermissionmodel_history",
                    args=(rl2.pk,),
                )
                response = self.client.get(url)
                self.assertEqual(response.status_code, 200)
                self.client.post(reverse("admin:logout"))
        # Non-staff users are redirected to the admin login form instead.
        for login_user in [self.joepublicuser, self.nostaffuser]:
            with self.subTest(login_user.username):
                self.client.force_login(login_user)
                url = reverse(
                    "admin:admin_views_rowlevelchangepermissionmodel_history",
                    args=(rl1.pk,),
                )
                response = self.client.get(url, follow=True)
                self.assertContains(response, "login-form")
                url = reverse(
                    "admin:admin_views_rowlevelchangepermissionmodel_history",
                    args=(rl2.pk,),
                )
                response = self.client.get(url, follow=True)
                self.assertContains(response, "login-form")
                self.client.post(reverse("admin:logout"))
    def test_history_view_bad_url(self):
        """
        A history URL with an invalid object id redirects to the admin index
        with an explanatory message rather than raising a 404.
        """
        self.client.force_login(self.changeuser)
        response = self.client.get(
            reverse("admin:admin_views_article_history", args=("foo",)), follow=True
        )
        self.assertRedirects(response, reverse("admin:index"))
        self.assertEqual(
            [m.message for m in response.context["messages"]],
            ["article with ID “foo” doesn’t exist. Perhaps it was deleted?"],
        )
    def test_conditionally_show_add_section_link(self):
        """
        The foreign key widget should only show the "add related" button if the
        user has permission to add that related item.
        """
        self.client.force_login(self.adduser)
        # The user can't add sections yet, so they shouldn't see the "add
        # section" link.
        url = reverse("admin:admin_views_article_add")
        # The "+" widget button renders with this element id.
        add_link_text = "add_id_section"
        response = self.client.get(url)
        self.assertNotContains(response, add_link_text)
        # Allow the user to add sections too. Now they can see the "add
        # section" link.
        user = User.objects.get(username="adduser")
        perm = get_perm(Section, get_permission_codename("add", Section._meta))
        user.user_permissions.add(perm)
        # Re-fetch: permission changes take effect on the next request.
        response = self.client.get(url)
        self.assertContains(response, add_link_text)
    def test_conditionally_show_change_section_link(self):
        """
        The foreign key widget should only show the "change related" button if
        the user has permission to change that related item.
        """
        def get_change_related(response):
            # Reach into the rendered form to read the widget flag directly.
            return (
                response.context["adminform"]
                .form.fields["section"]
                .widget.can_change_related
            )
        self.client.force_login(self.adduser)
        # The user can't change sections yet, so they shouldn't see the
        # "change section" link.
        url = reverse("admin:admin_views_article_add")
        change_link_text = "change_id_section"
        response = self.client.get(url)
        self.assertFalse(get_change_related(response))
        self.assertNotContains(response, change_link_text)
        # Allow the user to change sections too. Now they can see the
        # "change section" link.
        user = User.objects.get(username="adduser")
        perm = get_perm(Section, get_permission_codename("change", Section._meta))
        user.user_permissions.add(perm)
        response = self.client.get(url)
        self.assertTrue(get_change_related(response))
        self.assertContains(response, change_link_text)
    def test_conditionally_show_delete_section_link(self):
        """
        The foreign key widget should only show the "delete related" button if
        the user has permission to delete that related item.
        """
        def get_delete_related(response):
            # Reach into the rendered form to read the widget flag directly.
            return (
                response.context["adminform"]
                .form.fields["sub_section"]
                .widget.can_delete_related
            )
        self.client.force_login(self.adduser)
        # The user can't delete sections yet, so they shouldn't see the
        # "delete section" link.
        url = reverse("admin:admin_views_article_add")
        delete_link_text = "delete_id_sub_section"
        response = self.client.get(url)
        self.assertFalse(get_delete_related(response))
        self.assertNotContains(response, delete_link_text)
        # Allow the user to delete sections too. Now they can see the
        # "delete section" link.
        user = User.objects.get(username="adduser")
        perm = get_perm(Section, get_permission_codename("delete", Section._meta))
        user.user_permissions.add(perm)
        response = self.client.get(url)
        self.assertTrue(get_delete_related(response))
        self.assertContains(response, delete_link_text)
    def test_disabled_permissions_when_logged_in(self):
        """
        A logged-in user deactivated mid-session (is_active=False) is treated
        as anonymous: both the index and protected views show the login form.
        """
        self.client.force_login(self.superuser)
        superuser = User.objects.get(username="super")
        superuser.is_active = False
        superuser.save()
        response = self.client.get(self.index_url, follow=True)
        self.assertContains(response, 'id="login-form"')
        self.assertNotContains(response, "Log out")
        response = self.client.get(reverse("secure_view"), follow=True)
        self.assertContains(response, 'id="login-form"')
    def test_disabled_staff_permissions_when_logged_in(self):
        """
        A logged-in user whose staff flag is revoked mid-session loses admin
        access: both the index and protected views show the login form.
        """
        self.client.force_login(self.superuser)
        superuser = User.objects.get(username="super")
        superuser.is_staff = False
        superuser.save()
        response = self.client.get(self.index_url, follow=True)
        self.assertContains(response, 'id="login-form"')
        self.assertNotContains(response, "Log out")
        response = self.client.get(reverse("secure_view"), follow=True)
        self.assertContains(response, 'id="login-form"')
    def test_app_list_permissions(self):
        """
        If a user has no module perms, the app list returns a 404.
        """
        opts = Article._meta
        change_user = User.objects.get(username="changeuser")
        permission = get_perm(Article, get_permission_codename("change", opts))
        self.client.force_login(self.changeuser)
        # the user has no module permissions
        # Removing the only permission this user holds on the app.
        change_user.user_permissions.remove(permission)
        response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
        self.assertEqual(response.status_code, 404)
        # the user now has module permissions
        change_user.user_permissions.add(permission)
        response = self.client.get(reverse("admin:app_list", args=("admin_views",)))
        self.assertEqual(response.status_code, 200)
    def test_shortcut_view_only_available_to_staff(self):
        """
        Only admin users should be able to use the admin shortcut view.
        """
        model_ctype = ContentType.objects.get_for_model(ModelWithStringPrimaryKey)
        obj = ModelWithStringPrimaryKey.objects.create(string_pk="foo")
        # The "view on site" shortcut takes a content type id and an object pk.
        shortcut_url = reverse("admin:view_on_site", args=(model_ctype.pk, obj.pk))
        # Not logged in: we should see the login page.
        response = self.client.get(shortcut_url, follow=True)
        self.assertTemplateUsed(response, "admin/login.html")
        # Logged in? Redirect.
        self.client.force_login(self.superuser)
        response = self.client.get(shortcut_url, follow=False)
        # Can't use self.assertRedirects() because User.get_absolute_url() is
        # silly.
        self.assertEqual(response.status_code, 302)
        # Domain may depend on contrib.sites tests also run
        self.assertRegex(response.url, "http://(testserver|example.com)/dummy/foo/")
def test_has_module_permission(self):
"""
has_module_permission() returns True for all users who
have any permission for that module (add, change, or delete), so that
the module is displayed on the admin index page.
"""
self.client.force_login(self.superuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.viewuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.adduser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.changeuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
self.client.logout()
self.client.force_login(self.deleteuser)
response = self.client.get(self.index_url)
self.assertContains(response, "admin_views")
self.assertContains(response, "Articles")
    def test_overriding_has_module_permission(self):
        """
        If has_module_permission() always returns False, the module shouldn't
        be displayed on the admin index page for any users.
        """
        articles = Article._meta.verbose_name_plural.title()
        sections = Section._meta.verbose_name_plural.title()
        # 'admin7' is the site whose Article admin overrides
        # has_module_permission() to return False.
        index_url = reverse("admin7:index")
        self.client.force_login(self.superuser)
        response = self.client.get(index_url)
        # Sections stay visible; Articles are hidden even for the superuser.
        self.assertContains(response, sections)
        self.assertNotContains(response, articles)
        self.client.logout()
        self.client.force_login(self.viewuser)
        response = self.client.get(index_url)
        self.assertNotContains(response, "admin_views")
        self.assertNotContains(response, articles)
        self.client.logout()
        self.client.force_login(self.adduser)
        response = self.client.get(index_url)
        self.assertNotContains(response, "admin_views")
        self.assertNotContains(response, articles)
        self.client.logout()
        self.client.force_login(self.changeuser)
        response = self.client.get(index_url)
        self.assertNotContains(response, "admin_views")
        self.assertNotContains(response, articles)
        self.client.logout()
        self.client.force_login(self.deleteuser)
        response = self.client.get(index_url)
        self.assertNotContains(response, articles)
        # The app list displays Sections but not Articles as the latter has
        # ModelAdmin.has_module_permission() = False.
        self.client.force_login(self.superuser)
        response = self.client.get(reverse("admin7:app_list", args=("admin_views",)))
        self.assertContains(response, sections)
        self.assertNotContains(response, articles)
    def test_post_save_message_no_forbidden_links_visible(self):
        """
        Post-save message shouldn't contain a link to the change form if the
        user doesn't have the change permission.
        """
        self.client.force_login(self.adduser)
        # Emulate Article creation for user with add-only permission.
        post_data = {
            "title": "Fun & games",
            "content": "Some content",
            "date_0": "2015-10-31",
            "date_1": "16:35:00",
            "_save": "Save",
        }
        response = self.client.post(
            reverse("admin:admin_views_article_add"), post_data, follow=True
        )
        # html=True compares parsed markup: the success message must be plain
        # text (no <a> to the change form) with the title HTML-escaped.
        self.assertContains(
            response,
            '<li class="success">The article “Fun &amp; games” was added successfully.'
            "</li>",
            html=True,
        )
@override_settings(
ROOT_URLCONF="admin_views.urls",
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
],
)
| AdminViewPermissionsTest |
python | milvus-io__pymilvus | pymilvus/bulk_writer/remote_bulk_writer.py | {
"start": 1138,
"end": 13652
} | class ____(LocalBulkWriter):
class S3ConnectParam:
def __init__(
self,
bucket_name: str = DEFAULT_BUCKET_NAME,
endpoint: Optional[str] = None,
access_key: Optional[str] = None,
secret_key: Optional[str] = None,
secure: bool = False,
session_token: Optional[str] = None,
region: Optional[str] = None,
http_client: Any = None,
credentials: Any = None,
):
self._bucket_name = bucket_name
self._endpoint = endpoint
self._access_key = access_key
self._secret_key = secret_key
self._secure = (secure,)
self._session_token = (session_token,)
self._region = (region,)
self._http_client = (http_client,) # urllib3.poolmanager.PoolManager
self._credentials = (credentials,) # minio.credentials.Provider
ConnectParam = S3ConnectParam # keep the ConnectParam for compatible with user's legacy code
class AzureConnectParam:
def __init__(
self,
container_name: str,
conn_str: str,
account_url: Optional[str] = None,
credential: Optional[Union[str, Dict[str, str]]] = None,
upload_chunk_size: int = 8 * 1024 * 1024,
upload_concurrency: int = 4,
):
"""Connection parameters for Azure blob storage
Args:
container_name(str): The target container name
conn_str(str): A connection string to an Azure Storage account,
which can be parsed to an account_url and a credential.
To generate a connection string, read this link:
https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string
account_url(str): A string in format like https://<storage-account>.blob.core.windows.net
Read this link for more info:
https://learn.microsoft.com/en-us/azure/storage/common/storage-account-overview
credential: Account access key for the account, read this link for more info:
https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys
upload_chunk_size: If the blob size is larger than this value or unknown,
the blob is uploaded in chunks by parallel connections. This parameter is
passed to max_single_put_size of Azure. Read this link for more info:
https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blob-upload-python#specify-data-transfer-options-for-upload
upload_concurrency: The maximum number of parallel connections to use when uploading
in chunks. This parameter is passed to max_concurrency of Azure.
Read this link for more info:
https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blob-upload-python#specify-data-transfer-options-for-upload
"""
self._container_name = container_name
self._conn_str = conn_str
self._account_url = account_url
self._credential = credential
self._upload_chunk_size = upload_chunk_size
self._upload_concurrency = upload_concurrency
    def __init__(
        self,
        schema: CollectionSchema,
        remote_path: str,
        connect_param: Optional[Union[S3ConnectParam, AzureConnectParam]],
        chunk_size: int = 1024 * MB,
        file_type: BulkFileType = BulkFileType.PARQUET,
        config: Optional[dict] = None,
        **kwargs,
    ):
        """Write bulk-insert files locally, then upload them to remote storage.

        Args:
            schema: Collection schema describing the data to be written.
            remote_path: Root path in the remote bucket/container; the
                writer's uuid is appended to keep runs isolated.
            connect_param: S3/MinIO or Azure connection parameters; selects
                which storage client is created.
            chunk_size: Maximum size of each generated data file.
            file_type: Output format of the generated files.
            config: Extra configuration forwarded to the local writer.
        """
        # Default temp dir is "bulk_writer" next to the entry script, unless
        # the caller overrides it via the TEMP_LOCAL_PATH kwarg (which must be
        # popped so it isn't forwarded to LocalBulkWriter).
        temp_local_path = str(Path(sys.argv[0]).resolve().parent.joinpath("bulk_writer"))
        if TEMP_LOCAL_PATH in kwargs:
            temp_local_path = kwargs.get(TEMP_LOCAL_PATH)
            kwargs.pop(TEMP_LOCAL_PATH)
        super().__init__(schema, temp_local_path, chunk_size, file_type, config, **kwargs)
        # Append the writer's uuid so concurrent writers never collide on the
        # same remote prefix.
        self._remote_path = Path("/").joinpath(remote_path).joinpath(super().uuid)
        self._connect_param = connect_param
        self._client = None
        # Eagerly connect so misconfiguration fails at construction time.
        self._get_client()
        self._remote_files = []
        logger.info(f"Remote buffer writer initialized, target path: {self._remote_path}")
def __enter__(self):
return self
def __exit__(self, exc_type: object, exc_val: object, exc_tb: object):
super().__exit__(exc_type, exc_val, exc_tb)
# remove the temp folder "bulk_writer"
if Path(self._local_path).parent.exists() and not any(
Path(self._local_path).parent.iterdir()
):
Path(self._local_path).parent.rmdir()
logger.info(f"Delete empty directory '{Path(self._local_path).parent}'")
def _get_client(self):
if self._client is not None:
return self._client
if isinstance(self._connect_param, RemoteBulkWriter.S3ConnectParam):
try:
def arg_parse(arg: Any):
return arg[0] if isinstance(arg, tuple) else arg
self._client = Minio(
endpoint=arg_parse(self._connect_param._endpoint),
access_key=arg_parse(self._connect_param._access_key),
secret_key=arg_parse(self._connect_param._secret_key),
secure=arg_parse(self._connect_param._secure),
session_token=arg_parse(self._connect_param._session_token),
region=arg_parse(self._connect_param._region),
http_client=arg_parse(self._connect_param._http_client),
credentials=arg_parse(self._connect_param._credentials),
)
logger.info("Minio/S3 blob storage client successfully initialized")
except Exception as err:
logger.error(f"Failed to connect MinIO/S3, error: {err}")
raise
elif isinstance(self._connect_param, RemoteBulkWriter.AzureConnectParam):
try:
if (
self._connect_param._conn_str is not None
and len(self._connect_param._conn_str) > 0
):
self._client = BlobServiceClient.from_connection_string(
conn_str=self._connect_param._conn_str,
credential=self._connect_param._credential,
max_block_size=self._connect_param._upload_chunk_size,
max_single_put_size=self._connect_param._upload_chunk_size,
)
elif (
self._connect_param._account_url is not None
and len(self._connect_param._account_url) > 0
):
self._client = BlobServiceClient(
account_url=self._connect_param._account_url,
credential=self._connect_param._credential,
max_block_size=self._connect_param._upload_chunk_size,
max_single_put_size=self._connect_param._upload_chunk_size,
)
else:
raise MilvusException(message="Illegal connection parameters")
logger.info("Azure blob storage client successfully initialized")
except Exception as err:
logger.error(f"Failed to connect Azure, error: {err}")
raise
return self._client
def _stat_object(self, object_name: str):
if isinstance(self._client, Minio):
return self._client.stat_object(
bucket_name=self._connect_param._bucket_name, object_name=object_name
)
if isinstance(self._client, BlobServiceClient):
blob = self._client.get_blob_client(
container=self._connect_param._container_name, blob=object_name
)
return blob.get_blob_properties()
raise MilvusException(message="Blob storage client is not initialized")
def _object_exists(self, object_name: str) -> bool:
try:
self._stat_object(object_name=object_name)
except S3Error as s3err:
if s3err.code == "NoSuchKey":
return False
self._throw(f"Failed to stat MinIO/S3 object '{object_name}', error: {s3err}")
except AzureError as azure_err:
if azure_err.error_code == "BlobNotFound":
return False
self._throw(f"Failed to stat Azure object '{object_name}', error: {azure_err}")
return True
def _bucket_exists(self) -> bool:
if isinstance(self._client, Minio):
return self._client.bucket_exists(self._connect_param._bucket_name)
if isinstance(self._client, BlobServiceClient):
containers = self._client.list_containers()
for container in containers:
if self._connect_param._container_name == container["name"]:
return True
return False
raise MilvusException(message="Blob storage client is not initialized")
def _upload_object(self, file_path: str, object_name: str):
logger.info(f"Prepare to upload '{file_path}' to '{object_name}'")
if isinstance(self._client, Minio):
logger.info(f"Target bucket: '{self._connect_param._bucket_name}'")
self._client.fput_object(
bucket_name=self._connect_param._bucket_name,
object_name=object_name,
file_path=file_path,
)
elif isinstance(self._client, BlobServiceClient):
logger.info(f"Target bucket: '{self._connect_param._container_name}'")
container_client = self._client.get_container_client(
self._connect_param._container_name
)
with Path(file_path).open("rb") as data:
container_client.upload_blob(
name=object_name,
data=data,
overwrite=True,
max_concurrency=self._connect_param._upload_concurrency,
connection_timeout=600,
)
else:
raise MilvusException(message="Blob storage client is not initialized")
logger.info(f"Upload file '{file_path}' to '{object_name}'")
def append_row(self, row: dict, **kwargs):
super().append_row(row, **kwargs)
def commit(self, **kwargs):
super().commit(call_back=self._upload)
def _local_rm(self, file: str):
try:
Path(file).unlink()
parent_dir = Path(file).parent
if parent_dir != self._local_path and (not any(Path(parent_dir).iterdir())):
Path(parent_dir).rmdir()
logger.info(f"Delete empty directory '{parent_dir}'")
except Exception:
logger.warning(f"Failed to delete local file: {file}")
def _upload(self, file_list: list):
remote_files = []
try:
if not self._bucket_exists():
self._throw("Blob storage bucket/container doesn't exist")
for file_path in file_list:
ext = Path(file_path).suffix
if ext not in [".json", ".npy", ".parquet", ".csv"]:
continue
relative_file_path = str(file_path).replace(str(super().data_path), "")
minio_file_path = str(
Path.joinpath(self._remote_path, relative_file_path.lstrip("/"))
).lstrip("/")
if self._object_exists(minio_file_path):
logger.info(
f"Remote file '{minio_file_path}' already exists, will overwrite it"
)
self._upload_object(object_name=minio_file_path, file_path=file_path)
remote_files.append(str(minio_file_path))
self._local_rm(file_path)
except Exception as e:
self._throw(f"Failed to upload files, error: {e}")
logger.info(f"Successfully upload files: {file_list}")
self._remote_files.append(remote_files)
return remote_files
@property
def data_path(self):
return self._remote_path
@property
def batch_files(self):
return self._remote_files
| RemoteBulkWriter |
python | joke2k__faker | faker/providers/job/de_DE/__init__.py | {
"start": 42,
"end": 952
} | class ____(BaseProvider):
"""
Source: http://planet-beruf.de/schuelerinnen/mein-beruf/berufe-von-a-z/
"""
jobs = [
"Altenpfleger",
"Asphaltbauer",
"Artist",
"Augenoptiker",
"Ausbaufacharbeiter",
"Bäcker",
"Bankkaufmann",
"Beamter",
"Binnenschiffer",
"Chemikant",
"Chirurgiemechaniker",
"Designer",
"Drogist",
"Erzieher",
"Elektroniker",
"Fachinformatiker",
"Fleischer",
"Florist",
"Forstwirt",
"Friseur",
"Informatiker",
"Programmierer",
"Techniker",
"Mathematiker",
"Koreanistiker",
"Mechaniker",
"Pfleger",
"Polizist",
"Pilot",
"Arzt",
"Krankenschwester",
"Medizininformatiker",
"Schornsteinfeger",
"Winzer",
]
| Provider |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 77300,
"end": 78256
} | class ____(BaseField):
"""A list storing a longitude and latitude coordinate.
.. note:: this represents a generic point in a 2D plane and a legacy way of
representing a geo point. It admits 2d indexes but not "2dsphere" indexes
in MongoDB > 2.4 which are more natural for modeling geospatial points.
See :ref:`geospatial-indexes`
"""
_geo_index = pymongo.GEO2D
def validate(self, value):
"""Make sure that a geo-value is of type (x, y)"""
if not isinstance(value, (list, tuple)):
self.error("GeoPointField can only accept tuples or lists of (x, y)")
if not len(value) == 2:
self.error("Value (%s) must be a two-dimensional point" % repr(value))
elif not isinstance(value[0], (float, int)) or not isinstance(
value[1], (float, int)
):
self.error("Both values (%s) in point must be float or int" % repr(value))
| GeoPointField |
python | google__jax | jax/experimental/pallas/ops/gpu/hopper_matmul_mgpu.py | {
"start": 1034,
"end": 1212
} | class ____(enum.IntEnum):
M = 0
N = 1
def __str__(self):
return self.name
def __repr__(self):
return self.name
@dataclasses.dataclass(frozen=True)
| MatmulDimension |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 5193,
"end": 5298
} | class ____:
def f(self):
return self.g()
@abstractmethod
def g(self):
pass
| A13 |
python | docker__docker-py | docker/utils/proxy.py | {
"start": 40,
"end": 2246
} | class ____(dict):
'''
Hold the client's proxy configuration
'''
@property
def http(self):
return self.get('http')
@property
def https(self):
return self.get('https')
@property
def ftp(self):
return self.get('ftp')
@property
def no_proxy(self):
return self.get('no_proxy')
@staticmethod
def from_dict(config):
'''
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
'''
return ProxyConfig(
http=config.get('httpProxy'),
https=config.get('httpsProxy'),
ftp=config.get('ftpProxy'),
no_proxy=config.get('noProxy'),
)
def get_environment(self):
'''
Return a dictionary representing the environment variables used to
set the proxy settings.
'''
env = {}
if self.http:
env['http_proxy'] = env['HTTP_PROXY'] = self.http
if self.https:
env['https_proxy'] = env['HTTPS_PROXY'] = self.https
if self.ftp:
env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
if self.no_proxy:
env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
return env
def inject_proxy_environment(self, environment):
'''
Given a list of strings representing environment variables, prepend the
environment variables corresponding to the proxy settings.
'''
if not self:
return environment
proxy_env = format_environment(self.get_environment())
if not environment:
return proxy_env
# It is important to prepend our variables, because we want the
# variables defined in "environment" to take precedence.
return proxy_env + environment
def __str__(self):
return (
'ProxyConfig('
f'http={self.http}, https={self.https}, '
f'ftp={self.ftp}, no_proxy={self.no_proxy}'
')'
)
| ProxyConfig |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.